diff --git a/vendor/github.com/elastic/beats/.appveyor.yml b/vendor/github.com/elastic/beats/.appveyor.yml deleted file mode 100644 index c2ad65f9..00000000 --- a/vendor/github.com/elastic/beats/.appveyor.yml +++ /dev/null @@ -1,111 +0,0 @@ -# Version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Environment variables -environment: - GOPATH: c:\gopath - GVM_DL: https://github.com/andrewkroh/gvm/releases/download/v0.0.1/gvm-windows-amd64.exe - PYWIN_DL: https://beats-files.s3.amazonaws.com/deps/pywin32-220.win32-py2.7.exe - matrix: - - PROJ: github.com\elastic\beats\metricbeat - BEAT: metricbeat - - PROJ: github.com\elastic\beats\filebeat - BEAT: filebeat - - PROJ: github.com\elastic\beats\winlogbeat - BEAT: winlogbeat - -# Custom clone folder (variables are not expanded here). -clone_folder: c:\gopath\src\github.com\elastic\beats - -# Cache files until appveyor.yml is modified. -cache: -- C:\ProgramData\chocolatey\bin -> .appveyor.yml -- C:\ProgramData\chocolatey\lib -> .appveyor.yml -- C:\Users\appveyor\.gvm -> .go-version -- C:\Windows\System32\gvm.exe -> .appveyor.yml -- C:\tools\mingw64 -> .appveyor.yml -- C:\pywin_inst.exe -> .appveyor.yml - -# Scripts that run after cloning repository -install: - - ps: >- - if(!(Test-Path "C:\Windows\System32\gvm.exe")) { - wget "$env:GVM_DL" -Outfile C:\Windows\System32\gvm.exe - } - - ps: gvm --format=powershell $(Get-Content .go-version) | Invoke-Expression - # AppVeyor installed mingw is 32-bit only so install 64-bit version. - - ps: >- - if(!(Test-Path "C:\tools\mingw64\bin\gcc.exe")) { - cinst mingw > mingw-install.txt - Push-AppveyorArtifact mingw-install.txt - } - - set PATH=C:\tools\mingw64\bin;%PATH% - - set PATH=%GOPATH%\bin;%PATH% - - go install github.com/elastic/beats/vendor/github.com/pierrre/gotestcover - - go version - - go env - # Download the PyWin32 installer if it is not cached. - - ps: >- - if(!(Test-Path "C:\pywin_inst.exe")) { - (new-object net.webclient).DownloadFile("$env:PYWIN_DL", 'C:/pywin_inst.exe') - } - - set PYTHONPATH=C:\Python27 - - set PATH=%PYTHONPATH%;%PYTHONPATH%\Scripts;%PATH% - - python --version - - pip install six jinja2 nose nose-timer PyYAML redis elasticsearch - - easy_install C:/pywin_inst.exe - -# To run your custom scripts instead of automatic MSBuild -build_script: - # Compile - - appveyor AddCompilationMessage "Starting Compile" - - ps: cd $env:BEAT - - go build - - appveyor AddCompilationMessage "Compile Success" -FileName "%BEAT%.exe" - -# To run your custom scripts instead of automatic tests -test_script: - # Unit tests - - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - - mkdir build\coverage - - gotestcover -race -coverprofile=build/coverage/integration.cov github.com/elastic/beats/%BEAT%/... - - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - # System tests - - ps: Add-AppveyorTest "System tests" -Outcome Running - - go test -race -c -cover -covermode=atomic -coverpkg ./... 
- - ps: | - if ($env:BEAT -eq "metricbeat") { - cp .\_meta\fields.common.yml .\_meta\fields.generated.yml - python .\scripts\fields_collector.py | out-file -append -encoding UTF8 -filepath .\_meta\fields.generated.yml - } - - ps: cd tests/system - - nosetests --with-timer - - ps: Update-AppveyorTest "System tests" -Outcome Passed - -after_test: - - ps: cd $env:GOPATH\src\$env:PROJ - - python ..\dev-tools\aggregate_coverage.py -o build\coverage\system.cov .\build\system-tests\run - - python ..\dev-tools\aggregate_coverage.py -o build\coverage\full.cov .\build\coverage - - go tool cover -html=build\coverage\full.cov -o build\coverage\full.html - - ps: Push-AppveyorArtifact build\coverage\full.cov - - ps: Push-AppveyorArtifact build\coverage\full.html - # Upload coverage report. - - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%" - - pip install codecov - - ps: cd $env:GOPATH\src\github.com\elastic\beats - - codecov -X gcov -f "%BEAT%\build\coverage\full.cov" - -# Executes for both successful and failed builds -on_finish: - - ps: cd $env:GOPATH\src\$env:PROJ - - 7z a -r system-tests-output.zip build\system-tests\run - - ps: Push-AppveyorArtifact system-tests-output.zip - -# To disable deployment -deploy: off - -# Notifications should only be setup using the AppVeyor UI so that -# forks can be created without inheriting the settings. diff --git a/vendor/github.com/elastic/beats/.gitignore b/vendor/github.com/elastic/beats/.gitignore index fa69db58..56e3cccf 100644 --- a/vendor/github.com/elastic/beats/.gitignore +++ b/vendor/github.com/elastic/beats/.gitignore @@ -7,6 +7,7 @@ /*/logs /*/fields.yml /*/*.template*.json +**/html_docs # Files .DS_Store diff --git a/vendor/github.com/elastic/beats/.go-version b/vendor/github.com/elastic/beats/.go-version index 8fdcf386..d615fd0c 100644 --- a/vendor/github.com/elastic/beats/.go-version +++ b/vendor/github.com/elastic/beats/.go-version @@ -1 +1 @@ -1.9.2 +1.9.4 diff --git a/vendor/github.com/elastic/beats/.travis.yml b/vendor/github.com/elastic/beats/.travis.yml index d79cd3f4..8328ae78 100644 --- a/vendor/github.com/elastic/beats/.travis.yml +++ b/vendor/github.com/elastic/beats/.travis.yml @@ -14,7 +14,7 @@ env: - GOX_FLAGS="-arch amd64" - DOCKER_COMPOSE_VERSION=1.11.1 - GO_VERSION="$(cat .go-version)" - - TRAVIS_ETCD_VERSION=v3.2.8 + - TRAVIS_MINIKUBE_VERSION=v0.25.2 jobs: include: @@ -50,7 +50,7 @@ jobs: go: $GO_VERSION stage: test - os: osx - env: TARGETS="-C auditbeat testsuite" + env: TARGETS="TEST_ENVIRONMENT=0 -C auditbeat testsuite" go: $GO_VERSION stage: test - os: linux @@ -68,7 +68,7 @@ jobs: go: $GO_VERSION stage: test - os: linux - env: TARGETS="-C libbeat stress-tests" + env: STRESS_TEST_OPTIONS="-timeout=20m -race -v -parallel 1" TARGETS="-C libbeat stress-tests" go: $GO_VERSION stage: test @@ -113,19 +113,31 @@ jobs: install: deploy/kubernetes/.travis/setup.sh env: - TARGETS="-C deploy/kubernetes test" - - TRAVIS_KUBE_VERSION=v1.6.11 + - TRAVIS_K8S_VERSION=v1.6.4 + stage: test + - os: linux + install: deploy/kubernetes/.travis/setup.sh + env: + - TARGETS="-C deploy/kubernetes test" + - TRAVIS_K8S_VERSION=v1.7.5 + stage: test + - os: linux + install: deploy/kubernetes/.travis/setup.sh + env: + - TARGETS="-C deploy/kubernetes test" + - TRAVIS_K8S_VERSION=v1.8.0 stage: test - os: linux install: deploy/kubernetes/.travis/setup.sh env: - TARGETS="-C deploy/kubernetes test" - - TRAVIS_KUBE_VERSION=v1.7.7 + - TRAVIS_K8S_VERSION=v1.9.4 stage: test - os: linux install: deploy/kubernetes/.travis/setup.sh env: - TARGETS="-C 
deploy/kubernetes test" - - TRAVIS_KUBE_VERSION=v1.8.0 + - TRAVIS_K8S_VERSION=v1.10.0 stage: test addons: diff --git a/vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc new file mode 100644 index 00000000..8a670158 --- /dev/null +++ b/vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc @@ -0,0 +1,29 @@ +// Use these for links to issue and pulls. Note issues and pulls redirect one to +// each other on Github, so don't worry too much on using the right prefix. +:issue: https://github.com/elastic/beats/issues/ +:pull: https://github.com/elastic/beats/pull/ + +This changelog is intended for community Beat developers. It covers the major +breaking changes to the internal APIs in the official Beats and changes related +to developing a Beat like code generators or `fields.yml`. Only the major +changes will be covered in this changelog that are expected to affect community +developers. Each breaking change added here should have an explanation on how +other Beats should be migrated. + +Note: This changelog was only started after the 6.3 release. + +=== Beats version HEAD +https://github.com/elastic/beats/compare/v6.3.0..master[Check the HEAD diff] + +The list below covers the major changes between 6.3.0 and master only. + +==== Breaking changes + +- The beat.Pipeline is now passed to cfgfile.RunnerFactory. Beats using libbeat for module reloading or autodiscovery need to be adapted. {pull}7018[7017] +- Moving of TLS helper functions and structs from `output/tls` to `tlscommon`. {pull}7054[7054] + +==== Bugfixes + +- Fix permissions of generated Filebeat filesets. {pull}7140[7140] + +==== Added diff --git a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc index 1a217be3..4db21a60 100644 --- a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc +++ b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc @@ -1,4 +1,3 @@ - // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. :issue: https://github.com/elastic/beats/issues/ @@ -8,7 +7,7 @@ // Template, add newest changes here === Beats version HEAD -https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff] +https://github.com/elastic/beats/compare/v6.3.2...6.3[Check the HEAD diff] ==== Breaking changes @@ -32,8 +31,6 @@ https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff] *Auditbeat* -- Add hex decoding for the name field in audit path records. {pull}6687[6687] - *Filebeat* *Heartbeat* @@ -42,8 +39,6 @@ https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff] *Packetbeat* -- HTTP parses successfully on empty status phrase. {issue}6176[6176] - *Winlogbeat* ==== Added @@ -81,6 +76,292 @@ https://github.com/elastic/beats/compare/v6.2.2...6.2[Check the HEAD diff] //////////////////////////////////////////////////////////// +[[release-notes-6.3.2]] +=== Beats version 6.3.2 +https://github.com/elastic/beats/compare/v6.3.1...v6.3.2[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Fix registry duplicates and log resending on upgrade. {issue}7634[7634] +- Fix default value for logging.files.keepfiles. It was being set to 0 and now + it's set to the documented value of 7. {issue}7494[7494] +- Retain compatibility with older Docker server versions. {issue}7542[7542] + +*Metricbeat* + +- Fix missing hosts config option in Ceph module. 
{pull}7596[7596] +- Ensure metadata updates don't replace existing pod metrics. {pull}7573[7573] + +==== Added + +*Metricbeat* + +- Add support for bearer token files to HTTP helper. {pull}7527[7527] + +*Packetbeat* + +- Updated the TLS protocol parser with new cipher suites added to TLS 1.3. {issue}7455[7455] + + +[[release-notes-6.3.1]] +=== Beats version 6.3.1 +https://github.com/elastic/beats/compare/v6.3.0...v6.3.1[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Allow index-pattern only setup when setup.dashboards.only_index=true. {pull}7285[7285] +- Preserve the event when source matching fails in `add_docker_metadata`. {pull}7133[7133] +- Negotiate Docker API version from our client instead of using a hardcoded one. {pull}7165[7165] +- Fix duplicating dynamic_fields in template when overwriting the template. {pull}7352[7352] + +*Auditbeat* + +- Fixed parsing of AppArmor audit messages. {pull}6978[6978] + +*Filebeat* + +- Comply with PostgreSQL database name format {pull}7198[7198] +- Optimize PostgreSQL ingest pipeline to use anchored regexp and merge multiple regexp into a single expression. {pull}7269[7269] +- Keep different registry entry per container stream to avoid wrong offsets. {issue}7281[7281] +- Fix offset field pointing at end of a line. {issue}6514[6514] +- Commit registry writes to stable storage to avoid corrupt registry files. {issue}6792[6792] + +*Metricbeat* + +- Fix field mapping for the system process CPU ticks fields. {pull}7230[7230] +- Ensure canonical naming for JMX beans is disabled in Jolokia module. {pull}7047[7047] +- Fix Jolokia attribute mapping when using wildcards and MBean names with multiple properties. {pull}7321[7321] + +*Packetbeat* + +- Fix an out of bounds access in HTTP parser caused by malformed request. {pull}6997[6997] +- Fix missing type for `http.response.body` field. {pull}7169[7169] + +==== Added + +*Auditbeat* + +- Added caching of UID and GID values to auditd module. {pull}6978[6978] +- Updated syscall tables for Linux 4.16. {pull}6978[6978] +- Added better error messages for when the auditd module fails due to the + Linux kernel not supporting auditing (CONFIG_AUDIT=n). {pull}7012[7012] + +*Metricbeat* + +- Collect accumulated docker network metrics and mark old ones as deprecated. {pull}7253[7253] + + + +[[release-notes-6.3.0]] +=== Beats version 6.3.0 +https://github.com/elastic/beats/compare/v6.2.3...v6.3.0[View commits] + +==== Breaking changes + +*Affecting all Beats* + +- De dot keys of labels and annotations in kubernetes meta processors to prevent collisions. {pull}6203[6203] +- Rename `beat.cpu.*.time metrics` to `beat.cpu.*.time.ms`. {pull}6449[6449] +- Add `host.name` field to all events, to avoid mapping conflicts. This could be breaking Logstash configs if you rely on the `host` field being a string. {pull}7051[7051] + +*Filebeat* + +- Add validation for Stdin, when Filebeat is configured with Stdin and any other inputs, Filebeat + will now refuse to start. {pull}6463[6463] +- Mark `system.syslog.message` and `system.auth.message` as `text` instead of `keyword`. {pull}6589[6589] + +*Metricbeat* + +- De dot keys in kubernetes/event metricset to prevent collisions. {pull}6203[6203] +- Add config option for windows/perfmon metricset to ignore non existent counters. {pull}6432[6432] +- Refactor docker CPU calculations to be more consistent with `docker stats`. {pull}6608[6608] +- Update logstash.node_stats metricset to write data under `logstash.node.stats.*`. 
{pull}6714[6714] + +==== Bugfixes + +*Affecting all Beats* + +- Fix panic when Events containing a float32 value are normalized. {pull}6129[6129] +- Fix `setup.dashboards.always_kibana` when using Kibana 5.6. {issue}6090[6090] +- Fix for Kafka logger. {pull}6430[6430] +- Remove double slashes in Windows service script. {pull}6491[6491] +- Ensure Kubernetes labels/annotations don't break mapping {pull}6490[6490] +- Ensure that the dashboard zip files can't contain files outside of the kibana directory. {pull}6921[6921] +- Fix map overwrite panics by cloning shared structs before doing the update. {pull}6947[6947] +- Fix delays on autodiscovery events handling caused by blocking runner stops. {pull}7170[7170] +- Do not emit Kubernetes autodiscover events for Pods without IP address. {pull}7235[7235] + +*Auditbeat* + +- Add hex decoding for the name field in audit path records. {pull}6687[6687] +- Fixed a deadlock in the file_integrity module under Windows. {issue}6864[6864] + +*Filebeat* + +- Fix panic when log prospector configuration fails to load. {issue}6800[6800] +- Fix memory leak in log prospector when files cannot be read. {issue}6797[6797] +- Add raw JSON to message field when JSON parsing fails. {issue}6516[6516] +- Commit registry writes to stable storage to avoid corrupt registry files. {pull}6877[6877] +- Fix a parsing issue in the syslog input for RFC3339 timestamp and time with nanoseconds. {pull}7046[7046] +- Fix an issue with an overflowing wait group when using the TCP input. {issue}7202[7202] +- Fix an issue when parsing ISO8601 dates with timezone definition {issue}7367[7367] + +*Heartbeat* + +- Fix race due to updates of shared a map, that was not supposed to be shared between multiple go-routines. {issue}6616[6616] + +*Metricbeat* + +- Fix the default configuration for Logstash to include the default port. {pull}6279[6279] +- Fix dealing with new process status codes in Linux kernel 4.14+. {pull}6306[6306] +- Add filtering option by exact device names in system.diskio. `diskio.include_devices`. {pull}6085[6085] +- Add connections metricset to RabbitMQ module {pull}6548[6548] +- Fix panic in http dependent modules when invalid config was used. {pull}6205[6205] +- Fix system.filesystem.used.pct value to match what df reports. {issue}5494[5494] +- Fix namespace disambiguation in Kubernetes state_* metricsets. {issue}6281[6281] +- Fix Windows perfmon metricset so that it sends metrics when an error occurs. {pull}6542[6542] +- Fix Kubernetes calculated fields store. {pull}6564{6564} +- Exclude bind mounts in fsstat and filesystem metricsets. {pull}6819[6819] +- Don't stop Metricbeat if aerospike server is down. {pull}6874[6874] +- disk reads and write count metrics in RabbitMQ queue metricset made optional. {issue}6876[6876] +- Add mapping for docker metrics per cpu. {pull}6843[6843] + +*Winlogbeat* + +- Fixed a crash under Windows 2003 and XP when an event had less insert strings than required by its format string. {pull}6247[6247] + +==== Added + +*Affecting all Beats* + +- Update Golang 1.9.4 {pull}6326[6326] +- Add the ability to log to the Windows Event Log. {pull}5913[5913] +- The node name can be discovered automatically by machine-id matching when beat deployed outside Kubernetes cluster. {pull}6146[6146] +- Panics will be written to the logger before exiting. 
{pull}6199[6199] +- Add builder support for autodiscover and annotations builder {pull}6408[6408] +- Add plugin support for autodiscover builders, providers {pull}6457[6457] +- Preserve runtime from container statuses in Kubernetes autodiscover {pull}6456[6456] +- Experimental feature setup.template.append_fields added. {pull}6024[6024] +- Add appender support to autodiscover {pull}6469[6469] +- Add add_host_metadata processor {pull}5968[5968] +- Retry configuration to load dashboards if Kibana is not reachable when the beat starts. {pull}6560[6560] +- Add `has_fields` conditional to filter events based on the existence of all the given fields. {issue}6285[6285] {pull}6653[6653] +- Add support for spooling to disk to the beats event publishing pipeline. {pull}6581[6581] +- Added logging of system info at Beat startup. {issue}5946[5946] +- Do not log errors if X-Pack Monitoring is enabled but Elastisearch X-Pack is not. {pull}6627[6627] +- Add rename processor. {pull}6292[6292] +- Allow override of dynamic template `match_mapping_type` for fields with object_type. {pull}6691[6691] + +*Filebeat* + +- Add IIS module to parse access log and error log. {pull}6127[6127] +- Renaming of the prospector type to the input type and all prospectors are now moved to the input + folder, to maintain backward compatibility type aliasing was used to map the old type to the new + one. This change also affect YAML configuration. {pull}6078[6078] +- Addition of the TCP input {pull}6700[6700] +- Add option to convert the timestamps to UTC in the system module. {pull}5647[5647] +- Add Logstash module support for main log and the slow log, support the plain text or structured JSON format {pull}5481[5481] +- Add stream filtering when using `docker` prospector. {pull}6057[6057] +- Add support for CRI logs format. {issue}5630[5630] +- Add json.ignore_decoding_error config to not log json decoding erors. {issue}6547[6547] +- Make registry file permission configurable. {pull}6455[6455] +- Add MongoDB module. {pull}6283[6238] +- Add Ingest pipeline loading to setup. {pull}6814[6814] +- Add support of log_format combined to NGINX access logs. {pull}6858[6858] +- Release config reloading feature as GA. +- Add support human friendly size for the UDP input. {pull}6886[6886] +- Add Syslog input to ingest RFC3164 Events via TCP and UDP {pull}6842[6842] +- Remove the undefined `username` option from the Redis input and clarify the documentation. {pull}6662[6662] + +*Heartbeat* + +- Made the URL field of Heartbeat aggregateable. {pull}6263[6263] +- Use `match.Matcher` for checking Heartbeat response bodies with regular expressions. {pull}6539[6539] + +*Metricbeat* + +- Support apache status pages for versions older than 2.4.16. {pull}6450[6450] +- Add support for huge pages on Linux. {pull}6436[6436] +- Support to optionally 'de dot' keys in http/json metricset to prevent collisions. {pull}5970[5970] +- Add graphite protocol metricbeat module. {pull}4734[4734] +- Add http server metricset to support push metrics via http. {pull}4770[4770] +- Make config object public for graphite and http server {pull}4820[4820] +- Add system uptime metricset. {issue}4848[4848] +- Add experimental `queue` metricset to RabbitMQ module. {pull}4788[4788] +- Add additional php-fpm pool status kpis for Metricbeat module {pull}5287[5287] +- Add etcd module. {issue}4970[4970] +- Add ip address of docker containers to event. 
{pull}5379[5379] +- Add ceph osd tree information to metricbeat {pull}5498[5498] +- Add ceph osd_df to metricbeat {pull}5606[5606] +- Add basic Logstash module. {pull}5540[5540] +- Add dashboard for Windows service metricset. {pull}5603[5603] +- Add pct calculated fields for Pod and container CPU and memory usages. {pull}6158[6158] +- Add statefulset support to Kubernetes module. {pull}6236[6236] +- Refactor prometheus endpoint parsing to look similar to upstream prometheus {pull}6332[6332] +- Making the http/json metricset GA. {pull}6471[6471] +- Add support for array in http/json metricset. {pull}6480[6480] +- Making the jolokia/jmx module GA. {pull}6143[6143] +- Making the MongoDB module GA. {pull}6554[6554] +- Allow to disable labels `dedot` in Docker module, in favor of a safe way to keep dots. {pull}6490[6490] +- Add experimental module to collect metrics from munin nodes. {pull}6517[6517] +- Add support for wildcards and explicit metrics grouping in jolokia/jmx. {pull}6462[6462] +- Set `collector` as default metricset in Prometheus module. {pull}6636[6636] {pull}6747[6747] +- Set `mntr` as default metricset in Zookeeper module. {pull}6674[6674] +- Set default metricsets in vSphere module. {pull}6676[6676] +- Set `status` as default metricset in Apache module. {pull}6673[6673] +- Set `namespace` as default metricset in Aerospike module. {pull}6669[6669] +- Set `service` as default metricset in Windows module. {pull}6675[6675] +- Set all metricsets as default metricsets in uwsgi module. {pull}6688[6688] +- Allow autodiscover to monitor unexposed ports {pull}6727[6727] +- Mark kubernetes.event metricset as beta. {pull}6715[6715] +- Set all metricsets as default metricsets in couchbase module. {pull}6683[6683] +- Mark uwsgi module and metricset as beta. {pull}6717[6717] +- Mark Golang module and metricsets as beta. {pull}6711[6711] +- Mark system.raid metricset as beta. {pull}6710[6710] +- Mark http.server metricset as beta. {pull}6712[6712] +- Mark metricbeat logstash module and metricsets as beta. {pull}6713[6713] +- Set all metricsets as default metricsets in Ceph module. {pull}6676[6676] +- Set `container`, `cpu`, `diskio`, `healthcheck`, `info`, `memory` and `network` in docker module as default. {pull}6718[6718] +- Set `cpu`, `load`, `memory`, `network`, `process` and `process_summary` as default metricsets in system module. {pull}6689[6689] +- Set `collector` as default metricset in Dropwizard module. {pull}6669[6669] +- Set `info` and `keyspace` as default metricsets in redis module. {pull}6742[6742] +- Set `connection` as default metricset in rabbitmq module. {pull}6743[6743] +- Set all metricsets as default metricsets in Elasticsearch module. {pull}6755[6755] +- Set all metricsets as default metricsets in Etcd module. {pull}6756[6756] +- Set server metricsets as default in Graphite module. {pull}6757[6757] +- Set all metricsets as default metricsets in HAProxy module. {pull}6758[6758] +- Set all metricsets as default metricsets in Kafka module. {pull}6759[6759] +- Set all metricsets as default metricsets in postgresql module. {pull}6761[6761] +- Set status metricsets as default in Kibana module. {pull}6762[6762] +- Set all metricsets as default metricsets in Logstash module. {pull}6763[6763] +- Set `container`, `node`, `pod`, `system`, `volume` as default in Kubernetes module. {pull} 6764[6764] +- Set `stats` as default in memcached module. {pull}6765[6765] +- Set all metricsets as default metricsets in Mongodb module. 
{pull}6766[6766] +- Set `pool` as default metricset for php_fpm module. {pull}6768[6768] +- Set `status` as default metricset for mysql module. {pull} 6769[6769] +- Set `stubstatus` as default metricset for nginx module. {pull}6770[6770] +- Added support for haproxy 1.7 and 1.8. {pull}6793[6793] +- Add accumulated I/O stats to diskio in the line of `docker stats`. {pull}6701[6701] +- Ignore virtual filesystem types by default in system module. {pull}6819[6819] +- Release config reloading feature as GA. {pull}6891[6891] +- Kubernetes deployment: Add ServiceAccount config to system metricbeat. {pull}6824[6824] +- Kubernetes deployment: Add DNS Policy to system metricbeat. {pull}6656[6656] + +*Packetbeat* + +- Add support for condition on bool type {issue}5659[5659] {pull}5954[5954] +- Fix high memory usage on HTTP body if body is not published. {pull}6680[6680] +- Allow to capture the HTTP request or response bodies independently. {pull}6784[6784] +- HTTP publishes an Error event for unmatched requests or responses. {pull}6794[6794] + +*Winlogbeat* + +- Use bookmarks to persist the last published event. {pull}6150[6150] + [[release-notes-6.2.3]] === Beats version 6.2.3 @@ -308,6 +589,10 @@ https://github.com/elastic/beats/compare/v6.0.1...v6.1.0[View commits] - Fix http parse to allow to parse get request with space in the URI. {pull}5495[5495] - Fix mysql SQL parser to trim `\r` from Windows Server `SELECT\r\n\t1`. {pull}5572[5572] - Fix corruption when parsing repeated headers in an HTTP request or response. {pull}6325[6325] +- Fix panic when parsing partial AMQP messages. {pull}6384[6384] +- Fix out of bounds access to slice in MongoDB parser. {pull}6256[6256] +- Fix sniffer hanging on exit under Linux. {pull}6535[6535] +- Fix bounds check error in http parser causing a panic. {pull}6750[6750] *Winlogbeat* @@ -419,6 +704,7 @@ The list below covers the changes between 6.0.0-rc2 and 6.0.0 GA only. *Filebeat* - Add Kubernetes manifests to deploy Filebeat. {pull}5349[5349] +- Add container short ID matching to add_docker_metadata. {pull}6172[6172] *Metricbeat* @@ -923,63 +1209,6 @@ https://github.com/elastic/beats/compare/v5.4.0...v6.0.0-alpha1[View commits] - Prospector reloading only works properly with new files. {pull}3546[3546] -[[release-notes-5.6.7]] -=== Beats version 5.6.7 -https://github.com/elastic/beats/compare/v5.6.6...v5.6.7[View commits] - -No changes in this release. - - -[[release-notes-5.6.6]] -=== Beats version 5.6.6 -https://github.com/elastic/beats/compare/v5.6.5...v5.6.6[View commits] - -No changes in this release. - - -[[release-notes-5.6.5]] -=== Beats version 5.6.5 -https://github.com/elastic/beats/compare/v5.6.4...v5.6.5[View commits] - -==== Bugfixes - -*Affecting all Beats* - -- Fix duplicate batches of events in retry queue. {pull}5520[5520] - -*Metricbeat* - -- Clarify meaning of percentages reported by system core metricset. {pull}5565[5565] -- Fix map overwrite in docker diskio module. {issue}5582[5582] - -[[release-notes-5.6.4]] -=== Beats version 5.6.4 -https://github.com/elastic/beats/compare/v5.6.3...v5.6.4[View commits] - -==== Bugfixes - -*Affecting all Beats* - -- Fix race condition in internal logging rotator. {pull}4519[4519] - -*Packetbeat* - -- Fix missing length check in the PostgreSQL module. {pull}5457[5457] - -==== Added - -*Affecting all Beats* - -- Add support for enabling TLS renegotiation. {issue}4386[4386] -- Add setting to enable/disable the slow start in logstash output. 
{pull}5400[5400] - -[[release-notes-5.6.3]] -=== Beats version 5.6.3 -https://github.com/elastic/beats/compare/v5.6.2...v5.6.3[View commits] - -No changes in this release. - - [[release-notes-5.6.2]] === Beats version 5.6.2 https://github.com/elastic/beats/compare/v5.6.1...v5.6.2[View commits] diff --git a/vendor/github.com/elastic/beats/LICENSE.txt b/vendor/github.com/elastic/beats/LICENSE.txt index edfa6086..e601d438 100644 --- a/vendor/github.com/elastic/beats/LICENSE.txt +++ b/vendor/github.com/elastic/beats/LICENSE.txt @@ -1,13 +1,13 @@ -Copyright (c) 2012–2017 Elastic +Source code in this repository is variously licensed under the Apache License +Version 2.0, an Apache compatible license, or the Elastic License. Outside of +the "x-pack" folder, source code in a given file is licensed under the Apache +License Version 2.0, unless otherwise noted at the beginning of the file or a +LICENSE file present in the directory subtree declares a separate license. +Within the "x-pack" folder, source code in a given file is licensed under the +Elastic License, unless otherwise noted at the beginning of the file or a +LICENSE file present in the directory subtree declares a separate license. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +The build produces two sets of binaries - one set that falls under the Elastic +License and another set that falls under Apache License Version 2.0. The +binaries that contain `-oss` in the artifact name are licensed under the Apache +License Version 2.0. diff --git a/vendor/github.com/elastic/beats/Makefile b/vendor/github.com/elastic/beats/Makefile index cf5d9b53..ea1f5beb 100644 --- a/vendor/github.com/elastic/beats/Makefile +++ b/vendor/github.com/elastic/beats/Makefile @@ -110,13 +110,13 @@ docs: @$(foreach var,$(PROJECTS),BUILD_DIR=${BUILD_DIR} $(MAKE) -C $(var) docs || exit 1;) sh ./script/build_docs.sh dev-guide github.com/elastic/beats/docs/devguide ${BUILD_DIR} -.PHONY: package -package: update beats-dashboards - @$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package || exit 1;) +.PHONY: package-all +package-all: update beats-dashboards + @$(foreach var,$(BEATS),SNAPSHOT=$(SNAPSHOT) $(MAKE) -C $(var) package-all || exit 1;) @echo "Start building the dashboards package" @mkdir -p build/upload/ - @BUILD_DIR=${BUILD_DIR} SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${BUILD_DIR}/upload/build_id.txt + @BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${BUILD_DIR}/upload SNAPSHOT=$(SNAPSHOT) $(MAKE) -C dev-tools/packer package-dashboards ${BUILD_DIR}/upload/build_id.txt @mv build/upload build/dashboards-upload @# Copy build files over to top build directory @@ -153,6 +153,8 @@ notice: python-env python-env: @test -d $(PYTHON_ENV) || virtualenv $(VIRTUALENV_PARAMS) $(PYTHON_ENV) @$(PYTHON_ENV)/bin/pip install -q --upgrade pip autopep8 six + @# Work around pip bug. 
See: https://github.com/pypa/pip/issues/4464 + @find $(PYTHON_ENV) -type d -name dist-packages -exec sh -c "echo dist-packages > {}.pth" ';' # Tests if apm works with the current code .PHONY: test-apm diff --git a/vendor/github.com/elastic/beats/NOTICE.txt b/vendor/github.com/elastic/beats/NOTICE.txt index 6b80ef2b..14b9f658 100644 --- a/vendor/github.com/elastic/beats/NOTICE.txt +++ b/vendor/github.com/elastic/beats/NOTICE.txt @@ -145,11 +145,34 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/davecgh/go-spew -Revision: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d +Version: v1.1.0 +Revision: 346938d642f2ec3594ed81d874461961cd0faa76 License type (autodetected): MIT ./vendor/github.com/davecgh/go-spew/LICENSE: -------------------------------------------------------------------- -Copyright (c) 2012-2013 Dave Collins +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-------------------------------------------------------------------- +Dependency: github.com/davecgh/go-xdr +Revision: e6a2ba005892b6a5b27cb5352f64c2e96942dd28 +License type (autodetected): MIT +./metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2012-2014 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -162,6 +185,14 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/digitalocean/go-libvirt +Revision: 59d541f19311883ad82708651353009fb207d8a9 +License type (autodetected): Apache-2.0 +./metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/LICENSE.md: +-------------------------------------------------------------------- +Apache License 2.0 + -------------------------------------------------------------------- Dependency: github.com/docker/distribution @@ -342,13 +373,19 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------- Dependency: github.com/elastic/go-libaudit -Version: v0.1.0 -Revision: 4a806edf821706e315ef7d4f3b5d0cac6d638b34 +Version: v0.2.1 +Revision: 55225d06b15c74082f9a7af75aa4284dbe48d20a License type (autodetected): Apache-2.0 -./vendor/github.com/elastic/go-libaudit/LICENSE: +./vendor/github.com/elastic/go-libaudit/LICENSE.txt: -------------------------------------------------------------------- Apache License 2.0 +-------NOTICE.txt----- +Elastic go-libaudit +Copyright 2017-2018 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). -------------------------------------------------------------------- Dependency: github.com/elastic/go-lumber @@ -359,6 +396,41 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/elastic/go-structform +Version: v0.0.3 +Revision: 0a66add879601f69f55663f4c913c72988218982 +License type (autodetected): Apache-2.0 +./vendor/github.com/elastic/go-structform/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/elastic/go-sysinfo +Revision: fda017eee28b7420d931e08c7361c1c17f516aa2 +License type (autodetected): Apache-2.0 +./vendor/github.com/elastic/go-sysinfo/LICENSE.txt: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE.txt----- +Elastic go-sysinfo +Copyright 2017-2018 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). + +-------------------------------------------------------------------- +Dependency: github.com/elastic/go-txfile +Version: v0.0.1 +Revision: 7e7e33cc236f30fff545f3ee2c35ada5b70b6b13 +License type (autodetected): Apache-2.0 +./vendor/github.com/elastic/go-txfile/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/elastic/go-ucfg Version: v0.5.1 @@ -369,10 +441,25 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/elastic/go-windows +Revision: a730c8b4e08aef7e1ebb642928bf862996ad2383 +License type (autodetected): Apache-2.0 +./vendor/github.com/elastic/go-windows/LICENSE.txt: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE.txt----- +Elastic go-windows +Copyright 2017-2018 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). + -------------------------------------------------------------------- Dependency: github.com/elastic/gosigar -Version: v0.8.0 -Revision: 16df19fe5efee4ea2938bde5f56c02d9929dc054 +Version: v0.9.0 +Revision: 237dff72b4ba95da2cd985f96a9c0ede4aefc760 License type (autodetected): Apache-2.0 ./vendor/github.com/elastic/gosigar/LICENSE: -------------------------------------------------------------------- @@ -388,26 +475,10 @@ This product includes a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file. 
--------------------------------------------------------------------- -Dependency: github.com/elastic/procfs -Revision: 664e6bc79eb43c956507b6e20a867140516ad15a -License type (autodetected): Apache-2.0 -./vendor/github.com/elastic/procfs/LICENSE: --------------------------------------------------------------------- -Apache License 2.0 - --------NOTICE----- -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - -------------------------------------------------------------------- Dependency: github.com/ericchiang/k8s -Revision: 5803ed75e31fc1998b5f781ac08e22ff985c3f8f +Version: v1.0.0 +Revision: 5912993f00cb7c971aaa54529a06bd3eecd6c3d4 License type (autodetected): Apache-2.0 ./vendor/github.com/ericchiang/k8s/LICENSE: -------------------------------------------------------------------- @@ -444,7 +515,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/fsnotify/fsevents -Revision: 690cb784149d5facd7fe613c52757445c43afcde +Revision: 70114c7d2e1e4d1ae5179b285d65ea21aae111cc License type (autodetected): BSD-3-Clause ./vendor/github.com/fsnotify/fsevents/LICENSE: -------------------------------------------------------------------- @@ -478,7 +549,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: github.com/fsnotify/fsnotify -Revision: 4da3e2cfbabc9f751898f250b49f2439785783a1 +Revision: c9bbe1f46f1da9904baf3916a4ba4aec7f1e9000 License type (autodetected): BSD-3-Clause ./vendor/github.com/fsnotify/fsnotify/LICENSE: -------------------------------------------------------------------- @@ -511,36 +582,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------- -Dependency: github.com/fsouza/go-dockerclient -Version: beats-branch -Revision: ba365ff5e4281feb28654e4ca599a1defd063497 -License type (autodetected): BSD-2-Clause -./metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/LICENSE: --------------------------------------------------------------------- -Copyright (c) 2013-2017, go-dockerclient authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------- Dependency: github.com/garyburd/redigo Revision: b8dc90050f24c1a73a52f107f3f575be67b21b7c @@ -607,6 +648,20 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/go-ole/go-ole +Revision: de8695c8edbf8236f30d6e1376e20b198a028d42 +License type (autodetected): MIT +./vendor/github.com/go-ole/go-ole/LICENSE: +-------------------------------------------------------------------- +Copyright © 2013-2018 Yasuhiro Matsumoto, http://mattn.kaoriya.net + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + -------------------------------------------------------------------- Dependency: github.com/go-sql-driver/mysql Revision: 9dee4ca50b83acdf57a35fb9e6fb4be640afa2f3 @@ -1054,7 +1109,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: github.com/golang/protobuf -Revision: 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8 +Revision: bbd03ef6da3a115852eaf24c8a1c46aeb39aa175 License type (autodetected): BSD-3-Clause ./metricbeat/vendor/github.com/golang/protobuf/LICENSE: -------------------------------------------------------------------- @@ -1168,373 +1223,12 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------- -Dependency: github.com/hashicorp/go-cleanhttp -Revision: 3573b8b52aa7b37b9358d966a898feb387f62437 -License type (autodetected): MPL-2.0 -./metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/LICENSE: +Dependency: github.com/gorhill/cronexpr +Revision: d520615e531a6bf3fb69406b9eba718261285ec8 +License type (autodetected): Apache-2.0 +./vendor/github.com/gorhill/cronexpr/APLv2: -------------------------------------------------------------------- -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. 
such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. +Apache License 2.0 -------------------------------------------------------------------- @@ -1869,866 +1563,11 @@ THE SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/opencontainers/go-digest Revision: eaa60544f31ccf3b0653b1a118b76d33418ff41b -License type (autodetected): CC-BY-SA-4.0 -./vendor/github.com/opencontainers/go-digest/LICENSE.docs: --------------------------------------------------------------------- -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. 
Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. 
For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. 
For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. 
a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. 
-Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. - +License type (autodetected): Apache-2.0 +./vendor/github.com/opencontainers/go-digest/LICENSE.code: -------------------------------------------------------------------- -Dependency: github.com/opencontainers/go-digest -Revision: eaa60544f31ccf3b0653b1a118b76d33418ff41b -License type (autodetected): CC-BY-SA-4.0 -./vendor/github.com/opencontainers/go-digest/LICENSE.docs: --------------------------------------------------------------------- -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. 
If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. 
Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. 
You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. 
You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. 
For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. +Apache License 2.0 -Creative Commons may be contacted at creativecommons.org. -------------------------------------------------------------------- Dependency: github.com/opencontainers/image-spec @@ -2879,9 +1718,44 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-------------------------------------------------------------------- +Dependency: github.com/pmezard/go-difflib +Version: v1.0.0 +Revision: 792786c7400a136282c1664665ae0a8db921c6c2 +License type (autodetected): BSD-2-Clause +./vendor/github.com/pmezard/go-difflib/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------- Dependency: github.com/prometheus/client_model -Revision: 6f3806018612930941127f2a7c6c453ba2c527d2 +Revision: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c License type (autodetected): Apache-2.0 ./metricbeat/vendor/github.com/prometheus/client_model/LICENSE: -------------------------------------------------------------------- @@ -2896,7 +1770,7 @@ SoundCloud Ltd. (http://soundcloud.com/). -------------------------------------------------------------------- Dependency: github.com/prometheus/common -Revision: 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207 +Revision: 89604d197083d4781071d3c65855d24ecfb0a563 License type (autodetected): Apache-2.0 ./metricbeat/vendor/github.com/prometheus/common/LICENSE: -------------------------------------------------------------------- @@ -2909,6 +1783,23 @@ Copyright 2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). +-------------------------------------------------------------------- +Dependency: github.com/prometheus/procfs +Revision: 54d17b57dd7d4a3aa092476596b3f8a933bde349 +License type (autodetected): Apache-2.0 +./vendor/github.com/prometheus/procfs/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). 
+ -------------------------------------------------------------------- Dependency: github.com/rcrowley/go-metrics Revision: 1f30fe9094a513ce4c700b9a54458bbb0c96996c @@ -2945,6 +1836,38 @@ The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Richard Crowley. +-------------------------------------------------------------------- +Dependency: github.com/samuel/go-parser +Revision: ca8abbf65d0e61dedf061f98bd3850f250e27539 +License type (autodetected): BSD-3-Clause +./vendor/github.com/samuel/go-parser/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2013, Samuel Stauffer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------- Dependency: github.com/samuel/go-thrift Revision: 2187045faa54fce7f5028706ffeb2f2fc342aa7e @@ -3006,8 +1929,8 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/shirou/gopsutil -Version: v2.17.04 -Revision: 9af92986dda65a8c367157a82b484553e1ec1c55 +Version: v2.18.01 +Revision: c432be29ccce470088d07eea25b3ea7e68a8afbb License type (autodetected): BSD-3-Clause ./vendor/github.com/shirou/gopsutil/LICENSE: -------------------------------------------------------------------- @@ -3230,7 +2153,8 @@ SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/stretchr/testify -Revision: f390dcf405f7b83c997eac1b06768bb9f44dec18 +Version: v1.2.0 +Revision: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c License type (autodetected): MIT ./vendor/github.com/stretchr/testify/LICENSE: -------------------------------------------------------------------- @@ -3257,9 +2181,44 @@ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------- +Dependency: github.com/theckman/go-flock +Version: v0.4.0 +Revision: b139a2487364247d91814e4a7c7b8fdc69e342b2 +License type (autodetected): BSD-3-Clause +./vendor/github.com/theckman/go-flock/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2015, Tim Heckman +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of linode-netint nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------- Dependency: github.com/tsg/gopacket -Revision: 8e703b9968693c15f25cabb6ba8be4370cf431d0 +Revision: f289b3ea3e41a01b2822be9caf5f40c01fdda05c License type (autodetected): BSD-3-Clause ./vendor/github.com/tsg/gopacket/LICENSE: -------------------------------------------------------------------- @@ -3293,11 +2252,19 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- -Dependency: github.com/urso/go-structform -Version: v0.0.2 -Revision: 844d7d44009e9e8c0f08016fc4dab64e136ca040 +Dependency: github.com/urso/go-bin +Revision: 781c575c9f0eb3cb9dca94521bd7ad7d5aec7fd4 +License type (autodetected): Apache-2.0 +./vendor/github.com/urso/go-bin/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/urso/qcgen +Revision: 0b059e7db4f40a062ca3d975b7500c6a0a968d87 License type (autodetected): Apache-2.0 -./vendor/github.com/urso/go-structform/LICENSE: +./vendor/github.com/urso/qcgen/LICENSE: -------------------------------------------------------------------- Apache License 2.0 @@ -3454,7 +2421,7 @@ THE SOFTWARE. 
-------------------------------------------------------------------- Dependency: golang.org/x/crypto -Revision: d585fd2cc9195196078f516b69daff6744ef5e84 +Revision: 5119cf507ed5294cc409c092980c7497ee5d6fd2 License type (autodetected): BSD-3-Clause ./vendor/golang.org/x/crypto/LICENSE: -------------------------------------------------------------------- @@ -3488,7 +2455,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: golang.org/x/net -Revision: e90d6d0afc4c315a0d87a568ae68577cc15149a0 +Version: release-branch.go1.9 +Revision: 44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3 License type (autodetected): BSD-3-Clause ./vendor/golang.org/x/net/LICENSE: -------------------------------------------------------------------- @@ -3522,7 +2490,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: golang.org/x/sys -Revision: b76f9891dc1d975623261def70f9b89661f5baab +Revision: 37707fdb30a5b38865cfb95e5aab41707daec7fd License type (autodetected): BSD-3-Clause ./vendor/golang.org/x/sys/LICENSE: -------------------------------------------------------------------- @@ -3556,7 +2524,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: golang.org/x/text -Revision: 2910a502d2bf9e43193af9d68ca516529614eed3 +Revision: 4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1 License type (autodetected): BSD-3-Clause ./vendor/golang.org/x/text/LICENSE: -------------------------------------------------------------------- @@ -3624,7 +2592,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: golang.org/x/tools -Revision: 9be3b7cbc7ccd19baaa3b7704c22f57db5ebbdf2 +Version: release-branch.go1.9 +Revision: 5d2fd3ccab986d52112bf301d47a819783339d0e License type (autodetected): BSD-3-Clause ./vendor/golang.org/x/tools/LICENSE: -------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/README.md b/vendor/github.com/elastic/beats/README.md index 98a5fb5a..aa735422 100644 --- a/vendor/github.com/elastic/beats/README.md +++ b/vendor/github.com/elastic/beats/README.md @@ -1,5 +1,4 @@ [![Travis](https://travis-ci.org/elastic/beats.svg?branch=master)](https://travis-ci.org/elastic/beats) -[![AppVeyor](https://ci.appveyor.com/api/projects/status/p7y92i6pp2v7vnrd/branch/master?svg=true)](https://ci.appveyor.com/project/elastic-beats/beats/branch/master) [![GoReportCard](http://goreportcard.com/badge/elastic/beats)](http://goreportcard.com/report/elastic/beats) [![codecov.io](https://codecov.io/github/elastic/beats/coverage.svg?branch=master)](https://codecov.io/github/elastic/beats?branch=master) @@ -20,6 +19,7 @@ framework for creating Beats, and all the officially supported Beats: Beat | Description --- | --- +[Auditbeat](https://github.com/elastic/beats/tree/master/auditbeat) | Collect your Linux audit framework data and monitor the integrity of your files. 
[Filebeat](https://github.com/elastic/beats/tree/master/filebeat) | Tails and ships log files [Heartbeat](https://github.com/elastic/beats/tree/master/heartbeat) | Ping remote services for availability [Metricbeat](https://github.com/elastic/beats/tree/master/metricbeat) | Fetches sets of metrics from the operating system and services @@ -27,9 +27,9 @@ Beat | Description [Winlogbeat](https://github.com/elastic/beats/tree/master/winlogbeat) | Fetches and ships Windows Event logs In addition to the above Beats, which are officially supported by -[Elastic](https://elastic.co), the -community has created a set of other Beats that make use of libbeat but live -outside of this Github repository. We maintain a list of community Beats +[Elastic](https://elastic.co), the community has created a set of other Beats +that make use of libbeat but live outside of this Github repository. We maintain +a list of community Beats [here](https://www.elastic.co/guide/en/beats/libbeat/master/community-beats.html). ## Documentation and Getting Started @@ -38,6 +38,7 @@ You can find the documentation and getting started guides for each of the Beats on the [elastic.co site](https://www.elastic.co/guide/): * [Beats platform](https://www.elastic.co/guide/en/beats/libbeat/current/index.html) +* [Auditbeat](https://www.elastic.co/guide/en/beats/auditbeat/current/index.html) * [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html) * [Heartbeat](https://www.elastic.co/guide/en/beats/heartbeat/current/index.html) * [Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/index.html) @@ -65,8 +66,8 @@ create your own Beat. Please start by reading our [CONTRIBUTING](CONTRIBUTING.md) file. If you are creating a new Beat, you don't need to submit the code to this -repository. You can simply start working in a new repository and make use of -the libbeat packages, by following our [developer +repository. You can simply start working in a new repository and make use of the +libbeat packages, by following our [developer guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html). After you have a working prototype, open a pull request to add your Beat to the list of [community @@ -74,5 +75,5 @@ Beats](https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats. ## Building Beats from the Source -See our [CONTRIBUTING](CONTRIBUTING.md) file for information about setting up your dev -environment to build Beats from the source. +See our [CONTRIBUTING](CONTRIBUTING.md) file for information about setting up +your dev environment to build Beats from the source. diff --git a/vendor/github.com/elastic/beats/Vagrantfile b/vendor/github.com/elastic/beats/Vagrantfile index 404172a5..d36b2f74 100644 --- a/vendor/github.com/elastic/beats/Vagrantfile +++ b/vendor/github.com/elastic/beats/Vagrantfile @@ -63,7 +63,7 @@ if [ ! 
-e "~/bin/gvm" ]; then chmod +x ~/bin/gvm echo 'export GOPATH=$HOME/go' >> ~/.bash_profile echo 'export PATH=$HOME/bin:$GOPATH/bin:$PATH' >> ~/.bash_profile - echo 'eval "$(gvm 1.9.2)"' >> ~/.bash_profile + echo 'eval "$(gvm 1.9.4)"' >> ~/.bash_profile fi SCRIPT diff --git a/vendor/github.com/elastic/beats/auditbeat/Dockerfile b/vendor/github.com/elastic/beats/auditbeat/Dockerfile new file mode 100644 index 00000000..208c9e66 --- /dev/null +++ b/vendor/github.com/elastic/beats/auditbeat/Dockerfile @@ -0,0 +1,17 @@ +FROM golang:1.9.2 +MAINTAINER Nicolas Ruflin + +RUN set -x && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + netcat python-pip virtualenv && \ + apt-get clean + +RUN pip install --upgrade setuptools + +# Setup work environment +ENV AUDITBEAT_PATH /go/src/github.com/elastic/beats/auditbeat + +RUN mkdir -p $AUDITBEAT_PATH/build/coverage +WORKDIR $AUDITBEAT_PATH +HEALTHCHECK CMD exit 0 diff --git a/vendor/github.com/elastic/beats/auditbeat/Makefile b/vendor/github.com/elastic/beats/auditbeat/Makefile index 2f1f36f2..eb7f6283 100644 --- a/vendor/github.com/elastic/beats/auditbeat/Makefile +++ b/vendor/github.com/elastic/beats/auditbeat/Makefile @@ -1,13 +1,15 @@ BEAT_NAME=auditbeat BEAT_TITLE=Auditbeat BEAT_DESCRIPTION=Audit the activities of users and processes on your system. -SYSTEM_TESTS=false -TEST_ENVIRONMENT=false +SYSTEM_TESTS=true +TEST_ENVIRONMENT?=true GOX_OS?=linux windows ## @Building List of all OS to be supported by "make crosscompile". DEV_OS?=linux +TESTING_ENVIRONMENT?=latest +ES_BEATS?=.. # Path to the libbeat Makefile --include ../libbeat/scripts/Makefile +include ${ES_BEATS}/libbeat/scripts/Makefile # This is called by the beats packer before building starts .PHONY: before-build @@ -33,7 +35,7 @@ before-build: ${PREFIX}/${BEAT_NAME}-darwin.reference.yml @cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \ - <(go run scripts/generate_config.go -os linux -concat) \ + <(go run scripts/generate_config.go -os linux -arch amd64 -concat) \ ${ES_BEATS}/auditbeat/_meta/common.p2.yml \ ${ES_BEATS}/libbeat/_meta/config.yml > \ ${PREFIX}/${BEAT_NAME}-linux.yml @@ -42,6 +44,16 @@ before-build: ${ES_BEATS}/libbeat/_meta/config.reference.yml > \ ${PREFIX}/${BEAT_NAME}-linux.reference.yml + @cat ${ES_BEATS}/auditbeat/_meta/common.p1.yml \ + <(go run scripts/generate_config.go -os linux -arch i386 -concat) \ + ${ES_BEATS}/auditbeat/_meta/common.p2.yml \ + ${ES_BEATS}/libbeat/_meta/config.yml > \ + ${PREFIX}/${BEAT_NAME}-linux-386.yml + @cat ${ES_BEATS}/auditbeat/_meta/common.reference.yml \ + <(go run scripts/generate_config.go -os linux -concat -ref) \ + ${ES_BEATS}/libbeat/_meta/config.reference.yml > \ + ${PREFIX}/${BEAT_NAME}-linux-386.reference.yml + # Collects all dependencies and then calls update .PHONY: collect collect: fields collect-docs configs kibana diff --git a/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml b/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml index 002e4068..ece5571d 100644 --- a/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml +++ b/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml @@ -139,7 +139,8 @@ auditbeat.modules: # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. - # A value of 0 (the default) ensures events are immediately available + # The default value is set to 2048. + # A value of 0 ensures events are immediately available # to be sent to the outputs. 
#flush.min_events: 2048 @@ -147,6 +148,66 @@ auditbeat.modules: # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -181,6 +242,14 @@ auditbeat.modules: # equals: # http.code: 200 # +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. @@ -205,6 +274,7 @@ auditbeat.modules: # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 +# match_short_id: false # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: @@ -218,6 +288,7 @@ auditbeat.modules: # #processors: #- add_docker_metadata: ~ +#- add_host_metadata: ~ #============================= Elastic Cloud ================================== @@ -290,7 +361,18 @@ output.elasticsearch: # The default is 50. 
#bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -354,7 +436,7 @@ output.elasticsearch: # Optional load balance the events between the Logstash hosts. Default is false. #loadbalance: false - # Number of batches to be sent asynchronously to logstash while processing + # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 @@ -363,6 +445,17 @@ output.elasticsearch: # if no error is encountered. #slow_start: false + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + # Optional index name. The default index name is set to auditbeat # in all lowercase. #index: 'auditbeat' @@ -707,6 +800,10 @@ output.elasticsearch: # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + #============================== Dashboards ===================================== # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the @@ -741,6 +838,17 @@ output.elasticsearch: # how to install the dashboards by first querying Elasticsearch. #setup.dashboards.always_kibana: false +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + + #============================== Template ===================================== # A template is used to set the mapping in Elasticsearch diff --git a/vendor/github.com/elastic/beats/auditbeat/docker-compose.yml b/vendor/github.com/elastic/beats/auditbeat/docker-compose.yml new file mode 100644 index 00000000..f3db9be0 --- /dev/null +++ b/vendor/github.com/elastic/beats/auditbeat/docker-compose.yml @@ -0,0 +1,33 @@ +version: '2.1' +services: + beat: + build: ${PWD}/. 
+ depends_on: + - proxy_dep + env_file: + - ${PWD}/build/test.env + working_dir: /go/src/github.com/elastic/beats/auditbeat + environment: + - KIBANA_HOST=kibana + - KIBANA_PORT=5601 + volumes: + - ${PWD}/..:/go/src/github.com/elastic/beats/ + command: make + + # This is a proxy used to block beats until all services are healthy. + # See: https://github.com/docker/compose/issues/4369 + proxy_dep: + image: busybox + depends_on: + elasticsearch: { condition: service_healthy } + kibana: { condition: service_healthy } + + elasticsearch: + extends: + file: ../testing/environments/${TESTING_ENVIRONMENT}.yml + service: elasticsearch + + kibana: + extends: + file: ../testing/environments/${TESTING_ENVIRONMENT}.yml + service: kibana diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc index 7666c4fa..a86085ad 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc @@ -77,4 +77,3 @@ include::../../libbeat/docs/yaml.asciidoc[] include::../../libbeat/docs/regexp.asciidoc[] include::../../libbeat/docs/reference-yml.asciidoc[] - diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/fields.asciidoc index 3d65e309..b68b0318 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/fields.asciidoc @@ -18,6 +18,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> -- @@ -28,9 +29,9 @@ These are the fields generated by the auditd module. -[float] -=== `event.category` - +*`event.category`*:: ++ +-- type: keyword example: audit-rule @@ -38,77 +39,99 @@ example: audit-rule The event's category is a value derived from the `record_type`. -[float] -=== `event.type` +-- +*`event.type`*:: ++ +-- type: keyword The audit record's type. 
+-- -[float] -=== `user.auid` +*`user.auid`*:: ++ +-- type: keyword login user ID -[float] -=== `user.uid` +-- +*`user.uid`*:: ++ +-- type: keyword user ID -[float] -=== `user.euid` +-- +*`user.euid`*:: ++ +-- type: keyword effective user ID -[float] -=== `user.fsuid` +-- +*`user.fsuid`*:: ++ +-- type: keyword file system user ID -[float] -=== `user.suid` +-- +*`user.suid`*:: ++ +-- type: keyword sent user ID -[float] -=== `user.gid` +-- +*`user.gid`*:: ++ +-- type: keyword group ID -[float] -=== `user.egid` +-- +*`user.egid`*:: ++ +-- type: keyword effective group ID -[float] -=== `user.sgid` +-- +*`user.sgid`*:: ++ +-- type: keyword set group ID -[float] -=== `user.fsgid` +-- +*`user.fsgid`*:: ++ +-- type: keyword file system group ID +-- + [float] == name_map fields @@ -116,268 +139,334 @@ If `resolve_ids` is set to true in the configuration then `name_map` will contai -[float] -=== `user.name_map.auid` - +*`user.name_map.auid`*:: ++ +-- type: keyword login user name -[float] -=== `user.name_map.uid` +-- +*`user.name_map.uid`*:: ++ +-- type: keyword user name -[float] -=== `user.name_map.euid` +-- +*`user.name_map.euid`*:: ++ +-- type: keyword effective user name -[float] -=== `user.name_map.fsuid` +-- +*`user.name_map.fsuid`*:: ++ +-- type: keyword file system user name -[float] -=== `user.name_map.suid` +-- +*`user.name_map.suid`*:: ++ +-- type: keyword sent user name -[float] -=== `user.name_map.gid` +-- +*`user.name_map.gid`*:: ++ +-- type: keyword group name -[float] -=== `user.name_map.egid` +-- +*`user.name_map.egid`*:: ++ +-- type: keyword effective group name -[float] -=== `user.name_map.sgid` +-- +*`user.name_map.sgid`*:: ++ +-- type: keyword set group name -[float] -=== `user.name_map.fsgid` +-- +*`user.name_map.fsgid`*:: ++ +-- type: keyword file system group name +-- + [float] == selinux fields The SELinux identity of the actor. -[float] -=== `user.selinux.user` - +*`user.selinux.user`*:: ++ +-- type: keyword account submitted for authentication -[float] -=== `user.selinux.role` +-- +*`user.selinux.role`*:: ++ +-- type: keyword user's SELinux role -[float] -=== `user.selinux.domain` +-- +*`user.selinux.domain`*:: ++ +-- type: keyword The actor's SELinux domain or type. -[float] -=== `user.selinux.level` +-- +*`user.selinux.level`*:: ++ +-- type: keyword example: s0 The actor's SELinux level. -[float] -=== `user.selinux.category` +-- +*`user.selinux.category`*:: ++ +-- type: keyword The actor's SELinux category or compartments. +-- + [float] == process fields Process attributes. -[float] -=== `process.pid` - +*`process.pid`*:: ++ +-- type: keyword Process ID. -[float] -=== `process.ppid` +-- +*`process.ppid`*:: ++ +-- type: keyword Parent process ID. -[float] -=== `process.name` +-- +*`process.name`*:: ++ +-- type: keyword Process name (comm). -[float] -=== `process.title` +-- +*`process.title`*:: ++ +-- type: keyword Process title or command line parameters (proctitle). -[float] -=== `process.exe` +-- +*`process.exe`*:: ++ +-- type: keyword Absolute path of the executable. -[float] -=== `process.cwd` +-- +*`process.cwd`*:: ++ +-- type: keyword The current working directory. -[float] -=== `process.args` +-- +*`process.args`*:: ++ +-- type: keyword The process arguments as a list. +-- + [float] == source fields Source that triggered the event. -[float] -=== `source.ip` - +*`source.ip`*:: ++ +-- type: ip The remote address. -[float] -=== `source.port` +-- +*`source.port`*:: ++ +-- type: keyword The port number. 
-[float] -=== `source.hostname` +-- +*`source.hostname`*:: ++ +-- type: keyword Hostname of the source. -[float] -=== `source.path` +-- +*`source.path`*:: ++ +-- type: keyword This is the path associated with a unix socket. +-- + [float] == destination fields Destination address that triggered the event. -[float] -=== `destination.ip` - +*`destination.ip`*:: ++ +-- type: ip The remote address. -[float] -=== `destination.port` +-- +*`destination.port`*:: ++ +-- type: keyword The port number. -[float] -=== `destination.hostname` +-- +*`destination.hostname`*:: ++ +-- type: keyword Hostname of the source. -[float] -=== `destination.path` +-- +*`destination.path`*:: ++ +-- type: keyword This is the path associated with a unix socket. -[float] -=== `network.direction` +-- +*`network.direction`*:: ++ +-- type: keyword Direction of the network traffic (`incoming` or `outgoing`). +-- -[float] -=== `auditd.sequence` +*`auditd.sequence`*:: ++ +-- type: long The sequence number of the event as assigned by the kernel. Sequence numbers are stored as a uint32 in the kernel and can rollover. -[float] -=== `auditd.session` +-- +*`auditd.session`*:: ++ +-- type: keyword The session ID assigned to a login. All events related to a login session will have the same value. -[float] -=== `auditd.result` +-- +*`auditd.result`*:: ++ +-- type: keyword example: success or fail The result of the audited operation (success/fail). +-- + [float] == actor fields @@ -385,21 +474,25 @@ The result of the audited operation (success/fail). The actor is the user that triggered the audit event. -[float] -=== `auditd.summary.actor.primary` - +*`auditd.summary.actor.primary`*:: ++ +-- type: keyword The primary identity of the actor. This is the actor's original login ID. It will not change even if the user changes to another account. -[float] -=== `auditd.summary.actor.secondary` +-- +*`auditd.summary.actor.secondary`*:: ++ +-- type: keyword The secondary identity of the actor. This is typically the same as the primary, except for when the user has used `su`. +-- + [float] == object fields @@ -407,1572 +500,2014 @@ This is the thing or object being acted upon in the event. -[float] -=== `auditd.summary.object.type` - +*`auditd.summary.object.type`*:: ++ +-- type: keyword A description of the what the "thing" is (e.g. file, socket, user-session). -[float] -=== `auditd.summary.object.primary` +-- +*`auditd.summary.object.primary`*:: ++ +-- type: keyword -[float] -=== `auditd.summary.object.secondary` +-- +*`auditd.summary.object.secondary`*:: ++ +-- type: keyword -[float] -=== `auditd.summary.how` +-- +*`auditd.summary.how`*:: ++ +-- type: keyword This describes how the action was performed. Usually this is the exe or command that was being executed that triggered the event. +-- + [float] == paths fields List of paths associated with the event. 
-[float] -=== `auditd.paths.inode` - +*`auditd.paths.inode`*:: ++ +-- type: keyword inode number -[float] -=== `auditd.paths.dev` +-- +*`auditd.paths.dev`*:: ++ +-- type: keyword device name as found in /dev -[float] -=== `auditd.paths.obj_user` +-- +*`auditd.paths.obj_user`*:: ++ +-- type: keyword -[float] -=== `auditd.paths.obj_role` +-- +*`auditd.paths.obj_role`*:: ++ +-- type: keyword -[float] -=== `auditd.paths.obj_domain` +-- +*`auditd.paths.obj_domain`*:: ++ +-- type: keyword -[float] -=== `auditd.paths.obj_level` +-- +*`auditd.paths.obj_level`*:: ++ +-- type: keyword -[float] -=== `auditd.paths.objtype` +-- +*`auditd.paths.objtype`*:: ++ +-- type: keyword -[float] -=== `auditd.paths.ouid` +-- +*`auditd.paths.ouid`*:: ++ +-- type: keyword file owner user ID -[float] -=== `auditd.paths.rdev` +-- +*`auditd.paths.rdev`*:: ++ +-- type: keyword the device identifier (special files only) -[float] -=== `auditd.paths.nametype` +-- +*`auditd.paths.nametype`*:: ++ +-- type: keyword kind of file operation being referenced -[float] -=== `auditd.paths.ogid` +-- +*`auditd.paths.ogid`*:: ++ +-- type: keyword file owner group ID -[float] -=== `auditd.paths.item` +-- +*`auditd.paths.item`*:: ++ +-- type: keyword which item is being recorded -[float] -=== `auditd.paths.mode` +-- +*`auditd.paths.mode`*:: ++ +-- type: keyword mode flags on a file -[float] -=== `auditd.paths.name` +-- +*`auditd.paths.name`*:: ++ +-- type: keyword file name in avcs +-- + [float] == data fields The data from the audit messages. -[float] -=== `auditd.data.action` - +*`auditd.data.action`*:: ++ +-- type: keyword netfilter packet disposition -[float] -=== `auditd.data.minor` +-- +*`auditd.data.minor`*:: ++ +-- type: keyword device minor number -[float] -=== `auditd.data.acct` +-- +*`auditd.data.acct`*:: ++ +-- type: keyword a user's account name -[float] -=== `auditd.data.addr` +-- +*`auditd.data.addr`*:: ++ +-- type: keyword the remote address that the user is connecting from -[float] -=== `auditd.data.cipher` +-- +*`auditd.data.cipher`*:: ++ +-- type: keyword name of crypto cipher selected -[float] -=== `auditd.data.id` +-- +*`auditd.data.id`*:: ++ +-- type: keyword during account changes -[float] -=== `auditd.data.entries` +-- +*`auditd.data.entries`*:: ++ +-- type: keyword number of entries in the netfilter table -[float] -=== `auditd.data.kind` +-- +*`auditd.data.kind`*:: ++ +-- type: keyword server or client in crypto operation -[float] -=== `auditd.data.ksize` +-- +*`auditd.data.ksize`*:: ++ +-- type: keyword key size for crypto operation -[float] -=== `auditd.data.spid` +-- +*`auditd.data.spid`*:: ++ +-- type: keyword sent process ID -[float] -=== `auditd.data.arch` +-- +*`auditd.data.arch`*:: ++ +-- type: keyword the elf architecture flags -[float] -=== `auditd.data.argc` +-- +*`auditd.data.argc`*:: ++ +-- type: keyword the number of arguments to an execve syscall -[float] -=== `auditd.data.major` +-- +*`auditd.data.major`*:: ++ +-- type: keyword device major number -[float] -=== `auditd.data.unit` +-- +*`auditd.data.unit`*:: ++ +-- type: keyword systemd unit -[float] -=== `auditd.data.table` +-- +*`auditd.data.table`*:: ++ +-- type: keyword netfilter table name -[float] -=== `auditd.data.terminal` +-- +*`auditd.data.terminal`*:: ++ +-- type: keyword terminal name the user is running programs on -[float] -=== `auditd.data.grantors` +-- +*`auditd.data.grantors`*:: ++ +-- type: keyword pam modules approving the action -[float] -=== `auditd.data.direction` +-- +*`auditd.data.direction`*:: ++ +-- type: keyword direction 
of crypto operation -[float] -=== `auditd.data.op` +-- +*`auditd.data.op`*:: ++ +-- type: keyword the operation being performed that is audited -[float] -=== `auditd.data.tty` +-- +*`auditd.data.tty`*:: ++ +-- type: keyword tty udevice the user is running programs on -[float] -=== `auditd.data.syscall` +-- +*`auditd.data.syscall`*:: ++ +-- type: keyword syscall number in effect when the event occurred -[float] -=== `auditd.data.data` +-- +*`auditd.data.data`*:: ++ +-- type: keyword TTY text -[float] -=== `auditd.data.family` +-- +*`auditd.data.family`*:: ++ +-- type: keyword netfilter protocol -[float] -=== `auditd.data.mac` +-- +*`auditd.data.mac`*:: ++ +-- type: keyword crypto MAC algorithm selected -[float] -=== `auditd.data.pfs` +-- +*`auditd.data.pfs`*:: ++ +-- type: keyword perfect forward secrecy method -[float] -=== `auditd.data.items` +-- +*`auditd.data.items`*:: ++ +-- type: keyword the number of path records in the event -[float] -=== `auditd.data.a0` +-- +*`auditd.data.a0`*:: ++ +-- type: keyword -[float] -=== `auditd.data.a1` +-- +*`auditd.data.a1`*:: ++ +-- type: keyword -[float] -=== `auditd.data.a2` +-- +*`auditd.data.a2`*:: ++ +-- type: keyword -[float] -=== `auditd.data.a3` +-- +*`auditd.data.a3`*:: ++ +-- type: keyword -[float] -=== `auditd.data.hostname` +-- +*`auditd.data.hostname`*:: ++ +-- type: keyword the hostname that the user is connecting from -[float] -=== `auditd.data.lport` +-- +*`auditd.data.lport`*:: ++ +-- type: keyword local network port -[float] -=== `auditd.data.rport` +-- +*`auditd.data.rport`*:: ++ +-- type: keyword remote port number -[float] -=== `auditd.data.exit` +-- +*`auditd.data.exit`*:: ++ +-- type: keyword syscall exit code -[float] -=== `auditd.data.fp` +-- +*`auditd.data.fp`*:: ++ +-- type: keyword crypto key finger print -[float] -=== `auditd.data.laddr` +-- +*`auditd.data.laddr`*:: ++ +-- type: keyword local network address -[float] -=== `auditd.data.sport` +-- +*`auditd.data.sport`*:: ++ +-- type: keyword local port number -[float] -=== `auditd.data.capability` +-- +*`auditd.data.capability`*:: ++ +-- type: keyword posix capabilities -[float] -=== `auditd.data.nargs` +-- +*`auditd.data.nargs`*:: ++ +-- type: keyword the number of arguments to a socket call -[float] -=== `auditd.data.new-enabled` +-- +*`auditd.data.new-enabled`*:: ++ +-- type: keyword new TTY audit enabled setting -[float] -=== `auditd.data.audit_backlog_limit` +-- +*`auditd.data.audit_backlog_limit`*:: ++ +-- type: keyword audit system's backlog queue size -[float] -=== `auditd.data.dir` +-- +*`auditd.data.dir`*:: ++ +-- type: keyword directory name -[float] -=== `auditd.data.cap_pe` +-- +*`auditd.data.cap_pe`*:: ++ +-- type: keyword process effective capability map -[float] -=== `auditd.data.model` +-- +*`auditd.data.model`*:: ++ +-- type: keyword security model being used for virt -[float] -=== `auditd.data.new_pp` +-- +*`auditd.data.new_pp`*:: ++ +-- type: keyword new process permitted capability map -[float] -=== `auditd.data.old-enabled` +-- +*`auditd.data.old-enabled`*:: ++ +-- type: keyword present TTY audit enabled setting -[float] -=== `auditd.data.oauid` +-- +*`auditd.data.oauid`*:: ++ +-- type: keyword object's login user ID -[float] -=== `auditd.data.old` +-- +*`auditd.data.old`*:: ++ +-- type: keyword old value -[float] -=== `auditd.data.banners` +-- +*`auditd.data.banners`*:: ++ +-- type: keyword banners used on printed page -[float] -=== `auditd.data.feature` +-- +*`auditd.data.feature`*:: ++ +-- type: keyword kernel feature being changed -[float] -=== 
`auditd.data.vm-ctx` +-- +*`auditd.data.vm-ctx`*:: ++ +-- type: keyword the vm's context string -[float] -=== `auditd.data.opid` +-- +*`auditd.data.opid`*:: ++ +-- type: keyword object's process ID -[float] -=== `auditd.data.seperms` +-- +*`auditd.data.seperms`*:: ++ +-- type: keyword SELinux permissions being used -[float] -=== `auditd.data.seresult` +-- +*`auditd.data.seresult`*:: ++ +-- type: keyword SELinux AVC decision granted/denied -[float] -=== `auditd.data.new-rng` +-- +*`auditd.data.new-rng`*:: ++ +-- type: keyword device name of rng being added from a vm -[float] -=== `auditd.data.old-net` +-- +*`auditd.data.old-net`*:: ++ +-- type: keyword present MAC address assigned to vm -[float] -=== `auditd.data.sigev_signo` +-- +*`auditd.data.sigev_signo`*:: ++ +-- type: keyword signal number -[float] -=== `auditd.data.ino` +-- +*`auditd.data.ino`*:: ++ +-- type: keyword inode number -[float] -=== `auditd.data.old_enforcing` +-- +*`auditd.data.old_enforcing`*:: ++ +-- type: keyword old MAC enforcement status -[float] -=== `auditd.data.old-vcpu` +-- +*`auditd.data.old-vcpu`*:: ++ +-- type: keyword present number of CPU cores -[float] -=== `auditd.data.range` +-- +*`auditd.data.range`*:: ++ +-- type: keyword user's SE Linux range -[float] -=== `auditd.data.res` +-- +*`auditd.data.res`*:: ++ +-- type: keyword result of the audited operation(success/fail) -[float] -=== `auditd.data.added` +-- +*`auditd.data.added`*:: ++ +-- type: keyword number of new files detected -[float] -=== `auditd.data.fam` +-- +*`auditd.data.fam`*:: ++ +-- type: keyword socket address family -[float] -=== `auditd.data.nlnk-pid` +-- +*`auditd.data.nlnk-pid`*:: ++ +-- type: keyword pid of netlink packet sender -[float] -=== `auditd.data.subj` +-- +*`auditd.data.subj`*:: ++ +-- type: keyword lspp subject's context string -[float] -=== `auditd.data.a[0-3]` +-- +*`auditd.data.a[0-3]`*:: ++ +-- type: keyword the arguments to a syscall -[float] -=== `auditd.data.cgroup` +-- +*`auditd.data.cgroup`*:: ++ +-- type: keyword path to cgroup in sysfs -[float] -=== `auditd.data.kernel` +-- +*`auditd.data.kernel`*:: ++ +-- type: keyword kernel's version number -[float] -=== `auditd.data.ocomm` +-- +*`auditd.data.ocomm`*:: ++ +-- type: keyword object's command line name -[float] -=== `auditd.data.new-net` +-- +*`auditd.data.new-net`*:: ++ +-- type: keyword MAC address being assigned to vm -[float] -=== `auditd.data.permissive` +-- +*`auditd.data.permissive`*:: ++ +-- type: keyword SELinux is in permissive mode -[float] -=== `auditd.data.class` +-- +*`auditd.data.class`*:: ++ +-- type: keyword resource class assigned to vm -[float] -=== `auditd.data.compat` +-- +*`auditd.data.compat`*:: ++ +-- type: keyword is_compat_task result -[float] -=== `auditd.data.fi` +-- +*`auditd.data.fi`*:: ++ +-- type: keyword file assigned inherited capability map -[float] -=== `auditd.data.changed` +-- +*`auditd.data.changed`*:: ++ +-- type: keyword number of changed files -[float] -=== `auditd.data.msg` +-- +*`auditd.data.msg`*:: ++ +-- type: keyword the payload of the audit record -[float] -=== `auditd.data.dport` +-- +*`auditd.data.dport`*:: ++ +-- type: keyword remote port number -[float] -=== `auditd.data.new-seuser` +-- +*`auditd.data.new-seuser`*:: ++ +-- type: keyword new SELinux user -[float] -=== `auditd.data.invalid_context` +-- +*`auditd.data.invalid_context`*:: ++ +-- type: keyword SELinux context -[float] -=== `auditd.data.dmac` +-- +*`auditd.data.dmac`*:: ++ +-- type: keyword remote MAC address -[float] -=== `auditd.data.ipx-net` +-- 
+*`auditd.data.ipx-net`*:: ++ +-- type: keyword IPX network number -[float] -=== `auditd.data.iuid` +-- +*`auditd.data.iuid`*:: ++ +-- type: keyword ipc object's user ID -[float] -=== `auditd.data.macproto` +-- +*`auditd.data.macproto`*:: ++ +-- type: keyword ethernet packet type ID field -[float] -=== `auditd.data.obj` +-- +*`auditd.data.obj`*:: ++ +-- type: keyword lspp object context string -[float] -=== `auditd.data.ipid` +-- +*`auditd.data.ipid`*:: ++ +-- type: keyword IP datagram fragment identifier -[float] -=== `auditd.data.new-fs` +-- +*`auditd.data.new-fs`*:: ++ +-- type: keyword file system being added to vm -[float] -=== `auditd.data.vm-pid` +-- +*`auditd.data.vm-pid`*:: ++ +-- type: keyword vm's process ID -[float] -=== `auditd.data.cap_pi` +-- +*`auditd.data.cap_pi`*:: ++ +-- type: keyword process inherited capability map -[float] -=== `auditd.data.old-auid` +-- +*`auditd.data.old-auid`*:: ++ +-- type: keyword previous auid value -[float] -=== `auditd.data.oses` +-- +*`auditd.data.oses`*:: ++ +-- type: keyword object's session ID -[float] -=== `auditd.data.fd` +-- +*`auditd.data.fd`*:: ++ +-- type: keyword file descriptor number -[float] -=== `auditd.data.igid` +-- +*`auditd.data.igid`*:: ++ +-- type: keyword ipc object's group ID -[float] -=== `auditd.data.new-disk` +-- +*`auditd.data.new-disk`*:: ++ +-- type: keyword disk being added to vm -[float] -=== `auditd.data.parent` +-- +*`auditd.data.parent`*:: ++ +-- type: keyword the inode number of the parent file -[float] -=== `auditd.data.len` +-- +*`auditd.data.len`*:: ++ +-- type: keyword length -[float] -=== `auditd.data.oflag` +-- +*`auditd.data.oflag`*:: ++ +-- type: keyword open syscall flags -[float] -=== `auditd.data.uuid` +-- +*`auditd.data.uuid`*:: ++ +-- type: keyword a UUID -[float] -=== `auditd.data.code` +-- +*`auditd.data.code`*:: ++ +-- type: keyword seccomp action code -[float] -=== `auditd.data.nlnk-grp` +-- +*`auditd.data.nlnk-grp`*:: ++ +-- type: keyword netlink group number -[float] -=== `auditd.data.cap_fp` +-- +*`auditd.data.cap_fp`*:: ++ +-- type: keyword file permitted capability map -[float] -=== `auditd.data.new-mem` +-- +*`auditd.data.new-mem`*:: ++ +-- type: keyword new amount of memory in KB -[float] -=== `auditd.data.seperm` +-- +*`auditd.data.seperm`*:: ++ +-- type: keyword SELinux permission being decided on -[float] -=== `auditd.data.enforcing` +-- +*`auditd.data.enforcing`*:: ++ +-- type: keyword new MAC enforcement status -[float] -=== `auditd.data.new-chardev` +-- +*`auditd.data.new-chardev`*:: ++ +-- type: keyword new character device being assigned to vm -[float] -=== `auditd.data.old-rng` +-- +*`auditd.data.old-rng`*:: ++ +-- type: keyword device name of rng being removed from a vm -[float] -=== `auditd.data.outif` +-- +*`auditd.data.outif`*:: ++ +-- type: keyword out interface number -[float] -=== `auditd.data.cmd` +-- +*`auditd.data.cmd`*:: ++ +-- type: keyword command being executed -[float] -=== `auditd.data.hook` +-- +*`auditd.data.hook`*:: ++ +-- type: keyword netfilter hook that packet came from -[float] -=== `auditd.data.new-level` +-- +*`auditd.data.new-level`*:: ++ +-- type: keyword new run level -[float] -=== `auditd.data.sauid` +-- +*`auditd.data.sauid`*:: ++ +-- type: keyword sent login user ID -[float] -=== `auditd.data.sig` +-- +*`auditd.data.sig`*:: ++ +-- type: keyword signal number -[float] -=== `auditd.data.audit_backlog_wait_time` +-- +*`auditd.data.audit_backlog_wait_time`*:: ++ +-- type: keyword audit system's backlog wait time -[float] -=== `auditd.data.printer` 
+-- +*`auditd.data.printer`*:: ++ +-- type: keyword printer name -[float] -=== `auditd.data.old-mem` +-- +*`auditd.data.old-mem`*:: ++ +-- type: keyword present amount of memory in KB -[float] -=== `auditd.data.perm` +-- +*`auditd.data.perm`*:: ++ +-- type: keyword the file permission being used -[float] -=== `auditd.data.old_pi` +-- +*`auditd.data.old_pi`*:: ++ +-- type: keyword old process inherited capability map -[float] -=== `auditd.data.state` +-- +*`auditd.data.state`*:: ++ +-- type: keyword audit daemon configuration resulting state -[float] -=== `auditd.data.format` +-- +*`auditd.data.format`*:: ++ +-- type: keyword audit log's format -[float] -=== `auditd.data.new_gid` +-- +*`auditd.data.new_gid`*:: ++ +-- type: keyword new group ID being assigned -[float] -=== `auditd.data.tcontext` +-- +*`auditd.data.tcontext`*:: ++ +-- type: keyword the target's or object's context string -[float] -=== `auditd.data.maj` +-- +*`auditd.data.maj`*:: ++ +-- type: keyword device major number -[float] -=== `auditd.data.watch` +-- +*`auditd.data.watch`*:: ++ +-- type: keyword file name in a watch record -[float] -=== `auditd.data.device` +-- +*`auditd.data.device`*:: ++ +-- type: keyword device name -[float] -=== `auditd.data.grp` +-- +*`auditd.data.grp`*:: ++ +-- type: keyword group name -[float] -=== `auditd.data.bool` +-- +*`auditd.data.bool`*:: ++ +-- type: keyword name of SELinux boolean -[float] -=== `auditd.data.icmp_type` +-- +*`auditd.data.icmp_type`*:: ++ +-- type: keyword type of icmp message -[float] -=== `auditd.data.new_lock` +-- +*`auditd.data.new_lock`*:: ++ +-- type: keyword new value of feature lock -[float] -=== `auditd.data.old_prom` +-- +*`auditd.data.old_prom`*:: ++ +-- type: keyword network promiscuity flag -[float] -=== `auditd.data.acl` +-- +*`auditd.data.acl`*:: ++ +-- type: keyword access mode of resource assigned to vm -[float] -=== `auditd.data.ip` +-- +*`auditd.data.ip`*:: ++ +-- type: keyword network address of a printer -[float] -=== `auditd.data.new_pi` +-- +*`auditd.data.new_pi`*:: ++ +-- type: keyword new process inherited capability map -[float] -=== `auditd.data.default-context` +-- +*`auditd.data.default-context`*:: ++ +-- type: keyword default MAC context -[float] -=== `auditd.data.inode_gid` +-- +*`auditd.data.inode_gid`*:: ++ +-- type: keyword group ID of the inode's owner -[float] -=== `auditd.data.new-log_passwd` +-- +*`auditd.data.new-log_passwd`*:: ++ +-- type: keyword new value for TTY password logging -[float] -=== `auditd.data.new_pe` +-- +*`auditd.data.new_pe`*:: ++ +-- type: keyword new process effective capability map -[float] -=== `auditd.data.selected-context` +-- +*`auditd.data.selected-context`*:: ++ +-- type: keyword new MAC context assigned to session -[float] -=== `auditd.data.cap_fver` +-- +*`auditd.data.cap_fver`*:: ++ +-- type: keyword file system capabilities version number -[float] -=== `auditd.data.file` +-- +*`auditd.data.file`*:: ++ +-- type: keyword file name -[float] -=== `auditd.data.net` +-- +*`auditd.data.net`*:: ++ +-- type: keyword network MAC address -[float] -=== `auditd.data.virt` +-- +*`auditd.data.virt`*:: ++ +-- type: keyword kind of virtualization being referenced -[float] -=== `auditd.data.cap_pp` +-- +*`auditd.data.cap_pp`*:: ++ +-- type: keyword process permitted capability map -[float] -=== `auditd.data.old-range` +-- +*`auditd.data.old-range`*:: ++ +-- type: keyword present SELinux range -[float] -=== `auditd.data.resrc` +-- +*`auditd.data.resrc`*:: ++ +-- type: keyword resource being assigned -[float] -=== 
`auditd.data.new-range` +-- +*`auditd.data.new-range`*:: ++ +-- type: keyword new SELinux range -[float] -=== `auditd.data.obj_gid` +-- +*`auditd.data.obj_gid`*:: ++ +-- type: keyword group ID of object -[float] -=== `auditd.data.proto` +-- +*`auditd.data.proto`*:: ++ +-- type: keyword network protocol -[float] -=== `auditd.data.old-disk` +-- +*`auditd.data.old-disk`*:: ++ +-- type: keyword disk being removed from vm -[float] -=== `auditd.data.audit_failure` +-- +*`auditd.data.audit_failure`*:: ++ +-- type: keyword audit system's failure mode -[float] -=== `auditd.data.inif` +-- +*`auditd.data.inif`*:: ++ +-- type: keyword in interface number -[float] -=== `auditd.data.vm` +-- +*`auditd.data.vm`*:: ++ +-- type: keyword virtual machine name -[float] -=== `auditd.data.flags` +-- +*`auditd.data.flags`*:: ++ +-- type: keyword mmap syscall flags -[float] -=== `auditd.data.nlnk-fam` +-- +*`auditd.data.nlnk-fam`*:: ++ +-- type: keyword netlink protocol number -[float] -=== `auditd.data.old-fs` +-- +*`auditd.data.old-fs`*:: ++ +-- type: keyword file system being removed from vm -[float] -=== `auditd.data.old-ses` +-- +*`auditd.data.old-ses`*:: ++ +-- type: keyword previous ses value -[float] -=== `auditd.data.seqno` +-- +*`auditd.data.seqno`*:: ++ +-- type: keyword sequence number -[float] -=== `auditd.data.fver` +-- +*`auditd.data.fver`*:: ++ +-- type: keyword file system capabilities version number -[float] -=== `auditd.data.qbytes` +-- +*`auditd.data.qbytes`*:: ++ +-- type: keyword ipc objects quantity of bytes -[float] -=== `auditd.data.seuser` +-- +*`auditd.data.seuser`*:: ++ +-- type: keyword user's SE Linux user acct -[float] -=== `auditd.data.cap_fe` +-- +*`auditd.data.cap_fe`*:: ++ +-- type: keyword file assigned effective capability map -[float] -=== `auditd.data.new-vcpu` +-- +*`auditd.data.new-vcpu`*:: ++ +-- type: keyword new number of CPU cores -[float] -=== `auditd.data.old-level` +-- +*`auditd.data.old-level`*:: ++ +-- type: keyword old run level -[float] -=== `auditd.data.old_pp` +-- +*`auditd.data.old_pp`*:: ++ +-- type: keyword old process permitted capability map -[float] -=== `auditd.data.daddr` +-- +*`auditd.data.daddr`*:: ++ +-- type: keyword remote IP address -[float] -=== `auditd.data.old-role` +-- +*`auditd.data.old-role`*:: ++ +-- type: keyword present SELinux role -[float] -=== `auditd.data.ioctlcmd` +-- +*`auditd.data.ioctlcmd`*:: ++ +-- type: keyword The request argument to the ioctl syscall -[float] -=== `auditd.data.smac` +-- +*`auditd.data.smac`*:: ++ +-- type: keyword local MAC address -[float] -=== `auditd.data.apparmor` +-- +*`auditd.data.apparmor`*:: ++ +-- type: keyword apparmor event information -[float] -=== `auditd.data.fe` +-- +*`auditd.data.fe`*:: ++ +-- type: keyword file assigned effective capability map -[float] -=== `auditd.data.perm_mask` +-- +*`auditd.data.perm_mask`*:: ++ +-- type: keyword file permission mask that triggered a watch event -[float] -=== `auditd.data.ses` +-- +*`auditd.data.ses`*:: ++ +-- type: keyword login session ID -[float] -=== `auditd.data.cap_fi` +-- +*`auditd.data.cap_fi`*:: ++ +-- type: keyword file inherited capability map -[float] -=== `auditd.data.obj_uid` +-- +*`auditd.data.obj_uid`*:: ++ +-- type: keyword user ID of object -[float] -=== `auditd.data.reason` +-- +*`auditd.data.reason`*:: ++ +-- type: keyword text string denoting a reason for the action -[float] -=== `auditd.data.list` +-- +*`auditd.data.list`*:: ++ +-- type: keyword the audit system's filter list number -[float] -=== `auditd.data.old_lock` +-- 
+*`auditd.data.old_lock`*:: ++ +-- type: keyword present value of feature lock -[float] -=== `auditd.data.bus` +-- +*`auditd.data.bus`*:: ++ +-- type: keyword name of subsystem bus a vm resource belongs to -[float] -=== `auditd.data.old_pe` +-- +*`auditd.data.old_pe`*:: ++ +-- type: keyword old process effective capability map -[float] -=== `auditd.data.new-role` +-- +*`auditd.data.new-role`*:: ++ +-- type: keyword new SELinux role -[float] -=== `auditd.data.prom` +-- +*`auditd.data.prom`*:: ++ +-- type: keyword network promiscuity flag -[float] -=== `auditd.data.uri` +-- +*`auditd.data.uri`*:: ++ +-- type: keyword URI pointing to a printer -[float] -=== `auditd.data.audit_enabled` +-- +*`auditd.data.audit_enabled`*:: ++ +-- type: keyword audit systems's enable/disable status -[float] -=== `auditd.data.old-log_passwd` +-- +*`auditd.data.old-log_passwd`*:: ++ +-- type: keyword present value for TTY password logging -[float] -=== `auditd.data.old-seuser` +-- +*`auditd.data.old-seuser`*:: ++ +-- type: keyword present SELinux user -[float] -=== `auditd.data.per` +-- +*`auditd.data.per`*:: ++ +-- type: keyword linux personality -[float] -=== `auditd.data.scontext` +-- +*`auditd.data.scontext`*:: ++ +-- type: keyword the subject's context string -[float] -=== `auditd.data.tclass` +-- +*`auditd.data.tclass`*:: ++ +-- type: keyword target's object classification -[float] -=== `auditd.data.ver` +-- +*`auditd.data.ver`*:: ++ +-- type: keyword audit daemon's version number -[float] -=== `auditd.data.new` +-- +*`auditd.data.new`*:: ++ +-- type: keyword value being set in feature -[float] -=== `auditd.data.val` +-- +*`auditd.data.val`*:: ++ +-- type: keyword generic value associated with the operation -[float] -=== `auditd.data.img-ctx` +-- +*`auditd.data.img-ctx`*:: ++ +-- type: keyword the vm's disk image context string -[float] -=== `auditd.data.old-chardev` +-- +*`auditd.data.old-chardev`*:: ++ +-- type: keyword present character device assigned to vm -[float] -=== `auditd.data.old_val` +-- +*`auditd.data.old_val`*:: ++ +-- type: keyword current value of SELinux boolean -[float] -=== `auditd.data.success` +-- +*`auditd.data.success`*:: ++ +-- type: keyword whether the syscall was successful or not -[float] -=== `auditd.data.inode_uid` +-- +*`auditd.data.inode_uid`*:: ++ +-- type: keyword user ID of the inode's owner -[float] -=== `auditd.data.removed` +-- +*`auditd.data.removed`*:: ++ +-- type: keyword number of deleted files +-- -[float] -=== `auditd.data.socket.port` +*`auditd.data.socket.port`*:: ++ +-- type: keyword The port number. -[float] -=== `auditd.data.socket.saddr` +-- +*`auditd.data.socket.saddr`*:: ++ +-- type: keyword The raw socket address structure. -[float] -=== `auditd.data.socket.addr` +-- +*`auditd.data.socket.addr`*:: ++ +-- type: keyword The remote address. -[float] -=== `auditd.data.socket.family` +-- +*`auditd.data.socket.family`*:: ++ +-- type: keyword example: unix The socket family (unix, ipv4, ipv6, netlink). -[float] -=== `auditd.data.socket.path` +-- +*`auditd.data.socket.path`*:: ++ +-- type: keyword This is the path associated with a unix socket. -[float] -=== `auditd.messages` +-- +*`auditd.messages`*:: ++ +-- type: text An ordered list of the raw messages received from the kernel that were used to construct this document. This field is present if an error occurred processing the data or if `include_raw_message` is set in the config. 
-[float] -=== `auditd.warnings` +-- +*`auditd.warnings`*:: ++ +-- type: keyword The warnings generated by the Beat during the construction of the event. These are disabled by default and are used for development and debug purposes only. +-- + [float] == geoip fields @@ -1980,46 +2515,56 @@ The geoip fields are defined as a convenience in case you decide to enrich the d -[float] -=== `geoip.continent_name` - +*`geoip.continent_name`*:: ++ +-- type: keyword The name of the continent. -[float] -=== `geoip.city_name` +-- +*`geoip.city_name`*:: ++ +-- type: keyword The name of the city. -[float] -=== `geoip.region_name` +-- +*`geoip.region_name`*:: ++ +-- type: keyword The name of the region. -[float] -=== `geoip.country_iso_code` +-- +*`geoip.country_iso_code`*:: ++ +-- type: keyword Country ISO code. -[float] -=== `geoip.location` +-- +*`geoip.location`*:: ++ +-- type: geo_point The longitude and latitude. +-- + [[exported-fields-beat]] == Beat fields @@ -2027,33 +2572,41 @@ Contains common beat fields available in all event types. -[float] -=== `beat.name` - +*`beat.name`*:: ++ +-- The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. -[float] -=== `beat.hostname` +-- +*`beat.hostname`*:: ++ +-- The hostname as returned by the operating system on which the Beat is running. -[float] -=== `beat.timezone` +-- +*`beat.timezone`*:: ++ +-- The timezone as returned by the operating system on which the Beat is running. -[float] -=== `beat.version` +-- +*`beat.version`*:: ++ +-- The version of the beat that generated this event. -[float] -=== `@timestamp` +-- +*`@timestamp`*:: ++ +-- type: date example: August 26th 2016, 12:35:53.332 @@ -2065,20 +2618,26 @@ required: True The timestamp when the event log record was generated. -[float] -=== `tags` +-- +*`tags`*:: ++ +-- Arbitrary tags that can be set per Beat and per transaction type. -[float] -=== `fields` +-- +*`fields`*:: ++ +-- type: object Contains user configurable fields. +-- + [float] == error fields @@ -2086,30 +2645,36 @@ Error fields containing additional info in case of errors. -[float] -=== `error.message` - +*`error.message`*:: ++ +-- type: text Error message. -[float] -=== `error.code` +-- +*`error.code`*:: ++ +-- type: long Error code. -[float] -=== `error.type` +-- +*`error.type`*:: ++ +-- type: keyword Error type. +-- + [[exported-fields-cloud]] == Cloud provider metadata fields @@ -2117,56 +2682,70 @@ Metadata from cloud providers added by the add_cloud_metadata processor. -[float] -=== `meta.cloud.provider` - +*`meta.cloud.provider`*:: ++ +-- example: ec2 Name of the cloud provider. Possible values are ec2, gce, or digitalocean. -[float] -=== `meta.cloud.instance_id` +-- +*`meta.cloud.instance_id`*:: ++ +-- Instance ID of the host machine. -[float] -=== `meta.cloud.instance_name` +-- +*`meta.cloud.instance_name`*:: ++ +-- Instance name of the host machine. -[float] -=== `meta.cloud.machine_type` +-- +*`meta.cloud.machine_type`*:: ++ +-- example: t2.medium Machine type of the host machine. -[float] -=== `meta.cloud.availability_zone` +-- +*`meta.cloud.availability_zone`*:: ++ +-- example: us-east-1c Availability zone in which this host is running. -[float] -=== `meta.cloud.project_id` +-- +*`meta.cloud.project_id`*:: ++ +-- example: project-x Name of the project in Google Cloud. 
-[float] -=== `meta.cloud.region` +-- +*`meta.cloud.region`*:: ++ +-- Region in which this host is running. +-- + [[exported-fields-common]] == Common fields @@ -2174,15 +2753,17 @@ Contains common fields available in all event types. -[float] -=== `event.module` - +*`event.module`*:: ++ +-- The name of the module that generated the event. -[float] -=== `event.action` +-- +*`event.action`*:: ++ +-- type: keyword example: logged-in @@ -2191,184 +2772,230 @@ Action describes the change that triggered the event. For the file integrity module the possible values are: attributes_modified, created, deleted, updated, moved, and config_change. +-- + [float] == file fields File attributes. -[float] -=== `file.path` - +*`file.path`*:: ++ +-- type: text The path to the file. -[float] -=== `file.path.raw` - +*`file.path.raw`*:: ++ +-- type: keyword The path to the file. This is a non-analyzed field that is useful for aggregations. -[float] -=== `file.target_path` +-- + +-- +*`file.target_path`*:: ++ +-- type: keyword The target path for symlinks. -[float] -=== `file.type` +-- +*`file.type`*:: ++ +-- type: keyword The file type (file, dir, or symlink). -[float] -=== `file.device` +-- +*`file.device`*:: ++ +-- type: keyword The device. -[float] -=== `file.inode` +-- +*`file.inode`*:: ++ +-- type: keyword The inode representing the file in the filesystem. -[float] -=== `file.uid` +-- +*`file.uid`*:: ++ +-- type: keyword The user ID (UID) or security identifier (SID) of the file owner. -[float] -=== `file.owner` +-- +*`file.owner`*:: ++ +-- type: keyword The file owner's username. -[float] -=== `file.gid` +-- +*`file.gid`*:: ++ +-- type: keyword The primary group ID (GID) of the file. -[float] -=== `file.group` +-- +*`file.group`*:: ++ +-- type: keyword The primary group name of the file. -[float] -=== `file.mode` +-- +*`file.mode`*:: ++ +-- type: keyword example: 416 The mode of the file in octal representation. -[float] -=== `file.setuid` +-- +*`file.setuid`*:: ++ +-- type: boolean example: True Set if the file has the `setuid` bit set. Omitted otherwise. -[float] -=== `file.setgid` +-- +*`file.setgid`*:: ++ +-- type: boolean example: True Set if the file has the `setgid` bit set. Omitted otherwise. -[float] -=== `file.size` +-- +*`file.size`*:: ++ +-- type: long The file size in bytes (field is only added when `type` is `file`). -[float] -=== `file.mtime` +-- +*`file.mtime`*:: ++ +-- type: date The last modified time of the file (time when content was modified). -[float] -=== `file.ctime` +-- +*`file.ctime`*:: ++ +-- type: date The last change time of the file (time when metadata was changed). -[float] -=== `file.origin` +-- +*`file.origin`*:: ++ +-- type: text An array of strings describing a possible external origin for this file. For example, the URL it was downloaded from. Only supported in macOS, via the kMDItemWhereFroms attribute. Omitted if origin information is not available. -[float] -=== `file.origin.raw` - +*`file.origin.raw`*:: ++ +-- type: keyword This is a non-analyzed field that is useful for aggregations on the origin data. +-- + +-- + [float] == selinux fields The SELinux identity of the file. -[float] -=== `file.selinux.user` - +*`file.selinux.user`*:: ++ +-- type: keyword The owner of the object. -[float] -=== `file.selinux.role` +-- +*`file.selinux.role`*:: ++ +-- type: keyword The object's SELinux role. -[float] -=== `file.selinux.domain` +-- +*`file.selinux.domain`*:: ++ +-- type: keyword The object's SELinux domain or type. 
-[float] -=== `file.selinux.level` +-- +*`file.selinux.level`*:: ++ +-- type: keyword example: s0 The object's SELinux level. +-- + [[exported-fields-docker-processor]] == Docker fields @@ -2377,38 +3004,46 @@ Docker stats collected from Docker. -[float] -=== `docker.container.id` - +*`docker.container.id`*:: ++ +-- type: keyword Unique container id. -[float] -=== `docker.container.image` +-- +*`docker.container.image`*:: ++ +-- type: keyword Name of the image the container was built on. -[float] -=== `docker.container.name` +-- +*`docker.container.name`*:: ++ +-- type: keyword Container name. -[float] -=== `docker.container.labels` +-- +*`docker.container.labels`*:: ++ +-- type: object Image labels. +-- + [[exported-fields-file_integrity]] == File Integrity fields @@ -2422,111 +3057,209 @@ Hashes of the file. The keys are algorithm names and the values are the hex enco -[float] -=== `hash.blake2b_256` - +*`hash.blake2b_256`*:: ++ +-- type: keyword BLAKE2b-256 hash of the file. -[float] -=== `hash.blake2b_384` +-- +*`hash.blake2b_384`*:: ++ +-- type: keyword BLAKE2b-384 hash of the file. -[float] -=== `hash.blake2b_512` +-- +*`hash.blake2b_512`*:: ++ +-- type: keyword BLAKE2b-512 hash of the file. -[float] -=== `hash.md5` +-- +*`hash.md5`*:: ++ +-- type: keyword MD5 hash of the file. -[float] -=== `hash.sha1` +-- +*`hash.sha1`*:: ++ +-- type: keyword SHA1 hash of the file. -[float] -=== `hash.sha224` +-- +*`hash.sha224`*:: ++ +-- type: keyword SHA224 hash of the file. -[float] -=== `hash.sha256` +-- +*`hash.sha256`*:: ++ +-- type: keyword SHA256 hash of the file. -[float] -=== `hash.sha384` +-- +*`hash.sha384`*:: ++ +-- type: keyword SHA384 hash of the file. -[float] -=== `hash.sha3_224` +-- +*`hash.sha3_224`*:: ++ +-- type: keyword SHA3_224 hash of the file. -[float] -=== `hash.sha3_256` +-- +*`hash.sha3_256`*:: ++ +-- type: keyword SHA3_256 hash of the file. -[float] -=== `hash.sha3_384` +-- +*`hash.sha3_384`*:: ++ +-- type: keyword SHA3_384 hash of the file. -[float] -=== `hash.sha3_512` +-- +*`hash.sha3_512`*:: ++ +-- type: keyword SHA3_512 hash of the file. -[float] -=== `hash.sha512` +-- +*`hash.sha512`*:: ++ +-- type: keyword SHA512 hash of the file. -[float] -=== `hash.sha512_224` +-- +*`hash.sha512_224`*:: ++ +-- type: keyword SHA512/224 hash of the file. -[float] -=== `hash.sha512_256` +-- +*`hash.sha512_256`*:: ++ +-- type: keyword SHA512/256 hash of the file. +-- + +[[exported-fields-host-processor]] +== Host fields + +Info collected for the host machine. + + + + +*`host.name`*:: ++ +-- +type: keyword + +Hostname. + + +-- + +*`host.id`*:: ++ +-- +type: keyword + +Unique host id. + + +-- + +*`host.architecture`*:: ++ +-- +type: keyword + +Host architecture (e.g. x86_64, arm, ppc, mips). + + +-- + +*`host.os.platform`*:: ++ +-- +type: keyword + +OS platform (e.g. centos, ubuntu, windows). + + +-- + +*`host.os.version`*:: ++ +-- +type: keyword + +OS version. + + +-- + +*`host.os.family`*:: ++ +-- +type: keyword + +OS family (e.g. redhat, debian, freebsd, windows). 
+ + +-- + [[exported-fields-kubernetes-processor]] == Kubernetes fields @@ -2535,59 +3268,73 @@ Kubernetes metadata added by the kubernetes processor -[float] -=== `kubernetes.pod.name` - +*`kubernetes.pod.name`*:: ++ +-- type: keyword Kubernetes pod name -[float] -=== `kubernetes.namespace` +-- +*`kubernetes.namespace`*:: ++ +-- type: keyword Kubernetes namespace -[float] -=== `kubernetes.node.name` +-- +*`kubernetes.node.name`*:: ++ +-- type: keyword Kubernetes node name -[float] -=== `kubernetes.labels` +-- +*`kubernetes.labels`*:: ++ +-- type: object Kubernetes labels map -[float] -=== `kubernetes.annotations` +-- +*`kubernetes.annotations`*:: ++ +-- type: object Kubernetes annotations map -[float] -=== `kubernetes.container.name` +-- +*`kubernetes.container.name`*:: ++ +-- type: keyword Kubernetes container name -[float] -=== `kubernetes.container.image` +-- +*`kubernetes.container.image`*:: ++ +-- type: keyword Kubernetes container image +-- + diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc index c9e2636d..46f25474 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc @@ -1,18 +1,7 @@ [id="{beatname_lc}-getting-started"] == Getting started with {beatname_uc} -To get started with your own {beatname_uc} setup, install and configure these -related products: - - * Elasticsearch for storage and indexing the data. - * Kibana for the UI. - * Logstash (optional) for inserting data into Elasticsearch. - -See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack] -for more information. - -After installing the Elastic Stack, read the following topics to learn how to -install, configure, and run {beatname_uc}: +include::../../libbeat/docs/shared-getting-started-intro.asciidoc[] * <<{beatname_lc}-installation>> * <<{beatname_lc}-configuration>> @@ -25,7 +14,7 @@ install, configure, and run {beatname_uc}: [id="{beatname_lc}-installation"] === Step 1: Install {beatname_uc} -You should install {beatname_uc} on all the servers you want to monitor. +Install {beatname_uc} on all the servers you want to monitor. include::../../libbeat/docs/shared-download-and-install.asciidoc[] @@ -123,8 +112,7 @@ https://www.elastic.co/downloads/beats/{beatname_lc}[downloads page]. . Rename the +{beatname_lc}--windows+ directory to +{beatname_uc}+. . Open a PowerShell prompt as an Administrator (right-click the PowerShell icon -and select *Run As Administrator*). If you are running Windows XP, you may need -to download and install PowerShell. +and select *Run As Administrator*). . 
From the PowerShell prompt, run the following commands to install {beatname_uc} as a Windows service: diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc index b8d90b17..7327f472 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc @@ -2,19 +2,21 @@ include::../../libbeat/docs/version.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes62.asciidoc[] +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :version: {stack-version} :beatname_lc: auditbeat :beatname_uc: Auditbeat :beatname_pkg: {beatname_lc} +:github_repo_name: beats +:discuss_forum: beats/{beatname_lc} +:beat_default_index_prefix: {beatname_lc} +:has_ml_jobs: yes include::../../libbeat/docs/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] -include::../../libbeat/docs/contributing-to-beats.asciidoc[] - include::./getting-started.asciidoc[] include::../../libbeat/docs/repositories.asciidoc[] @@ -38,3 +40,6 @@ include::../../libbeat/docs/security/securing-beats.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] + +include::../../libbeat/docs/contributing-to-beats.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/securing-auditbeat.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/securing-auditbeat.asciidoc index 44c4f85f..d68b5e4c 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/securing-auditbeat.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/securing-auditbeat.asciidoc @@ -9,6 +9,7 @@ and other products in the Elastic stack: * <> * <> +* <> //sets block macro for https.asciidoc included in next section diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc index c666a8ab..8f7de65d 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc @@ -4,7 +4,7 @@ // that is unique to each beat. ///// -[[seting-up-and-running]] +[[setting-up-and-running]] == Setting up and running {beatname_uc} Before reading this section, see the @@ -30,3 +30,5 @@ include::../../libbeat/docs/keystore.asciidoc[] include::../../libbeat/docs/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] + +include::../../libbeat/docs/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tpl b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tpl index 0ef6465c..30cb58df 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tpl +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tpl @@ -17,17 +17,19 @@ ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these ## examples or add your own rules. + {{ if eq .goarch "amd64" -}} ## If you are on a 64 bit platform, everything should be running ## in 64 bit mode. This rule will detect any use of the 32 bit syscalls ## because this might be a sign of someone exploiting a hole in the 32 ## bit API. #-a always,exit -F arch=b32 -S all -F key=32bit-abi + {{ end -}} ## Executions. 
- #-a always,exit -F arch=b64 -S execve,execveat -k exec + #-a always,exit -F arch=b{{.arch_bits}} -S execve,execveat -k exec ## External access (warning: these can be expensive to audit). - #-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access + #-a always,exit -F arch=b{{.arch_bits}} -S accept,bind,connect -F key=external-access ## Identity changes. #-w /etc/group -p wa -k identity @@ -35,6 +37,6 @@ #-w /etc/gshadow -p wa -k identity ## Unauthorized access attempts. - #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access - #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access + #-a always,exit -F arch=b{{.arch_bits}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access + #-a always,exit -F arch=b{{.arch_bits}} -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access {{ end -}} diff --git a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-executions.json b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-executions.json index f84bcccd..1b0f82ea 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-executions.json +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-executions.json @@ -84,7 +84,7 @@ "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" }, "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}", - "panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":4,\"x\":4,\"y\":0},\"id\":\"20a8e8d0-c1c8-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":4,\"x\":8,\"y\":0},\"id\":\"f81a6de0-c1c1-11e7-8995-936807a28b16\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":0,\"y\":0},\"id\":\"2efac370-c1ca-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":5,\"i\":\"6\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"d382f5b0-c1c6-11e7-8995-936807a28b16\",\"panelIndex\":\"6\",\"type\":\"search\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]", + "panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":4,\"x\":4,\"y\":0},\"id\":\"20a8e8d0-c1c8-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":4,\"x\":8,\"y\":0},\"id\":\"f81a6de0-c1c1-11e7-8995-936807a28b16\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":0,\"y\":0},\"id\":\"2efac370-c1ca-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"6\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"d382f5b0-c1c6-11e7-8995-936807a28b16\",\"panelIndex\":\"6\",\"type\":\"search\",\"version\":\"6.2.4\"}]", "timeRestore": false, "title": "[Auditbeat Auditd] Executions", "version": 1 @@ -95,5 +95,5 @@ "version": 5 } ], - "version": "7.0.0-alpha1-SNAPSHOT" -} \ No newline at end of file + "version": "6.2.4" +} diff --git 
a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-overview.json b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-overview.json index 9a8bdfca..99e1a24c 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-overview.json +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-overview.json @@ -71,7 +71,7 @@ "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" }, "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}", - "panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":7,\"x\":0,\"y\":0},\"id\":\"97680df0-c1c0-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":5,\"x\":7,\"y\":0},\"id\":\"08679220-c25a-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":5,\"i\":\"5\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"0f10c430-c1c3-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"search\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]", + "panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":7,\"x\":0,\"y\":0},\"id\":\"97680df0-c1c0-11e7-8995-936807a28b16\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":5,\"x\":7,\"y\":0},\"id\":\"08679220-c25a-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"5\",\"w\":12,\"x\":0,\"y\":3},\"id\":\"0f10c430-c1c3-11e7-8995-936807a28b16\",\"panelIndex\":\"5\",\"type\":\"search\",\"version\":\"6.2.4\"}]", "timeRestore": false, "title": "[Auditbeat Auditd] Overview", "version": 1 @@ -82,5 +82,5 @@ "version": 5 } ], - "version": "7.0.0-alpha1-SNAPSHOT" -} \ No newline at end of file + "version": "6.2.4" +} diff --git a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-sockets.json b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-sockets.json index fc34a7d2..f78214b2 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-sockets.json +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/kibana/6/dashboard/auditbeat-kernel-sockets.json @@ -173,7 +173,7 @@ "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"*\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" }, "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}", - "panelsJSON": 
"[{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":4,\"i\":\"1\",\"w\":6,\"x\":6,\"y\":3},\"id\":\"faf882f0-c242-11e7-8692-232bd1143e8a\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"2\",\"w\":6,\"x\":0,\"y\":7},\"id\":\"ea483730-c246-11e7-8692-232bd1143e8a\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"3\",\"w\":6,\"x\":6,\"y\":7},\"id\":\"ceb91de0-c250-11e7-8692-232bd1143e8a\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":12,\"x\":0,\"y\":0},\"id\":\"b21e0c70-c252-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":4,\"i\":\"5\",\"w\":6,\"x\":0,\"y\":3},\"id\":\"a8e20450-c256-11e7-8692-232bd1143e8a\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]", + "panelsJSON": "[{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":4,\"i\":\"1\",\"w\":6,\"x\":6,\"y\":3},\"id\":\"faf882f0-c242-11e7-8692-232bd1143e8a\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"2\",\"w\":6,\"x\":0,\"y\":7},\"id\":\"ea483730-c246-11e7-8692-232bd1143e8a\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"embeddableConfig\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"gridData\":{\"h\":5,\"i\":\"3\",\"w\":6,\"x\":6,\"y\":7},\"id\":\"ceb91de0-c250-11e7-8692-232bd1143e8a\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":12,\"x\":0,\"y\":0},\"id\":\"b21e0c70-c252-11e7-8692-232bd1143e8a\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":4,\"i\":\"5\",\"w\":6,\"x\":0,\"y\":3},\"id\":\"a8e20450-c256-11e7-8692-232bd1143e8a\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.2.4\"}]", "timeRestore": false, "title": "[Auditbeat Auditd] Sockets", "version": 1 @@ -184,5 +184,5 @@ "version": 4 } ], - "version": "7.0.0-alpha1-SNAPSHOT" -} \ No newline at end of file + "version": "6.2.4" +} diff --git a/vendor/github.com/elastic/beats/auditbeat/module/auditd/audit_linux_test.go b/vendor/github.com/elastic/beats/auditbeat/module/auditd/audit_linux_test.go index af5489cf..39eef5fe 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/audit_linux_test.go +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/audit_linux_test.go @@ -12,12 +12,13 @@ import ( "github.com/stretchr/testify/assert" + "github.com/prometheus/procfs" + "github.com/elastic/beats/auditbeat/core" "github.com/elastic/beats/libbeat/logp" mbtest "github.com/elastic/beats/metricbeat/mb/testing" "github.com/elastic/go-libaudit" "github.com/elastic/go-libaudit/auparse" - "github.com/elastic/procfs" ) // Specify the -audit flag when running these tests to interact with the real diff --git 
a/vendor/github.com/elastic/beats/auditbeat/module/auditd/config_linux_test.go b/vendor/github.com/elastic/beats/auditbeat/module/auditd/config_linux_test.go index a5d48b97..4731a17d 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/config_linux_test.go +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/config_linux_test.go @@ -14,7 +14,7 @@ audit_rules: | # Comments and empty lines are ignored. -w /etc/passwd -p wa -k auth - -a always,exit -F arch=b64 -S execve -k exec` + -a always,exit -S execve -k exec` config, err := parseConfig(t, data) if err != nil { @@ -26,7 +26,7 @@ audit_rules: | } assert.EqualValues(t, []string{ "-w /etc/passwd -p wa -k auth", - "-a always,exit -F arch=b64 -S execve -k exec", + "-a always,exit -S execve -k exec", }, commands(rules)) } @@ -35,7 +35,7 @@ func TestConfigValidateWithError(t *testing.T) { audit_rules: | -x bad -F flag -a always,exit -w /etc/passwd - -a always,exit -F arch=b64 -S fake -k exec` + -a always,exit -S fake -k exec` _, err := parseConfig(t, data) if err == nil { diff --git a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/kibana/6/dashboard/auditbeat-file-integrity.json b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/kibana/6/dashboard/auditbeat-file-integrity.json index 043625b5..63a611a1 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/kibana/6/dashboard/auditbeat-file-integrity.json +++ b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/kibana/6/dashboard/auditbeat-file-integrity.json @@ -218,7 +218,7 @@ "searchSourceJSON": "{\"filter\":[],\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"default_field\":\"*\",\"query\":\"*\"}}}}" }, "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}", - "panelsJSON": 
"[{\"gridData\":{\"h\":6,\"i\":\"1\",\"w\":2,\"x\":0,\"y\":0},\"id\":\"AV0tVcg6g1PYniApZa-v\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":6,\"i\":\"2\",\"w\":7,\"x\":2,\"y\":0},\"id\":\"AV0tV05vg1PYniApZbA2\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":3,\"x\":9,\"y\":0},\"id\":\"AV0tWL-Yg1PYniApZbCs\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":3,\"x\":9,\"y\":3},\"id\":\"AV0tWSdXg1PYniApZbDU\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":4,\"y\":8},\"id\":\"AV0tW0djg1PYniApZbGL\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":2,\"i\":\"6\",\"w\":4,\"x\":0,\"y\":6},\"id\":\"AV0tY6jwg1PYniApZbRY\",\"panelIndex\":\"6\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":2,\"i\":\"7\",\"w\":4,\"x\":4,\"y\":6},\"id\":\"AV0tav8Ag1PYniApZbbK\",\"panelIndex\":\"7\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":2,\"i\":\"8\",\"w\":4,\"x\":8,\"y\":6},\"id\":\"AV0tbcUdg1PYniApZbe1\",\"panelIndex\":\"8\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":5,\"i\":\"9\",\"w\":6,\"x\":0,\"y\":11},\"id\":\"AV0tc_xZg1PYniApZbnL\",\"panelIndex\":\"9\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"10\",\"w\":4,\"x\":8,\"y\":8},\"id\":\"AV0tes4Eg1PYniApZbwV\",\"panelIndex\":\"10\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"gridData\":{\"h\":3,\"i\":\"11\",\"w\":4,\"x\":0,\"y\":8},\"id\":\"AV0te0TCg1PYniApZbw9\",\"panelIndex\":\"11\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"columns\":[\"file.path\",\"event.action\"],\"gridData\":{\"h\":5,\"i\":\"12\",\"w\":6,\"x\":6,\"y\":11},\"id\":\"a380a060-cb44-11e7-9835-2f31fe08873b\",\"panelIndex\":\"12\",\"sort\":[\"@timestamp\",\"desc\"],\"type\":\"search\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"}]", + "panelsJSON": 
"[{\"gridData\":{\"h\":6,\"i\":\"1\",\"w\":2,\"x\":0,\"y\":0},\"id\":\"AV0tVcg6g1PYniApZa-v\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":6,\"i\":\"2\",\"w\":7,\"x\":2,\"y\":0},\"id\":\"AV0tV05vg1PYniApZbA2\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"3\",\"w\":3,\"x\":9,\"y\":0},\"id\":\"AV0tWL-Yg1PYniApZbCs\",\"panelIndex\":\"3\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":3,\"x\":9,\"y\":3},\"id\":\"AV0tWSdXg1PYniApZbDU\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":4,\"x\":4,\"y\":8},\"id\":\"AV0tW0djg1PYniApZbGL\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":2,\"i\":\"6\",\"w\":4,\"x\":0,\"y\":6},\"id\":\"AV0tY6jwg1PYniApZbRY\",\"panelIndex\":\"6\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":2,\"i\":\"7\",\"w\":4,\"x\":4,\"y\":6},\"id\":\"AV0tav8Ag1PYniApZbbK\",\"panelIndex\":\"7\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":2,\"i\":\"8\",\"w\":4,\"x\":8,\"y\":6},\"id\":\"AV0tbcUdg1PYniApZbe1\",\"panelIndex\":\"8\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":5,\"i\":\"9\",\"w\":6,\"x\":0,\"y\":11},\"id\":\"AV0tc_xZg1PYniApZbnL\",\"panelIndex\":\"9\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"10\",\"w\":4,\"x\":8,\"y\":8},\"id\":\"AV0tes4Eg1PYniApZbwV\",\"panelIndex\":\"10\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"gridData\":{\"h\":3,\"i\":\"11\",\"w\":4,\"x\":0,\"y\":8},\"id\":\"AV0te0TCg1PYniApZbw9\",\"panelIndex\":\"11\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"columns\":[\"file.path\",\"event.action\"],\"gridData\":{\"h\":5,\"i\":\"12\",\"w\":6,\"x\":6,\"y\":11},\"id\":\"a380a060-cb44-11e7-9835-2f31fe08873b\",\"panelIndex\":\"12\",\"sort\":[\"@timestamp\",\"desc\"],\"type\":\"search\",\"version\":\"6.2.4\"}]", "timeRestore": false, "title": "[Auditbeat File Integrity] Overview", "version": 1 @@ -230,4 +230,4 @@ } ], "version": "6.1.2" -} \ No newline at end of file +} diff --git a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/eventreader_fsnotify.go b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/eventreader_fsnotify.go index cac54162..731e370a 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/eventreader_fsnotify.go +++ b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/eventreader_fsnotify.go @@ -36,6 +36,14 @@ func NewEventReader(c Config) (EventProducer, error) { } func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) { + if err := r.watcher.Start(); err != nil { + return nil, errors.Wrap(err, "unable to start watcher") + } + go r.consumeEvents(done) + + // Windows implementation of fsnotify needs to have the watched paths + // installed after the event consumer is started, to avoid a potential + // deadlock. Do it on all platforms for simplicity. 
for _, p := range r.config.Paths { if err := r.watcher.Add(p); err != nil { if err == syscall.EMFILE { @@ -48,10 +56,6 @@ func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) { } } - if err := r.watcher.Start(); err != nil { - return nil, errors.Wrap(err, "unable to start watcher") - } - go r.consumeEvents(done) r.log.Infow("Started fsnotify watcher", "file_path", r.config.Paths, "recursive", r.config.Recursive) diff --git a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/filetree_test.go b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/filetree_test.go index 710c4bc0..5f501578 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/filetree_test.go +++ b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/filetree_test.go @@ -1,3 +1,5 @@ +// +build !integration + package monitor import ( diff --git a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor.go b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor.go index 560527e2..40b77fa9 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor.go +++ b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor.go @@ -21,7 +21,9 @@ func New(recursive bool) (Watcher, error) { if err != nil { return nil, err } - if recursive { + // Use our simulated recursive watches unless the fsnotify implementation + // supports OS-provided recursive watches + if recursive && fsnotify.SetRecursive() != nil { return newRecursiveWatcher(fsnotify), nil } return (*nonRecursiveWatcher)(fsnotify), nil diff --git a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor_test.go b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor_test.go index 283c6ac4..9dd15961 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor_test.go +++ b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/monitor/monitor_test.go @@ -1,3 +1,5 @@ +// +build !integration + package monitor import ( diff --git a/vendor/github.com/elastic/beats/auditbeat/scripts/generate_config.go b/vendor/github.com/elastic/beats/auditbeat/scripts/generate_config.go index fd3489c8..9d295010 100644 --- a/vendor/github.com/elastic/beats/auditbeat/scripts/generate_config.go +++ b/vendor/github.com/elastic/beats/auditbeat/scripts/generate_config.go @@ -18,6 +18,7 @@ const defaultGlob = "module/*/_meta/config*.yml.tpl" var ( goos = flag.String("os", runtime.GOOS, "generate config specific to the specified operating system") + goarch = flag.String("arch", runtime.GOARCH, "generate config specific to the specified CPU architecture") reference = flag.Bool("ref", false, "generate a reference config") concat = flag.Bool("concat", false, "concatenate all configs instead writing individual files") ) @@ -40,9 +41,20 @@ func getConfig(file string) ([]byte, error) { return nil, errors.Wrapf(err, "failed reading %v", file) } + var archBits string + switch *goarch { + case "i386": + archBits = "32" + case "amd64": + archBits = "64" + default: + return nil, fmt.Errorf("supporting only i386 and amd64 architecture") + } data := map[string]interface{}{ + "goarch": *goarch, "goos": *goos, "reference": *reference, + "arch_bits": archBits, } buf := new(bytes.Buffer) if err = tpl.Execute(buf, data); err != nil { diff --git a/vendor/github.com/elastic/beats/auditbeat/tests/system/auditbeat.py 
b/vendor/github.com/elastic/beats/auditbeat/tests/system/auditbeat.py index de1af657..1c0f4e81 100644 --- a/vendor/github.com/elastic/beats/auditbeat/tests/system/auditbeat.py +++ b/vendor/github.com/elastic/beats/auditbeat/tests/system/auditbeat.py @@ -1,14 +1,54 @@ import os +import shutil import sys +import tempfile -sys.path.append('../../../libbeat/tests/system') -from beat.beat import TestCase +sys.path.append(os.path.join(os.path.dirname(__file__), '../../../metricbeat/tests/system')) +if os.name == "nt": + import win32file -class BaseTest(TestCase): +from metricbeat import BaseTest as MetricbeatTest + + +class BaseTest(MetricbeatTest): @classmethod def setUpClass(self): self.beat_name = "auditbeat" self.beat_path = os.path.abspath( os.path.join(os.path.dirname(__file__), "../../")) - super(BaseTest, self).setUpClass() + super(MetricbeatTest, self).setUpClass() + + def create_file(self, path, contents): + f = open(path, 'wb') + f.write(contents) + f.close() + + def check_event(self, event, expected): + for key in expected: + assert key in event, "key '{0}' not found in event".format(key) + assert event[key] == expected[key], \ + "key '{0}' has value '{1}', expected '{2}'".format(key, + event[key], + expected[key]) + + def temp_dir(self, prefix): + # os.path.realpath resolves any symlinks in path. Necessary for macOS + # where /var is a symlink to /private/var + p = os.path.realpath(tempfile.mkdtemp(prefix)) + if os.name == "nt": + # Under windows, get rid of any ~1 in path (short path) + p = str(win32file.GetLongPathName(p)) + return p + + +class PathCleanup: + def __init__(self, paths): + self.paths = paths + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + for path in self.paths: + shutil.rmtree(path) diff --git a/vendor/github.com/elastic/beats/auditbeat/tests/system/config/auditbeat.yml.j2 b/vendor/github.com/elastic/beats/auditbeat/tests/system/config/auditbeat.yml.j2 index 8ec0ba10..6068b99e 100644 --- a/vendor/github.com/elastic/beats/auditbeat/tests/system/config/auditbeat.yml.j2 +++ b/vendor/github.com/elastic/beats/auditbeat/tests/system/config/auditbeat.yml.j2 @@ -18,15 +18,8 @@ auditbeat.modules: {%- endfor %} queue.mem: - events: 4096 - flush.min_events: 8 - flush.timeout: 0.1s + events: 4 + flush.min_events: 0 + flush.timeout: 0.01s -{%- if elasticsearch %} -output.elasticsearch: - hosts: ["{{ elasticsearch.host }}"] -{%- else %} -output.file: - path: '{{ output_file_path|default(beat.working_dir + "/output") }}' - filename: {{ output_file_filename|default("auditbeat") }} -{%- endif %} +{% include './tests/system/config/libbeat.yml.j2' %} diff --git a/vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py b/vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py index 8e622bbc..13990876 100644 --- a/vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py +++ b/vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py @@ -1,5 +1,7 @@ import re import sys +import os +import shutil import unittest from auditbeat import BaseTest from elasticsearch import Elasticsearch @@ -7,15 +9,18 @@ class Test(BaseTest): - @unittest.skipUnless(re.match("(?i)linux", sys.platform), "os") def test_start_stop(self): """ Auditbeat starts and stops without error. 
""" - self.render_config_template(modules=[{ - "name": "audit", - "metricsets": ["kernel"], - }]) + self.render_config_template( + modules=[{ + "name": "file_integrity", + "extras": { + "paths": ["file.example"], + } + }], + ) proc = self.start_beat() self.wait_until(lambda: self.log_contains("start running")) proc.check_kill_and_wait() @@ -35,11 +40,10 @@ def test_template(self): self.render_config_template( modules=[{ - "name": "audit", - "metricsets": ["file"], + "name": "file_integrity", "extras": { - "file.paths": ["file.example"], - }, + "paths": ["file.example"], + } }], elasticsearch={"host": self.get_elasticsearch_url()}) exit_code = self.run_beat(extra_args=["setup", "--template"]) @@ -47,3 +51,28 @@ def test_template(self): assert exit_code == 0 assert self.log_contains('Loaded index template') assert len(es.cat.templates(name='auditbeat-*', h='name')) > 0 + + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + def test_dashboards(self): + """ + Test that the dashboards can be loaded with `setup --dashboards` + """ + + kibana_dir = os.path.join(self.beat_path, "_meta", "kibana") + shutil.copytree(kibana_dir, os.path.join(self.working_dir, "kibana")) + + es = Elasticsearch([self.get_elasticsearch_url()]) + self.render_config_template( + modules=[{ + "name": "file_integrity", + "extras": { + "paths": ["file.example"], + } + }], + elasticsearch={"host": self.get_elasticsearch_url()}, + kibana={"host": self.get_kibana_url()}, + ) + exit_code = self.run_beat(extra_args=["setup", "--dashboards"]) + + assert exit_code == 0 + assert self.log_contains("Kibana dashboards successfully loaded.") diff --git a/vendor/github.com/elastic/beats/auditbeat/tests/system/test_file_integrity.py b/vendor/github.com/elastic/beats/auditbeat/tests/system/test_file_integrity.py new file mode 100644 index 00000000..8688a5bb --- /dev/null +++ b/vendor/github.com/elastic/beats/auditbeat/tests/system/test_file_integrity.py @@ -0,0 +1,189 @@ +import sys +import os +import shutil +import time +import unittest +from auditbeat import * +from beat.beat import INTEGRATION_TESTS + + +# Escapes a path to match what's printed in the logs +def escape_path(path): + return path.replace('\\', '\\\\') + + +def has_file(objs, path, sha1hash): + found = False + for obj in objs: + if 'file.path' in obj and 'hash.sha1' in obj \ + and obj['file.path'].lower() == path.lower() and obj['hash.sha1'] == sha1hash: + found = True + break + assert found, "File '{0}' with sha1sum '{1}' not found".format(path, sha1hash) + + +def has_dir(objs, path): + found = False + for obj in objs: + if 'file.path' in obj and obj['file.path'].lower() == path.lower() and obj['file.type'] == "dir": + found = True + break + assert found, "Dir '{0}' not found".format(path) + + +def file_events(objs, path, expected): + evts = set() + for obj in objs: + if 'file.path' in obj and 'event.action' in obj and obj['file.path'].lower() == path.lower(): + if type(obj['event.action']) == list: + evts = evts.union(set(obj['event.action'])) + else: + evts.add(obj['event.action']) + for wanted in set(expected): + assert wanted in evts, "Event {0} for path '{1}' not found (got {2})".format( + wanted, path, evts) + + +def wrap_except(expr): + try: + return expr() + except IOError: + return False + + +class Test(BaseTest): + + def wait_output(self, min_events): + self.wait_until(lambda: wrap_except(lambda: len(self.read_output()) >= min_events)) + # wait for the number of lines in the file to stay constant for a second + prev_lines = -1 + while True: + num_lines 
= self.output_lines() + if prev_lines < num_lines: + prev_lines = num_lines + time.sleep(1) + else: + break + + def test_non_recursive(self): + """ + file_integrity monitors watched directories (non recursive). + """ + + dirs = [self.temp_dir("auditbeat_test"), + self.temp_dir("auditbeat_test")] + + with PathCleanup(dirs): + self.render_config_template( + modules=[{ + "name": "file_integrity", + "extras": { + "paths": dirs, + "scan_at_start": False + } + }], + ) + proc = self.start_beat() + + # wait until the directories to watch are printed in the logs + # this happens when the file_integrity module starts. + # Case must be ignored under windows as capitalisation of paths + # may differ + self.wait_log_contains(escape_path(dirs[0]), max_timeout=30, ignore_case=True) + + file1 = os.path.join(dirs[0], 'file.txt') + self.create_file(file1, "hello world!") + + file2 = os.path.join(dirs[1], 'file2.txt') + self.create_file(file2, "Foo bar") + + # wait until file1 is reported before deleting. Otherwise the hash + # might not be calculated + self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(file1)), ignore_case=True) + + os.unlink(file1) + + subdir = os.path.join(dirs[0], "subdir") + os.mkdir(subdir) + file3 = os.path.join(subdir, "other_file.txt") + self.create_file(file3, "not reported.") + + self.wait_log_contains("\"deleted\"") + self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(subdir)), ignore_case=True) + self.wait_output(3) + + proc.check_kill_and_wait() + self.assert_no_logged_warnings() + + # Ensure all Beater stages are used. + assert self.log_contains("Setup Beat: auditbeat") + assert self.log_contains("auditbeat start running") + assert self.log_contains("auditbeat stopped") + + objs = self.read_output() + + has_file(objs, file1, "430ce34d020724ed75a196dfc2ad67c77772d169") + has_file(objs, file2, "d23be250530a24be33069572db67995f21244c51") + has_dir(objs, subdir) + + file_events(objs, file1, ['created', 'deleted']) + file_events(objs, file2, ['created']) + + # assert file inside subdir is not reported + assert self.log_contains(file3) is False + + def test_recursive(self): + """ + file_integrity monitors watched directories (recursive). + """ + + dirs = [self.temp_dir("auditbeat_test")] + + with PathCleanup(dirs): + self.render_config_template( + modules=[{ + "name": "file_integrity", + "extras": { + "paths": dirs, + "scan_at_start": False, + "recursive": True + } + }], + ) + proc = self.start_beat() + + # wait until the directories to watch are printed in the logs + # this happens when the file_integrity module starts + self.wait_log_contains(escape_path(dirs[0]), max_timeout=30, ignore_case=True) + self.wait_log_contains("\"recursive\": true") + + subdir = os.path.join(dirs[0], "subdir") + os.mkdir(subdir) + file1 = os.path.join(subdir, "file.txt") + self.create_file(file1, "hello world!") + + subdir2 = os.path.join(subdir, "other") + os.mkdir(subdir2) + file2 = os.path.join(subdir2, "more.txt") + self.create_file(file2, "") + + self.wait_log_contains("\"path\": \"{0}\"".format(escape_path(file2)), ignore_case=True) + self.wait_output(4) + + proc.check_kill_and_wait() + self.assert_no_logged_warnings() + + # Ensure all Beater stages are used. 
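The file_integrity system tests in this patch assert hard-coded SHA-1 digests in their has_file calls. A short standard-library Python check, included here only to document where those expected values come from, reproduces them:

import hashlib

# "hello world!" is the content written via create_file() in the tests;
# the tests assert this digest for that file.
print(hashlib.sha1(b"hello world!").hexdigest())
# expected by the tests: 430ce34d020724ed75a196dfc2ad67c77772d169

# The empty file created in test_recursive hashes to the well-known
# SHA-1 of zero bytes of input.
print(hashlib.sha1(b"").hexdigest())
# -> da39a3ee5e6b4b0d3255bfef95601890afd80709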
+ assert self.log_contains("Setup Beat: auditbeat") + assert self.log_contains("auditbeat start running") + assert self.log_contains("auditbeat stopped") + + objs = self.read_output() + + has_file(objs, file1, "430ce34d020724ed75a196dfc2ad67c77772d169") + has_file(objs, file2, "da39a3ee5e6b4b0d3255bfef95601890afd80709") + has_dir(objs, subdir) + has_dir(objs, subdir2) + + file_events(objs, file1, ['created']) + file_events(objs, file2, ['created']) diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh b/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh index 7de6f3ad..1e2bb57a 100755 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh @@ -3,59 +3,13 @@ set -x -# set docker0 to promiscuous mode -sudo ip link set docker0 promisc on +export CHANGE_MINIKUBE_NONE_USER=true -# install etcd -wget https://github.com/coreos/etcd/releases/download/$TRAVIS_ETCD_VERSION/etcd-$TRAVIS_ETCD_VERSION-linux-amd64.tar.gz -tar xzf etcd-$TRAVIS_ETCD_VERSION-linux-amd64.tar.gz -sudo mv etcd-$TRAVIS_ETCD_VERSION-linux-amd64/etcd /usr/local/bin/etcd -rm etcd-$TRAVIS_ETCD_VERSION-linux-amd64.tar.gz -rm -rf etcd-$TRAVIS_ETCD_VERSION-linux-amd64 +curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$TRAVIS_K8S_VERSION/bin/linux/amd64/kubectl && \ + chmod +x kubectl && sudo mv kubectl /usr/local/bin/ +curl -Lo minikube https://storage.googleapis.com/minikube/releases/$TRAVIS_MINIKUBE_VERSION/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ +sudo minikube start --vm-driver=none --kubernetes-version=$TRAVIS_K8S_VERSION --logtostderr +minikube update-context +JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; \ + until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done -# download kubectl -wget https://storage.googleapis.com/kubernetes-release/release/$TRAVIS_KUBE_VERSION/bin/linux/amd64/kubectl -chmod +x kubectl -sudo mv kubectl /usr/local/bin/kubectl - -# download kubernetes -git clone https://github.com/kubernetes/kubernetes $HOME/kubernetes - -# install cfssl -go get -u github.com/cloudflare/cfssl/cmd/... - -pushd $HOME/kubernetes - git checkout $TRAVIS_KUBE_VERSION - kubectl config set-credentials myself --username=admin --password=admin - kubectl config set-context local --cluster=local --user=myself - kubectl config set-cluster local --server=http://localhost:8080 - kubectl config use-context local - - # start kubernetes in the background - sudo PATH=$PATH:/home/travis/.gimme/versions/go1.7.linux.amd64/bin/go \ - KUBE_ENABLE_CLUSTER_DNS=true \ - hack/local-up-cluster.sh & -popd - -# Wait until kube is up and running -TIMEOUT=0 -TIMEOUT_COUNT=800 -until $(curl --output /dev/null --silent http://localhost:8080) || [ $TIMEOUT -eq $TIMEOUT_COUNT ]; do - echo "Kube is not up yet" - let TIMEOUT=TIMEOUT+1 - sleep 1 -done - -if [ $TIMEOUT -eq $TIMEOUT_COUNT ]; then - echo "Kubernetes is not up and running" - exit 1 -fi - -echo "Kubernetes is deployed and reachable" - -# Try and sleep before issuing chown. Currently, Kubernetes is started by -# a command that is run in the background. Technically Kubernetes could be -# up and running, but those files might not exist yet as the previous command -# could create them after Kube starts successfully. 
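The rewritten deploy/kubernetes/.travis/setup.sh above drops the hand-rolled curl wait loop in favour of starting minikube and polling kubectl with a JSONPath template until a node reports Ready=True. A minimal Python sketch of that wait-until-ready pattern (the node_is_ready helper and the timeout value are illustrative assumptions, not part of the patch):

import subprocess
import time

JSONPATH = ('{range .items[*]}{@.metadata.name}:'
            '{range @.status.conditions[*]}{@.type}={@.status};{end}{end}')

def node_is_ready():
    # Mirrors the grep in setup.sh: look for a Ready=True node condition.
    out = subprocess.check_output(
        ["kubectl", "get", "nodes", "-o", "jsonpath=" + JSONPATH])
    return b"Ready=True" in out

def wait_until(predicate, timeout=300, interval=1):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise RuntimeError("condition not met within {} seconds".format(timeout))

wait_until(node_is_ready)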
-sleep 30 -sudo chown -R $USER:$USER $HOME/.kube diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml index a8b08b7b..49212246 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml @@ -6,14 +6,13 @@ metadata: namespace: kube-system labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" data: filebeat.yml: |- filebeat.config: - prospectors: - # Mounted `filebeat-prospectors` configmap: - path: ${path.config}/prospectors.d/*.yml - # Reload prospectors configs as they change: + inputs: + # Mounted `filebeat-inputs` configmap: + path: ${path.config}/inputs.d/*.yml + # Reload inputs configs as they change: reload.enabled: false modules: path: ${path.config}/modules.d/*.yml @@ -34,11 +33,10 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: filebeat-prospectors + name: filebeat-inputs namespace: kube-system labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" data: kubernetes.yml: |- - type: docker @@ -55,19 +53,17 @@ metadata: namespace: kube-system labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" spec: template: metadata: labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" spec: serviceAccountName: filebeat terminationGracePeriodSeconds: 30 containers: - name: filebeat - image: docker.elastic.co/beats/filebeat:6.2.3 + image: docker.elastic.co/beats/filebeat:6.3.2 args: [ "-c", "/etc/filebeat.yml", "-e", @@ -98,8 +94,8 @@ spec: mountPath: /etc/filebeat.yml readOnly: true subPath: filebeat.yml - - name: prospectors - mountPath: /usr/share/filebeat/prospectors.d + - name: inputs + mountPath: /usr/share/filebeat/inputs.d readOnly: true - name: data mountPath: /usr/share/filebeat/data @@ -114,10 +110,13 @@ spec: - name: varlibdockercontainers hostPath: path: /var/lib/docker/containers - - name: prospectors + - name: inputs configMap: defaultMode: 0600 - name: filebeat-prospectors + name: filebeat-inputs + # We set an `emptyDir` here to ensure the manifest will deploy correctly. + # It's recommended to change this to a `hostPath` folder, to ensure internal data + # files survive pod changes (ie: version upgrade) - name: data emptyDir: {} --- diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/README.md b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/README.md index e5e8a70b..f384411e 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/README.md +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/README.md @@ -8,7 +8,7 @@ By deploying filebeat as a [DaemonSet](https://kubernetes.io/docs/concepts/workl we ensure we get a running filebeat daemon on each node of the cluster. Docker logs host folder (`/var/lib/docker/containers`) is mounted on the -filebeat container. Filebeat will start a prospector for these files and start +filebeat container. Filebeat will start an input for these files and start harvesting them as they appear. 
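As the filebeat README above notes, mounting /var/lib/docker/containers into the Filebeat pod is what lets the docker input harvest each container's log files. Assuming Docker's default json-file logging driver (an assumption, not something this patch states), every harvested line is a small JSON document; a Python sketch of what one such line decodes to:

import json

# Illustrative line from /var/lib/docker/containers/<id>/<id>-json.log,
# as written by the json-file logging driver (sample data, not from the repo).
line = '{"log":"hello from the container\\n","stream":"stdout","time":"2018-03-01T12:00:00.000000000Z"}'

entry = json.loads(line)
print(entry["log"].rstrip())              # the original stdout line
print(entry["stream"] + " @ " + entry["time"])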
Everything is deployed under `kube-system` namespace, you can change that by diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-configmap.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-configmap.yaml index ab0321a0..ca1d0329 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-configmap.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-configmap.yaml @@ -6,14 +6,13 @@ metadata: namespace: kube-system labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" data: filebeat.yml: |- filebeat.config: - prospectors: - # Mounted `filebeat-prospectors` configmap: - path: ${path.config}/prospectors.d/*.yml - # Reload prospectors configs as they change: + inputs: + # Mounted `filebeat-inputs` configmap: + path: ${path.config}/inputs.d/*.yml + # Reload inputs configs as they change: reload.enabled: false modules: path: ${path.config}/modules.d/*.yml @@ -34,11 +33,10 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: filebeat-prospectors + name: filebeat-inputs namespace: kube-system labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" data: kubernetes.yml: |- - type: docker diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml index 9ffb06d4..a04fceea 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml @@ -5,13 +5,11 @@ metadata: namespace: kube-system labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" spec: template: metadata: labels: k8s-app: filebeat - kubernetes.io/cluster-service: "true" spec: serviceAccountName: filebeat terminationGracePeriodSeconds: 30 @@ -48,8 +46,8 @@ spec: mountPath: /etc/filebeat.yml readOnly: true subPath: filebeat.yml - - name: prospectors - mountPath: /usr/share/filebeat/prospectors.d + - name: inputs + mountPath: /usr/share/filebeat/inputs.d readOnly: true - name: data mountPath: /usr/share/filebeat/data @@ -64,9 +62,12 @@ spec: - name: varlibdockercontainers hostPath: path: /var/lib/docker/containers - - name: prospectors + - name: inputs configMap: defaultMode: 0600 - name: filebeat-prospectors + name: filebeat-inputs + # We set an `emptyDir` here to ensure the manifest will deploy correctly. 
+ # It's recommended to change this to a `hostPath` folder, to ensure internal data + # files survive pod changes (ie: version upgrade) - name: data emptyDir: {} diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml index 46f28220..fac0a7f9 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -6,7 +6,6 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" data: metricbeat.yml: |- metricbeat.config.modules: @@ -33,7 +32,6 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" data: system.yml: |- - module: system @@ -80,20 +78,19 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: template: metadata: labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: serviceAccountName: metricbeat terminationGracePeriodSeconds: 30 hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet containers: - name: metricbeat - image: docker.elastic.co/beats/metricbeat:6.2.3 + image: docker.elastic.co/beats/metricbeat:6.3.2 args: [ "-c", "/etc/metricbeat.yml", "-e", @@ -158,6 +155,9 @@ spec: configMap: defaultMode: 0600 name: metricbeat-daemonset-modules + # We set an `emptyDir` here to ensure the manifest will deploy correctly. + # It's recommended to change this to a `hostPath` folder, to ensure internal data + # files survive pod changes (ie: version upgrade) - name: data emptyDir: {} --- @@ -168,7 +168,6 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" data: # This module requires `kube-state-metrics` up and running under `kube-system` namespace kubernetes.yml: |- @@ -192,17 +191,16 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: template: metadata: labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: + serviceAccountName: metricbeat containers: - name: metricbeat - image: docker.elastic.co/beats/metricbeat:6.2.3 + image: docker.elastic.co/beats/metricbeat:6.3.2 args: [ "-c", "/etc/metricbeat.yml", "-e", diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset-configmap.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset-configmap.yaml index c4b0fb5a..1af92f4b 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset-configmap.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset-configmap.yaml @@ -6,7 +6,6 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" data: metricbeat.yml: |- metricbeat.config.modules: @@ -33,7 +32,6 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" data: system.yml: |- - module: system diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml index c5d99f26..34301262 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml @@ 
-6,17 +6,16 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: template: metadata: labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: serviceAccountName: metricbeat terminationGracePeriodSeconds: 30 hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet containers: - name: metricbeat image: docker.elastic.co/beats/metricbeat:%VERSION% @@ -84,5 +83,8 @@ spec: configMap: defaultMode: 0600 name: metricbeat-daemonset-modules + # We set an `emptyDir` here to ensure the manifest will deploy correctly. + # It's recommended to change this to a `hostPath` folder, to ensure internal data + # files survive pod changes (ie: version upgrade) - name: data emptyDir: {} diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml index b526cb71..eb0f672e 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml @@ -5,7 +5,6 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" data: # This module requires `kube-state-metrics` up and running under `kube-system` namespace kubernetes.yml: |- diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml index baf77ab8..6601cdc8 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml @@ -6,14 +6,13 @@ metadata: namespace: kube-system labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: template: metadata: labels: k8s-app: metricbeat - kubernetes.io/cluster-service: "true" spec: + serviceAccountName: metricbeat containers: - name: metricbeat image: docker.elastic.co/beats/metricbeat:%VERSION% diff --git a/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr b/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr index a148622c..c648d6a1 100755 --- a/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr +++ b/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr @@ -2,6 +2,7 @@ """Cherry pick and backport a PR""" import sys +import os import argparse from os.path import expanduser import re @@ -57,6 +58,8 @@ def main(): parser.add_argument("--create_pr", action="store_true", help="Create a PR using the Github API " + "(requires token in ~/.elastic/github.token)") + parser.add_argument("--diff", action="store_true", + help="Display the diff before pushing the PR") args = parser.parse_args() print(args) @@ -97,6 +100,12 @@ def main(): print("No commit to push") return 1 + if args.diff: + call("git diff {}".format(args.to_branch), shell=True) + if raw_input("Continue? [y/n]: ") != "y": + print("Aborting cherry-pick.") + return 1 + print("Ready to push branch.") remote = raw_input("To which remote should I push? 
(your fork): ") call("git push {} :{} > /dev/null".format(remote, tmp_branch), @@ -140,9 +149,23 @@ def main(): # remove needs backport label from the original PR session.delete(base + "/issues/{}/labels/needs_backport".format(args.pr_number)) + # get version and set a version label on the original PR + version = get_version(os.getcwd()) + if version: + session.post( + base + "/issues/{}/labels".format(args.pr_number), json=["v" + version]) + print("\nDone. PR created: {}".format(new_pr["html_url"])) print("Please go and check it and add the review tags") +def get_version(beats_dir): + pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P<version>.*)"') + with open(os.path.join(beats_dir, "libbeat/version/version.go"), "r") as f: + for line in f: + match = pattern.match(line) + if match: + return match.group('version') + if __name__ == "__main__": sys.exit(main()) diff --git a/vendor/github.com/elastic/beats/dev-tools/deploy b/vendor/github.com/elastic/beats/dev-tools/deploy index f09ae357..812a0034 100755 --- a/vendor/github.com/elastic/beats/dev-tools/deploy +++ b/vendor/github.com/elastic/beats/dev-tools/deploy @@ -16,9 +16,9 @@ def main(): check_call("make clean", shell=True) print("Done building Docker images.") if args.no_snapshot: - check_call("make SNAPSHOT=no package", shell=True) + check_call("make SNAPSHOT=no package-all", shell=True) else: - check_call("make SNAPSHOT=yes package", shell=True) + check_call("make SNAPSHOT=yes package-all", shell=True) print("All done") if __name__ == "__main__": diff --git a/vendor/github.com/elastic/beats/dev-tools/generate_notice.py b/vendor/github.com/elastic/beats/dev-tools/generate_notice.py index f7925e1c..09a4443c 100644 --- a/vendor/github.com/elastic/beats/dev-tools/generate_notice.py +++ b/vendor/github.com/elastic/beats/dev-tools/generate_notice.py @@ -44,33 +44,37 @@ def read_versions(vendor): return libs -def gather_dependencies(vendor_dirs): +def gather_dependencies(vendor_dirs, overrides=None): dependencies = {} # lib_path -> [array of lib] for vendor in vendor_dirs: libs = read_versions(vendor) # walk looking for LICENSE files for root, dirs, filenames in os.walk(vendor): - for filename in sorted(filenames): - if filename.startswith("LICENSE"): - lib_path = get_library_path(root) - lib_search = [l for l in libs if l["path"].startswith(lib_path)] - if len(lib_search) == 0: - print("WARNING: No version information found for: {}".format(lib_path)) - lib = {"path": lib_path} - else: - lib = lib_search[0] - lib["license_file"] = os.path.join(root, filename) - - lib["license_contents"] = read_file(lib["license_file"]) - lib["license_summary"] = detect_license_summary(lib["license_contents"]) - if lib["license_summary"] == "UNKNOWN": - print("WARNING: Unknown license for: {}".format(lib_path)) - - if lib_path not in dependencies: - dependencies[lib_path] = [lib] - else: - dependencies[lib_path].append(lib) + licenses = get_licenses(root) + for filename in licenses: + lib_path = get_library_path(root) + lib_search = [l for l in libs if l["path"].startswith(lib_path)] + if len(lib_search) == 0: + print("WARNING: No version information found for: {}".format(lib_path)) + lib = {"path": lib_path} + else: + lib = lib_search[0] + lib["license_file"] = os.path.join(root, filename) + + lib["license_contents"] = read_file(lib["license_file"]) + lib["license_summary"] = detect_license_summary(lib["license_contents"]) + if lib["license_summary"] == "UNKNOWN": + print("WARNING: Unknown license for: {}".format(lib_path)) + + revision = 
overrides.get(lib_path, {}).get("revision") + if revision: + lib["revision"] = revision + + if lib_path not in dependencies: + dependencies[lib_path] = [lib] + else: + dependencies[lib_path].append(lib) # don't walk down into another vendor dir if "vendor" in dirs: @@ -78,6 +82,61 @@ def gather_dependencies(vendor_dirs): return dependencies +def get_licenses(folder): + """ + Get a list of license files from a given directory. + """ + licenses = [] + for filename in sorted(os.listdir(folder)): + if filename.startswith("LICENSE") and "docs" not in filename: + licenses.append(filename) + elif filename.startswith("APLv2"): # gorhill/cronexpr + licenses.append(filename) + return licenses + + +def has_license(folder): + """ + Checks if a particular repo has a license files. + + There are two cases accepted: + * The folder contains a LICENSE + * The folder only contains subdirectories AND all these + subdirectories contain a LICENSE + """ + if len(get_licenses(folder)) > 0: + return True, "" + + for subdir in os.listdir(folder): + if not os.path.isdir(os.path.join(folder, subdir)): + return False, folder + if len(get_licenses(os.path.join(folder, subdir))) == 0: + return False, os.path.join(folder, subdir) + return True, "" + + +def check_all_have_license_files(vendor_dirs): + """ + Checks that everything in the vendor folders has a license one way + or the other. This doesn't collect the licenses, because the code that + collects the licenses needs to walk the full tree. This one makes sure + that every folder in the `vendor` directories has at least one license. + """ + issues = [] + for vendor in vendor_dirs: + for root, dirs, filenames in os.walk(vendor): + if root.count(os.sep) - vendor.count(os.sep) == 2: # two levels deep + # Two level deep means folders like `github.com/elastic`. 
+ # look for the license in root but also one level up + ok, issue = has_license(root) + if not ok: + print("No license in: {}".format(issue)) + issues.append(issue) + if len(issues) > 0: + raise Exception("I have found licensing issues in the following folders: {}" + .format(issues)) + + def write_notice_file(f, beat, copyright, dependencies): now = datetime.datetime.now() @@ -137,20 +196,27 @@ def get_url(repo): return "https://github.com/{}/{}".format(words[1], words[2]) -def create_notice(filename, beat, copyright, vendor_dirs, csvfile): - dependencies = gather_dependencies(vendor_dirs) +def create_notice(filename, beat, copyright, vendor_dirs, csvfile, overrides=None): + dependencies = gather_dependencies(vendor_dirs, overrides=overrides) if not csvfile: with open(filename, "w+") as f: write_notice_file(f, beat, copyright, dependencies) + print("Available at {}".format(filename)) else: with open(csvfile, "wb") as f: csvwriter = csv.writer(f) write_csv_file(csvwriter, dependencies) + print("Available at {}".format(csvfile)) + return dependencies APACHE2_LICENSE_TITLES = [ "Apache License Version 2.0", - "Apache License, Version 2.0" + "Apache License, Version 2.0", + re.sub(r"\s+", " ", """Apache License + ============== + + _Version 2.0, January 2004_"""), ] MIT_LICENSES = [ @@ -166,7 +232,7 @@ def create_notice(filename, beat, copyright, vendor_dirs, csvfile): """), re.sub(r"\s+", " ", """Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies.""") +copyright notice and this permission notice appear in all copies."""), ] BSD_LICENSE_CONTENTS = [ @@ -209,6 +275,8 @@ def create_notice(filename, beat, copyright, vendor_dirs, csvfile): def detect_license_summary(content): # replace all white spaces with a single space content = re.sub(r"\s+", ' ', content) + # replace smart quotes with less intelligent ones + content = content.replace(b'\xe2\x80\x9c', '"').replace(b'\xe2\x80\x9d', '"') if any(sentence in content[0:1000] for sentence in APACHE2_LICENSE_TITLES): return "Apache-2.0" if any(sentence in content[0:1000] for sentence in MIT_LICENSES): @@ -230,6 +298,14 @@ def detect_license_summary(content): return "UNKNOWN" +ACCEPTED_LICENSES = [ + "Apache-2.0", + "MIT", + "BSD-4-Clause", + "BSD-3-Clause", + "BSD-2-Clause", + "MPL-2.0", +] SKIP_NOTICE = [] if __name__ == "__main__": @@ -246,6 +322,9 @@ def detect_license_summary(content): help="Output to a csv file") parser.add_argument("-e", "--excludes", default=["dev-tools", "build"], help="List of top directories to exclude") + # no need to be generic for now, no other transitive dependency information available + parser.add_argument("--beats-origin", type=argparse.FileType('r'), + help="path to beats vendor.json") parser.add_argument("-s", "--skip-notice", default=[], help="List of NOTICE files to skip") args = parser.parse_args() @@ -273,7 +352,18 @@ def detect_license_summary(content): if exclude in dirs: dirs.remove(exclude) - print("Get the licenses available from {}".format(vendor_dirs)) - create_notice(notice, args.beat, args.copyright, vendor_dirs, args.csvfile) + overrides = {} # revision overrides only for now + if args.beats_origin: + govendor = json.load(args.beats_origin) + overrides = {package['path']: package for package in govendor["package"]} - print("Available at {}".format(notice)) + print("Get the licenses available from {}".format(vendor_dirs)) + 
check_all_have_license_files(vendor_dirs) + dependencies = create_notice(notice, args.beat, args.copyright, vendor_dirs, args.csvfile, overrides=overrides) + + # check that all licenses are accepted + for _, deps in dependencies.items(): + for dep in deps: + if dep["license_summary"] not in ACCEPTED_LICENSES: + raise Exception("Dependency {} has invalid license {}" + .format(dep["path"], dep["license_summary"])) diff --git a/vendor/github.com/elastic/beats/dev-tools/jenkins_ci.ps1 b/vendor/github.com/elastic/beats/dev-tools/jenkins_ci.ps1 index 35e7ee8d..4db34c03 100755 --- a/vendor/github.com/elastic/beats/dev-tools/jenkins_ci.ps1 +++ b/vendor/github.com/elastic/beats/dev-tools/jenkins_ci.ps1 @@ -36,13 +36,14 @@ exec { go get -u github.com/jstemmer/go-junit-report } echo "Building $env:beat" exec { go build } "Build FAILURE" +# always build the libbeat fields +cp ..\libbeat\_meta\fields.common.yml ..\libbeat\_meta\fields.generated.yml +cat ..\libbeat\processors\*\_meta\fields.yml | Out-File -append -encoding UTF8 -filepath ..\libbeat\_meta\fields.generated.yml +cp ..\libbeat\_meta\fields.generated.yml ..\libbeat\fields.yml + if ($env:beat -eq "metricbeat") { cp .\_meta\fields.common.yml .\_meta\fields.generated.yml python .\scripts\fields_collector.py | out-file -append -encoding UTF8 -filepath .\_meta\fields.generated.yml -} elseif ($env:beat -eq "libbeat") { - cp .\_meta\fields.common.yml .\_meta\fields.generated.yml - cat processors\*\_meta\fields.yml | Out-File -append -encoding UTF8 -filepath .\_meta\fields.generated.yml - cp .\_meta\fields.generated.yml .\fields.yml } echo "Unit testing $env:beat" diff --git a/vendor/github.com/elastic/beats/dev-tools/open_pr b/vendor/github.com/elastic/beats/dev-tools/open_pr index b98599ca..1bde2d38 100755 --- a/vendor/github.com/elastic/beats/dev-tools/open_pr +++ b/vendor/github.com/elastic/beats/dev-tools/open_pr @@ -2,6 +2,7 @@ """Open a PR from the current branch""" import sys +import os import argparse import requests import re @@ -54,6 +55,11 @@ def main(): if args.wip: lables += "in progress" + # get version and set a version label on the original PR + version = get_version(os.getcwd()) + if version: + labels.append("v" + version) + print("Branch: {}".format(args.branch)) print("Remote: {}".format(args.remote)) print("Local branch: {}".format(local_branch)) @@ -98,5 +104,14 @@ def main(): print("Please go and review it for the message and labels.") +def get_version(beats_dir): + pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P<version>.*)"') + with open(os.path.join(beats_dir, "libbeat/version/version.go"), "r") as f: + for line in f: + match = pattern.match(line) + if match: + return match.group('version') + + if __name__ == "__main__": sys.exit(main()) diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/Makefile b/vendor/github.com/elastic/beats/dev-tools/packer/Makefile index 542e0171..1c157d5d 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/Makefile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/Makefile @@ -12,32 +12,32 @@ beat_abspath=${BEATS_GOPATH}/src/${BEAT_PATH} %/deb: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image echo Creating DEB packages for $(@D) - ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh - ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh + ARCH=386 BEAT=$(@D) 
BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh + ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/debian/build.sh %/rpm: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64 fpm-image echo Creating RPM packages for $(@D) - ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh - ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh + ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh + ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/centos/build.sh %/darwin: echo Creating Darwin packages for $(@D) - ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/darwin/build.sh + ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/darwin/build.sh %/win: echo Creating Darwin packages for $(@D) - ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh - ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh + ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh + ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/windows/build.sh %/bin: echo Creating Linux packages for $(@D) - ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh - ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh + ARCH=386 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh + ARCH=amd64 BEAT=$(@D) BUILD_DIR=${BUILD_DIR} UPLOAD_DIR=${UPLOAD_DIR} BEAT_PATH=$(beat_abspath) BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/binary/build.sh .PHONY: package-dashboards package-dashboards: echo Creating the Dashboards package - BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/dashboards/build.sh + UPLOAD_DIR=${UPLOAD_DIR} BUILDID=$(BUILDID) SNAPSHOT=$(SNAPSHOT) $(packer_absdir)/platforms/dashboards/build.sh .PHONY: deps deps: @@ -61,11 +61,11 @@ go-daemon-image: ${BUILD_DIR}/god-linux-386 ${BUILD_DIR}/god-linux-amd64: docker run --rm -v ${BUILD_DIR}:/build tudorg/go-daemon -${BUILD_DIR}/upload: - mkdir -p ${BUILD_DIR}/upload +${UPLOAD_DIR}: + mkdir -p ${UPLOAD_DIR} -${BUILD_DIR}/upload/build_id.txt: - echo $(BUILDID) > 
${BUILD_DIR}/upload/build_id.txt +${UPLOAD_DIR}/build_id.txt: + echo $(BUILDID) > ${UPLOAD_DIR}/build_id.txt # Build the image required for package-upload. .PHONY: deb-rpm-s3 diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/Dockerfile index 9d87bb57..ce4f8922 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/Dockerfile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/Dockerfile @@ -1,4 +1,4 @@ -FROM tudorg/xgo-deb7-1.9.2 +FROM tudorg/xgo-deb7-1.9.4 MAINTAINER Tudor Golubenco diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/gopacket_pcap.patch b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/gopacket_pcap.patch index 12ed61d1..cc7592b7 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/gopacket_pcap.patch +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/beats-builder/gopacket_pcap.patch @@ -7,10 +7,10 @@ index f5612e6..0c77efa 100644 /* -#cgo linux LDFLAGS: -lpcap -+#cgo linux,386 CFLAGS: -I /libpcap/i386/usr/include/ -+#cgo linux,386 LDFLAGS: /libpcap/i386/usr/lib/libpcap.a -+#cgo linux,amd64 CFLAGS: -I /libpcap/amd64/libpcap-1.8.1 -+#cgo linux,amd64 LDFLAGS: /libpcap/amd64/libpcap-1.8.1/libpcap.a ++#cgo linux,386 CFLAGS: -I/libpcap/i386/usr/include/ ++#cgo linux,386 LDFLAGS: -L/libpcap/i386/usr/lib/ -lpcap ++#cgo linux,amd64 CFLAGS: -I/libpcap/amd64/libpcap-1.8.1 ++#cgo linux,amd64 LDFLAGS: -L/libpcap/amd64/libpcap-1.8.1 -lpcap #cgo freebsd LDFLAGS: -lpcap #cgo openbsd LDFLAGS: -lpcap #cgo darwin LDFLAGS: -lpcap @@ -18,9 +18,9 @@ index f5612e6..0c77efa 100644 -#cgo windows CFLAGS: -I C:/WpdPack/Include -#cgo windows,386 LDFLAGS: -L C:/WpdPack/Lib -lwpcap -#cgo windows,amd64 LDFLAGS: -L C:/WpdPack/Lib/x64 -lwpcap -+#cgo windows CFLAGS: -I /libpcap/win/WpdPack/Include -+#cgo windows,386 LDFLAGS: -L /libpcap/win/WpdPack/Lib -lwpcap -+#cgo windows,amd64 LDFLAGS: -L /libpcap/win/WpdPack/Lib/x64 -lwpcap ++#cgo windows CFLAGS: -I/libpcap/win/WpdPack/Include ++#cgo windows,386 LDFLAGS: -L/libpcap/win/WpdPack/Lib -lwpcap ++#cgo windows,amd64 LDFLAGS: -L/libpcap/win/WpdPack/Lib/x64 -lwpcap #include #include diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/build.sh index 7eee43ce..16676c29 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/build.sh @@ -2,5 +2,5 @@ cp -r ../../../vendor/gopkg.in/yaml.v2 beats-builder/yaml.v2 cp -r ../../../vendor/github.com/tsg/gotpl beats-builder/gotpl docker build --rm=true -t tudorg/xgo-deb7-base base/ && \ - docker build --rm=true -t tudorg/xgo-deb7-1.9.2 go-1.9.2/ && + docker build --rm=true -t tudorg/xgo-deb7-1.9.4 go-1.9.4/ && docker build --rm=true -t tudorg/beats-builder-deb7 beats-builder diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/go-1.9.2/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/go-1.9.4/Dockerfile similarity index 69% rename from vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/go-1.9.2/Dockerfile rename to 
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/go-1.9.4/Dockerfile index b74bb87a..2db357c9 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/go-1.9.2/Dockerfile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb7/go-1.9.4/Dockerfile @@ -1,4 +1,4 @@ -# Go cross compiler (xgo): Go 1.9.2 layer +# Go cross compiler (xgo): Go 1.9.4 layer # Copyright (c) 2014 Péter Szilágyi. All rights reserved. # # Released under the MIT license. @@ -9,7 +9,7 @@ MAINTAINER Tudor Golubenco # Configure the root Go distribution and bootstrap based on it RUN \ - export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz" && \ - export ROOT_DIST_SHA1="94c889e039e3d2e94ed95e8f8cb747c5bc1c2b58" && \ + export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz" && \ + export ROOT_DIST_SHA1="ed1bd37c356338a5a04923c183931a96687f202e" && \ \ $BOOTSTRAP_PURE diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/Dockerfile index ceadbb30..81ea1eb4 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/Dockerfile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/Dockerfile @@ -19,7 +19,10 @@ RUN chmod +x $FETCH # Make sure apt-get is up to date and dependent packages are installed +# XXX: The first line is a workaround for the "Sum hash mismatch" error, from here: +# https://askubuntu.com/questions/760574/sudo-apt-get-update-failes-due-to-hash-sum-mismatch RUN \ + apt-get clean && \ apt-get update && \ apt-get install -y automake autogen build-essential ca-certificates \ gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libc6-dev-armel-cross \ diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh index 7c58e63d..50adedb1 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/build.sh @@ -105,7 +105,14 @@ fi if [ "$FLAG_V" == "true" ]; then V=-v; fi if [ "$FLAG_RACE" == "true" ]; then R=-race; fi -if [ "$STATIC" == "true" ]; then LDARGS=--ldflags\ \'-extldflags\ \"-static\"\'; fi + +# exactly one -ldflags allowed +LDFLAGS_STATIC="" +if [ "$STATIC" == "true" ]; then LDFLAGS_STATIC='-extldflags "-static"'; fi +NOW=$(date -u '+%Y-%m-%dT%H:%M:%SZ') +LDFLAGS_VERSION="-X=github.com/elastic/beats/libbeat/version.buildTime=${NOW} -X=github.com/elastic/beats/libbeat/version.commit=${BUILDID}" +LDFLAGS_VENDOR_VERSION="-X=${BEAT_PATH}/vendor/github.com/elastic/beats/libbeat/version.buildTime=${NOW} -X=${BEAT_PATH}/vendor/github.com/elastic/beats/libbeat/version.commit=${BUILDID}" +LDFLAGS="${LDFLAGS_VERSION} ${LDFLAGS_VENDOR_VERSION} ${LDFLAGS_STATIC}" if [ -n $BEFORE_BUILD ]; then chmod +x /scripts/$BEFORE_BUILD @@ -132,20 +139,20 @@ for TARGET in $TARGETS; do export PKG_CONFIG_PATH=/usr/aarch64-linux-gnu/lib/pkgconfig GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o /build/$NAME-linux-amd64$R ./$PACK" + sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R -ldflags=\"${LDFLAGS}\" -o /build/$NAME-linux-amd64$R ./$PACK" fi if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." 
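Both `go-1.9.4` Dockerfiles pin the Go 1.9.4 tarball to a SHA1 digest before bootstrapping. A hedged sketch of how such a pin can be checked outside the image, with the URL and digest taken from the Dockerfile hunks above (Python 3 assumed):

[source,python]
----
import hashlib
import urllib.request

ROOT_DIST = "https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz"
ROOT_DIST_SHA1 = "ed1bd37c356338a5a04923c183931a96687f202e"

def sha1_of_url(url):
    """Stream the tarball and return its SHA1 hex digest."""
    digest = hashlib.sha1()
    with urllib.request.urlopen(url) as resp:
        for chunk in iter(lambda: resp.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha1_of_url(ROOT_DIST)
    assert actual == ROOT_DIST_SHA1, "unexpected digest: {}".format(actual)
----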
] || [ $XGOARCH == "386" ]); then echo "Compiling $PACK for linux/386..." CFLAGS=-m32 CXXFLAGS=-m32 LDFLAGS=-m32 HOST=i686-linux PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V $R $LDARGS -o /build/$NAME-linux-386$R ./$PACK" + sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V $R -ldflags=\"${LDFLAGS}\" -o /build/$NAME-linux-386$R ./$PACK" fi if ([ $XGOOS == "." ] || [ $XGOOS == "linux" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "arm" ]); then echo "Compiling $PACK for linux/arm..." CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ HOST=arm-linux PREFIX=/usr/local/arm $BUILD_DEPS /deps $LIST_DEPS CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go get -d ./$PACK - CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go build $V -o /build/$NAME-linux-arm ./$PACK + CC=arm-linux-gnueabi-gcc CXX=rm-linux-gnueabi-g++ GOOS=linux GOARCH=arm CGO_ENABLED=${CGO_ENABLED} GOARM=5 go build $V -ldflags="${LDFLAGS}" -o /build/$NAME-linux-arm ./$PACK fi # Check and build for Windows targets @@ -169,7 +176,7 @@ for TARGET in $TARGETS; do export PKG_CONFIG_PATH=/usr/x86_64-w64-mingw32/lib/pkgconfig CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK - CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V $R -o /build/$NAME-windows-amd64$R.exe ./$PACK + CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ GOOS=windows GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V $R -ldflags="${LDFLAGS}" -o /build/$NAME-windows-amd64$R.exe ./$PACK fi if [ $XGOARCH == "." ] || [ $XGOARCH == "386" ]; then @@ -178,7 +185,7 @@ for TARGET in $TARGETS; do export PKG_CONFIG_PATH=/usr/i686-w64-mingw32/lib/pkgconfig CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go get -d ./$PACK - CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V -o /build/$NAME-windows-386.exe ./$PACK + CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ GOOS=windows GOARCH=386 CGO_ENABLED=${CGO_ENABLED} CGO_CFLAGS="$CGO_NTDEF" CGO_CXXFLAGS="$CGO_NTDEF" go build $V -ldflags="${LDFLAGS}" -o /build/$NAME-windows-386.exe ./$PACK fi fi @@ -187,13 +194,13 @@ for TARGET in $TARGETS; do echo "Compiling $PACK for darwin/amd64..." CC=o64-clang CXX=o64-clang++ HOST=x86_64-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -ldflags=-s $V $R -o /build/$NAME-darwin-amd64$R ./$PACK + CC=o64-clang CXX=o64-clang++ GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build $V $R -ldflags="-s ${LDFLAGS}" -o /build/$NAME-darwin-amd64$R ./$PACK fi if ([ $XGOOS == "." ] || [ $XGOOS == "darwin" ]) && ([ $XGOARCH == "." ] || [ $XGOARCH == "386" ]); then echo "Compiling for darwin/386..." 
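In `xgo-image/base/build.sh`, the single `-extldflags "-static"` argument is replaced by one combined `LDFLAGS` value that also stamps `version.buildTime` and `version.commit` via `-X`, once for the libbeat package and once for its vendored copy under the beat's own path. The shell assembles it by string concatenation; the same construction in Python, for illustration only (the beat path and build id below are made up):

[source,python]
----
import datetime

def make_ldflags(beat_path, build_id, static=False):
    """Build the single -ldflags value used by go build in build.sh."""
    now = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    version_pkg = "github.com/elastic/beats/libbeat/version"
    vendored_pkg = "{}/vendor/{}".format(beat_path, version_pkg)
    flags = [
        "-X={}.buildTime={}".format(version_pkg, now),
        "-X={}.commit={}".format(version_pkg, build_id),
        "-X={}.buildTime={}".format(vendored_pkg, now),
        "-X={}.commit={}".format(vendored_pkg, build_id),
    ]
    if static:
        flags.append('-extldflags "-static"')
    # go build accepts exactly one -ldflags argument, hence the single joined string.
    return " ".join(flags)

print(make_ldflags("github.com/example/examplebeat", "deadbeef", static=True))
----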
CC=o32-clang CXX=o32-clang++ HOST=i386-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps $LIST_DEPS CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go get -d ./$PACK - CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V -o /build/$NAME-darwin-386 ./$PACK + CC=o32-clang CXX=o32-clang++ GOOS=darwin GOARCH=386 CGO_ENABLED=${CGO_ENABLED} go build $V -ldflags="${LDFLAGS}" -o /build/$NAME-darwin-386 ./$PACK fi done diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile index c1c0dfb0..8c0bd2c6 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/beats-builder/Dockerfile @@ -1,4 +1,4 @@ -FROM tudorg/xgo-1.9.2 +FROM tudorg/xgo-1.9.4 MAINTAINER Tudor Golubenco diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh index f5ba7887..c4b48e5a 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/build.sh @@ -3,5 +3,5 @@ cp -r ../../../vendor/gopkg.in/yaml.v2 beats-builder/yaml.v2 cp -r ../../../vendor/github.com/tsg/gotpl beats-builder/gotpl docker pull tudorg/xgo-base:v20180222 && \ - docker build --rm=true -t tudorg/xgo-1.9.2 go-1.9.2/ && + docker build --rm=true -t tudorg/xgo-1.9.4 go-1.9.4/ && docker build --rm=true -t tudorg/beats-builder beats-builder diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.9.2/Dockerfile b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.9.4/Dockerfile similarity index 69% rename from vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.9.2/Dockerfile rename to vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.9.4/Dockerfile index 28a60254..c3ed621a 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.9.2/Dockerfile +++ b/vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/go-1.9.4/Dockerfile @@ -1,4 +1,4 @@ -# Go cross compiler (xgo): Go 1.9.2 layer +# Go cross compiler (xgo): Go 1.9.4 layer # Copyright (c) 2014 Péter Szilágyi. All rights reserved. # # Released under the MIT license. 
@@ -9,7 +9,7 @@ MAINTAINER Tudor Golubenco # Configure the root Go distribution and bootstrap based on it RUN \ - export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz" && \ - export ROOT_DIST_SHA1="94c889e039e3d2e94ed95e8f8cb747c5bc1c2b58" && \ + export ROOT_DIST="https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz" && \ + export ROOT_DIST_SHA1="ed1bd37c356338a5a04923c183931a96687f202e" && \ \ $BOOTSTRAP_PURE diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/build.sh index f968cbaf..6603b810 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/build.sh @@ -12,7 +12,7 @@ cat ${BUILD_DIR}/package.yml ${ARCHDIR}/archs/$ARCH.yml > ${BUILD_DIR}/settings- gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh chmod +x ${BUILD_DIR}/run-$runid.sh -docker run --rm -v ${BUILD_DIR}:/build \ +docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \ -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \ tudorg/fpm /build/run-$runid.sh diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 index cec09ba8..f057dcbf 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/binary/run.sh.j2 @@ -10,18 +10,20 @@ if [ "$SNAPSHOT" = "yes" ]; then VERSION="${VERSION}-SNAPSHOT" fi +BEATS_YML_NAME="{{.beat_name}}-linux-{{.arch}}" +[ -f "${BEATS_YML_NAME}.yml" ] || BEATS_YML_NAME="{{.beat_name}}-linux" + mkdir /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}} cp -a homedir/. 
/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/ cp {{.beat_name}}-linux-{{.arch}} /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}} -cp {{.beat_name}}-linux.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.yml -cp {{.beat_name}}-linux.reference.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.reference.yml +cp ${BEATS_YML_NAME}.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.yml +cp ${BEATS_YML_NAME}.reference.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/{{.beat_name}}.reference.yml cp fields.yml /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/ cp -a modules.d-linux/ /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}/modules.d || true -mkdir -p upload -tar czvf upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}} -echo "Created upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz" +tar czvf /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz /{{.beat_name}}-${VERSION}-linux-{{.bin_arch}} +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}--${VERSION}-linux-{{.bin_arch}}.tar.gz" -cd upload -sha512sum {{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz > {{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512 -echo "Created upload/{{.beat_name}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512" +cd /upload +sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512 +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-linux-{{.bin_arch}}.tar.gz.sha512" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/build.sh index 57d02fb5..6fcd4fc9 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/build.sh @@ -16,7 +16,7 @@ gotpl ${BASEDIR}/systemd.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/$r gotpl ${BASEDIR}/beatname.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/beatname-$runid.sh chmod +x ${BUILD_DIR}/beatname-$runid.sh -docker run --rm -v ${BUILD_DIR}:/build \ +docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \ -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \ tudorg/fpm /build/run-$runid.sh diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 index baac361f..0ea1bba8 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/centos/run.sh.j2 @@ -17,13 +17,16 @@ if [ "$SNAPSHOT" = "yes" ]; then VERSION="${VERSION}-SNAPSHOT" fi +BEATS_YML_NAME="{{.beat_name}}-linux-{{.arch}}" +[ -f "${BEATS_YML_NAME}.yml" ] || BEATS_YML_NAME="{{.beat_name}}-linux" + # fpm replaces - with _ in the version RPM_VERSION=`echo ${VERSION} | sed 's/-/_/g'` # create rpm FPM_ARGS=( --force -s dir -t rpm - -n {{.beat_pkg_name}} -v ${RPM_VERSION} + -n {{.beat_pkg_name}}{{.beat_pkg_suffix}} -v ${RPM_VERSION} --architecture {{.rpm_arch}} --vendor "{{.beat_vendor}}" --license "{{.beat_license}}" @@ -35,8 +38,8 @@ FPM_ARGS=( homedir/=/usr/share/{{.beat_name}} beatname-${RUNID}.sh=/usr/bin/{{.beat_name}} {{.beat_name}}-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}} - 
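The platform templates (binary, RPM, DEB, Windows, dashboards) now write their artifacts directly into the shared `/upload` volume that the build scripts mount from `${UPLOAD_DIR}`, fold `{{.beat_pkg_suffix}}` into the file name, and place a `.sha512` file next to each artifact. A rough sketch of that checksum step, using a hypothetical artifact name; the templates themselves simply run `sha512sum` from inside `/upload`:

[source,python]
----
import hashlib
from pathlib import Path

def write_sha512_sidecar(artifact):
    """Write <artifact>.sha512 in sha512sum format: digest, two spaces, bare file name."""
    artifact = Path(artifact)
    digest = hashlib.sha512(artifact.read_bytes()).hexdigest()  # fine for package-sized files
    sidecar = artifact.with_name(artifact.name + ".sha512")
    sidecar.write_text("{}  {}\n".format(digest, artifact.name))
    return sidecar

# Hypothetical example following the new naming convention:
# write_sha512_sidecar("/upload/examplebeat-6.3.3-amd64.deb")
----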
{{.beat_name}}-linux.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml - {{.beat_name}}-linux.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml + ${BEATS_YML_NAME}.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml + ${BEATS_YML_NAME}.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml fields.yml=/etc/{{.beat_name}}/fields.yml ${RUNID}.service=/lib/systemd/system/{{.beat_pkg_name}}.service god-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}}-god @@ -49,11 +52,10 @@ fi fpm "${FPM_ARGS[@]}" # rename so that the filename respects semver rules -mkdir -p upload -mv {{.beat_pkg_name}}-${RPM_VERSION}-1.{{.rpm_arch}}.rpm upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm -echo "Created upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm" +mv {{.beat_pkg_name}}{{.beat_pkg_suffix}}-${RPM_VERSION}-1.{{.rpm_arch}}.rpm /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm" # create sha512 file -cd upload -sha512sum {{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm > {{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm.sha512 -echo "Created upload/{{.beat_name}}-${VERSION}-{{.rpm_arch}}.rpm.sha512" +cd /upload +sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm.sha512 +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.rpm_arch}}.rpm.sha512" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/build.sh index 78d23025..d0340f26 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/build.sh @@ -12,7 +12,7 @@ cat ${BUILD_DIR}/package.yml ${ARCHDIR}/archs/$ARCH.yml > ${BUILD_DIR}/settings- gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh chmod +x ${BUILD_DIR}/run-$runid.sh -docker run --rm -v ${BUILD_DIR}:/build \ +docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \ -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \ tudorg/fpm /build/run-$runid.sh diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 index 0238950c..42a48220 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/darwin/run.sh.j2 @@ -18,10 +18,9 @@ cp {{.beat_name}}-darwin.reference.yml /{{.beat_name}}-${VERSION}-darwin-x86_64/ cp fields.yml /{{.beat_name}}-${VERSION}-darwin-x86_64/ cp -a modules.d-darwin/ /{{.beat_name}}-${VERSION}-darwin-x86_64/modules.d || true -mkdir -p upload -tar czvf upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz /{{.beat_name}}-${VERSION}-darwin-x86_64 -echo "Created upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz" +tar czvf /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz /{{.beat_name}}-${VERSION}-darwin-x86_64 +echo "Created /upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz" -cd upload -sha512sum {{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz > {{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz.sha512 -echo "Created upload/{{.beat_name}}-${VERSION}-darwin-x86_64.tar.gz.sha512" +cd /upload +sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz > 
{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz.sha512 +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-darwin-x86_64.tar.gz.sha512" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/build.sh index 8051fbb2..8d427f1c 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/build.sh @@ -11,7 +11,7 @@ cat ${ARCHDIR}/version.yml > ${BUILD_DIR}/settings-$runid.yml gotpl ${BASEDIR}/run.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/run-$runid.sh chmod +x ${BUILD_DIR}/run-$runid.sh -docker run --rm -v ${BUILD_DIR}:/build \ +docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \ -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid -e BEAT_NAME=$BEAT_NAME \ tudorg/fpm /build/run-$runid.sh diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2 index 494a9b20..96d272aa 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/dashboards/run.sh.j2 @@ -14,10 +14,9 @@ mkdir /${BEAT_NAME:-beats}-dashboards-${VERSION} cp -a dashboards/. /${BEAT_NAME:-beats}-dashboards-${VERSION}/ echo "$BUILDID" > /${BEAT_NAME:-beats}-dashboards-${VERSION}/.build_hash.txt -mkdir -p upload -zip -r upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip /${BEAT_NAME:-beats}-dashboards-${VERSION} -echo "Created upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip" +zip -r /upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip /${BEAT_NAME:-beats}-dashboards-${VERSION} +echo "Created /upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip" -cd upload +cd /upload sha512sum ${BEAT_NAME:-beats}-dashboards-${VERSION}.zip > ${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha512 -echo "Created upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha512" +echo "Created /upload/${BEAT_NAME:-beats}-dashboards-${VERSION}.zip.sha512" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/build.sh index 0093a15a..be79fac7 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/build.sh @@ -16,7 +16,7 @@ gotpl ${BASEDIR}/systemd.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/$r gotpl ${BASEDIR}/beatname.sh.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/beatname-$runid.sh chmod +x ${BUILD_DIR}/beatname-$runid.sh -docker run --rm -v ${BUILD_DIR}:/build \ +docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \ -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \ tudorg/fpm /build/run-$runid.sh diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 index 6dcc1c0d..a88afd1e 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/debian/run.sh.j2 @@ -17,12 +17,15 @@ if [ "$SNAPSHOT" = "yes" ]; then VERSION="${VERSION}-SNAPSHOT" fi +BEATS_YML_NAME="{{.beat_name}}-linux-{{.arch}}" +[ -f "${BEATS_YML_NAME}.yml" ] || 
BEATS_YML_NAME="{{.beat_name}}-linux" + # create deb FPM_ARGS=( --force -s dir -t deb - -n {{.beat_pkg_name}} -v ${VERSION} + -n {{.beat_pkg_name}}{{.beat_pkg_suffix}} -v ${VERSION} --vendor "{{.beat_vendor}}" - --license "{{.beat_license}}" + --license $(echo {{.beat_license}} | tr " " "-") --architecture {{.deb_arch}} --description "{{.beat_description}}" --url {{.beat_url}} @@ -32,8 +35,8 @@ FPM_ARGS=( homedir/=/usr/share/{{.beat_name}} beatname-${RUNID}.sh=/usr/bin/{{.beat_name}} {{.beat_name}}-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}} - {{.beat_name}}-linux.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml - {{.beat_name}}-linux.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml + ${BEATS_YML_NAME}.yml=/etc/{{.beat_name}}/{{.beat_name}}.yml + ${BEATS_YML_NAME}.reference.yml=/etc/{{.beat_name}}/{{.beat_name}}.reference.yml fields.yml=/etc/{{.beat_name}}/fields.yml ${RUNID}.service=/lib/systemd/system/{{.beat_pkg_name}}.service god-linux-{{.arch}}=/usr/share/{{.beat_name}}/bin/{{.beat_name}}-god @@ -46,11 +49,10 @@ fi fpm "${FPM_ARGS[@]}" # move and rename to use the elastic conventions -mkdir -p upload -mv {{.beat_pkg_name}}_${VERSION}_{{.deb_arch}}.deb upload/{{.beat_name}}-${VERSION}-{{.deb_arch}}.deb -echo "Created upload/{{.beat_name}}-${VERSION}-{{.deb_arch}}.deb" +mv {{.beat_pkg_name}}{{.beat_pkg_suffix}}_${VERSION}_{{.deb_arch}}.deb /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb" # create sha512 file -cd upload -sha512sum {{.beat_name}}-${VERSION}-{{.deb_arch}}.deb > {{.beat_name}}-${VERSION}-{{.deb_arch}}.deb.sha512 -echo "Created upload/{{.beat_name}}-${VERSION}-{{.deb_arch}}.deb.sha512" +cd /upload +sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb.sha512 +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-{{.deb_arch}}.deb.sha512" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/build.sh index c52a9e23..8b5e169e 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/build.sh @@ -14,7 +14,7 @@ gotpl ${BASEDIR}/install-service.ps1.j2 < ${BUILD_DIR}/settings-$runid.yml > ${B gotpl ${BASEDIR}/uninstall-service.ps1.j2 < ${BUILD_DIR}/settings-$runid.yml > ${BUILD_DIR}/uninstall-service-$BEAT.ps1 chmod +x ${BUILD_DIR}/run-$runid.sh -docker run --rm -v ${BUILD_DIR}:/build \ +docker run --rm -v ${BUILD_DIR}:/build -v ${UPLOAD_DIR}:/upload \ -e BUILDID=$BUILDID -e SNAPSHOT=$SNAPSHOT -e RUNID=$runid \ tudorg/fpm /build/run-$runid.sh diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/install-service.ps1.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/install-service.ps1.j2 index 556d7a4e..e95e4311 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/install-service.ps1.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/install-service.ps1.j2 @@ -11,4 +11,4 @@ $workdir = Split-Path $MyInvocation.MyCommand.Path # create new service New-Service -name {{.beat_name}} ` -displayName {{.beat_name}} ` - -binaryPathName "`"$workdir\\{{.beat_name}}.exe`" -c `"$workdir\\{{.beat_name}}.yml`" -path.home `"$workdir`" -path.data 
`"C:\\ProgramData\\{{.beat_name}}`" -path.logs `"C:\\ProgramData\\{{.beat_name}}\logs`"" + -binaryPathName "`"$workdir\{{.beat_name}}.exe`" -c `"$workdir\{{.beat_name}}.yml`" -path.home `"$workdir`" -path.data `"C:\ProgramData\{{.beat_name}}`" -path.logs `"C:\ProgramData\{{.beat_name}}\logs`"" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 index b7c2258e..d57fb15e 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 +++ b/vendor/github.com/elastic/beats/dev-tools/packer/platforms/windows/run.sh.j2 @@ -21,10 +21,9 @@ cp -a modules.d-win/ /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/modules.d cp install-service-{{.beat_name}}.ps1 /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/ cp uninstall-service-{{.beat_name}}.ps1 /{{.beat_name}}-${VERSION}-windows-{{.win_arch}}/ -mkdir -p upload -zip -r upload/{{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip /{{.beat_name}}-${VERSION}-windows-{{.win_arch}} -echo "Created upload/{{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip" +zip -r /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip /{{.beat_name}}-${VERSION}-windows-{{.win_arch}} +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip" -cd upload -sha512sum {{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip > {{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip.sha512 -echo "Created upload/{{.beat_name}}-${VERSION}-windows-{{.win_arch}}.zip.sha512" +cd /upload +sha512sum {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip > {{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip.sha512 +echo "Created /upload/{{.beat_name}}{{.beat_pkg_suffix}}-${VERSION}-windows-{{.win_arch}}.zip.sha512" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml index 7aaf182b..11ec50de 100644 --- a/vendor/github.com/elastic/beats/dev-tools/packer/version.yml +++ b/vendor/github.com/elastic/beats/dev-tools/packer/version.yml @@ -1 +1 @@ -version: "6.2.4" +version: "6.3.3" diff --git a/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh b/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh index e54bead9..6d746678 100755 --- a/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh +++ b/vendor/github.com/elastic/beats/dev-tools/packer/xgo-scripts/before_build.sh @@ -18,6 +18,7 @@ cp fields.yml $PREFIX/fields.yml # linux cp $BEAT_NAME.yml $PREFIX/$BEAT_NAME-linux.yml chmod 0600 $PREFIX/$BEAT_NAME-linux.yml +chmod 0600 $PREFIX/$BEAT_NAME-linux-386.yml || true cp $BEAT_NAME.reference.yml $PREFIX/$BEAT_NAME-linux.reference.yml rm -rf $PREFIX/modules.d-linux cp -r modules.d/ $PREFIX/modules.d-linux || true @@ -44,7 +45,7 @@ PREFIX=$PREFIX make before-build # Add data to the home directory mkdir -p $PREFIX/homedir -make install-home HOME_PREFIX=$PREFIX/homedir +make install-home HOME_PREFIX=$PREFIX/homedir LICENSE_FILE=${LICENSE_FILE} if [ -n "BUILDID" ]; then echo "$BUILDID" > $PREFIX/homedir/.build_hash.txt diff --git a/vendor/github.com/elastic/beats/dev-tools/promote_docs b/vendor/github.com/elastic/beats/dev-tools/promote_docs new file mode 100755 index 00000000..ab3dbcbe --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/promote_docs @@ -0,0 +1,35 @@ +#!/usr/bin/env python +import argparse 
+from subprocess import check_call + +def main(): + parser = argparse.ArgumentParser( + description="Used to promote doc version and branch. Doesn't commit changes.") + parser.add_argument("version", + help="The new docs version") + parser.add_argument("branch", + help="The new docs branch") + args = parser.parse_args() + version = args.version + branch = args.branch + + # make sure we have no dirty files in this branch (might throw off `make update`) + check_call("git clean -dfx", shell=True) + + # edit the file + with open("libbeat/docs/version.asciidoc", "r") as f: + lines = f.readlines() + for i, line in enumerate(lines): + if line.startswith(":stack-version:"): + lines[i] = ":stack-version: {}\n".format(version) + if line.startswith(":branch:"): + lines[i] = ":branch: {}\n".format(branch) + if line.startswith(":doc-branch:"): + lines[i] = ":doc-branch: {}\n".format(branch) + with open("libbeat/docs/version.asciidoc", "w") as f: + f.writelines(lines) + + check_call("make update", shell=True) + +if __name__ == "__main__": + main() diff --git a/vendor/github.com/elastic/beats/dev-tools/set_version b/vendor/github.com/elastic/beats/dev-tools/set_version index 44888caa..2607e397 100755 --- a/vendor/github.com/elastic/beats/dev-tools/set_version +++ b/vendor/github.com/elastic/beats/dev-tools/set_version @@ -13,7 +13,8 @@ goversion_template = '''package main const appVersion = "{version}" ''' -goversion_template_libbeat = '''package version +goversion_template_libbeat = '''// Code generated by dev-tools/set_version +package version const defaultBeatVersion = "{version}" ''' diff --git a/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc index e6d8b354..eab4ff52 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc @@ -16,14 +16,18 @@ The process for contributing to any of the Elastic repositories is similar. [[contribution-steps]] === Contribution Steps -. Please make sure you have signed our https://www.elastic.co/contributor-agreement/[Contributor License Agreement]. We are not asking you to assign -copyright to us, but to give us the right to distribute your code without -restriction. We ask this of all contributors in order to assure our users of the -origin and continuing existence of the code. You only need to sign the CLA once. - -. Send a pull request! Push your changes to your fork of the repository and https://help.github.com/articles/using-pull-requests[submit a pull request]. In +. Please make sure you have signed our +https://www.elastic.co/contributor-agreement/[Contributor License Agreement]. We +are not asking you to assign copyright to us, but to give us the right to +distribute your code without restriction. We ask this of all contributors in +order to assure our users of the origin and continuing existence of the code. +You only need to sign the CLA once. + +. Send a pull request! Push your changes to your fork of the repository and +https://help.github.com/articles/using-pull-requests[submit a pull request]. In the pull request, describe what your changes do and mention any bugs/issues -related to the pull request. Please also add a changelog entry to https://github.com/elastic/beats/blob/master/CHANGELOG.asciidoc[CHANGELOG.asciidoc]. +related to the pull request. Please also add a changelog entry to +https://github.com/elastic/beats/blob/master/CHANGELOG.asciidoc[CHANGELOG.asciidoc]. 
[float] [[adding-new-beat]] @@ -92,8 +96,9 @@ This command has the following dependencies: * Python >= {python} * https://virtualenv.pypa.io/en/latest/[virtualenv] for Python -Virtualenv can be installed with the command `easy_install virtualenv` or `pip install virtualenv`. -More details can be found https://virtualenv.pypa.io/en/latest/installation.html[here]. +Virtualenv can be installed with the command `easy_install virtualenv` or `pip +install virtualenv`. More details can be found +https://virtualenv.pypa.io/en/latest/installation.html[here]. [float] [[running-testsuite]] @@ -136,3 +141,10 @@ the govendor documentation on how to add or update vendored dependencies. In most cases `govendor fetch your/dependency@version +out` will get the job done. +[float] +[[changelog]] +=== Changelog + +To keep up to date with changes to the official Beats for community developers, +follow the developer changelog +https://github.com/elastic/beats/blob/master/CHANGELOG-developer.md[here]. diff --git a/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc index 24991084..ad6934c7 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc @@ -3,9 +3,12 @@ include::../../libbeat/docs/version.asciidoc[] +:dev-guide: true :beatname_lc: beatname :beatname_uc: a Beat +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] + include::../../libbeat/docs/shared-beats-attributes.asciidoc[] include::./contributing.asciidoc[] diff --git a/vendor/github.com/elastic/beats/docs/devguide/modules-dev-guide.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/modules-dev-guide.asciidoc index b7951f7b..902fa340 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/modules-dev-guide.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/modules-dev-guide.asciidoc @@ -18,25 +18,97 @@ example, the Nginx module has `access` and `error` filesets. You can contribute a new module (with at least one fileset), or a new fileset for an existing module. +NOTE: In this guide we use `{module}` and `{fileset}` as placeholders for the +module and fileset names. You need to replace these with the actual names you +entered when your created the module and fileset. Only use characters `[a-z]` and, if required, underscores (`_`). No other characters are allowed. + [float] -=== Creating a new fileset +=== Creating a new module -Regardless of whether you are creating a fileset in a new or existing module, -the procedure is similar. Run the following command in the `filebeat` folder: +Run the following command in the `filebeat` folder: [source,bash] ---- -make create-fileset MODULE={module} FILESET={fileset} +make create-module MODULE={module} ---- -Only use characters `[a-z]` and, if required, underscores (`_`). -No other characters are allowed. -For the module name, you can us either a new module name or an existing module -name. If the module doesn't exist, it will be created. +After running the `make create-module` command, you'll find the module, +along with its generated files, under `module/{module}`. This +directory contains the following files: + +[source,bash] +---- +module/{module} +├── module.yml +└── _meta +    └── docs.asciidoc +    └── fields.yml +    └── kibana +---- -NOTE: In this guide we use `{fileset}` and `{module}` as placeholders for the -fileset and module names. 
You need to replace these with the actual names you -entered when your created the module and fileset. +Let's look at these files one by one. + +[float] +==== module.yml + +This file contains the list of all dashboards available for the module and is used by the `export_dashboards.go` script for exporting dashboards. +Each dashboard is defined by an id and the name of the json file where the dashboard is saved locally. +When a new fileset is generated, this file is automatically updated with the "default" dashboard settings for the new fileset. +Please ensure that these settings are correct. + +[float] +==== _meta/docs.asciidoc + +This file contains module-specific documentation. You should include information +about which versions of the service were tested and the variables that are +defined in each fileset. + +[float] +==== _meta/fields.yml + +The module level `fields.yml` contains descriptions for the module-level fields. +Please review and update the title and the descriptions in this file. The title +is used as a title in the docs, so it's best to capitalize it. + +[float] +==== _meta/kibana + +This folder contains the sample Kibana dashboards for this module. To create +them, you can build them visually in Kibana and then run the following command: + +[source,shell] +---- +$ cd dev-tools/cmd/dashboards +$ make # if export_dashboard is not built +$ ./export_dashboards -dashboard '{dashboard-id}' -output '../../../filebeat/module/{module}/_meta/kibana/default/dashboard' +---- + +New Filebeat modules might not be compatible with Kibana 5.x. To export dashboards that are compatible with 5.x, run the following command inside the developer virtualenv: + +[source,shell] +---- +$ cd filebeat +$ make python-env +$ cd module/{module}/ +$ python ../../../dev-tools/export_5x_dashboards.py --regex {module} --dir _meta/kibana/5.x +---- + +Where the `--regex` parameter should match the dashboard you want to export. + +Please note that dashboards exported from Kibana 5.x are not compatible with Kibana 6.x. + +You can find more details about the process of creating and exporting the Kibana +dashboards by reading {beatsdevguide}/new-dashboards.html[this guide]. + +[float] +=== Creating a new fileset + +Run the following command in the `filebeat` folder: + +[source,bash] +---- +make create-fileset MODULE={module} FILESET={fileset} +---- After running the `make create-fileset` command, you'll find the fileset, along with its generated files, under `module/{module}/{fileset}`. This @@ -104,8 +176,7 @@ There's quite a lot going on in this file, so let's break it down: element: `"/example/test.log*"`. * Note that variable values don't have to be strings. They can be also numbers, objects, or as shown in this example, arrays. -* We will use the `paths` variable to set the prospector - {filebeat}/configuration-filebeat-options.html#prospector-paths[paths] +* We will use the `paths` variable to set the input `paths` setting, so "glob" values can be used here. * Besides the `default` value, the file defines values for particular operating systems: a default for darwin/OS X/macOS systems and a default for @@ -114,13 +185,13 @@ There's quite a lot going on in this file, so let's break it down: Filebeat is executed on the respective OS.
Besides the variable definition, the `manifest.yml` file also contains -references to the ingest pipeline and prospector configuration to use (see next +references to the ingest pipeline and input configuration to use (see next sections): [source,yaml] ---- ingest_pipeline: ingest/pipeline.json -prospector: config/testfileset.yml +input: config/testfileset.yml ---- These should point to the respective files from the fileset. @@ -142,8 +213,8 @@ overridden at runtime.) [float] ==== config/*.yml -The `config/` folder contains template files that generate Filebeat prospector -configurations. The Filebeat prospectors are primarily responsible for tailing +The `config/` folder contains template files that generate Filebeat input +configurations. The Filebeat inputs are primarily responsible for tailing files, filtering, and multi-line stitching, so that's what you configure in the template files. @@ -161,12 +232,12 @@ exclude_files: [".gz$"] You'll find this example in the template file that gets generated automatically when you run `make create-fileset`. In this example, the `paths` variable is -used to construct the `paths` list for the {filebeat}/configuration-filebeat-options.html#prospector-paths[paths] option. +used to construct the `paths` list for the input `paths` option. Any template files that you add to the `config/` folder need to generate a valid -Filebeat prospector configuration in YAML format. The options accepted by the -prospector configuration are documented in the -{filebeat}/configuration-filebeat-options.html[Filebeat Prospectors] section of +Filebeat input configuration in YAML format. The options accepted by the +input configuration are documented in the +{filebeat}/configuration-filebeat-options.html[Filebeat Inputs] section of the Filebeat documentation. The template files use the templating language defined by the @@ -250,6 +321,9 @@ While developing the pipeline definition, we recommend making use of the {elasticsearch}/simulate-pipeline-api.html[Simulate Pipeline API] for testing and quick iteration. +By default Filebeat does not update Ingest pipelines if already loaded. If you want to force updating your pipeline +during development, use `--update-pipelines` flag. This uploads pipelines even if they are already available on the node. + [float] ==== _meta/fields.yml @@ -289,53 +363,3 @@ In addition, assuming you have a `test.log` file, you can add a documents as they are found via an Elasticsearch search. In this case, the integration tests will automatically check that the result is the same on each run. - -[float] -=== Module-level files - -Besides the files in the fileset folder, there is also data that needs to be -filled at the module level. - -[float] -==== _meta/docs.asciidoc - -This file contains module-specific documentation. You should include information -about which versions of the service were tested and the variables that are -defined in each fileset. - -[float] -==== _meta/fields.yml - -The module level `fields.yml` contains descriptions for the module-level fields. -Please review and update the title and the descriptions in this file. The title -is used as a title in the docs, so it's best to capitalize it. - -[float] -==== _meta/kibana - -This folder contains the sample Kibana dashboards for this module. 
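For the ingest pipeline section above, the guide recommends iterating with the Elasticsearch Simulate Pipeline API. A minimal sketch of such a call using `requests` (already a dependency of the dev tools), assuming a local Elasticsearch on port 9200 and an entirely hypothetical grok pipeline and sample document:

[source,python]
----
import json
import requests

body = {
    "pipeline": {
        "processors": [
            {"grok": {"field": "message", "patterns": ["%{COMMONAPACHELOG}"]}}
        ]
    },
    "docs": [
        {"_source": {"message": '127.0.0.1 - - [05/Feb/2018:10:10:10 +0000] "GET / HTTP/1.1" 200 123'}}
    ],
}

resp = requests.post(
    "http://localhost:9200/_ingest/pipeline/_simulate",
    headers={"Content-Type": "application/json"},
    data=json.dumps(body),
)
resp.raise_for_status()
print(json.dumps(resp.json(), indent=2))
----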
To create -them, you can build them visually in Kibana and then run the following command: - -[source,shell] ----- -$ cd dev-tools/cmd/dashboards -$ make # if export_dashboard is not built -$ ./export_dashboards -dashboard '{dashboard-id}' -output '../../../filebeat/module/{module}/_meta/kibana/default/dashboard' ----- - -New Filebeat modules might not be compatible with Kibana 5.x. To export dashboards that are compatible with 5.x, run the following command inside the developer virtualenv: - -[source,shell] ----- -$ cd filebeat -$ make python-env -$ cd module/{module}/ -$ python ../../../dev-tools/export_5x_dashboards.py --regex {module} --dir _meta/kibana/5.x ----- - -Where the `--regex` parameter should match the dashboard you want to export. - -Please note that dashboards exported from Kibana 5.x are not compatible with Kibana 6.x. - -You can find more details about the process of creating and exporting the Kibana -dashboards by reading {beatsdevguide}/new-dashboards.html[this guide]. diff --git a/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc index 8722713f..01c926ab 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc @@ -41,17 +41,16 @@ For general information about contributing to Beats, see <>. After you have https://golang.org/doc/install[installed Go] and set up the https://golang.org/doc/code.html#GOPATH[GOPATH] environment variable to point to -your preferred workspace location, a simple way of getting the source code for -Beats, including libbeat and the Beat generator, is to do: +your preferred workspace location, clone the Beats repository in the correct location +under `GOPATH`: [source,shell] ---------------------------------------------------------------------- -go get github.com/elastic/beats +mkdir -p ${GOPATH}/src/github.com/elastic +git clone https://github.com/elastic/beats ${GOPATH}/src/github.com/elastic/beats ---------------------------------------------------------------------- -When you run the command, all source files are downloaded to the -`$GOPATH/src/github.com/elastic/beats` path. You can ignore the "no buildable Go source files" message because -you will build the source later. By default `go get` fetches the master branch. To build your beat +To build your beat on a specific version of libbeat, check out the specific branch ({doc-branch} in the example below): ["source","sh",subs="attributes"] @@ -474,9 +473,9 @@ package main import ( "os" - "github.com/spf13/cobra" "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/cmd" "github.com/kimjmin/countbeat/beater" ) diff --git a/vendor/github.com/elastic/beats/docs/devguide/newdashboards.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/newdashboards.asciidoc index 8757b0dc..fb431cd5 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/newdashboards.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/newdashboards.asciidoc @@ -85,7 +85,7 @@ import only the dashboards, use the `--dashboards` flag: Starting with Beats 6.0.0, the dashboards are no longer loaded directly into Elasticsearch. Instead, they are imported directly into Kibana. 
Thus, if your Kibana instance is not listening on localhost, or you enabled -X-Pack for Kibana, you need to either configure the Kibana endpoint in +{xpack} for Kibana, you need to either configure the Kibana endpoint in the config for the Beat, or pass the Kibana host and credentials as arguments to the `setup` command. For example: diff --git a/vendor/github.com/elastic/beats/filebeat/Dockerfile b/vendor/github.com/elastic/beats/filebeat/Dockerfile index b5557a4f..fcd088da 100644 --- a/vendor/github.com/elastic/beats/filebeat/Dockerfile +++ b/vendor/github.com/elastic/beats/filebeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.9.2 +FROM golang:1.9.4 MAINTAINER Nicolas Ruflin RUN set -x && \ diff --git a/vendor/github.com/elastic/beats/filebeat/Makefile b/vendor/github.com/elastic/beats/filebeat/Makefile index 37594d83..af3acdd3 100644 --- a/vendor/github.com/elastic/beats/filebeat/Makefile +++ b/vendor/github.com/elastic/beats/filebeat/Makefile @@ -21,7 +21,7 @@ kibana: # Collects all module and dataset fields .PHONY: fields -fields: +fields: python-env @mkdir -p _meta/ @cp ${ES_BEATS}/filebeat/_meta/fields.common.yml _meta/fields.generated.yml @${PYTHON_ENV}/bin/python ${ES_BEATS}/metricbeat/scripts/fields_collector.py >> _meta/fields.generated.yml @@ -62,6 +62,11 @@ imports: python-env .PHONY: collect collect: fields kibana modules configs collect-docs imports +# Creates a new module. Requires the params MODULE +.PHONY: create-module +create-module: + @go run ${ES_BEATS}/filebeat/scripts/generator/module/main.go --path=$(PWD) --beats_path=$(BEAT_GOPATH)/src/$(BEAT_PATH) --module=$(MODULE) + # Creates a new fileset. Requires the params MODULE and FILESET .PHONY: create-fileset create-fileset: diff --git a/vendor/github.com/elastic/beats/filebeat/_meta/common.p2.yml b/vendor/github.com/elastic/beats/filebeat/_meta/common.p2.yml index e1d40443..c6924437 100644 --- a/vendor/github.com/elastic/beats/filebeat/_meta/common.p2.yml +++ b/vendor/github.com/elastic/beats/filebeat/_meta/common.p2.yml @@ -1,17 +1,17 @@ # For more available modules and options, please see the filebeat.reference.yml sample # configuration file. -#=========================== Filebeat prospectors ============================= +#=========================== Filebeat inputs ============================= -filebeat.prospectors: +filebeat.inputs: -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. - type: log - # Change to true to enable this prospector configuration. + # Change to true to enable this input configuration. enabled: false # Paths that should be crawled and fetched. Glob based paths. diff --git a/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.p2.yml b/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.p2.yml index c6b01a14..1ab1c9c3 100644 --- a/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.p2.yml +++ b/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.p2.yml @@ -1,22 +1,22 @@ -#=========================== Filebeat prospectors ============================= +#=========================== Filebeat inputs ============================= -# List of prospectors to fetch data. -filebeat.prospectors: -# Each - is a prospector. 
Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. +# List of inputs to fetch data. +filebeat.inputs: +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. # Type of the files. Based on this the way the file is read is decided. -# The different types cannot be mixed in one prospector +# The different types cannot be mixed in one input # # Possible options are: # * log: Reads every line of the log file (default) # * stdin: Reads the standard in -#------------------------------ Log prospector -------------------------------- +#------------------------------ Log input -------------------------------- - type: log - # Change to true to enable this prospector configuration. + # Change to true to enable this input configuration. enabled: false # Paths that should be crawled and fetched. Glob based paths. @@ -67,7 +67,7 @@ filebeat.prospectors: # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #ignore_older: 0 - # How often the prospector checks for new files in the paths that are specified + # How often the input checks for new files in the paths that are specified # for harvesting. Specify 1s to scan the directory as frequently as possible # without causing Filebeat to scan too frequently. Default: 10s. #scan_frequency: 10s @@ -137,7 +137,7 @@ filebeat.prospectors: # this can mean that the first entries of a new file are skipped. #tail_files: false - # The Ingest Node pipeline ID associated with this prospector. If this is set, it + # The Ingest Node pipeline ID associated with this input. If this is set, it # overwrites the pipeline option from the Elasticsearch output. #pipeline: @@ -203,23 +203,25 @@ filebeat.prospectors: # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_timeout: 0 - # Defines if prospectors is enabled + # Defines if inputs is enabled #enabled: true -#----------------------------- Stdin prospector ------------------------------- +#----------------------------- Stdin input ------------------------------- # Configuration to use stdin input #- type: stdin -#------------------------- Redis slowlog prospector --------------------------- -# Experimental: Config options for the redis slow log prospector +#------------------------- Redis slowlog input --------------------------- +# Experimental: Config options for the redis slow log input #- type: redis - #hosts: ["localhost:6379"] - #username: - #password: #enabled: false + + # List of hosts to pool to retrieve the slow log information. + #hosts: ["localhost:6379"] + + # How often the input checks for redis slow log. #scan_frequency: 10s - # Timeout after which time the prospector should return an error + # Timeout after which time the input should return an error #timeout: 1s # Network type to be used for redis connection. Default: tcp @@ -231,17 +233,64 @@ filebeat.prospectors: # Redis AUTH password. Empty by default. 
#password: foobared -#------------------------------ Udp prospector -------------------------------- -# Experimental: Config options for the udp prospector +#------------------------------ Udp input -------------------------------- +# Experimental: Config options for the udp input #- type: udp + #enabled: false # Maximum size of the message received over UDP - #max_message_size: 10240 + #max_message_size: 10KiB + +#------------------------------ TCP input -------------------------------- +# Experimental: Config options for the TCP input +#- type: tcp + #enabled: false + + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + +#------------------------------ Syslog input -------------------------------- +# Experimental: Config options for the Syslog input +# Accept RFC3164 formatted syslog event via UDP. +#- type: syslog + #enabled: false + #protocol.udp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + +# Accept RFC3164 formatted syslog event via TCP. +#- type: syslog + #enabled: false + + #protocol.tcp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s #========================== Filebeat autodiscover ============================== # Autodiscover allows you to detect changes in the system and spawn new modules -# or prospectors as they happen. +# or inputs as they happen. #filebeat.autodiscover: # List of enabled autodiscover providers @@ -261,10 +310,15 @@ filebeat.prospectors: # data path. #filebeat.registry_file: ${path.data}/registry -# These config files must have the full filebeat config part inside, but only -# the prospector part is processed. All global options like spool_size are ignored. -# The config_dir MUST point to a different directory then where the main filebeat config file is in. -#filebeat.config_dir: +# The permissions mask to apply on registry file. The default value is 0600. +# Must be a valid Unix-style file permissions mask expressed in octal notation. +# This option is not supported on Windows. +#filebeat.registry_file_permissions: 0600 + +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Filebeat overwrites pipelines +# everytime a new Elasticsearch connection is established. +#filebeat.overwrite_pipelines: false # How long filebeat waits on shutdown for the publisher to finish. # Default is 0, not waiting. 
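The reference file above adds two commented-out options, `registry_file_permissions` and `overwrite_pipelines`. As a minimal sketch, enabling them with the documented default permissions mask (and opting in to pipeline overwriting, which the reference otherwise leaves at `false`) might look like this:

["source","yaml"]
-------------------------------------------------------------------------------------
# Octal Unix-style permissions mask for the registry file; not supported on Windows.
filebeat.registry_file_permissions: 0600

# Overwrite existing Ingest pipelines every time a new Elasticsearch connection
# is established, instead of skipping pipelines whose ID already exists.
filebeat.overwrite_pipelines: true
-------------------------------------------------------------------------------------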
@@ -272,9 +326,9 @@ filebeat.prospectors: # Enable filebeat config reloading #filebeat.config: - #prospectors: + #inputs: #enabled: false - #path: prospectors.d/*.yml + #path: inputs.d/*.yml #reload.enabled: true #reload.period: 10s #modules: diff --git a/vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml b/vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml index 886cde08..1a020265 100644 --- a/vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml +++ b/vendor/github.com/elastic/beats/filebeat/_meta/fields.common.yml @@ -32,7 +32,14 @@ - name: prospector.type required: true description: > - The prospector type from which the event was generated. This field is set to the value specified for the `type` option in the prospector section of the Filebeat config file. + The input type from which the event was generated. This field is set to the value specified + for the `type` option in the input section of the Filebeat config file. (DEPRECATED: see `input.type`) + + - name: input.type + required: true + description: > + The input type from which the event was generated. This field is set to the value specified + for the `type` option in the input section of the Filebeat config file. - name: read_timestamp description: > @@ -47,3 +54,45 @@ - name: fileset.name description: > The Filebeat fileset that generated this event. + + - name: syslog.facility + type: long + required: false + description: > + The facility extracted from the priority. + + - name: syslog.priority + type: long + required: false + description: > + The priority of the syslog event. + + - name: syslog.severity_label + type: keyword + required: false + description: > + The human readable severity. + + - name: syslog.facility_label + type: keyword + required: false + description: > + The human readable facility. + + - name: process.program + type: keyword + required: false + description: > + The name of the program. + + - name: process.pid + type: long + required: false + description: > + The pid of the process. + + - name: event.severity + type: long + required: false + description: > + The severity of the event. 
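The reference snippet above documents the new experimental syslog input, and the field definitions that follow add the `syslog.*`, `process.*`, and `event.severity` fields it populates. A minimal sketch using the renamed `filebeat.inputs` key and the defaults quoted in the reference file (`localhost:9000`, `10KiB`) might be:

["source","yaml"]
-------------------------------------------------------------------------------------
filebeat.inputs:
# Experimental: accept RFC3164 formatted syslog events over UDP.
- type: syslog
  enabled: true
  protocol.udp:
    # The host and port to receive the new events on.
    host: "localhost:9000"
    # Maximum size of a message received over UDP.
    max_message_size: 10KiB
-------------------------------------------------------------------------------------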
diff --git a/vendor/github.com/elastic/beats/filebeat/beater/autodiscover.go b/vendor/github.com/elastic/beats/filebeat/autodiscover/autodiscover.go similarity index 72% rename from vendor/github.com/elastic/beats/filebeat/beater/autodiscover.go rename to vendor/github.com/elastic/beats/filebeat/autodiscover/autodiscover.go index 01c9757b..08e8c1b4 100644 --- a/vendor/github.com/elastic/beats/filebeat/beater/autodiscover.go +++ b/vendor/github.com/elastic/beats/filebeat/autodiscover/autodiscover.go @@ -1,4 +1,4 @@ -package beater +package autodiscover import ( "errors" @@ -8,17 +8,17 @@ import ( "github.com/elastic/beats/libbeat/common/bus" ) -// AutodiscoverAdapter for Filebeat modules & prospectors +// AutodiscoverAdapter for Filebeat modules & input type AutodiscoverAdapter struct { - prospectorFactory cfgfile.RunnerFactory - moduleFactory cfgfile.RunnerFactory + inputFactory cfgfile.RunnerFactory + moduleFactory cfgfile.RunnerFactory } -// NewAutodiscoverAdapter builds and returns an autodiscover adapter for Filebeat modules & prospectors -func NewAutodiscoverAdapter(prospectorFactory, moduleFactory cfgfile.RunnerFactory) *AutodiscoverAdapter { +// NewAutodiscoverAdapter builds and returns an autodiscover adapter for Filebeat modules & input +func NewAutodiscoverAdapter(inputFactory, moduleFactory cfgfile.RunnerFactory) *AutodiscoverAdapter { return &AutodiscoverAdapter{ - prospectorFactory: prospectorFactory, - moduleFactory: moduleFactory, + inputFactory: inputFactory, + moduleFactory: moduleFactory, } } @@ -37,12 +37,12 @@ func (m *AutodiscoverAdapter) CheckConfig(c *common.Config) error { return nil } -// Create a module or prospector from the given config +// Create a module or input from the given config func (m *AutodiscoverAdapter) Create(c *common.Config, meta *common.MapStrPointer) (cfgfile.Runner, error) { if c.HasField("module") { return m.moduleFactory.Create(c, meta) } - return m.prospectorFactory.Create(c, meta) + return m.inputFactory.Create(c, meta) } // EventFilter returns the bus filter to retrieve runner start/stop triggering events diff --git a/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/config.go b/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/config.go new file mode 100644 index 00000000..adbe9222 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/config.go @@ -0,0 +1,24 @@ +package hints + +import "github.com/elastic/beats/libbeat/common" + +type config struct { + Key string `config:"key"` + Config *common.Config `config:"config"` +} + +func defaultConfig() config { + rawCfg := map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "ids": []string{ + "${data.container.id}", + }, + }, + } + cfg, _ := common.NewConfigFrom(rawCfg) + return config{ + Key: "logs", + Config: cfg, + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs.go b/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs.go new file mode 100644 index 00000000..20c1a678 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs.go @@ -0,0 +1,183 @@ +package hints + +import ( + "fmt" + "regexp" + + "github.com/elastic/beats/filebeat/fileset" + "github.com/elastic/beats/libbeat/autodiscover" + "github.com/elastic/beats/libbeat/autodiscover/builder" + "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + 
"github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" +) + +func init() { + autodiscover.Registry.AddBuilder("hints", NewLogHints) +} + +const ( + multiline = "multiline" + includeLines = "include_lines" + excludeLines = "exclude_lines" +) + +// validModuleNames to sanitize user input +var validModuleNames = regexp.MustCompile("[^a-zA-Z0-9]+") + +type logHints struct { + Key string + Config *common.Config + Registry *fileset.ModuleRegistry +} + +// NewLogHints builds a log hints builder +func NewLogHints(cfg *common.Config) (autodiscover.Builder, error) { + cfgwarn.Beta("The hints builder is beta") + config := defaultConfig() + err := cfg.Unpack(&config) + + if err != nil { + return nil, fmt.Errorf("unable to unpack hints config due to error: %v", err) + } + + moduleRegistry, err := fileset.NewModuleRegistry([]*common.Config{}, "", false) + if err != nil { + return nil, err + } + + return &logHints{config.Key, config.Config, moduleRegistry}, nil +} + +// Create config based on input hints in the bus event +func (l *logHints) CreateConfig(event bus.Event) []*common.Config { + // Clone original config + config, _ := common.NewConfigFrom(l.Config) + host, _ := event["host"].(string) + if host == "" { + return []*common.Config{} + } + + var hints common.MapStr + hIface, ok := event["hints"] + if ok { + hints, _ = hIface.(common.MapStr) + } + + if builder.IsNoOp(hints, l.Key) == true { + return []*common.Config{config} + } + + tempCfg := common.MapStr{} + mline := l.getMultiline(hints) + if len(mline) != 0 { + tempCfg.Put(multiline, mline) + } + if ilines := l.getIncludeLines(hints); len(ilines) != 0 { + tempCfg.Put(includeLines, ilines) + } + if elines := l.getExcludeLines(hints); len(elines) != 0 { + tempCfg.Put(excludeLines, elines) + } + + // Merge config template with the configs from the annotations + if err := config.Merge(tempCfg); err != nil { + logp.Debug("hints.builder", "config merge failed with error: %v", err) + return []*common.Config{config} + } + + module := l.getModule(hints) + if module != "" { + moduleConf := map[string]interface{}{ + "module": module, + } + + filesets := l.getFilesets(hints, module) + for fileset, conf := range filesets { + filesetConf, _ := common.NewConfigFrom(config) + filesetConf.SetString("containers.stream", -1, conf.Stream) + + moduleConf[fileset+".enabled"] = conf.Enabled + moduleConf[fileset+".input"] = filesetConf + + logp.Debug("hints.builder", "generated config %+v", moduleConf) + } + config, _ = common.NewConfigFrom(moduleConf) + } + + logp.Debug("hints.builder", "generated config %+v", config) + + // Apply information in event to the template to generate the final config + return template.ApplyConfigTemplate(event, []*common.Config{config}) +} + +func (l *logHints) getMultiline(hints common.MapStr) common.MapStr { + return builder.GetHintMapStr(hints, l.Key, multiline) +} + +func (l *logHints) getIncludeLines(hints common.MapStr) []string { + return builder.GetHintAsList(hints, l.Key, includeLines) +} + +func (l *logHints) getExcludeLines(hints common.MapStr) []string { + return builder.GetHintAsList(hints, l.Key, excludeLines) +} + +func (l *logHints) getModule(hints common.MapStr) string { + module := builder.GetHintString(hints, l.Key, "module") + // for security, strip module name + return validModuleNames.ReplaceAllString(module, "") +} + +type filesetConfig struct { + Enabled bool + Stream string +} + +// Return a map containing filesets -> enabled & stream (stdout, stderr, all) +func (l *logHints) 
getFilesets(hints common.MapStr, module string) map[string]*filesetConfig { + var configured bool + filesets := make(map[string]*filesetConfig) + + moduleFilesets, err := l.Registry.ModuleFilesets(module) + if err != nil { + logp.Err("Error retrieving module filesets", err) + return nil + } + + for _, fileset := range moduleFilesets { + filesets[fileset] = &filesetConfig{Enabled: false, Stream: "all"} + } + + // If a single fileset is given, pass all streams to it + fileset := builder.GetHintString(hints, l.Key, "fileset") + if fileset != "" { + if conf, ok := filesets[fileset]; ok { + conf.Enabled = true + configured = true + } + } + + // If fileset is defined per stream, return all of them + for _, stream := range []string{"all", "stdout", "stderr"} { + fileset := builder.GetHintString(hints, l.Key, "fileset."+stream) + if fileset != "" { + if conf, ok := filesets[fileset]; ok { + conf.Enabled = true + conf.Stream = stream + configured = true + } + } + } + + // No fileseat defined, return defaults for the module, all streams to all filesets + if !configured { + for _, conf := range filesets { + conf.Enabled = true + } + } + + return filesets +} diff --git a/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs_test.go b/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs_test.go new file mode 100644 index 00000000..6b1de4da --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/autodiscover/builder/hints/logs_test.go @@ -0,0 +1,295 @@ +package hints + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/paths" +) + +func TestGenerateHints(t *testing.T) { + tests := []struct { + msg string + event bus.Event + len int + result common.MapStr + }{ + { + msg: "Hints without host should return nothing", + event: bus.Event{ + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "prometheus", + }, + }, + }, + len: 0, + result: common.MapStr{}, + }, + { + msg: "Empty event hints should return default config", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + len: 1, + result: common.MapStr{ + "type": "docker", + "containers": map[string]interface{}{ + "ids": []interface{}{"abc"}, + }, + }, + }, + { + msg: "Hint with include|exclude_lines must be part of the input config", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + "hints": common.MapStr{ + "logs": common.MapStr{ + "include_lines": "^test, ^test1", + "exclude_lines": "^test2, ^test3", + }, + }, + }, + len: 1, + result: common.MapStr{ + "type": "docker", + "containers": map[string]interface{}{ + "ids": []interface{}{"abc"}, + }, + "include_lines": []interface{}{"^test", "^test1"}, + "exclude_lines": []interface{}{"^test2", "^test3"}, + }, + }, + { + msg: "Hint with multiline config must have a multiline in the input config", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + "hints": common.MapStr{ + "logs": common.MapStr{ + 
"multiline": common.MapStr{ + "pattern": "^test", + "negate": "true", + }, + }, + }, + }, + len: 1, + result: common.MapStr{ + "type": "docker", + "containers": map[string]interface{}{ + "ids": []interface{}{"abc"}, + }, + "multiline": map[string]interface{}{ + "pattern": "^test", + "negate": "true", + }, + }, + }, + { + msg: "Hint with module should attach input to its filesets", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + "hints": common.MapStr{ + "logs": common.MapStr{ + "module": "apache2", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "apache2", + "error": map[string]interface{}{ + "enabled": true, + "input": map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "stream": "all", + "ids": []interface{}{"abc"}, + }, + }, + }, + "access": map[string]interface{}{ + "enabled": true, + "input": map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "stream": "all", + "ids": []interface{}{"abc"}, + }, + }, + }, + }, + }, + { + msg: "Hint with module should honor defined filesets", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + "hints": common.MapStr{ + "logs": common.MapStr{ + "module": "apache2", + "fileset": "access", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "apache2", + "access": map[string]interface{}{ + "enabled": true, + "input": map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "stream": "all", + "ids": []interface{}{"abc"}, + }, + }, + }, + "error": map[string]interface{}{ + "enabled": false, + "input": map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "stream": "all", + "ids": []interface{}{"abc"}, + }, + }, + }, + }, + }, + { + msg: "Hint with module should honor defined filesets with streams", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + "hints": common.MapStr{ + "logs": common.MapStr{ + "module": "apache2", + "fileset.stdout": "access", + "fileset.stderr": "error", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "apache2", + "access": map[string]interface{}{ + "enabled": true, + "input": map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "stream": "stdout", + "ids": []interface{}{"abc"}, + }, + }, + }, + "error": map[string]interface{}{ + "enabled": true, + "input": map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "stream": "stderr", + "ids": []interface{}{"abc"}, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + cfg, _ := common.NewConfigFrom(map[string]interface{}{ + "type": "docker", + "containers": map[string]interface{}{ + "ids": []string{ + "${data.container.id}", + }, + }, + }) + + // Configure path for modules access + abs, _ := filepath.Abs("../../..") + err := paths.InitPaths(&paths.Path{ + Home: abs, + }) + + l, err := NewLogHints(cfg) + if err != nil { + t.Fatal(err) + } + + cfgs := l.CreateConfig(test.event) + assert.Equal(t, len(cfgs), test.len, test.msg) + + if test.len != 0 { + config 
:= common.MapStr{} + err := cfgs[0].Unpack(&config) + assert.Nil(t, err, test.msg) + + assert.Equal(t, test.result, config, test.msg) + } + + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/autodiscover/include.go b/vendor/github.com/elastic/beats/filebeat/autodiscover/include.go new file mode 100644 index 00000000..33e1c5ae --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/autodiscover/include.go @@ -0,0 +1,6 @@ +package autodiscover + +import ( + // include all filebeat specific builders + _ "github.com/elastic/beats/filebeat/autodiscover/builder/hints" +) diff --git a/vendor/github.com/elastic/beats/filebeat/beater/acker.go b/vendor/github.com/elastic/beats/filebeat/beater/acker.go index 3c39c551..fd1074d0 100644 --- a/vendor/github.com/elastic/beats/filebeat/beater/acker.go +++ b/vendor/github.com/elastic/beats/filebeat/beater/acker.go @@ -2,31 +2,41 @@ package beater import ( "github.com/elastic/beats/filebeat/input/file" + "github.com/elastic/beats/libbeat/logp" ) // eventAcker handles publisher pipeline ACKs and forwards -// them to the registrar. +// them to the registrar or directly to the stateless logger. type eventACKer struct { - out successLogger + stateful statefulLogger + stateless statelessLogger + log *logp.Logger } -type successLogger interface { +type statefulLogger interface { Published(states []file.State) } -func newEventACKer(out successLogger) *eventACKer { - return &eventACKer{out: out} +type statelessLogger interface { + Published(c int) bool +} + +func newEventACKer(stateless statelessLogger, stateful statefulLogger) *eventACKer { + return &eventACKer{stateless: stateless, stateful: stateful, log: logp.NewLogger("acker")} } func (a *eventACKer) ackEvents(data []interface{}) { + stateless := 0 states := make([]file.State, 0, len(data)) for _, datum := range data { if datum == nil { + stateless++ continue } st, ok := datum.(file.State) if !ok { + stateless++ continue } @@ -34,6 +44,12 @@ func (a *eventACKer) ackEvents(data []interface{}) { } if len(states) > 0 { - a.out.Published(states) + a.log.Debugw("stateful ack", "count", len(states)) + a.stateful.Published(states) + } + + if stateless > 0 { + a.log.Debugw("stateless ack", "count", stateless) + a.stateless.Published(stateless) } } diff --git a/vendor/github.com/elastic/beats/filebeat/beater/acker_test.go b/vendor/github.com/elastic/beats/filebeat/beater/acker_test.go new file mode 100644 index 00000000..806a9773 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/beater/acker_test.go @@ -0,0 +1,71 @@ +package beater + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/filebeat/input/file" +) + +type mockStatefulLogger struct { + states []file.State +} + +func (sf *mockStatefulLogger) Published(states []file.State) { + sf.states = states +} + +type mockStatelessLogger struct { + count int +} + +func (sl *mockStatelessLogger) Published(count int) bool { + sl.count = count + return true +} + +func TestACKer(t *testing.T) { + tests := []struct { + name string + data []interface{} + stateless int + stateful []file.State + }{ + { + name: "only stateless", + data: []interface{}{nil, nil}, + stateless: 2, + }, + { + name: "only stateful", + data: []interface{}{file.State{Source: "-"}, file.State{Source: "-"}}, + stateful: []file.State{file.State{Source: "-"}, file.State{Source: "-"}}, + stateless: 0, + }, + { + name: "both", + data: []interface{}{file.State{Source: "-"}, nil, file.State{Source: "-"}}, + stateful: []file.State{file.State{Source: 
"-"}, file.State{Source: "-"}}, + stateless: 1, + }, + { + name: "any other Private type", + data: []interface{}{struct{}{}, nil}, + stateless: 2, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sl := &mockStatelessLogger{} + sf := &mockStatefulLogger{} + + h := newEventACKer(sl, sf) + + h.ackEvents(test.data) + assert.Equal(t, test.stateless, sl.count) + assert.Equal(t, test.stateful, sf.states) + }) + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go b/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go index 0d7ef669..cb42d018 100644 --- a/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go +++ b/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go @@ -3,6 +3,7 @@ package beater import ( "flag" "fmt" + "strings" "github.com/joeshaw/multierror" "github.com/pkg/errors" @@ -15,7 +16,9 @@ import ( "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/monitoring" "github.com/elastic/beats/libbeat/outputs/elasticsearch" + "github.com/elastic/beats/libbeat/setup/kibana" + fbautodiscover "github.com/elastic/beats/filebeat/autodiscover" "github.com/elastic/beats/filebeat/channel" cfg "github.com/elastic/beats/filebeat/config" "github.com/elastic/beats/filebeat/crawler" @@ -54,6 +57,22 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { return nil, err } + if len(config.Prospectors) > 0 { + cfgwarn.Deprecate("7.0.0", "prospectors are deprecated, Use `inputs` instead.") + if len(config.Inputs) > 0 { + return nil, fmt.Errorf("prospectors and inputs used in the configuration file, define only inputs not both") + } + config.Inputs = config.Prospectors + } + + if config.ConfigProspector != nil { + cfgwarn.Deprecate("7.0.0", "config.prospectors are deprecated, Use `config.inputs` instead.") + if config.ConfigInput != nil { + return nil, fmt.Errorf("config.prospectors and config.inputs used in the configuration file, define only config.inputs not both") + } + config.ConfigInput = config.ConfigProspector + } + moduleRegistry, err := fileset.NewModuleRegistry(config.Modules, b.Info.Version, true) if err != nil { return nil, err @@ -62,7 +81,7 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { logp.Info("Enabled modules/filesets: %s", moduleRegistry.InfoString()) } - moduleProspectors, err := moduleRegistry.GetProspectorConfigs() + moduleInputs, err := moduleRegistry.GetInputConfigs() if err != nil { return nil, err } @@ -71,28 +90,30 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { return nil, err } - // Add prospectors created by the modules - config.Prospectors = append(config.Prospectors, moduleProspectors...) + // Add inputs created by the modules + config.Inputs = append(config.Inputs, moduleInputs...) - haveEnabledProspectors := false - for _, prospector := range config.Prospectors { - if prospector.Enabled() { - haveEnabledProspectors = true - break - } + enabledInputs := config.ListEnabledInputs() + var haveEnabledInputs bool + if len(enabledInputs) > 0 { + haveEnabledInputs = true } - if !config.ConfigProspector.Enabled() && !config.ConfigModules.Enabled() && !haveEnabledProspectors && config.Autodiscover == nil { + if !config.ConfigInput.Enabled() && !config.ConfigModules.Enabled() && !haveEnabledInputs && config.Autodiscover == nil { if !b.InSetupCmd { - return nil, errors.New("No modules or prospectors enabled and configuration reloading disabled. 
What files do you want me to watch?") + return nil, errors.New("no modules or inputs enabled and configuration reloading disabled. What files do you want me to watch?") } // in the `setup` command, log this only as a warning logp.Warn("Setup called, but no modules enabled.") } - if *once && config.ConfigProspector.Enabled() && config.ConfigModules.Enabled() { - return nil, errors.New("prospector configs and -once cannot be used together") + if *once && config.ConfigInput.Enabled() && config.ConfigModules.Enabled() { + return nil, errors.New("input configs and -once cannot be used together") + } + + if config.IsInputEnabled("stdin") && len(enabledInputs) > 1 { + return nil, fmt.Errorf("stdin requires to be run in exclusive mode, configured inputs: %s", strings.Join(enabledInputs, ", ")) } fb := &Filebeat{ @@ -102,12 +123,36 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { } // register `setup` callback for ML jobs - b.SetupMLCallback = func(b *beat.Beat) error { - return fb.loadModulesML(b) + b.SetupMLCallback = func(b *beat.Beat, kibanaConfig *common.Config) error { + return fb.loadModulesML(b, kibanaConfig) + } + + err = fb.setupPipelineLoaderCallback(b) + if err != nil { + return nil, err } + return fb, nil } +func (fb *Filebeat) setupPipelineLoaderCallback(b *beat.Beat) error { + if !fb.moduleRegistry.Empty() { + overwritePipelines := fb.config.OverwritePipelines + if b.InSetupCmd { + overwritePipelines = true + } + + b.OverwritePipelinesCallback = func(esConfig *common.Config) error { + esClient, err := elasticsearch.NewConnectedClient(esConfig) + if err != nil { + return err + } + return fb.moduleRegistry.LoadPipelines(esClient, overwritePipelines) + } + } + return nil +} + // loadModulesPipelines is called when modules are configured to do the initial // setup. 
func (fb *Filebeat) loadModulesPipelines(b *beat.Beat) error { @@ -116,20 +161,26 @@ func (fb *Filebeat) loadModulesPipelines(b *beat.Beat) error { return nil } + overwritePipelines := fb.config.OverwritePipelines + if b.InSetupCmd { + overwritePipelines = true + } + // register pipeline loading to happen every time a new ES connection is // established callback := func(esClient *elasticsearch.Client) error { - return fb.moduleRegistry.LoadPipelines(esClient) + return fb.moduleRegistry.LoadPipelines(esClient, overwritePipelines) } elasticsearch.RegisterConnectCallback(callback) return nil } -func (fb *Filebeat) loadModulesML(b *beat.Beat) error { - logp.Debug("machine-learning", "Setting up ML jobs for modules") +func (fb *Filebeat) loadModulesML(b *beat.Beat, kibanaConfig *common.Config) error { var errs multierror.Errors + logp.Debug("machine-learning", "Setting up ML jobs for modules") + if b.Config.Output.Name() != "elasticsearch" { logp.Warn("Filebeat is unable to load the Xpack Machine Learning configurations for the" + " modules because the Elasticsearch output is not configured/enabled.") @@ -141,7 +192,34 @@ func (fb *Filebeat) loadModulesML(b *beat.Beat) error { if err != nil { return errors.Errorf("Error creating Elasticsearch client: %v", err) } - if err := fb.moduleRegistry.LoadML(esClient); err != nil { + + if kibanaConfig == nil { + kibanaConfig = common.NewConfig() + } + + if esConfig.Enabled() { + username, _ := esConfig.String("username", -1) + password, _ := esConfig.String("password", -1) + + if !kibanaConfig.HasField("username") && username != "" { + kibanaConfig.SetString("username", -1, username) + } + if !kibanaConfig.HasField("password") && password != "" { + kibanaConfig.SetString("password", -1, password) + } + } + + kibanaClient, err := kibana.NewKibanaClient(kibanaConfig) + if err != nil { + return errors.Errorf("Error creating Kibana client: %v", err) + } + + kibanaVersion, err := common.NewVersion(kibanaClient.GetVersion()) + if err != nil { + return errors.Errorf("Error checking Kibana version: %v", err) + } + + if err := setupMLBasedOnVersion(fb.moduleRegistry, esClient, kibanaClient, kibanaVersion); err != nil { errs = append(errs, err) } @@ -167,15 +245,30 @@ func (fb *Filebeat) loadModulesML(b *beat.Beat) error { continue } - if err := set.LoadML(esClient); err != nil { + if err := setupMLBasedOnVersion(set, esClient, kibanaClient, kibanaVersion); err != nil { errs = append(errs, err) } + } } return errs.Err() } +func setupMLBasedOnVersion(reg *fileset.ModuleRegistry, esClient *elasticsearch.Client, kibanaClient *kibana.Client, kibanaVersion *common.Version) error { + if isElasticsearchLoads(kibanaVersion) { + return reg.LoadML(esClient) + } + return reg.SetupML(esClient, kibanaClient) +} + +func isElasticsearchLoads(kibanaVersion *common.Version) bool { + if kibanaVersion.Major < 6 || kibanaVersion.Major == 6 && kibanaVersion.Minor < 1 { + return true + } + return false +} + // Run allows the beater to be run as a beat. 
func (fb *Filebeat) Run(b *beat.Beat) error { var err error @@ -200,7 +293,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { finishedLogger := newFinishedLogger(wgEvents) // Setup registrar to persist state - registrar, err := registrar.New(config.RegistryFile, config.RegistryFlush, finishedLogger) + registrar, err := registrar.New(config.RegistryFile, config.RegistryFilePermissions, config.RegistryFlush, finishedLogger) if err != nil { logp.Err("Could not init registrar: %v", err) return err @@ -210,7 +303,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { registrarChannel := newRegistrarLogger(registrar) err = b.Publisher.SetACKHandler(beat.PipelineACKHandler{ - ACKEvents: newEventACKer(registrarChannel).ackEvents, + ACKEvents: newEventACKer(finishedLogger, registrarChannel).ackEvents, }) if err != nil { logp.Err("Failed to install the registry with the publisher pipeline: %v", err) @@ -220,7 +313,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { outDone := make(chan struct{}) // outDone closes down all active pipeline connections crawler, err := crawler.New( channel.NewOutletFactory(outDone, b.Publisher, wgEvents).Create, - config.Prospectors, + config.Inputs, b.Info.Version, fb.done, *once) @@ -261,7 +354,11 @@ func (fb *Filebeat) Run(b *beat.Beat) error { logp.Warn(pipelinesWarning) } - err = crawler.Start(registrar, config.ConfigProspector, config.ConfigModules, pipelineLoaderFactory) + if config.OverwritePipelines { + logp.Debug("modules", "Existing Ingest pipelines will be updated") + } + + err = crawler.Start(registrar, config.ConfigInput, config.ConfigModules, pipelineLoaderFactory, config.OverwritePipelines) if err != nil { crawler.Stop() return err @@ -279,7 +376,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { var adiscover *autodiscover.Autodiscover if fb.config.Autodiscover != nil { - adapter := NewAutodiscoverAdapter(crawler.ProspectorsFactory, crawler.ModulesFactory) + adapter := fbautodiscover.NewAutodiscoverAdapter(crawler.InputsFactory, crawler.ModulesFactory) adiscover, err = autodiscover.NewAutodiscover("filebeat", adapter, config.Autodiscover) if err != nil { return err @@ -291,7 +388,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { waitFinished.AddChan(fb.done) waitFinished.Wait() - // Stop autodiscover -> Stop crawler -> stop prospectors -> stop harvesters + // Stop autodiscover -> Stop crawler -> stop inputs -> stop harvesters // Note: waiting for crawlers to stop here in order to install wgEvents.Wait // after all events have been enqueued for publishing. Otherwise wgEvents.Wait // or publisher might panic due to concurrent updates. diff --git a/vendor/github.com/elastic/beats/filebeat/channel/factory.go b/vendor/github.com/elastic/beats/filebeat/channel/factory.go index 025df89a..cf3b2de3 100644 --- a/vendor/github.com/elastic/beats/filebeat/channel/factory.go +++ b/vendor/github.com/elastic/beats/filebeat/channel/factory.go @@ -24,15 +24,15 @@ type clientEventer struct { wgEvents eventCounter } -// prospectorOutletConfig defines common prospector settings +// inputOutletConfig defines common input settings // for the publisher pipline. -type prospectorOutletConfig struct { +type inputOutletConfig struct { // event processing common.EventMetadata `config:",inline"` // Fields and tags to add to events. 
Processors processors.PluginConfig `config:"processors"` // implicit event fields - Type string `config:"type"` // prospector.type + Type string `config:"type"` // input.type // hidden filebeat modules settings Module string `config:"_module_name"` // hidden setting @@ -44,7 +44,7 @@ type prospectorOutletConfig struct { } // NewOutletFactory creates a new outlet factory for -// connecting a prospector to the publisher pipeline. +// connecting an input to the publisher pipeline. func NewOutletFactory( done <-chan struct{}, pipeline beat.Pipeline, @@ -63,12 +63,12 @@ func NewOutletFactory( return o } -// Create builds a new Outleter, while applying common prospector settings. -// Prospectors and all harvesters use the same pipeline client instance. +// Create builds a new Outleter, while applying common input settings. +// Inputs and all harvesters use the same pipeline client instance. // This guarantees ordering between events as required by the registrar for // file.State updates func (f *OutletFactory) Create(cfg *common.Config, dynFields *common.MapStrPointer) (Outleter, error) { - config := prospectorOutletConfig{} + config := inputOutletConfig{} if err := cfg.Unpack(&config); err != nil { return nil, err } @@ -99,6 +99,9 @@ func (f *OutletFactory) Create(cfg *common.Config, dynFields *common.MapStrPoint fields["prospector"] = common.MapStr{ "type": config.Type, } + fields["input"] = common.MapStr{ + "type": config.Type, + } } client, err := f.pipeline.ConnectWith(beat.ClientConfig{ diff --git a/vendor/github.com/elastic/beats/filebeat/channel/interface.go b/vendor/github.com/elastic/beats/filebeat/channel/interface.go index 2fbed63b..06bbcd43 100644 --- a/vendor/github.com/elastic/beats/filebeat/channel/interface.go +++ b/vendor/github.com/elastic/beats/filebeat/channel/interface.go @@ -8,7 +8,7 @@ import ( // Factory is used to create a new Outlet instance type Factory func(*common.Config, *common.MapStrPointer) (Outleter, error) -// Outleter is the outlet for a prospector +// Outleter is the outlet for an input type Outleter interface { Close() error OnEvent(data *util.Data) bool diff --git a/vendor/github.com/elastic/beats/filebeat/config/config.go b/vendor/github.com/elastic/beats/filebeat/config/config.go index d3a0ee31..2a1aa3ba 100644 --- a/vendor/github.com/elastic/beats/filebeat/config/config.go +++ b/vendor/github.com/elastic/beats/filebeat/config/config.go @@ -5,6 +5,7 @@ import ( "log" "os" "path/filepath" + "sort" "time" "github.com/elastic/beats/libbeat/autodiscover" @@ -21,21 +22,27 @@ const ( ) type Config struct { - Prospectors []*common.Config `config:"prospectors"` - RegistryFile string `config:"registry_file"` - RegistryFlush time.Duration `config:"registry_flush"` - ConfigDir string `config:"config_dir"` - ShutdownTimeout time.Duration `config:"shutdown_timeout"` - Modules []*common.Config `config:"modules"` - ConfigProspector *common.Config `config:"config.prospectors"` - ConfigModules *common.Config `config:"config.modules"` - Autodiscover *autodiscover.Config `config:"autodiscover"` + Inputs []*common.Config `config:"inputs"` + Prospectors []*common.Config `config:"prospectors"` + RegistryFile string `config:"registry_file"` + RegistryFilePermissions os.FileMode `config:"registry_file_permissions"` + RegistryFlush time.Duration `config:"registry_flush"` + ConfigDir string `config:"config_dir"` + ShutdownTimeout time.Duration `config:"shutdown_timeout"` + Modules []*common.Config `config:"modules"` + ConfigInput *common.Config `config:"config.inputs"` + 
ConfigProspector *common.Config `config:"config.prospectors"` + ConfigModules *common.Config `config:"config.modules"` + Autodiscover *autodiscover.Config `config:"autodiscover"` + OverwritePipelines bool `config:"overwrite_pipelines"` } var ( DefaultConfig = Config{ - RegistryFile: "registry", - ShutdownTimeout: 0, + RegistryFile: "registry", + RegistryFilePermissions: 0600, + ShutdownTimeout: 0, + OverwritePipelines: false, } ) @@ -82,7 +89,15 @@ func mergeConfigFiles(configFiles []string, config *Config) error { return fmt.Errorf("Failed to read %s: %s", file, err) } - config.Prospectors = append(config.Prospectors, tmpConfig.Filebeat.Prospectors...) + if len(tmpConfig.Filebeat.Prospectors) > 0 { + cfgwarn.Deprecate("7.0.0", "prospectors are deprecated, Use `inputs` instead.") + if len(tmpConfig.Filebeat.Inputs) > 0 { + return fmt.Errorf("prospectors and inputs used in the configuration file, define only inputs not both") + } + tmpConfig.Filebeat.Inputs = append(tmpConfig.Filebeat.Inputs, tmpConfig.Filebeat.Prospectors...) + } + + config.Inputs = append(config.Inputs, tmpConfig.Filebeat.Inputs...) } return nil @@ -97,7 +112,7 @@ func (config *Config) FetchConfigs() error { return nil } - cfgwarn.Deprecate("7.0.0", "config_dir is deprecated. Use `filebeat.config.prospectors` instead.") + cfgwarn.Deprecate("7.0.0", "config_dir is deprecated. Use `filebeat.config.inputs` instead.") // If configDir is relative, consider it relative to the config path configDir = paths.Resolve(paths.Config, configDir) @@ -120,3 +135,30 @@ func (config *Config) FetchConfigs() error { return nil } + +// ListEnabledInputs returns a list of enabled inputs sorted by alphabetical order. +func (config *Config) ListEnabledInputs() []string { + t := struct { + Type string `config:"type"` + }{} + var inputs []string + for _, input := range config.Inputs { + if input.Enabled() { + input.Unpack(&t) + inputs = append(inputs, t.Type) + } + } + sort.Strings(inputs) + return inputs +} + +// IsInputEnabled returns true if the plugin name is enabled. 
+func (config *Config) IsInputEnabled(name string) bool { + enabledInputs := config.ListEnabledInputs() + for _, input := range enabledInputs { + if name == input { + return true + } + } + return false +} diff --git a/vendor/github.com/elastic/beats/filebeat/config/config_test.go b/vendor/github.com/elastic/beats/filebeat/config/config_test.go index 0c95a5fa..6c793783 100644 --- a/vendor/github.com/elastic/beats/filebeat/config/config_test.go +++ b/vendor/github.com/elastic/beats/filebeat/config/config_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/libbeat/cfgfile" + "github.com/elastic/beats/libbeat/common" ) func TestReadConfig2(t *testing.T) { @@ -93,5 +94,83 @@ func TestMergeConfigFiles(t *testing.T) { config := &Config{} mergeConfigFiles(files, config) - assert.Equal(t, 4, len(config.Prospectors)) + assert.Equal(t, 4, len(config.Inputs)) +} + +func TestEnabledInputs(t *testing.T) { + stdinEnabled, err := common.NewConfigFrom(map[string]interface{}{ + "type": "stdin", + "enabled": true, + }) + if !assert.NoError(t, err) { + return + } + + udpDisabled, err := common.NewConfigFrom(map[string]interface{}{ + "type": "udp", + "enabled": false, + }) + if !assert.NoError(t, err) { + return + } + + logDisabled, err := common.NewConfigFrom(map[string]interface{}{ + "type": "log", + "enabled": false, + }) + if !assert.NoError(t, err) { + return + } + + t.Run("ListEnabledInputs", func(t *testing.T) { + tests := []struct { + name string + config *Config + expected []string + }{ + { + name: "all inputs disabled", + config: &Config{Inputs: []*common.Config{udpDisabled, logDisabled}}, + expected: []string{}, + }, + { + name: "all inputs enabled", + config: &Config{Inputs: []*common.Config{stdinEnabled}}, + expected: []string{"stdin"}, + }, + { + name: "disabled and enabled inputs", + config: &Config{Inputs: []*common.Config{stdinEnabled, udpDisabled, logDisabled}}, + expected: []string{"stdin"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.ElementsMatch(t, test.expected, test.config.ListEnabledInputs()) + }) + } + }) + + t.Run("IsInputEnabled", func(t *testing.T) { + config := &Config{Inputs: []*common.Config{stdinEnabled, udpDisabled, logDisabled}} + + tests := []struct { + name string + input string + expected bool + config *Config + }{ + {name: "input exists and enabled", input: "stdin", expected: true, config: config}, + {name: "input exists and disabled", input: "udp", expected: false, config: config}, + {name: "input doesn't exist", input: "redis", expected: false, config: config}, + {name: "no inputs are enabled", input: "redis", expected: false, config: &Config{}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.expected, config.IsInputEnabled(test.input)) + }) + } + }) } diff --git a/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go b/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go index 54a2162a..a1f92005 100644 --- a/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go +++ b/vendor/github.com/elastic/beats/filebeat/crawler/crawler.go @@ -7,7 +7,7 @@ import ( "github.com/elastic/beats/filebeat/channel" "github.com/elastic/beats/filebeat/fileset" "github.com/elastic/beats/filebeat/input/file" - "github.com/elastic/beats/filebeat/prospector" + input "github.com/elastic/beats/filebeat/prospector" "github.com/elastic/beats/filebeat/registrar" "github.com/elastic/beats/libbeat/cfgfile" 
"github.com/elastic/beats/libbeat/common" @@ -17,57 +17,57 @@ import ( ) type Crawler struct { - prospectors map[uint64]*prospector.Prospector - prospectorConfigs []*common.Config - out channel.Factory - wg sync.WaitGroup - ProspectorsFactory cfgfile.RunnerFactory - ModulesFactory cfgfile.RunnerFactory - modulesReloader *cfgfile.Reloader - prospectorsReloader *cfgfile.Reloader - once bool - beatVersion string - beatDone chan struct{} + inputs map[uint64]*input.Runner + inputConfigs []*common.Config + out channel.Factory + wg sync.WaitGroup + InputsFactory cfgfile.RunnerFactory + ModulesFactory cfgfile.RunnerFactory + modulesReloader *cfgfile.Reloader + inputReloader *cfgfile.Reloader + once bool + beatVersion string + beatDone chan struct{} } -func New(out channel.Factory, prospectorConfigs []*common.Config, beatVersion string, beatDone chan struct{}, once bool) (*Crawler, error) { +func New(out channel.Factory, inputConfigs []*common.Config, beatVersion string, beatDone chan struct{}, once bool) (*Crawler, error) { return &Crawler{ - out: out, - prospectors: map[uint64]*prospector.Prospector{}, - prospectorConfigs: prospectorConfigs, - once: once, - beatVersion: beatVersion, - beatDone: beatDone, + out: out, + inputs: map[uint64]*input.Runner{}, + inputConfigs: inputConfigs, + once: once, + beatVersion: beatVersion, + beatDone: beatDone, }, nil } -// Start starts the crawler with all prospectors -func (c *Crawler) Start(r *registrar.Registrar, configProspectors *common.Config, - configModules *common.Config, pipelineLoaderFactory fileset.PipelineLoaderFactory) error { +// Start starts the crawler with all inputs +func (c *Crawler) Start(r *registrar.Registrar, configInputs *common.Config, + configModules *common.Config, pipelineLoaderFactory fileset.PipelineLoaderFactory, overwritePipelines bool) error { - logp.Info("Loading Prospectors: %v", len(c.prospectorConfigs)) + logp.Info("Loading Inputs: %v", len(c.inputConfigs)) // Prospect the globs/paths given on the command line and launch harvesters - for _, prospectorConfig := range c.prospectorConfigs { - err := c.startProspector(prospectorConfig, r.GetStates()) + for _, inputConfig := range c.inputConfigs { + err := c.startInput(inputConfig, r.GetStates()) if err != nil { return err } } - c.ProspectorsFactory = prospector.NewRunnerFactory(c.out, r, c.beatDone) - if configProspectors.Enabled() { - c.prospectorsReloader = cfgfile.NewReloader(configProspectors) - if err := c.prospectorsReloader.Check(c.ProspectorsFactory); err != nil { + c.InputsFactory = input.NewRunnerFactory(c.out, r, c.beatDone) + if configInputs.Enabled() { + c.inputReloader = cfgfile.NewReloader(configInputs) + if err := c.inputReloader.Check(c.InputsFactory); err != nil { return err } go func() { - c.prospectorsReloader.Run(c.ProspectorsFactory) + c.inputReloader.Run(c.InputsFactory) }() } - c.ModulesFactory = fileset.NewFactory(c.out, r, c.beatVersion, pipelineLoaderFactory, c.beatDone) + c.ModulesFactory = fileset.NewFactory(c.out, r, c.beatVersion, pipelineLoaderFactory, overwritePipelines, c.beatDone) if configModules.Enabled() { c.modulesReloader = cfgfile.NewReloader(configModules) if err := c.modulesReloader.Check(c.ModulesFactory); err != nil { @@ -79,26 +79,26 @@ func (c *Crawler) Start(r *registrar.Registrar, configProspectors *common.Config }() } - logp.Info("Loading and starting Prospectors completed. Enabled prospectors: %v", len(c.prospectors)) + logp.Info("Loading and starting Inputs completed. 
Enabled inputs: %v", len(c.inputs)) return nil } -func (c *Crawler) startProspector(config *common.Config, states []file.State) error { +func (c *Crawler) startInput(config *common.Config, states []file.State) error { if !config.Enabled() { return nil } - p, err := prospector.New(config, c.out, c.beatDone, states, nil) + p, err := input.New(config, c.out, c.beatDone, states, nil) if err != nil { - return fmt.Errorf("Error in initing prospector: %s", err) + return fmt.Errorf("Error in initing input: %s", err) } p.Once = c.once - if _, ok := c.prospectors[p.ID]; ok { - return fmt.Errorf("Prospector with same ID already exists: %d", p.ID) + if _, ok := c.inputs[p.ID]; ok { + return fmt.Errorf("Input with same ID already exists: %d", p.ID) } - c.prospectors[p.ID] = p + c.inputs[p.ID] = p p.Start() @@ -116,14 +116,14 @@ func (c *Crawler) Stop() { }() } - logp.Info("Stopping %v prospectors", len(c.prospectors)) - for _, p := range c.prospectors { - // Stop prospectors in parallel + logp.Info("Stopping %v inputs", len(c.inputs)) + for _, p := range c.inputs { + // Stop inputs in parallel asyncWaitStop(p.Stop) } - if c.prospectorsReloader != nil { - asyncWaitStop(c.prospectorsReloader.Stop) + if c.inputReloader != nil { + asyncWaitStop(c.inputReloader.Stop) } if c.modulesReloader != nil { diff --git a/vendor/github.com/elastic/beats/filebeat/docker-compose.yml b/vendor/github.com/elastic/beats/filebeat/docker-compose.yml index c0bd070d..f010c8c9 100644 --- a/vendor/github.com/elastic/beats/filebeat/docker-compose.yml +++ b/vendor/github.com/elastic/beats/filebeat/docker-compose.yml @@ -6,7 +6,7 @@ services: - proxy_dep env_file: - ${PWD}/build/test.env - - ${PWD}/prospector/redis/_meta/env + - ${PWD}/input/redis/_meta/env environment: - KIBANA_HOST=kibana - KIBANA_PORT=5601 @@ -37,4 +37,4 @@ services: service: kibana redis: - build: ${PWD}/prospector/redis/_meta + build: ${PWD}/input/redis/_meta diff --git a/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-docker-config.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-docker-config.asciidoc index 236e5107..19be3dfe 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-docker-config.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-docker-config.asciidoc @@ -1,4 +1,4 @@ -Filebeat supports templates for both prospectors and modules. +Filebeat supports templates for inputs and modules. ["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- @@ -7,7 +7,7 @@ filebeat.autodiscover: - type: docker templates: - condition: - equals: + contains: docker.container.image: redis config: - type: docker @@ -16,9 +16,9 @@ filebeat.autodiscover: exclude_lines: ["^\\s+[\\-`('.|_]"] # drop asciiart lines ------------------------------------------------------------------------------------- -This configuration launches a `docker` logs prospector for all containers running an image with `redis` in the name. +This configuration launches a `docker` logs input for all containers running an image with `redis` in the name. -If you are using modules, you can override the default prospector and use the docker prospector instead. +If you are using modules, you can override the default input and use the docker input instead. 
["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- @@ -27,12 +27,12 @@ filebeat.autodiscover: - type: docker templates: - condition: - equals: - docker.container.image: "redis" + contains: + docker.container.image: redis config: - module: redis log: - prospector: + input: type: docker containers.ids: - "${data.docker.container.id}" diff --git a/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-hints.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-hints.asciidoc new file mode 100644 index 00000000..8e682c3a --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-hints.asciidoc @@ -0,0 +1,122 @@ +{beatname_uc} supports autodiscover based on hints from the provider. The hints system looks for +hints in Kubernetes Pod annotations or Docker labels that have the prefix `co.elastic.logs`. As soon as +the container starts, {beatname_uc} will check if it contains any hints and launch the proper config for +it. Hints tell {beatname_uc} how to get logs for the given container. By default logs will be retrieved +from the container using the `docker` input. You can use hints to modify this behavior. This is the full +list of supported hints: + +[float] +===== `co.elastic.logs/disable` + +Filebeat gets logs from all containers by default, you can set this hint to `true` to ignore +the output of the container. Filebeat won't read or send logs from it. + +[float] +===== `co.elastic.logs/multiline.*` + +Multiline settings. See <> for a full list of all supported options. + +[float] +===== `co.elastic.logs/include_lines` + +A list of regular expressions to match the lines that you want {beatname_uc} to include. +See <> for more info. + +[float] +===== `co.elastic.logs/exclude_lines` + +A list of regular expressions to match the lines that you want {beatname_uc} to exclude. +See <> for more info. + +[float] +===== `co.elastic.logs/module` + +Instead of using raw `docker` input, specifies the module to use to parse logs from the container. See +<> for the list of supported modules. + +[float] +===== `co.elastic.logs/fileset` + +When module is configured, map container logs to module filesets. You can either configure +a single fileset like this: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +co.elastic.logs/fileset: access +------------------------------------------------------------------------------------- + +Or configure a fileset per stream in the container (stdout and stderr): + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +co.elastic.logs/fileset.stdout: access +co.elastic.logs/fileset.stderr: error +------------------------------------------------------------------------------------- + +[float] +==== Kubernetes + +Kubernetes autodiscover provider supports hints in Pod annotations. 
To enable it just set `hints.enabled`: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +filebeat.autodiscover: + providers: + - type: kubernetes + hints.enabled: true +------------------------------------------------------------------------------------- + +You can annotate Kubernetes Pods with useful info to spin up {beatname_uc} inputs or modules: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +annotations: + co.elastic.logs/multiline.pattern: '^\[' + co.elastic.logs/multiline.negate: true + co.elastic.logs/multiline.match: after +------------------------------------------------------------------------------------- + + +[float] +===== Multiple containers + +When a pod has multiple containers, the settings are shared unless you put the container name in the +hint. For example, these hints configure multiline settings for all containers in the pod, but set a +specific `exclude_lines` hint for the container called `sidecar`. + + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +annotations: + co.elastic.logs/multiline.pattern: '^\[' + co.elastic.logs/multiline.negate: true + co.elastic.logs/multiline.match: after + co.elastic.logs.sidecar/exlude_lines: '^DBG' +------------------------------------------------------------------------------------- + + + +[float] +==== Docker + +Docker autodiscover provider supports hints in labels. To enable it just set `hints.enabled`: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +filebeat.autodiscover: + providers: + - type: docker + hints.enabled: true +------------------------------------------------------------------------------------- + +You can label Docker containers with useful info to spin up {beatname_uc} inputs, for example: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- + co.elastic.logs/module: nginx + co.elastic.logs/fileset.stdout: access + co.elastic.logs/fileset.stderr: error +------------------------------------------------------------------------------------- + +The above labels configure {beatname_uc} to use the Nginx module to harvest logs for this container. +Access logs will be retrieved from stdout stream, and error logs from stderr. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-kubernetes-config.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-kubernetes-config.asciidoc index ac4c6196..13927fd2 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-kubernetes-config.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/autodiscover-kubernetes-config.asciidoc @@ -1,4 +1,4 @@ -Filebeat supports templates for both prospectors and modules. +Filebeat supports templates for inputs and modules. 
["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- @@ -16,10 +16,10 @@ filebeat.autodiscover: exclude_lines: ["^\\s+[\\-`('.|_]"] # drop asciiart lines ------------------------------------------------------------------------------------- -This configuration launches a `docker` logs prospector for all containers of pods running in the Kubernetes namespace +This configuration launches a `docker` logs input for all containers of pods running in the Kubernetes namespace `kube-system`. -If you are using modules, you can override the default prospector and use the docker prospector instead. +If you are using modules, you can override the default input and use the docker input instead. ["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- @@ -33,7 +33,7 @@ filebeat.autodiscover: config: - module: redis log: - prospector: + input: type: docker containers.ids: - "${data.kubernetes.container.id}" diff --git a/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc index f1e3b448..ff190ecd 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc @@ -47,8 +47,6 @@ include::./filebeat-modules-options.asciidoc[] include::./filebeat-options.asciidoc[] -include::./multiple-prospectors.asciidoc[] - include::./multiline.asciidoc[] include::./filebeat-general-options.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc index 1ee646e4..4963c09b 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc @@ -1,29 +1,29 @@ [[faq]] == Frequently asked questions -This section contains frequently asked questions about Filebeat. Also check out the -https://discuss.elastic.co/c/beats/filebeat[Filebeat discussion forum]. +This section contains frequently asked questions about {beatname_uc}. Also check out the +https://discuss.elastic.co/c/beats/filebeat[{beatname_uc} discussion forum]. [float] [[filebeat-network-volumes]] === Can't read log files from network volumes? -We do not recommend reading log files from network volumes. Whenever possible, install Filebeat on the host machine and +We do not recommend reading log files from network volumes. Whenever possible, install {beatname_uc} on the host machine and send the log files directly from there. Reading files from network volumes (especially on Windows) can have unexpected side -effects. For example, changed file identifiers may result in Filebeat reading a log file from scratch again. +effects. For example, changed file identifiers may result in {beatname_uc} reading a log file from scratch again. [float] [[filebeat-not-collecting-lines]] -=== Filebeat isn't collecting lines from a file? +=== {beatname_uc} isn't collecting lines from a file? -Filebeat might be incorrectly configured or unable to send events to the output. To resolve the issue: +{beatname_uc} might be incorrectly configured or unable to send events to the output. To resolve the issue: * Make sure the config file specifies the correct path to the file that you are collecting. See <> for more information. -* Verify that the file is not older than the value specified by <>. 
ignore_older is disable by +* Verify that the file is not older than the value specified by <<{beatname_lc}-input-log-ignore-older,`ignore_older`>>. `ignore_older` is disable by default so this depends on the value you have set. You can change this behavior by specifying a different value for -<>. -* Make sure that Filebeat is able to send events to the configured output. Run Filebeat in debug mode to determine whether +<<{beatname_lc}-input-log-ignore-older,`ignore_older`>>. +* Make sure that {beatname_uc} is able to send events to the configured output. Run {beatname_uc} in debug mode to determine whether it's publishing events successfully: + ["source","sh",subs="attributes,callouts"] @@ -35,15 +35,15 @@ it's publishing events successfully: [[open-file-handlers]] === Too many open file handlers? -Filebeat keeps the file handler open in case it reaches the end of a file so that it can read new log lines in near real time. If Filebeat is harvesting a large number of files, the number of open files can become an issue. In most environments, the number of files that are actively updated is low. The `close_inactive` configuration option should be set accordingly to close files that are no longer active. +{beatname_uc} keeps the file handler open in case it reaches the end of a file so that it can read new log lines in near real time. If {beatname_uc} is harvesting a large number of files, the number of open files can become an issue. In most environments, the number of files that are actively updated is low. The `close_inactive` configuration option should be set accordingly to close files that are no longer active. There are additional configuration options that you can use to close file handlers, but all of them should be used carefully because they can have side effects. The options are: -* <> -* <> -* <> -* <> -* <> +* <<{beatname_lc}-input-log-close-renamed,`close_renamed`>> +* <<{beatname_lc}-input-log-close-removed,`close_removed`>> +* <<{beatname_lc}-input-log-close-eof,`close_eof`>> +* <<{beatname_lc}-input-log-close-timeout,`close_timeout`>> +* <<{beatname_lc}-input-log-harvester-limit,`harvester_limit`>> The `close_renamed` and `close_removed` options can be useful on Windows to resolve issues related to file rotation. See <>. The `close_eof` option can be useful in environments with a large number of files that have only very few entries. The `close_timeout` option is useful in environments where closing file handlers is more important than sending all log lines. For more details, see <>. @@ -53,36 +53,36 @@ Make sure that you read the documentation for these configuration options before [[reduce-registry-size]] === Registry file is too large? -Filebeat keeps the state of each file and persists the state to disk in the `registry_file`. The file state is used to continue file reading at a previous position when Filebeat is restarted. If a large number of new files are produced every day, the registry file might grow to be too large. To reduce the size of the registry file, there are two configuration options available: <> and <>. +{beatname_uc} keeps the state of each file and persists the state to disk in the `registry_file`. The file state is used to continue file reading at a previous position when {beatname_uc} is restarted. If a large number of new files are produced every day, the registry file might grow to be too large. 
To reduce the size of the registry file, there are two configuration options available: <<{beatname_lc}-input-log-clean-removed,`clean_removed`>> and <<{beatname_lc}-input-log-clean-inactive,`clean_inactive`>>. -For old files that you no longer touch and are ignored (see <>), we recommended that you use `clean_inactive`. If old files get removed from disk, then use the `clean_removed` option. +For old files that you no longer touch and are ignored (see <<{beatname_lc}-input-log-ignore-older,`ignore_older`>>), we recommended that you use `clean_inactive`. If old files get removed from disk, then use the `clean_removed` option. [float] [[inode-reuse-issue]] -=== Inode reuse causes Filebeat to skip lines? +=== Inode reuse causes {beatname_uc} to skip lines? -On Linux file systems, Filebeat uses the inode and device to identify files. When a file is removed from disk, the inode may be assigned to a new file. In use cases involving file rotation, if an old file is removed and a new one is created immediately afterwards, the new file may have the exact same inode as the file that was removed. In this case, Filebeat assumes that the new file is the same as the old and tries to continue reading at the old position, which is not correct. +On Linux file systems, {beatname_uc} uses the inode and device to identify files. When a file is removed from disk, the inode may be assigned to a new file. In use cases involving file rotation, if an old file is removed and a new one is created immediately afterwards, the new file may have the exact same inode as the file that was removed. In this case, {beatname_uc} assumes that the new file is the same as the old and tries to continue reading at the old position, which is not correct. -By default states are never removed from the registry file. To resolve the inode reuse issue, we recommend that you use the <> options, especially <>, to remove the state of inactive files. For example, if your files get rotated every 24 hours, and the rotated files are not updated anymore, you can set <> to 48 hours and <> to 72 hours. +By default states are never removed from the registry file. To resolve the inode reuse issue, we recommend that you use the <<{beatname_lc}-input-log-clean-options,`clean_*`>> options, especially <<{beatname_lc}-input-log-clean-inactive,`clean_inactive`>>, to remove the state of inactive files. For example, if your files get rotated every 24 hours, and the rotated files are not updated anymore, you can set <<{beatname_lc}-input-log-ignore-older,`ignore_older`>> to 48 hours and <<{beatname_lc}-input-log-clean-inactive,`clean_inactive`>> to 72 hours. -You can use <> for files that are removed from disk. Be aware that `clean_removed` cleans the file state from the registry whenever a file cannot be found during a scan. If the file shows up again later, it will be sent again from scratch. +You can use <<{beatname_lc}-input-log-clean-removed,`clean_removed`>> for files that are removed from disk. Be aware that `clean_removed` cleans the file state from the registry whenever a file cannot be found during a scan. If the file shows up again later, it will be sent again from scratch. [float] [[windows-file-rotation]] === Open file handlers cause issues with Windows file rotation? -On Windows, you might have problems renaming or removing files because Filebeat keeps the file handlers open. This can lead to issues with the file rotating system. To avoid this issue, you can use the <> and <> options together. 
+On Windows, you might have problems renaming or removing files because {beatname_uc} keeps the file handlers open. This can lead to issues with the file rotating system. To avoid this issue, you can use the <<{beatname_lc}-input-log-close-removed,`close_removed`>> and <<{beatname_lc}-input-log-close-renamed,`close_renamed`>> options together. -IMPORTANT: When you configure these options, files may be closed before the harvester has finished reading the files. If the file cannot be picked up again by the prospector and the harvester hasn't finish reading the file, the missing lines will never be sent to the output. +IMPORTANT: When you configure these options, files may be closed before the harvester has finished reading the files. If the file cannot be picked up again by the input and the harvester hasn't finish reading the file, the missing lines will never be sent to the output. [float] [[filebeat-cpu]] -=== Filebeat is using too much CPU? +=== {beatname_uc} is using too much CPU? -Filebeat might be configured to scan for files too frequently. Check the setting for `scan_frequency` in the `filebeat.yml` -config file. Setting `scan_frequency` to less than 1s may cause Filebeat to scan the disk in a tight loop. +{beatname_uc} might be configured to scan for files too frequently. Check the setting for `scan_frequency` in the `filebeat.yml` +config file. Setting `scan_frequency` to less than 1s may cause {beatname_uc} to scan the disk in a tight loop. [float] [[dashboard-fields-incorrect-filebeat]] @@ -105,28 +105,29 @@ curl -XPOST 'http://localhost:9200/filebeat-2016.08.09/_refresh' [float] [[newline-character-required-eof]] -=== Filebeat isn't shipping the last line of a file? +=== {beatname_uc} isn't shipping the last line of a file? -Filebeat uses a newline character to detect the end of an event. If lines are added incrementally to a file that's being -harvested, a newline character is required after the last line, or Filebeat will not read the last line of +{beatname_uc} uses a newline character to detect the end of an event. If lines are added incrementally to a file that's being +harvested, a newline character is required after the last line, or {beatname_uc} will not read the last line of the file. [float] [[faq-deleted-files-are-not-freed]] -=== Filebeat keeps open file handlers of deleted files for a long time? +=== {beatname_uc} keeps open file handlers of deleted files for a long time? -In the default behaviour, Filebeat opens the files and keeps them open until it +In the default behaviour, {beatname_uc} opens the files and keeps them open until it reaches the end of them. In situations when the configured output is blocked (e.g. Elasticsearch or Logstash is unavailable) for a long time, this can cause -Filebeat to keep file handlers to files that were deleted from the file system -in the mean time. As long as Filebeat keeps the deleted files open, the +{beatname_uc} to keep file handlers to files that were deleted from the file system +in the mean time. As long as {beatname_uc} keeps the deleted files open, the operating system doesn't free up the space on disk, which can lead to increase disk utilisation or even out of disk situations. -To mitigate this issue, you can set the <> setting to `5m`. This -will ensure every file handler is closed once every 5 minutes, regardless of -whether it reached EOF or not. Note that this option can lead to data loss if the -file is deleted before Filebeat reaches the end of the file. 
+To mitigate this issue, you can set the +<<{beatname_lc}-input-log-close-timeout>> setting to `5m`. This will ensure +every file handler is closed once every 5 minutes, regardless of whether it +reached EOF or not. Note that this option can lead to data loss if the file is +deleted before {beatname_uc} reaches the end of the file. include::../../libbeat/docs/faq-limit-bandwidth.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc index e50ad1e9..bd23ef70 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc @@ -17,11 +17,14 @@ grouped in the following categories: * <> * <> * <> +* <> * <> +* <> * <> * <> * <> * <> +* <> * <> * <> * <> @@ -52,25 +55,29 @@ Contains fields for the Apache2 HTTPD access logs. -[float] -=== `apache2.access.remote_ip` - +*`apache2.access.remote_ip`*:: ++ +-- type: keyword Client IP address. -[float] -=== `apache2.access.user_name` +-- +*`apache2.access.user_name`*:: ++ +-- type: keyword The user name used when basic authentication is used. -[float] -=== `apache2.access.method` +-- +*`apache2.access.method`*:: ++ +-- type: keyword example: GET @@ -78,33 +85,41 @@ example: GET The request HTTP method. -[float] -=== `apache2.access.url` +-- +*`apache2.access.url`*:: ++ +-- type: keyword The request HTTP URL. -[float] -=== `apache2.access.http_version` +-- +*`apache2.access.http_version`*:: ++ +-- type: keyword The HTTP version. -[float] -=== `apache2.access.response_code` +-- +*`apache2.access.response_code`*:: ++ +-- type: long The HTTP response code. -[float] -=== `apache2.access.body_sent.bytes` +-- +*`apache2.access.body_sent.bytes`*:: ++ +-- type: long format: bytes @@ -112,22 +127,28 @@ format: bytes The number of bytes of the server response body. -[float] -=== `apache2.access.referrer` +-- +*`apache2.access.referrer`*:: ++ +-- type: keyword The HTTP referrer. -[float] -=== `apache2.access.agent` +-- +*`apache2.access.agent`*:: ++ +-- type: text Contains the un-parsed user agent string. Only present if the user agent Elasticsearch plugin is not available or not used. +-- + [float] == user_agent fields @@ -135,41 +156,49 @@ Contains the parsed User agent field. Only present if the user agent Elasticsear -[float] -=== `apache2.access.user_agent.device` - +*`apache2.access.user_agent.device`*:: ++ +-- type: keyword The name of the physical device. -[float] -=== `apache2.access.user_agent.major` +-- +*`apache2.access.user_agent.major`*:: ++ +-- type: long The major version of the user agent. -[float] -=== `apache2.access.user_agent.minor` +-- +*`apache2.access.user_agent.minor`*:: ++ +-- type: long The minor version of the user agent. -[float] -=== `apache2.access.user_agent.patch` +-- +*`apache2.access.user_agent.patch`*:: ++ +-- type: keyword The patch version of the user agent. -[float] -=== `apache2.access.user_agent.name` +-- +*`apache2.access.user_agent.name`*:: ++ +-- type: keyword example: Chrome @@ -177,38 +206,48 @@ example: Chrome The name of the user agent. -[float] -=== `apache2.access.user_agent.os` +-- +*`apache2.access.user_agent.os`*:: ++ +-- type: keyword The name of the operating system. -[float] -=== `apache2.access.user_agent.os_major` +-- +*`apache2.access.user_agent.os_major`*:: ++ +-- type: long The major version of the operating system. 
-[float] -=== `apache2.access.user_agent.os_minor` +-- +*`apache2.access.user_agent.os_minor`*:: ++ +-- type: long The minor version of the operating system. -[float] -=== `apache2.access.user_agent.os_name` +-- +*`apache2.access.user_agent.os_name`*:: ++ +-- type: keyword The name of the operating system. +-- + [float] == geoip fields @@ -216,46 +255,56 @@ Contains GeoIP information gathered based on the remote_ip field. Only present i -[float] -=== `apache2.access.geoip.continent_name` - +*`apache2.access.geoip.continent_name`*:: ++ +-- type: keyword The name of the continent. -[float] -=== `apache2.access.geoip.country_iso_code` +-- +*`apache2.access.geoip.country_iso_code`*:: ++ +-- type: keyword Country ISO code. -[float] -=== `apache2.access.geoip.location` +-- +*`apache2.access.geoip.location`*:: ++ +-- type: geo_point The longitude and latitude. -[float] -=== `apache2.access.geoip.region_name` +-- +*`apache2.access.geoip.region_name`*:: ++ +-- type: keyword The region name. -[float] -=== `apache2.access.geoip.city_name` +-- +*`apache2.access.geoip.city_name`*:: ++ +-- type: keyword The city name. +-- + [float] == error fields @@ -263,54 +312,66 @@ Fields from the Apache error logs. -[float] -=== `apache2.error.level` - +*`apache2.error.level`*:: ++ +-- type: keyword The severity level of the message. -[float] -=== `apache2.error.client` +-- +*`apache2.error.client`*:: ++ +-- type: keyword The IP address of the client that generated the error. -[float] -=== `apache2.error.message` +-- +*`apache2.error.message`*:: ++ +-- type: text The logged message. -[float] -=== `apache2.error.pid` +-- +*`apache2.error.pid`*:: ++ +-- type: long The process ID. -[float] -=== `apache2.error.tid` +-- +*`apache2.error.tid`*:: ++ +-- type: long The thread ID. -[float] -=== `apache2.error.module` +-- +*`apache2.error.module`*:: ++ +-- type: keyword The module producing the logged message. +-- + [[exported-fields-auditd]] == Auditd fields @@ -332,86 +393,112 @@ Fields from the Linux audit log. Not all fields are documented here because they -[float] -=== `auditd.log.record_type` - +*`auditd.log.record_type`*:: ++ +-- The audit event type. -[float] -=== `auditd.log.old_auid` +-- +*`auditd.log.old_auid`*:: ++ +-- For login events this is the old audit ID used for the user prior to this login. -[float] -=== `auditd.log.new_auid` +-- +*`auditd.log.new_auid`*:: ++ +-- For login events this is the new audit ID. The audit ID can be used to trace future events to the user even if their identity changes (like becoming root). -[float] -=== `auditd.log.old_ses` +-- +*`auditd.log.old_ses`*:: ++ +-- For login events this is the old session ID used for the user prior to this login. -[float] -=== `auditd.log.new_ses` +-- +*`auditd.log.new_ses`*:: ++ +-- For login events this is the new session ID. It can be used to tie a user to future events by session ID. -[float] -=== `auditd.log.sequence` +-- +*`auditd.log.sequence`*:: ++ +-- type: long The audit event sequence number. -[float] -=== `auditd.log.acct` +-- +*`auditd.log.acct`*:: ++ +-- The user account name associated with the event. -[float] -=== `auditd.log.pid` +-- +*`auditd.log.pid`*:: ++ +-- The ID of the process. -[float] -=== `auditd.log.ppid` +-- +*`auditd.log.ppid`*:: ++ +-- The ID of the process. -[float] -=== `auditd.log.items` +-- +*`auditd.log.items`*:: ++ +-- The number of items in an event. -[float] -=== `auditd.log.item` +-- +*`auditd.log.item`*:: ++ +-- The item field indicates which item out of the total number of items. 
This number is zero-based; a value of 0 means it is the first item. -[float] -=== `auditd.log.a0` +-- +*`auditd.log.a0`*:: ++ +-- The first argument to the system call. -[float] -=== `auditd.log.res` +-- +*`auditd.log.res`*:: ++ +-- The result of the system call (success or failure). +-- + [float] == geoip fields @@ -419,46 +506,56 @@ Contains GeoIP information gathered based on the `auditd.log.addr` field. Only p -[float] -=== `auditd.log.geoip.continent_name` - +*`auditd.log.geoip.continent_name`*:: ++ +-- type: keyword The name of the continent. -[float] -=== `auditd.log.geoip.city_name` +-- +*`auditd.log.geoip.city_name`*:: ++ +-- type: keyword The name of the city. -[float] -=== `auditd.log.geoip.region_name` +-- +*`auditd.log.geoip.region_name`*:: ++ +-- type: keyword The name of the region. -[float] -=== `auditd.log.geoip.country_iso_code` +-- +*`auditd.log.geoip.country_iso_code`*:: ++ +-- type: keyword Country ISO code. -[float] -=== `auditd.log.geoip.location` +-- +*`auditd.log.geoip.location`*:: ++ +-- type: geo_point The longitude and latitude. +-- + [[exported-fields-beat]] == Beat fields @@ -466,33 +563,41 @@ Contains common beat fields available in all event types. -[float] -=== `beat.name` - +*`beat.name`*:: ++ +-- The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. -[float] -=== `beat.hostname` +-- +*`beat.hostname`*:: ++ +-- The hostname as returned by the operating system on which the Beat is running. -[float] -=== `beat.timezone` +-- +*`beat.timezone`*:: ++ +-- The timezone as returned by the operating system on which the Beat is running. -[float] -=== `beat.version` +-- +*`beat.version`*:: ++ +-- The version of the beat that generated this event. -[float] -=== `@timestamp` +-- +*`@timestamp`*:: ++ +-- type: date example: August 26th 2016, 12:35:53.332 @@ -504,20 +609,26 @@ required: True The timestamp when the event log record was generated. -[float] -=== `tags` +-- +*`tags`*:: ++ +-- Arbitrary tags that can be set per Beat and per transaction type. -[float] -=== `fields` +-- +*`fields`*:: ++ +-- type: object Contains user configurable fields. +-- + [float] == error fields @@ -525,30 +636,36 @@ Error fields containing additional info in case of errors. -[float] -=== `error.message` - +*`error.message`*:: ++ +-- type: text Error message. -[float] -=== `error.code` +-- +*`error.code`*:: ++ +-- type: long Error code. -[float] -=== `error.type` +-- +*`error.type`*:: ++ +-- type: keyword Error type. +-- + [[exported-fields-cloud]] == Cloud provider metadata fields @@ -556,56 +673,70 @@ Metadata from cloud providers added by the add_cloud_metadata processor. -[float] -=== `meta.cloud.provider` - +*`meta.cloud.provider`*:: ++ +-- example: ec2 Name of the cloud provider. Possible values are ec2, gce, or digitalocean. -[float] -=== `meta.cloud.instance_id` +-- +*`meta.cloud.instance_id`*:: ++ +-- Instance ID of the host machine. -[float] -=== `meta.cloud.instance_name` +-- +*`meta.cloud.instance_name`*:: ++ +-- Instance name of the host machine. -[float] -=== `meta.cloud.machine_type` +-- +*`meta.cloud.machine_type`*:: ++ +-- example: t2.medium Machine type of the host machine. -[float] -=== `meta.cloud.availability_zone` +-- +*`meta.cloud.availability_zone`*:: ++ +-- example: us-east-1c Availability zone in which this host is running. 
-[float] -=== `meta.cloud.project_id` +-- +*`meta.cloud.project_id`*:: ++ +-- example: project-x Name of the project in Google Cloud. -[float] -=== `meta.cloud.region` +-- +*`meta.cloud.region`*:: ++ +-- Region in which this host is running. +-- + [[exported-fields-docker-processor]] == Docker fields @@ -614,144 +745,787 @@ Docker stats collected from Docker. -[float] -=== `docker.container.id` - +*`docker.container.id`*:: ++ +-- type: keyword Unique container id. -[float] -=== `docker.container.image` +-- +*`docker.container.image`*:: ++ +-- type: keyword Name of the image the container was built on. -[float] -=== `docker.container.name` +-- +*`docker.container.name`*:: ++ +-- type: keyword Container name. -[float] -=== `docker.container.labels` +-- +*`docker.container.labels`*:: ++ +-- type: object Image labels. -[[exported-fields-icinga]] -== Icinga fields - -Icinga Module - - +-- -[float] -== icinga fields +[[exported-fields-host-processor]] +== Host fields +Info collected for the host machine. -[float] -== debug fields -Contains fields for the Icinga debug logs. +*`host.name`*:: ++ +-- +type: keyword +Hostname. -[float] -=== `icinga.debug.facility` +-- +*`host.id`*:: ++ +-- type: keyword -Specifies what component of Icinga logged the message. +Unique host id. -[float] -=== `icinga.debug.severity` +-- +*`host.architecture`*:: ++ +-- type: keyword -Possible values are "debug", "notice", "information", "warning" or "critical". +Host architecture (e.g. x86_64, arm, ppc, mips). -[float] -=== `icinga.debug.message` +-- -type: text +*`host.os.platform`*:: ++ +-- +type: keyword -The logged message. +OS platform (e.g. centos, ubuntu, windows). -[float] -== main fields +-- -Contains fields for the Icinga main logs. +*`host.os.version`*:: ++ +-- +type: keyword +OS version. +-- + +*`host.os.family`*:: ++ +-- +type: keyword + +OS family (e.g. redhat, debian, freebsd, windows). + + +-- + +[[exported-fields-icinga]] +== Icinga fields + +Icinga Module + + + +[float] +== icinga fields + + + + +[float] +== debug fields + +Contains fields for the Icinga debug logs. + + + +*`icinga.debug.facility`*:: ++ +-- +type: keyword + +Specifies what component of Icinga logged the message. + + +-- + +*`icinga.debug.severity`*:: ++ +-- +type: keyword + +Possible values are "debug", "notice", "information", "warning" or "critical". + + +-- + +*`icinga.debug.message`*:: ++ +-- +type: text + +The logged message. + + +-- + [float] -=== `icinga.main.facility` +== main fields + +Contains fields for the Icinga main logs. + + + +*`icinga.main.facility`*:: ++ +-- +type: keyword + +Specifies what component of Icinga logged the message. + + +-- + +*`icinga.main.severity`*:: ++ +-- +type: keyword + +Possible values are "debug", "notice", "information", "warning" or "critical". + + +-- + +*`icinga.main.message`*:: ++ +-- +type: text + +The logged message. + + +-- + +[float] +== startup fields + +Contains fields for the Icinga startup logs. + + + +*`icinga.startup.facility`*:: ++ +-- +type: keyword + +Specifies what component of Icinga logged the message. + + +-- + +*`icinga.startup.severity`*:: ++ +-- +type: keyword + +Possible values are "debug", "notice", "information", "warning" or "critical". + + +-- + +*`icinga.startup.message`*:: ++ +-- +type: text + +The logged message. + + +-- + +[[exported-fields-iis]] +== IIS fields + +Module for parsing IIS log files. + + + +[float] +== iis fields + +Fields from IIS log files. + + + +[float] +== access fields + +Contains fields for IIS access logs. 
+ + + +*`iis.access.server_ip`*:: ++ +-- +type: keyword + +The server IP address. + + +-- + +*`iis.access.method`*:: ++ +-- +type: keyword + +example: GET + +The request HTTP method. + + +-- + +*`iis.access.url`*:: ++ +-- +type: keyword + +The request HTTP URL. + + +-- + +*`iis.access.query_string`*:: ++ +-- +type: keyword + +The request query string, if any. + + +-- + +*`iis.access.port`*:: ++ +-- +type: long + +The request port number. + + +-- + +*`iis.access.user_name`*:: ++ +-- +type: keyword + +The user name used when basic authentication is used. + + +-- + +*`iis.access.remote_ip`*:: ++ +-- +type: keyword + +The client IP address. + + +-- + +*`iis.access.referrer`*:: ++ +-- +type: keyword + +The HTTP referrer. + + +-- + +*`iis.access.response_code`*:: ++ +-- +type: long + +The HTTP response code. + + +-- + +*`iis.access.sub_status`*:: ++ +-- +type: long + +The HTTP substatus code. + + +-- + +*`iis.access.win32_status`*:: ++ +-- +type: long + +The Windows status code. + + +-- + +*`iis.access.request_time_ms`*:: ++ +-- +type: long + +The request time in milliseconds. + + +-- + +*`iis.access.site_name`*:: ++ +-- +type: keyword + +The site name and instance number. + + +-- + +*`iis.access.server_name`*:: ++ +-- +type: keyword + +The name of the server on which the log file entry was generated. + + +-- + +*`iis.access.http_version`*:: ++ +-- +type: keyword + +The HTTP version. + + +-- + +*`iis.access.cookie`*:: ++ +-- +type: keyword + +The content of the cookie sent or received, if any. + + +-- + +*`iis.access.hostname`*:: ++ +-- +type: keyword + +The host header name, if any. + + +-- + +*`iis.access.body_sent.bytes`*:: ++ +-- +type: long + +format: bytes + +The number of bytes of the server response body. + + +-- + +*`iis.access.body_received.bytes`*:: ++ +-- +type: long + +format: bytes + +The number of bytes of the server request body. + + +-- + +*`iis.access.agent`*:: ++ +-- +type: text + +Contains the un-parsed user agent string. Only present if the user agent Elasticsearch plugin is not available or not used. + + +-- + +[float] +== user_agent fields + +Contains the parsed user agent field. Only present if the user agent Elasticsearch plugin is available and used. + + + +*`iis.access.user_agent.device`*:: ++ +-- +type: keyword + +The name of the physical device. + + +-- + +*`iis.access.user_agent.major`*:: ++ +-- +type: long + +The major version of the user agent. + + +-- + +*`iis.access.user_agent.minor`*:: ++ +-- +type: long + +The minor version of the user agent. + + +-- + +*`iis.access.user_agent.patch`*:: ++ +-- +type: keyword + +The patch version of the user agent. + + +-- + +*`iis.access.user_agent.name`*:: ++ +-- +type: keyword + +example: Chrome + +The name of the user agent. + + +-- + +*`iis.access.user_agent.os`*:: ++ +-- +type: keyword + +The name of the operating system. + + +-- + +*`iis.access.user_agent.os_major`*:: ++ +-- +type: long + +The major version of the operating system. + + +-- + +*`iis.access.user_agent.os_minor`*:: ++ +-- +type: long + +The minor version of the operating system. + + +-- + +*`iis.access.user_agent.os_name`*:: ++ +-- +type: keyword + +The name of the operating system. + + +-- + +[float] +== geoip fields + +Contains GeoIP information gathered based on the remote_ip field. Only present if the GeoIP Elasticsearch plugin is available and used. + + + +*`iis.access.geoip.continent_name`*:: ++ +-- +type: keyword + +The name of the continent. + + +-- + +*`iis.access.geoip.country_iso_code`*:: ++ +-- +type: keyword + +Country ISO code. 
+ + +-- + +*`iis.access.geoip.location`*:: ++ +-- +type: geo_point + +The longitude and latitude. + + +-- + +*`iis.access.geoip.region_name`*:: ++ +-- +type: keyword + +The region name. + + +-- + +*`iis.access.geoip.city_name`*:: ++ +-- +type: keyword + +The city name. + + +-- + +[float] +== error fields + +Contains fields for IIS error logs. + + + +*`iis.error.remote_ip`*:: ++ +-- +type: keyword + +The client IP address. + + +-- + +*`iis.error.remote_port`*:: ++ +-- +type: long + +The client port number. + + +-- +*`iis.error.server_ip`*:: ++ +-- type: keyword -Specifies what component of Icinga logged the message. +The server IP address. -[float] -=== `icinga.main.severity` +-- + +*`iis.error.server_port`*:: ++ +-- +type: long + +The server port number. + +-- + +*`iis.error.http_version`*:: ++ +-- type: keyword -Possible values are "debug", "notice", "information", "warning" or "critical". +The HTTP version. -[float] -=== `icinga.main.message` +-- -type: text +*`iis.error.method`*:: ++ +-- +type: keyword -The logged message. +example: GET +The request HTTP method. -[float] -== startup fields -Contains fields for the Icinga startup logs. +-- + +*`iis.error.url`*:: ++ +-- +type: keyword +The request HTTP URL. -[float] -=== `icinga.startup.facility` +-- + +*`iis.error.response_code`*:: ++ +-- +type: long + +The HTTP response code. + + +-- +*`iis.error.reason_phrase`*:: ++ +-- type: keyword -Specifies what component of Icinga logged the message. +The HTTP reason phrase. -[float] -=== `icinga.startup.severity` +-- +*`iis.error.queue_name`*:: ++ +-- type: keyword -Possible values are "debug", "notice", "information", "warning" or "critical". +The IIS application pool name. +-- + [float] -=== `icinga.startup.message` +== geoip fields -type: text +Contains GeoIP information gathered based on the remote_ip field. Only present if the GeoIP Elasticsearch plugin is available and used. -The logged message. +*`iis.error.geoip.continent_name`*:: ++ +-- +type: keyword + +The name of the continent. + + +-- + +*`iis.error.geoip.country_iso_code`*:: ++ +-- +type: keyword + +Country ISO code. + + +-- + +*`iis.error.geoip.location`*:: ++ +-- +type: geo_point + +The longitude and latitude. + + +-- + +*`iis.error.geoip.region_name`*:: ++ +-- +type: keyword + +The region name. + + +-- + +*`iis.error.geoip.city_name`*:: ++ +-- +type: keyword + +The city name. + + +-- + [[exported-fields-kafka]] == Kafka fields @@ -772,44 +1546,54 @@ Kafka log lines. -[float] -=== `kafka.log.timestamp` - +*`kafka.log.timestamp`*:: ++ +-- The timestamp from the log line. -[float] -=== `kafka.log.level` +-- +*`kafka.log.level`*:: ++ +-- example: WARN The log level. -[float] -=== `kafka.log.message` +-- +*`kafka.log.message`*:: ++ +-- type: text The logged message. -[float] -=== `kafka.log.component` +-- +*`kafka.log.component`*:: ++ +-- type: keyword Component the log is coming from. -[float] -=== `kafka.log.class` +-- +*`kafka.log.class`*:: ++ +-- type: text Java class the log is coming from. +-- + [float] == trace fields @@ -817,30 +1601,36 @@ Trace in the log line. -[float] -=== `kafka.log.trace.class` - +*`kafka.log.trace.class`*:: ++ +-- type: keyword Java class the trace is coming from. -[float] -=== `kafka.log.trace.message` +-- +*`kafka.log.trace.message`*:: ++ +-- type: text Message part of the trace. -[float] -=== `kafka.log.trace.full` +-- +*`kafka.log.trace.full`*:: ++ +-- type: text The full trace in the log line. 
+-- + [[exported-fields-kubernetes-processor]] == Kubernetes fields @@ -849,62 +1639,76 @@ Kubernetes metadata added by the kubernetes processor -[float] -=== `kubernetes.pod.name` - +*`kubernetes.pod.name`*:: ++ +-- type: keyword Kubernetes pod name -[float] -=== `kubernetes.namespace` +-- +*`kubernetes.namespace`*:: ++ +-- type: keyword Kubernetes namespace -[float] -=== `kubernetes.node.name` +-- +*`kubernetes.node.name`*:: ++ +-- type: keyword Kubernetes node name -[float] -=== `kubernetes.labels` +-- +*`kubernetes.labels`*:: ++ +-- type: object Kubernetes labels map -[float] -=== `kubernetes.annotations` +-- +*`kubernetes.annotations`*:: ++ +-- type: object Kubernetes annotations map -[float] -=== `kubernetes.container.name` +-- +*`kubernetes.container.name`*:: ++ +-- type: keyword Kubernetes container name -[float] -=== `kubernetes.container.image` +-- +*`kubernetes.container.image`*:: ++ +-- type: keyword Kubernetes container image +-- + [[exported-fields-log]] == Log file content fields @@ -912,9 +1716,9 @@ Contains log file lines. -[float] -=== `source` - +*`source`*:: ++ +-- type: keyword required: True @@ -922,9 +1726,11 @@ required: True The file from which the line was read. This field contains the absolute path to the file. For example: `/var/log/system.log`. -[float] -=== `offset` +-- +*`offset`*:: ++ +-- type: long required: False @@ -932,9 +1738,11 @@ required: False The file offset the reported line starts at. -[float] -=== `message` +-- +*`message`*:: ++ +-- type: text required: True @@ -942,9 +1750,11 @@ required: True The content of the line read from the log file. -[float] -=== `stream` +-- +*`stream`*:: ++ +-- type: keyword required: False @@ -952,32 +1762,136 @@ required: False Log stream when reading container logs, can be 'stdout' or 'stderr' -[float] -=== `prospector.type` +-- +*`prospector.type`*:: ++ +-- required: True -The prospector type from which the event was generated. This field is set to the value specified for the `type` option in the prospector section of the Filebeat config file. +The input type from which the event was generated. This field is set to the value specified for the `type` option in the input section of the Filebeat config file. (DEPRECATED: see `input.type`) -[float] -=== `read_timestamp` +-- +*`input.type`*:: ++ +-- +required: True + +The input type from which the event was generated. This field is set to the value specified for the `type` option in the input section of the Filebeat config file. + + +-- + +*`read_timestamp`*:: ++ +-- In case the ingest pipeline parses the timestamp from the log contents, it stores the original `@timestamp` (representing the time when the log line was read) in this field. -[float] -=== `fileset.module` +-- +*`fileset.module`*:: ++ +-- The Filebeat module that generated this event. -[float] -=== `fileset.name` +-- +*`fileset.name`*:: ++ +-- The Filebeat fileset that generated this event. +-- + +*`syslog.facility`*:: ++ +-- +type: long + +required: False + +The facility extracted from the priority. + + +-- + +*`syslog.priority`*:: ++ +-- +type: long + +required: False + +The priority of the syslog event. + + +-- + +*`syslog.severity_label`*:: ++ +-- +type: keyword + +required: False + +The human readable severity. + + +-- + +*`syslog.facility_label`*:: ++ +-- +type: keyword + +required: False + +The human readable facility. + + +-- + +*`process.program`*:: ++ +-- +type: keyword + +required: False + +The name of the program. 
+ + +-- + +*`process.pid`*:: ++ +-- +type: long + +required: False + +The pid of the process. + + +-- + +*`event.severity`*:: ++ +-- +type: long + +required: False + +The severity of the event. + + +-- + [[exported-fields-logstash]] == logstash fields @@ -998,46 +1912,56 @@ Fields from the Logstash logs. -[float] -=== `logstash.log.message` - +*`logstash.log.message`*:: ++ +-- type: text Contains the un-parsed log message -[float] -=== `logstash.log.level` +-- +*`logstash.log.level`*:: ++ +-- type: keyword The log level of the message, this correspond to Log4j levels. -[float] -=== `logstash.log.module` +-- +*`logstash.log.module`*:: ++ +-- type: keyword The module or class where the event originate. -[float] -=== `logstash.log.thread` +-- +*`logstash.log.thread`*:: ++ +-- type: text Information about the running thread where the log originate. -[float] -=== `logstash.log.log_event` +-- +*`logstash.log.log_event`*:: ++ +-- type: object key and value debugging information. +-- + [float] == slowlog fields @@ -1045,94 +1969,183 @@ slowlog -[float] -=== `logstash.slowlog.message` - +*`logstash.slowlog.message`*:: ++ +-- type: text Contains the un-parsed log message -[float] -=== `logstash.slowlog.level` +-- +*`logstash.slowlog.level`*:: ++ +-- type: keyword The log level of the message, this correspond to Log4j levels. -[float] -=== `logstash.slowlog.module` +-- +*`logstash.slowlog.module`*:: ++ +-- type: keyword The module or class where the event originate. -[float] -=== `logstash.slowlog.thread` +-- +*`logstash.slowlog.thread`*:: ++ +-- type: text Information about the running thread where the log originate. -[float] -=== `logstash.slowlog.event` +-- +*`logstash.slowlog.event`*:: ++ +-- type: text Raw dump of the original event -[float] -=== `logstash.slowlog.plugin_name` +-- +*`logstash.slowlog.plugin_name`*:: ++ +-- type: keyword Name of the plugin -[float] -=== `logstash.slowlog.plugin_type` +-- +*`logstash.slowlog.plugin_type`*:: ++ +-- type: keyword Type of the plugin: Inputs, Filters, Outputs or Codecs. -[float] -=== `logstash.slowlog.took_in_millis` +-- +*`logstash.slowlog.took_in_millis`*:: ++ +-- type: long Execution time for the plugin in milliseconds. -[float] -=== `logstash.slowlog.took_in_nanos` +-- +*`logstash.slowlog.took_in_nanos`*:: ++ +-- type: long Execution time for the plugin in nanoseconds. -[float] -=== `logstash.slowlog.plugin_params` +-- +*`logstash.slowlog.plugin_params`*:: ++ +-- type: text String value of the plugin configuration -[float] -=== `logstash.slowlog.plugin_params_object` +-- +*`logstash.slowlog.plugin_params_object`*:: ++ +-- type: object key -> value of the configuration used by the plugin. +-- + +[[exported-fields-mongodb]] +== mongodb fields + +Module for parsing MongoDB log files. + + + +[float] +== mongodb fields + +Fields from MongoDB logs. + + + +[float] +== log fields + +Contains fields from MongoDB logs. + + + +*`mongodb.log.severity`*:: ++ +-- +type: keyword + +example: I + +Severity level of message + + +-- + +*`mongodb.log.component`*:: ++ +-- +type: keyword + +example: COMMAND + +Functional categorization of message + + +-- + +*`mongodb.log.context`*:: ++ +-- +type: keyword + +example: initandlisten + +Context of message + + +-- + +*`mongodb.log.message`*:: ++ +-- +type: text + +The message in the log line. + + +-- + [[exported-fields-mysql]] == MySQL fields @@ -1154,35 +2167,43 @@ Contains fields from the MySQL error logs. -[float] -=== `mysql.error.timestamp` - +*`mysql.error.timestamp`*:: ++ +-- The timestamp from the log line. 
-[float] -=== `mysql.error.thread_id` +-- +*`mysql.error.thread_id`*:: ++ +-- type: long As of MySQL 5.7.2, this is the thread id. For MySQL versions prior to 5.7.2, this field contains the process id. -[float] -=== `mysql.error.level` +-- +*`mysql.error.level`*:: ++ +-- example: Warning The log level. -[float] -=== `mysql.error.message` +-- +*`mysql.error.message`*:: ++ +-- type: text The logged message. +-- + [float] == slowlog fields @@ -1190,78 +2211,98 @@ Contains fields from the MySQL slow logs. -[float] -=== `mysql.slowlog.user` - +*`mysql.slowlog.user`*:: ++ +-- The MySQL user that created the query. -[float] -=== `mysql.slowlog.host` +-- +*`mysql.slowlog.host`*:: ++ +-- The host from where the user that created the query logged in. -[float] -=== `mysql.slowlog.ip` +-- +*`mysql.slowlog.ip`*:: ++ +-- The IP address from where the user that created the query logged in. -[float] -=== `mysql.slowlog.query_time.sec` +-- +*`mysql.slowlog.query_time.sec`*:: ++ +-- type: float The total time the query took, in seconds, as a floating point number. -[float] -=== `mysql.slowlog.lock_time.sec` +-- +*`mysql.slowlog.lock_time.sec`*:: ++ +-- type: float The amount of time the query waited for the lock to be available. The value is in seconds, as a floating point number. -[float] -=== `mysql.slowlog.rows_sent` +-- +*`mysql.slowlog.rows_sent`*:: ++ +-- type: long The number of rows returned by the query. -[float] -=== `mysql.slowlog.rows_examined` +-- +*`mysql.slowlog.rows_examined`*:: ++ +-- type: long The number of rows scanned by the query. -[float] -=== `mysql.slowlog.timestamp` +-- +*`mysql.slowlog.timestamp`*:: ++ +-- type: long The unix timestamp taken from the `SET timestamp` query. -[float] -=== `mysql.slowlog.query` +-- +*`mysql.slowlog.query`*:: ++ +-- The slow query. -[float] -=== `mysql.slowlog.id` +-- +*`mysql.slowlog.id`*:: ++ +-- type: long The connection ID for the query. +-- + [[exported-fields-nginx]] == Nginx fields @@ -1283,33 +2324,39 @@ Contains fields for the Nginx access logs. -[float] -=== `nginx.access.remote_ip_list` - +*`nginx.access.remote_ip_list`*:: ++ +-- type: array An array of remote IP addresses. It is a list because it is common to include, besides the client IP address, IP addresses from headers like `X-Forwarded-For`. See also the `remote_ip` field. -[float] -=== `nginx.access.remote_ip` +-- +*`nginx.access.remote_ip`*:: ++ +-- type: keyword Client IP address. The first public IP address from the `remote_ip_list` array. If no public IP addresses are present, this field contains the first private IP address from the `remote_ip_list` array. -[float] -=== `nginx.access.user_name` +-- +*`nginx.access.user_name`*:: ++ +-- type: keyword The user name used when basic authentication is used. -[float] -=== `nginx.access.method` +-- +*`nginx.access.method`*:: ++ +-- type: keyword example: GET @@ -1317,33 +2364,41 @@ example: GET The request HTTP method. -[float] -=== `nginx.access.url` +-- +*`nginx.access.url`*:: ++ +-- type: keyword The request HTTP URL. -[float] -=== `nginx.access.http_version` +-- +*`nginx.access.http_version`*:: ++ +-- type: keyword The HTTP version. -[float] -=== `nginx.access.response_code` +-- +*`nginx.access.response_code`*:: ++ +-- type: long The HTTP response code. -[float] -=== `nginx.access.body_sent.bytes` +-- +*`nginx.access.body_sent.bytes`*:: ++ +-- type: long format: bytes @@ -1351,22 +2406,28 @@ format: bytes The number of bytes of the server response body. 
-[float] -=== `nginx.access.referrer` +-- +*`nginx.access.referrer`*:: ++ +-- type: keyword The HTTP referrer. -[float] -=== `nginx.access.agent` +-- +*`nginx.access.agent`*:: ++ +-- type: text Contains the un-parsed user agent string. Only present if the user agent Elasticsearch plugin is not available or not used. +-- + [float] == user_agent fields @@ -1374,41 +2435,49 @@ Contains the parsed User agent field. Only present if the user agent Elasticsear -[float] -=== `nginx.access.user_agent.device` - +*`nginx.access.user_agent.device`*:: ++ +-- type: keyword The name of the physical device. -[float] -=== `nginx.access.user_agent.major` +-- +*`nginx.access.user_agent.major`*:: ++ +-- type: long The major version of the user agent. -[float] -=== `nginx.access.user_agent.minor` +-- +*`nginx.access.user_agent.minor`*:: ++ +-- type: long The minor version of the user agent. -[float] -=== `nginx.access.user_agent.patch` +-- +*`nginx.access.user_agent.patch`*:: ++ +-- type: keyword The patch version of the user agent. -[float] -=== `nginx.access.user_agent.name` +-- +*`nginx.access.user_agent.name`*:: ++ +-- type: keyword example: Chrome @@ -1416,38 +2485,48 @@ example: Chrome The name of the user agent. -[float] -=== `nginx.access.user_agent.os` +-- +*`nginx.access.user_agent.os`*:: ++ +-- type: keyword The name of the operating system. -[float] -=== `nginx.access.user_agent.os_major` +-- +*`nginx.access.user_agent.os_major`*:: ++ +-- type: long The major version of the operating system. -[float] -=== `nginx.access.user_agent.os_minor` +-- +*`nginx.access.user_agent.os_minor`*:: ++ +-- type: long The minor version of the operating system. -[float] -=== `nginx.access.user_agent.os_name` +-- +*`nginx.access.user_agent.os_name`*:: ++ +-- type: keyword The name of the operating system. +-- + [float] == geoip fields @@ -1455,46 +2534,56 @@ Contains GeoIP information gathered based on the remote_ip field. Only present i -[float] -=== `nginx.access.geoip.continent_name` - +*`nginx.access.geoip.continent_name`*:: ++ +-- type: keyword The name of the continent. -[float] -=== `nginx.access.geoip.country_iso_code` +-- +*`nginx.access.geoip.country_iso_code`*:: ++ +-- type: keyword Country ISO code. -[float] -=== `nginx.access.geoip.location` +-- +*`nginx.access.geoip.location`*:: ++ +-- type: geo_point The longitude and latitude. -[float] -=== `nginx.access.geoip.region_name` +-- +*`nginx.access.geoip.region_name`*:: ++ +-- type: keyword The region name. -[float] -=== `nginx.access.geoip.city_name` +-- +*`nginx.access.geoip.city_name`*:: ++ +-- type: keyword The city name. +-- + [float] == error fields @@ -1502,46 +2591,56 @@ Contains fields for the Nginx error logs. -[float] -=== `nginx.error.level` - +*`nginx.error.level`*:: ++ +-- type: keyword Error level (e.g. error, critical). -[float] -=== `nginx.error.pid` +-- +*`nginx.error.pid`*:: ++ +-- type: long Process identifier (PID). -[float] -=== `nginx.error.tid` +-- +*`nginx.error.tid`*:: ++ +-- type: long Thread identifier. -[float] -=== `nginx.error.connection_id` +-- +*`nginx.error.connection_id`*:: ++ +-- type: long Connection identifier. -[float] -=== `nginx.error.message` +-- +*`nginx.error.message`*:: ++ +-- type: text The error message +-- + [[exported-fields-osquery]] == Osquery fields @@ -1562,44 +2661,54 @@ Common fields exported by the result metricset. -[float] -=== `osquery.result.name` - +*`osquery.result.name`*:: ++ +-- type: keyword The name of the query that generated this event. 
-[float] -=== `osquery.result.action` +-- +*`osquery.result.action`*:: ++ +-- type: keyword For incremental data, marks whether the entry was added or removed. It can be one of "added", "removed", or "snapshot". -[float] -=== `osquery.result.host_identifier` +-- +*`osquery.result.host_identifier`*:: ++ +-- type: keyword The identifier for the host on which the osquery agent is running. Normally the hostname. -[float] -=== `osquery.result.unix_time` +-- +*`osquery.result.unix_time`*:: ++ +-- type: long Unix timestamp of the event, in seconds since the epoch. Used for computing the `@timestamp` column. -[float] -=== `osquery.result.calendar_time` +-- +*`osquery.result.calendar_time`*:: ++ +-- String representation of the collection time, as formatted by osquery. +-- + [[exported-fields-postgresql]] == PostgreSQL fields @@ -1621,71 +2730,89 @@ Fields from the PostgreSQL log files. -[float] -=== `postgresql.log.timestamp` - +*`postgresql.log.timestamp`*:: ++ +-- The timestamp from the log line. -[float] -=== `postgresql.log.timezone` +-- +*`postgresql.log.timezone`*:: ++ +-- The timezone of timestamp. -[float] -=== `postgresql.log.thread_id` +-- +*`postgresql.log.thread_id`*:: ++ +-- type: long Process id -[float] -=== `postgresql.log.user` +-- +*`postgresql.log.user`*:: ++ +-- example: admin Name of user -[float] -=== `postgresql.log.database` +-- +*`postgresql.log.database`*:: ++ +-- example: mydb Name of database -[float] -=== `postgresql.log.level` +-- +*`postgresql.log.level`*:: ++ +-- example: FATAL The log level. -[float] -=== `postgresql.log.duration` +-- +*`postgresql.log.duration`*:: ++ +-- type: float example: 30.0 Duration of a query. -[float] -=== `postgresql.log.query` +-- +*`postgresql.log.query`*:: ++ +-- example: SELECT * FROM users; Query statement. -[float] -=== `postgresql.log.message` +-- +*`postgresql.log.message`*:: ++ +-- type: text The logged message. +-- + [[exported-fields-redis]] == Redis fields @@ -1706,38 +2833,46 @@ Redis log files -[float] -=== `redis.log.pid` - +*`redis.log.pid`*:: ++ +-- type: long The process ID of the Redis server. -[float] -=== `redis.log.role` +-- +*`redis.log.role`*:: ++ +-- type: keyword The role of the Redis instance. Can be one of `master`, `slave`, `child` (for RDF/AOF writing child), or `sentinel`. -[float] -=== `redis.log.level` +-- +*`redis.log.level`*:: ++ +-- type: keyword The log level. Can be one of `debug`, `verbose`, `notice`, or `warning`. -[float] -=== `redis.log.message` +-- +*`redis.log.message`*:: ++ +-- type: text The log message +-- + [float] == slowlog fields @@ -1745,46 +2880,56 @@ Slow logs are retrieved from Redis via a network connection. -[float] -=== `redis.slowlog.cmd` - +*`redis.slowlog.cmd`*:: ++ +-- type: keyword The command executed. -[float] -=== `redis.slowlog.duration.us` +-- +*`redis.slowlog.duration.us`*:: ++ +-- type: long How long it took to execute the command in microseconds. -[float] -=== `redis.slowlog.id` +-- +*`redis.slowlog.id`*:: ++ +-- type: long The ID of the query. -[float] -=== `redis.slowlog.key` +-- +*`redis.slowlog.key`*:: ++ +-- type: keyword The key on which the command was executed. -[float] -=== `redis.slowlog.args` +-- +*`redis.slowlog.args`*:: ++ +-- type: keyword The arguments with which the command was called. +-- + [[exported-fields-system]] == System fields @@ -1806,44 +2951,58 @@ Fields from the Linux authorization logs. -[float] -=== `system.auth.timestamp` - +*`system.auth.timestamp`*:: ++ +-- The timestamp as read from the auth message. 
-[float] -=== `system.auth.hostname` +-- +*`system.auth.hostname`*:: ++ +-- The hostname as read from the auth message. -[float] -=== `system.auth.program` +-- +*`system.auth.program`*:: ++ +-- The process name as read from the auth message. -[float] -=== `system.auth.pid` +-- +*`system.auth.pid`*:: ++ +-- type: long The PID of the process that sent the auth message. -[float] -=== `system.auth.message` +-- + +*`system.auth.message`*:: ++ +-- +type: text The message in the log line. -[float] -=== `system.auth.user` +-- +*`system.auth.user`*:: ++ +-- The Unix user that this event refers to. +-- + [float] == ssh fields @@ -1851,48 +3010,60 @@ Fields specific to SSH login events. -[float] -=== `system.auth.ssh.event` - +*`system.auth.ssh.event`*:: ++ +-- The SSH login event. Can be one of "Accepted", "Failed", or "Invalid". "Accepted" means a successful login. "Invalid" means that the user is not configured on the system. "Failed" means that the SSH login attempt has failed. -[float] -=== `system.auth.ssh.method` +-- +*`system.auth.ssh.method`*:: ++ +-- The SSH authentication method. Can be one of "password" or "publickey". -[float] -=== `system.auth.ssh.ip` +-- +*`system.auth.ssh.ip`*:: ++ +-- type: ip The client IP from where the login attempt was made. -[float] -=== `system.auth.ssh.dropped_ip` +-- +*`system.auth.ssh.dropped_ip`*:: ++ +-- type: ip The client IP from SSH connections that are open and immediately dropped. -[float] -=== `system.auth.ssh.port` +-- +*`system.auth.ssh.port`*:: ++ +-- type: long The client port from where the login attempt was made. -[float] -=== `system.auth.ssh.signature` +-- +*`system.auth.ssh.signature`*:: ++ +-- The signature of the client public key. +-- + [float] == geoip fields @@ -1900,46 +3071,56 @@ Contains GeoIP information gathered based on the `system.auth.ip` field. Only pr -[float] -=== `system.auth.ssh.geoip.continent_name` - +*`system.auth.ssh.geoip.continent_name`*:: ++ +-- type: keyword The name of the continent. -[float] -=== `system.auth.ssh.geoip.city_name` +-- +*`system.auth.ssh.geoip.city_name`*:: ++ +-- type: keyword The name of the city. -[float] -=== `system.auth.ssh.geoip.region_name` +-- +*`system.auth.ssh.geoip.region_name`*:: ++ +-- type: keyword The name of the region. -[float] -=== `system.auth.ssh.geoip.country_iso_code` +-- +*`system.auth.ssh.geoip.country_iso_code`*:: ++ +-- type: keyword Country ISO code. -[float] -=== `system.auth.ssh.geoip.location` +-- +*`system.auth.ssh.geoip.location`*:: ++ +-- type: geo_point The longitude and latitude. +-- + [float] == sudo fields @@ -1947,40 +3128,50 @@ Fields specific to events created by the `sudo` command. -[float] -=== `system.auth.sudo.error` - +*`system.auth.sudo.error`*:: ++ +-- example: user NOT in sudoers The error message in case the sudo command failed. -[float] -=== `system.auth.sudo.tty` +-- +*`system.auth.sudo.tty`*:: ++ +-- The TTY where the sudo command is executed. -[float] -=== `system.auth.sudo.pwd` +-- +*`system.auth.sudo.pwd`*:: ++ +-- The current directory where the sudo command is executed. -[float] -=== `system.auth.sudo.user` +-- +*`system.auth.sudo.user`*:: ++ +-- example: root The target user to which the sudo command is switching. -[float] -=== `system.auth.sudo.command` +-- +*`system.auth.sudo.command`*:: ++ +-- The command executed via sudo. +-- + [float] == useradd fields @@ -1988,36 +3179,46 @@ Fields specific to events created by the `useradd` command. 
-[float] -=== `system.auth.useradd.name` - +*`system.auth.useradd.name`*:: ++ +-- The user name being added. -[float] -=== `system.auth.useradd.uid` +-- +*`system.auth.useradd.uid`*:: ++ +-- type: long The user ID. -[float] -=== `system.auth.useradd.gid` +-- +*`system.auth.useradd.gid`*:: ++ +-- type: long The group ID. -[float] -=== `system.auth.useradd.home` +-- +*`system.auth.useradd.home`*:: ++ +-- The home folder for the new user. -[float] -=== `system.auth.useradd.shell` +-- +*`system.auth.useradd.shell`*:: ++ +-- The default shell for the new user. +-- + [float] == groupadd fields @@ -2025,20 +3226,24 @@ Fields specific to events created by the `groupadd` command. -[float] -=== `system.auth.groupadd.name` - +*`system.auth.groupadd.name`*:: ++ +-- The name of the new group. -[float] -=== `system.auth.groupadd.gid` +-- +*`system.auth.groupadd.gid`*:: ++ +-- type: long The ID of the new group. +-- + [float] == syslog fields @@ -2046,36 +3251,48 @@ Contains fields from the syslog system logs. -[float] -=== `system.syslog.timestamp` - +*`system.syslog.timestamp`*:: ++ +-- The timestamp as read from the syslog message. -[float] -=== `system.syslog.hostname` +-- +*`system.syslog.hostname`*:: ++ +-- The hostname as read from the syslog message. -[float] -=== `system.syslog.program` +-- +*`system.syslog.program`*:: ++ +-- The process name as read from the syslog message. -[float] -=== `system.syslog.pid` +-- +*`system.syslog.pid`*:: ++ +-- The PID of the process that sent the syslog message. -[float] -=== `system.syslog.message` +-- + +*`system.syslog.message`*:: ++ +-- +type: text The message in the log line. +-- + [[exported-fields-traefik]] == Traefik fields @@ -2097,25 +3314,29 @@ Contains fields for the Traefik access logs. -[float] -=== `traefik.access.remote_ip` - +*`traefik.access.remote_ip`*:: ++ +-- type: keyword Client IP address. -[float] -=== `traefik.access.user_name` +-- +*`traefik.access.user_name`*:: ++ +-- type: keyword The user name used when basic authentication is used. -[float] -=== `traefik.access.method` +-- +*`traefik.access.method`*:: ++ +-- type: keyword example: GET @@ -2123,33 +3344,41 @@ example: GET The request HTTP method. -[float] -=== `traefik.access.url` +-- +*`traefik.access.url`*:: ++ +-- type: keyword The request HTTP URL. -[float] -=== `traefik.access.http_version` +-- +*`traefik.access.http_version`*:: ++ +-- type: keyword The HTTP version. -[float] -=== `traefik.access.response_code` +-- +*`traefik.access.response_code`*:: ++ +-- type: long The HTTP response code. -[float] -=== `traefik.access.body_sent.bytes` +-- +*`traefik.access.body_sent.bytes`*:: ++ +-- type: long format: bytes @@ -2157,22 +3386,28 @@ format: bytes The number of bytes of the server response body. -[float] -=== `traefik.access.referrer` +-- +*`traefik.access.referrer`*:: ++ +-- type: keyword The HTTP referrer. -[float] -=== `traefik.access.agent` +-- +*`traefik.access.agent`*:: ++ +-- type: text Contains the un-parsed user agent string. Only present if the user agent Elasticsearch plugin is not available or not used. +-- + [float] == user_agent fields @@ -2180,41 +3415,49 @@ Contains the parsed User agent field. Only present if the user agent Elasticsear -[float] -=== `traefik.access.user_agent.device` - +*`traefik.access.user_agent.device`*:: ++ +-- type: keyword The name of the physical device. -[float] -=== `traefik.access.user_agent.major` +-- +*`traefik.access.user_agent.major`*:: ++ +-- type: long The major version of the user agent. 
-[float] -=== `traefik.access.user_agent.minor` +-- +*`traefik.access.user_agent.minor`*:: ++ +-- type: long The minor version of the user agent. -[float] -=== `traefik.access.user_agent.patch` +-- +*`traefik.access.user_agent.patch`*:: ++ +-- type: keyword The patch version of the user agent. -[float] -=== `traefik.access.user_agent.name` +-- +*`traefik.access.user_agent.name`*:: ++ +-- type: keyword example: Chrome @@ -2222,38 +3465,48 @@ example: Chrome The name of the user agent. -[float] -=== `traefik.access.user_agent.os` +-- +*`traefik.access.user_agent.os`*:: ++ +-- type: keyword The name of the operating system. -[float] -=== `traefik.access.user_agent.os_major` +-- +*`traefik.access.user_agent.os_major`*:: ++ +-- type: long The major version of the operating system. -[float] -=== `traefik.access.user_agent.os_minor` +-- +*`traefik.access.user_agent.os_minor`*:: ++ +-- type: long The minor version of the operating system. -[float] -=== `traefik.access.user_agent.os_name` +-- +*`traefik.access.user_agent.os_name`*:: ++ +-- type: keyword The name of the operating system. +-- + [float] == geoip fields @@ -2261,66 +3514,82 @@ Contains GeoIP information gathered based on the remote_ip field. Only present i -[float] -=== `traefik.access.geoip.continent_name` - +*`traefik.access.geoip.continent_name`*:: ++ +-- type: keyword The name of the continent. -[float] -=== `traefik.access.geoip.country_iso_code` +-- +*`traefik.access.geoip.country_iso_code`*:: ++ +-- type: keyword Country ISO code. -[float] -=== `traefik.access.geoip.location` +-- +*`traefik.access.geoip.location`*:: ++ +-- type: geo_point The longitude and latitude. -[float] -=== `traefik.access.geoip.region_name` +-- +*`traefik.access.geoip.region_name`*:: ++ +-- type: keyword The region name. -[float] -=== `traefik.access.geoip.city_name` +-- +*`traefik.access.geoip.city_name`*:: ++ +-- type: keyword The city name. -[float] -=== `traefik.access.request_count` +-- +*`traefik.access.request_count`*:: ++ +-- type: long The number of requests -[float] -=== `traefik.access.frontend_name` +-- +*`traefik.access.frontend_name`*:: ++ +-- type: text The name of the frontend used -[float] -=== `traefik.access.backend_url` +-- +*`traefik.access.backend_url`*:: ++ +-- type: text The url of the backend where request is forwarded +-- + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc index f2d1250f..1f584f0b 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc @@ -1,22 +1,21 @@ [[filtering-and-enhancing-data]] == Filter and enhance the exported data -Your use case might require only a subset of the data exported by Filebeat, or -you might need to enhance the exported data (for example, by adding metadata). -Filebeat provides a couple of options for filtering and enhancing exported -data. - -You can configure each prospector to include or exclude specific lines or files. -This allows you to specify different filtering criteria for each prospector. -To do this, you use the <>, -<>, and <> -options under the `filebeat.prospectors` section of the config file (see -<>). The disadvantage of this approach is that -you need to implement a configuration option for each filtering criteria that -you need. 
+Your use case might require only a subset of the data exported by {beatname_uc}, +or you might need to enhance the exported data (for example, by adding +metadata). {beatname_uc} provides a couple of options for filtering and +enhancing exported data. + +You can configure each input to include or exclude specific lines or files. This +allows you to specify different filtering criteria for each input. To do this, +you use the `include_lines`, `exclude_lines`, and `exclude_files` options under +the +{beatname_lc}.inputs+ section of the config file (see +<>). The disadvantage of this approach is +that you need to implement a configuration option for each filtering criteria +that you need. Another approach (the one described here) is to define processors to configure -global processing across all data exported by Filebeat. +global processing across all data exported by {beatname_uc}. [float] @@ -55,7 +54,7 @@ processors: [[decode-json-example]] ==== Decode JSON example -In the following example, the fields exported by Filebeat include a +In the following example, the fields exported by {beatname_uc} include a field, `inner`, whose value is a JSON object encoded as a string: [source,json] @@ -65,10 +64,11 @@ field, `inner`, whose value is a JSON object encoded as a string: The following configuration decodes the inner JSON object: -[source,yaml] +["source","yaml",subs="attributes"] ----------------------------------------------------- -filebeat.prospectors: -- paths: +{beatname_lc}.inputs: +- type: log + paths: - input.json json.keys_under_root: true @@ -93,7 +93,7 @@ The resulting output looks something like this: "inner": { "data": "value" }, - "prospector": { + "input": { "type": "log", }, "offset": 55, diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc index 80076d81..947eb27f 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc @@ -32,13 +32,32 @@ It is not possible to use a symlink as registry file. NOTE: The registry file is only updated when new events are flushed and not on a predefined period. That means in case there are some states where the TTL expired, these are only removed when new event are processed. +[float] +==== `registry_file_permissions` + +The permissions mask to apply on registry file. The default value is 0600. The permissions option must be a valid Unix-style file permissions mask expressed in octal notation. In Go, numbers in octal notation must start with 0. + +This option is not supported on Windows. + +Examples: + + 0644: give read and write access to the file owner, and read access to all others. + 0600: give read and write access to the file owner, and no access to all others. + 0664: give read and write access to the file owner and members of the group associated with the file, as well as read access to all other users. + +[source,yaml] +------------------------------------------------------------------------------------- +filebeat.registry_file_permissions: 0600 +------------------------------------------------------------------------------------- [float] ==== `config_dir` -The full path to the directory that contains additional prospector configuration files. +deprecated[6.0.0, Use <> instead.] + +The full path to the directory that contains additional input configuration files. 
Each configuration file must end with `.yml`. Each config file must also specify the full Filebeat -config hierarchy even though only the prospector part of the file is processed. All global +config hierarchy even though only the `inputs` part of each file is processed. All global options, such as `registry_file`, are ignored. The `config_dir` option MUST point to a directory other than the directory where the main Filebeat config file resides. @@ -81,4 +100,3 @@ filebeat.shutdown_timeout: 5s ------------------------------------------------------------------------------------- include::../../libbeat/docs/generalconfig.asciidoc[] - diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc index a45d6a91..8f254618 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc @@ -1,16 +1,16 @@ -[[configuration-filebeat-modules]] +[id="configuration-{beatname_lc}-modules"] == Specify which modules to run -NOTE: Using Filebeat modules is optional. You may decide to -<> if you are using -a log file type that isn't supported, or you want to use a different setup. +NOTE: Using {beatname_uc} modules is optional. You may decide to +<> if you are using +a log type that isn't supported, or you want to use a different setup. -Filebeat <> provide a quick way for you to get started +{beatname_uc} <<{beatname_lc}-modules,modules>> provide a quick way for you to get started processing common log formats. They contain default configurations, Elasticsearch ingest node pipeline definitions, and Kibana dashboards to help you implement and deploy a log monitoring solution. -Filebeat provides a few different ways to enable modules. You can: +{beatname_uc} provides a few different ways to enable modules. You can: * <> * <> @@ -21,10 +21,10 @@ include::../../libbeat/docs/shared-note-file-permissions.asciidoc[] When you enable modules, you can also <> to change the default behavior of the modules, and you can specify -<> to override prospector settings. +<> to override input settings. -Before running Filebeat with modules enabled, make sure you also set up the -environment to use Kibana dashboards. See <> for +Before running {beatname_uc} with modules enabled, make sure you also set up the +environment to use Kibana dashboards. See <<{beatname_lc}-modules-quickstart>> for more information. [float] @@ -32,27 +32,27 @@ more information. === Enable module configs in the `modules.d` directory The `modules.d` directory contains default configurations for all the modules -available in Filebeat. You can enable or disable specific module configurations +available in {beatname_uc}. You can enable or disable specific module configurations under `modules.d` by running the <> commands. For example, to enable the `apache2` and `mysql` configs in the `modules.d` directory, you use: -[source,shell] +["source","sh",subs="attributes"] ---- -./filebeat modules enable apache2 mysql +./{beatname_lc} modules enable apache2 mysql ---- -Then when you run Filebeat, it loads the corresponding module configurations +Then when you run {beatname_uc}, it loads the corresponding module configurations specified in the `modules.d` directory (for example, `modules.d/apache2.yml` and `modules.d/mysql.yml`). 
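For reference, each enabled file under `modules.d` is itself a small YAML document containing a list of module configurations. A minimal sketch of what `modules.d/apache2.yml` might contain follows; the variable settings and paths are illustrative assumptions, not the shipped defaults for every platform:

[source,yaml]
----
- module: apache2
  access:
    enabled: true
    var.paths: ["/var/log/apache2/access.log*"]   # illustrative path
  error:
    enabled: true
    var.paths: ["/var/log/apache2/error.log*"]    # illustrative path
----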
To see a list of enabled and disabled modules, run: -[source,shell] +["source","sh",subs="attributes"] ---- -./filebeat modules list +./{beatname_lc} modules list ---- The default module configurations assume that the logs you’re harvesting are @@ -62,12 +62,12 @@ to specify variable settings. See <>. [float] [[enable-modules-cli]] -=== Enable modules when you run Filebeat +=== Enable modules when you run {beatname_uc} -To enable specific <> when you run Filebeat at the +To enable specific <<{beatname_lc}-modules,modules>> when you run {beatname_uc} at the command line, you can use the `--modules` flag. This approach works well when you're getting started and want to specify different modules and settings each -time you run Filebeat. Any modules specified at the command line will be loaded +time you run {beatname_uc}. Any modules specified at the command line will be loaded along with any modules that are enabled in the configuration file or `modules.d` directory. If there's a conflict, the configuration specified at the command line is used. @@ -75,9 +75,9 @@ line is used. The following example shows how to enable and run the `nginx`,`mysql`, and `system` modules. -[source,shell] +["source","sh",subs="attributes"] ---- -./filebeat -e --modules nginx,mysql,system +./{beatname_lc} -e --modules nginx,mysql,system ---- The default module configurations assume that the logs you’re harvesting are @@ -91,7 +91,7 @@ to specify variable settings. See <>. When possible, you should use the config files in the `modules.d` directory. -However, enabling <> directly in the config file is a +However, enabling <<{beatname_lc}-modules,modules>> directly in the config file is a practical approach if you have upgraded from a previous version of {beatname_uc} and don't want to move your module configs to the `modules.d` directory. You can continue to configure modules in the +{beatname_lc}.yml+ file, but you won't be @@ -105,9 +105,9 @@ a dash (-) and is followed by settings for that module. The following example shows a configuration that runs the `nginx`,`mysql`, and `system` modules. -[source,yaml] +["source","yaml",subs="attributes"] ---- -filebeat.modules: +{beatname_lc}.modules: - module: nginx - module: mysql - module: system @@ -136,9 +136,9 @@ path for `nginx` access log files: To set the path for Nginx access log files at the command line, you use the `-M` flag. For example: -[source,shell] +["source","shell",subs="attributes"] ---- -./filebeat -M "nginx.access.var.paths=[/var/log/nginx/access.log*]" +./{beatname_lc} -M "nginx.access.var.paths=[/var/log/nginx/access.log*]" ---- When you set variables at the command line, the variable name needs to include @@ -148,42 +148,43 @@ must start with `-M`. Here you see how to use the `-M` flag along with the `--modules` flag. This example shows how to set the paths to the access and error logs: -[source,shell] +["source","sh",subs="attributes"] ---- -./filebeat --modules nginx -M "nginx.access.var.paths=[/var/log/nginx/access.log*]" -M "nginx.error.var.paths=[/var/log/nginx/error.log*]" +./{beatname_lc} --modules nginx -M "nginx.access.var.paths=[/var/log/nginx/access.log*]" -M "nginx.error.var.paths=[/var/log/nginx/error.log*]" ---- For information about specific variables that you can set for each fileset, -see the <>. +see the <<{beatname_lc}-modules,documentation for the modules>>. [[advanced-settings]] === Advanced settings -Behind the scenes, each module starts a Filebeat prospector. Advanced users -can add or override any prospector settings. 
For example, you can set -<> to `true` in the module configuration: +Behind the scenes, each module starts a {beatname_uc} input. Advanced users +can add or override any input settings. For example, you can set +<<{beatname_lc}-input-log-close-eof,close_eof>> to `true` in the module +configuration: [source,yaml] ---------------------------------------------------------------------- - module: nginx access: - prospector: + input: close_eof: true ---------------------------------------------------------------------- Or at the command line like this: -[source,shell] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -./filebeat -M "nginx.access.prospector.close_eof=true" +./{beatname_lc} -M "nginx.access.input.close_eof=true" ---------------------------------------------------------------------- Here you see how to use the `-M` flag along with the `--modules` flag: -[source,shell] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -./filebeat --modules nginx -M "nginx.access.prospector.close_eof=true" +./{beatname_lc} --modules nginx -M "nginx.access.input.close_eof=true" ---------------------------------------------------------------------- @@ -191,15 +192,15 @@ You can use wildcards to change variables or settings for multiple modules/filesets at once. For example, the following command enables `close_eof` for all the filesets in the `nginx` module: -[source,shell] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -./filebeat -M "nginx.*.prospector.close_eof=true" +./{beatname_lc} -M "nginx.*.input.close_eof=true" ---------------------------------------------------------------------- -The following command enables `close_eof` for all prospectors created by any of +The following command enables `close_eof` for all inputs created by any of the modules: -[source,shell] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -./filebeat -M "*.*.prospector.close_eof=true" +./{beatname_lc} -M "*.*.input.close_eof=true" ---------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-options.asciidoc index e2483099..c2fcceae 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-options.asciidoc @@ -1,539 +1,67 @@ -[[configuration-filebeat-options]] -== Set up prospectors - -TIP: <> provide the fastest getting -started experience for common log formats. See <> -to learn how to get started with modules. Also see -<> for information about enabling and -configuring modules. - -Filebeat uses prospectors to locate and process files. To configure Filebeat, -you specify a list of prospectors in the `filebeat.prospectors` section of the -+{beatname_lc}.yml+ config file. - -Each item in the list begins with a dash (-) and specifies prospector-specific -configuration options, including the list of paths that are crawled to locate -the files. 
- -Here is a sample configuration: - -[source,yaml] -------------------------------------------------------------------------------------- -filebeat.prospectors: +[id="configuration-{beatname_lc}-options"] +== Configure inputs + +++++ +Configure inputs +++++ + +TIP: <<{beatname_lc}-modules-overview,{beatname_uc} modules>> provide the +fastest getting started experience for common log formats. See +<<{beatname_lc}-modules-quickstart>> to learn how to get started with modules. +Also see <> for information about enabling +and configuring modules. + +To configure {beatname_uc} manually (instead of using +<<{beatname_lc}-modules-overview,modules>>), you specify a list of inputs in the ++{beatname_lc}.inputs+ section of the +{beatname_lc}.yml+. Inputs specify how +{beatname_uc} locates and processes input data. + +The list is a http://yaml.org/[YAML] array, so each input begins with +a dash (`-`). You can specify multiple inputs, and you can specify the same +input type more than once. For example: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: - type: log paths: - - /var/log/apache/httpd-*.log - + - /var/log/system.log + - /var/log/wifi.log - type: log paths: - - /var/log/messages - - /var/log/*.log -------------------------------------------------------------------------------------- - -[float] -=== Configuration options - -[float] -==== `type` - -One of the following input types: - - * log: Reads every line of the log file (default). - * stdin: Reads the standard in. - * redis: Reads slow log entries from redis (experimental). - * udp: Reads events over UDP. Also see <>. - * docker: Reads logs from Docker. Also see <> (experimental). - -The value that you specify here is used as the `type` for each event published to Logstash and Elasticsearch. - -[float] -[[prospector-paths]] -==== `paths` - -A list of glob-based paths that should be crawled and fetched. All patterns -supported by https://golang.org/pkg/path/filepath/#Glob[Golang Glob] are also -supported here. For example, to fetch all files from a predefined level of -subdirectories, the following pattern can be used: `/var/log/*/*.log`. This -fetches all `.log` files from the subfolders of `/var/log`. It does not -fetch log files from the `/var/log` folder itself. -It is possible to recursively fetch all files in all subdirectories of a directory -using the optional <> settings. - -Filebeat starts a harvester for each file that it finds under the specified -paths. You can specify one path per line. Each line begins with a dash (-). - -[float] -[[recursive_glob]] -==== `recursive_glob.enabled` - -Enable expanding `**` into recursive glob patterns. With this feature enabled, -the rightmost `**` in each path is expanded into a fixed number of glob -patterns. For example: `/foo/**` expands to `/foo`, `/foo/*`, `/foo/*/*`, and so -on. If enabled it expands a single `**` into a 8-level deep `*` pattern. - -This feature is enabled by default, set to `recursive_glob.enabled` to false to -disable it. - -[float] -==== `encoding` - -The file encoding to use for reading files that contain international characters. -See the encoding names http://www.w3.org/TR/encoding/[recommended by the W3C for use in HTML5]. - -Here are some sample encodings from W3C recommendation: - - * plain, latin1, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, hz-gb-2312, - * euc-kr, euc-jp, iso-2022-jp, shift-jis, and so on - -The `plain` encoding is special, because it does not validate or transform any input. 
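As a hedged illustration of the `encoding` option (the path and the choice of Shift-JIS are assumptions for a legacy application writing non-UTF-8 logs), a prospector might be configured like this:

[source,yaml]
----
filebeat.prospectors:
- type: log
  paths:
    - /var/log/legacy-app/*.log   # hypothetical path to non-UTF-8 log files
  encoding: shift-jis             # one of the encoding names recommended by the W3C, as listed above
----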
- -[float] -[[exclude-lines]] -==== `exclude_lines` - -A list of regular expressions to match the lines that you want Filebeat to exclude. Filebeat drops any lines that match a regular expression in the list. By default, no lines are dropped. - -If <> is also specified, each multiline message is combined into a single line before the lines are filtered by `exclude_lines`. - -The following example configures Filebeat to drop any lines that start with "DBG". - -[source,yaml] -------------------------------------------------------------------------------------- -filebeat.prospectors: -- paths: - - /var/log/myapp/*.log - exclude_lines: ['^DBG'] -------------------------------------------------------------------------------------- - -See <> for a list of supported regexp patterns. - -[float] -[[include-lines]] -==== `include_lines` - -A list of regular expressions to match the lines that you want Filebeat to include. Filebeat exports only the lines that match a regular expression in the list. By default, all lines are exported. - -If <> is also specified, each multiline message is combined into a single line before the lines are filtered by `include_lines`. - -The following example configures Filebeat to export any lines that start with "ERR" or "WARN": - -[source,yaml] -------------------------------------------------------------------------------------- -filebeat.prospectors: -- paths: - - /var/log/myapp/*.log - include_lines: ['^ERR', '^WARN'] -------------------------------------------------------------------------------------- - -NOTE: If both `include_lines` and `exclude_lines` are defined, Filebeat executes `include_lines` first and then executes `exclude_lines`. -The order in which the two options are defined doesn't matter. The `include_lines` option will always be executed -before the `exclude_lines` option, even if `exclude_lines` appears before `include_lines` in the config file. - -The following example exports all Apache log lines except the debugging messages (DBGs): - -[source,yaml] -------------------------------------------------------------------------------------- - include_lines: ['apache'] - exclude_lines: ['^DBG'] -------------------------------------------------------------------------------------- - -See <> for a list of supported regexp patterns. - -[float] -[[exclude-files]] -==== `exclude_files` - -A list of regular expressions to match the files that you want Filebeat to ignore. By default no files are excluded. - -The following example configures Filebeat to ignore all the files that have a `gz` extension: - -[source,yaml] -------------------------------------------------------------------------------------- - exclude_files: ['\.gz$'] -------------------------------------------------------------------------------------- - -See <> for a list of supported regexp patterns. - -[float] -==== `tags` - -A list of tags that the Beat includes in the `tags` field of each published -event. Tags make it easy to select specific events in Kibana or apply -conditional filtering in Logstash. These tags will be appended to the list of -tags specified in the general configuration. - -Example: - -[source,yaml] --------------------------------------------------------------------------------- -filebeat.prospectors: -- paths: ["/var/log/app/*.json"] - tags: ["json"] --------------------------------------------------------------------------------- - -[float] -[[configuration-fields]] -==== `fields` - -Optional fields that you can specify to add additional information to the -output. 
For example, you might add fields that you can use for filtering log -data. Fields can be scalar values, arrays, dictionaries, or any nested -combination of these. By default, the fields that you specify here will be -grouped under a `fields` sub-dictionary in the output document. To store the -custom fields as top-level fields, set the `fields_under_root` option to true. -If a duplicate field is declared in the general configuration, then its value -will be overwritten by the value declared here. - -[source,yaml] --------------------------------------------------------------------------------- -filebeat.prospectors: -- paths: ["/var/log/app/*.log"] + - "/var/log/apache2/*" fields: - app_id: query_engine_12 --------------------------------------------------------------------------------- - -[float] -[[fields-under-root]] -==== `fields_under_root` - -If this option is set to true, the custom <> are stored as -top-level fields in the output document instead of being grouped under a -`fields` sub-dictionary. If the custom field names conflict with other field -names added by Filebeat, then the custom fields overwrite the other fields. - -[float] -==== `processors` - -A list of processors to apply to the data generated by the prospector. - -See <> for information about specifying -processors in your config. - -[float] -[[ignore-older]] -==== `ignore_older` - -If this option is enabled, Filebeat ignores any files that were modified before the specified timespan. Configuring `ignore_older` can be especially useful if you keep log files for a long time. For example, if you want to start Filebeat, but only want to send the newest files and files from last week, you can configure this option. - -You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is 0, which disables the setting. -Commenting out the config has the same effect as setting it to 0. - -IMPORTANT: You must set `ignore_older` to be greater than `close_inactive`. - -The files affected by this setting fall into two categories: - -* Files that were never harvested -* Files that were harvested but weren't updated for longer than `ignore_older` - -For files which were never seen before, the offset state is set to the end of the file. If a state already exist, the offset is not changed. In case a file is updated again later, reading continues at the set offset position. - -The `ignore_older` setting relies on the modification time of the file to determine if a file is ignored. If the modification time of the file is not updated when lines are written to a file (which can happen on Windows), the `ignore_older` setting may cause Filebeat to ignore files even though content was added at a later time. - -To remove the state of previously harvested files from the registry file, use the `clean_inactive` configuration option. - -Before a file can be ignored by the prospector, it must be closed. To ensure a file is no longer being harvested when it is ignored, you must set `ignore_older` to a longer duration than `close_inactive`. - -If a file that's currently being harvested falls under `ignore_older`, the harvester will first finish reading the file and close it after `close_inactive` is reached. Then, after that, the file will be ignored. - -[float] -[[close-options]] -==== `close_*` - -The `close_*` configuration options are used to close the harvester after a certain criteria or time. Closing the harvester means closing the file handler. 
If a file is updated after the harvester is closed, the file will be picked up again after `scan_frequency` has elapsed. However, if the file is moved or deleted while the harvester is closed, Filebeat will not be able to pick up the file again, and any data that the harvester hasn't read will be lost. - - -[float] -[[close-inactive]] -==== `close_inactive` - -When this option is enabled, Filebeat closes the file handle if a file has not been harvested for the specified duration. The counter for the defined period starts when the last log line was read by the harvester. It is not based on the modification time of the file. If the closed file changes again, a new harvester is started and the latest changes will be picked up after `scan_frequency` has elapsed. - -We recommended that you set `close_inactive` to a value that is larger than the least frequent updates to your log files. For example, if your log files get updated every few seconds, you can safely set `close_inactive` to `1m`. If there are log files with very different update rates, you can use multiple prospector configurations with different values. - -Setting `close_inactive` to a lower value means that file handles are closed sooner. However this has the side effect that new log lines are not sent in near real time if the harvester is closed. - -The timestamp for closing a file does not depend on the modification time of the file. Instead, Filebeat uses an internal timestamp that reflects when the file was last harvested. For example, if `close_inactive` is set to 5 minutes, the countdown for the 5 minutes starts after the harvester reads the last line of the file. - -You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is 5m. - -[float] -[[close-renamed]] -==== `close_renamed` - -WARNING: Only use this option if you understand that data loss is a potential side effect. - -When this option is enabled, Filebeat closes the file handler when a file is renamed. This happens, for example, when rotating files. By default, the harvester stays open and keeps reading the file because the file handler does not depend on the file name. If the `close_renamed` option is enabled and the file is renamed or moved in such a way that it's no longer matched by the file patterns specified for the prospector, the file will not be picked up again. Filebeat will not finish reading the file. - -WINDOWS: If your Windows log rotation system shows errors because it can't rotate the files, you should enable this option. - -[float] -[[close-removed]] -==== `close_removed` - -When this option is enabled, Filebeat closes the harvester when a file is removed. Normally a file should only be removed after it's inactive for the duration specified by `close_inactive`. However, if a file is removed early and you don't enable `close_removed`, Filebeat keeps the file open to make sure the harvester has completed. If this setting results in files that are not completely read because they are removed from disk too early, disable this option. - -This option is enabled by default. If you disable this option, you must also disable `clean_removed`. - -WINDOWS: If your Windows log rotation system shows errors because it can't rotate files, make sure this option is enabled. + apache: true + fields_under_root: true +---- -[float] -[[close-eof]] -==== `close_eof` - -WARNING: Only use this option if you understand that data loss is a potential side effect. - -When this option is enabled, Filebeat closes a file as soon as the end of a file is reached. 
This is useful when your files are only written once and not updated from time to time. For example, this happens when you are writing every single log event to a new file. This option is disabled by default. - -[float] -[[close-timeout]] -==== `close_timeout` - -WARNING: Only use this option if you understand that data loss is a potential side effect. Another side effect is that multiline events might not be completely sent before the timeout expires. - -When this option is enabled, Filebeat gives every harvester a predefined lifetime. Regardless of where the reader is in the file, reading will stop after the `close_timeout` period has elapsed. This option can be useful for older log files when you want to spend only a predefined amount of time on the files. While `close_timeout` will close the file after the predefined timeout, if the file is still being updated, the prospector will start a new harvester again per the defined `scan_frequency`. And the close_timeout for this harvester will start again with the countdown for the timeout. - -This option is particularly useful in case the output is blocked, which makes Filebeat keep open file handlers even for files that were deleted from the disk. Setting `close_timeout` to `5m` ensures that the files are periodically closed so they can be freed up by the operating system. - -If you set `close_timeout` to equal `ignore_older`, the file will not be picked up if it's modified while the harvester is closed. This combination of settings normally leads to data loss, and the complete file is not sent. - -When you use `close_timeout` for logs that contain multiline events, the harvester might stop in the middle of a multiline event, which means that only parts of the event will be sent. If the harvester is started again and the file still exists, only the second part of the event will be sent. - -This option is set to 0 by default which means it is disabled. - - -[float] -[[clean-options]] -==== `clean_*` - -The `clean_*` options are used to clean up the state entries in the registry file. These settings help to reduce the size of the registry file and can prevent a potential <>. - -[float] -[[clean-inactive]] -==== `clean_inactive` - -WARNING: Only use this option if you understand that data loss is a potential side effect. - -When this option is enabled, Filebeat removes the state of a file after the specified period of inactivity has elapsed. The state can only be removed if the file is already ignored by Filebeat (the file is older than `ignore_older`). The `clean_inactive` setting must be greater than `ignore_older + scan_frequency` to make sure that no states are removed while a file is still being harvested. Otherwise, the setting could result in Filebeat resending the full content constantly because `clean_inactive` removes state for files that are still detected by the prospector. If a file is updated or appears again, the file is read from the beginning. - -The `clean_inactive` configuration option is useful to reduce the size of the registry file, especially if a large amount of new files are generated every day. - -This config option is also useful to prevent Filebeat problems resulting from inode reuse on Linux. For more information, see <>. - -NOTE: Every time a file is renamed, the file state is updated and the counter for `clean_inactive` starts at 0 again. 
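A minimal sketch of values that satisfy the constraints described above, namely `ignore_older` greater than `close_inactive` and `clean_inactive` greater than `ignore_older + scan_frequency`; the path and the specific durations are illustrative assumptions:

[source,yaml]
----
filebeat.prospectors:
- type: log
  paths:
    - /var/log/app/*.log    # hypothetical path with daily-rotated files
  scan_frequency: 10s
  close_inactive: 5m
  ignore_older: 48h         # greater than close_inactive
  clean_inactive: 72h       # greater than ignore_older + scan_frequency
----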
- -[float] -[[clean-removed]] -==== `clean_removed` - -When this option is enabled, Filebeat cleans files from the registry if they cannot be found on disk anymore under the last known name. This means also files which were renamed after the harvester was finished will be removed. This option is enabled by default. - -If a shared drive disappears for a short period and appears again, all files will be read again from the beginning because the states were removed from the registry file. In such cases, we recommend that you disable the `clean_removed` option. - -You must disable this option if you also disable `close_removed`. - -[float] -[[scan-frequency]] -==== `scan_frequency` - -How often the prospector checks for new files in the paths that are specified -for harvesting. For example, if you specify a glob like `/var/log/*`, the -directory is scanned for files using the frequency specified by -`scan_frequency`. Specify 1s to scan the directory as frequently as possible -without causing Filebeat to scan too frequently. We do not recommend to set this value `<1s`. - -If you require log lines to be sent in near real time do not use a very low `scan_frequency` but adjust `close_inactive` so the file handler stays open and constantly polls your files. - -The default setting is 10s. - - -[float] -[[scan-sort]] -==== `scan.sort` - -experimental[] - -If you specify a value other than the empty string for this setting you can determine whether to use ascending or descending order using `scan.order`. Possible values are `modtime` and `filename`. To sort by file modification time, use `modtime`, otherwise use `filename`. Leave this option empty to disable it. - -If you specify a value for this setting, you can use `scan.order` to configure whether files are scanned in ascending or descending order. - -The default setting is disabled. - -[float] -[[scan-order]] -==== `scan.order` - -experimental[] - -Specifies whether to use ascending or descending order when `scan.sort` is set to a value other than none. Possible values are `asc` or `desc`. - -The default setting is `asc`. - -[float] -==== `harvester_buffer_size` - -The size in bytes of the buffer that each harvester uses when fetching a file. The default is 16384. - -[float] -==== `max_bytes` - -The maximum number of bytes that a single log message can have. All bytes after `max_bytes` are discarded and not sent. -This setting is especially useful for multiline log messages, which can get large. The default is 10MB (10485760). [float] -[[config-json]] -==== `json` -These options make it possible for Filebeat to decode logs structured as JSON messages. Filebeat -processes the logs line by line, so the JSON decoding only works if there is one JSON object per -line. +[id="{beatname_lc}-input-types"] +=== Input types -The decoding happens before line filtering and multiline. You can combine JSON decoding with filtering -and multiline if you set the `message_key` option. This can be helpful in situations where the application -logs are wrapped in JSON objects, like it happens for example with Docker. 
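A hedged sketch of combining JSON decoding with multiline through `message_key`: the path assumes Docker's default json-file log location mentioned later in this document, and the multiline pattern that treats indented lines as continuations is only illustrative.

[source,yaml]
----
filebeat.prospectors:
- type: log
  paths:
    - /var/lib/docker/containers/*/*.log   # assumed default json-file driver location
  json.keys_under_root: true
  json.add_error_key: true
  json.message_key: log                    # multiline is applied to the value of this key
  multiline.pattern: '^[[:space:]]'        # illustrative: indented lines continue the previous event
  multiline.negate: false
  multiline.match: after
----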
+You can configure {beatname_uc} to use the following inputs: -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------------- -json.keys_under_root: true -json.add_error_key: true -json.message_key: log -------------------------------------------------------------------------------------- - -You must specify at least one of the following settings to enable JSON parsing -mode: - -*`keys_under_root`*:: By default, the decoded JSON is placed under a "json" key in the output document. -If you enable this setting, the keys are copied top level in the output document. The default is false. - -*`overwrite_keys`*:: If `keys_under_root` and this setting are enabled, then the values from the decoded -JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) in case of conflicts. - -*`add_error_key`*:: If this setting is enabled, Filebeat adds a "error.message" and "error.type: json" key in case of JSON -unmarshalling errors or when a `message_key` is defined in the configuration but cannot be used. - -*`message_key`*:: An optional configuration setting that specifies a JSON key on -which to apply the line filtering and multiline settings. If specified the -key must be at the top level in the JSON object and the value associated with -the key must be a string, otherwise no filtering or multiline aggregation will -occur. - -[float] -==== `multiline` - -Options that control how Filebeat deals with log messages that span multiple lines. See <> for more information about configuring multiline options. - -[float] -==== `tail_files` - -If this option is set to true, Filebeat starts reading new files at the end of each file instead of the beginning. When this option is used in combination with log rotation, it's possible that the first log entries in a new file might be skipped. The default setting is false. - -This option applies to files that Filebeat has not already processed. If you ran Filebeat previously and the state of the file was already persisted, `tail_files` will not apply. Harvesting will continue at the previous offset. To apply `tail_files` to all files, you must stop Filebeat and remove the registry file. Be aware that doing this removes ALL previous states. - -NOTE: You can use this setting to avoid indexing old log lines when you run Filebeat on a set of log files for the first time. After the first run, we recommend disabling this option, or you risk losing lines during file rotation. - -[float] -==== `pipeline` - -The Ingest Node pipeline ID to set for the events generated by this prospector. - -NOTE: The pipeline ID can also be configured in the Elasticsearch output, but this - option usually results in simpler configuration files. If the pipeline is configured both - in the prospector and in the output, the option from the prospector is the one used. - -[float] -==== `symlinks` - -The `symlinks` option allows Filebeat to harvest symlinks in addition to regular files. When harvesting symlinks, Filebeat opens and reads the original file even though it reports the path of the symlink. - -When you configure a symlink for harvesting, make sure the original path is excluded. If a single prospector is configured to harvest both the symlink and the original file, the prospector will detect the problem and only process the first file it finds. 
However, if two different prospectors are configured (one to read the symlink and the other the original path), both paths will be harvested, causing Filebeat to send duplicate data and the prospectors to overwrite each other's state. - -The `symlinks` option can be useful if symlinks to the log files have additional metadata in the file name, and you want to process the metadata in Logstash. This is, for example, the case for Kubernetes log files. - -Because this option may lead to data loss, it is disabled by default. - -[float] -==== `backoff` - -The backoff options specify how aggressively Filebeat crawls open files for updates. -You can use the default values in most cases. - -The `backoff` option defines how long Filebeat -waits before checking a file again after EOF is reached. The default is 1s, which means -the file is checked every second if new lines were added. This enables near real-time crawling. Every time a new line appears in the file, the `backoff` value is reset to the initial -value. The default is 1s. - -[float] -==== `max_backoff` - -The maximum time for Filebeat to wait before checking a file again after EOF is -reached. After having backed off multiple times from checking the file, the wait time -will never exceed `max_backoff` regardless of what is specified for `backoff_factor`. -Because it takes a maximum of 10s to read a new line, specifying 10s for `max_backoff` means that, at the worst, a new line could be added to the log file if Filebeat has -backed off multiple times. The default is 10s. - -Requirement: max_backoff should always be set to `max_backoff <= scan_frequency`. In case `max_backoff` should be bigger, it is recommended to close the file handler instead let the prospector pick up the file again. - -[float] -==== `backoff_factor` - -This option specifies how fast the waiting time is increased. The bigger the -backoff factor, the faster the `max_backoff` value is reached. The backoff factor -increments exponentially. The minimum value allowed is 1. If this value is set to 1, -the backoff algorithm is disabled, and the `backoff` value is used for waiting for new -lines. The `backoff` value will be multiplied each time with the `backoff_factor` until -`max_backoff` is reached. The default is 2. - -[float] -[[harvester-limit]] -==== `harvester_limit` - -The `harvester_limit` option limits the number of harvesters that are started in parallel for one prospector. This directly relates -to the maximum number of file handlers that are opened. The default for `harvester_limit` is 0, which means there is no limit. This -configuration is useful if the number of files to be harvested exceeds the open file handler limit of the operating system. - -Setting a limit on the number of harvesters means that potentially not all files are opened in parallel. Therefore we recommended that you use -this option in combination with the `close_*` options to make sure harvesters are stopped more often so that new files can be -picked up. - -Currently if a new harvester can be started again, the harvester is picked randomly. This means it's possible that the harvester for a file that was just closed and then updated again might be started instead of the harvester for a file that hasn't been harvested for a longer period of time. - -This configuration option applies per prospector. You can use this option to indirectly set higher priorities on certain prospectors -by assigning a higher limit of harvesters. 
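A hedged sketch of that prioritization idea (the paths and limits are hypothetical), pairing `harvester_limit` with a short `close_inactive` so that file handles are released often enough for new files to be picked up:

[source,yaml]
----
filebeat.prospectors:
- type: log
  paths:
    - /var/log/critical-app/*.log   # hypothetical high-priority logs
  harvester_limit: 100
  close_inactive: 1m
- type: log
  paths:
    - /var/log/batch-jobs/*.log     # hypothetical lower-priority logs
  harvester_limit: 10
  close_inactive: 1m
----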
- -[float] -==== `enabled` - -The `enabled` option can be used with each prospector to define if a prospector is enabled or not. By default, enabled is set to true. - -[float] -[[max-message-size]] -==== `max_message_size` - -When used with `type: udp`, specifies the maximum size of the message received over UDP. The default is 10240. - -[float] -[[config-containers]] -==== `containers` +* <<{beatname_lc}-input-log>> +* <<{beatname_lc}-input-stdin>> +* <<{beatname_lc}-input-redis>> +* <<{beatname_lc}-input-udp>> +* <<{beatname_lc}-input-docker>> +* <<{beatname_lc}-input-tcp>> +* <<{beatname_lc}-input-syslog>> -experimental[] -These options are only available when using `docker` prospector type. They allow to configure the list of containers to read logs from. -Docker prospector will search for container logs under its path, and parse them into common message lines, extracting timestamps too. -Everything happens before line filtering, multiline and JSON decoding, so it can be used in combination with them. +include::inputs/input-log.asciidoc[] -Example configuration: +include::inputs/input-stdin.asciidoc[] -[source,yaml] -------------------------------------------------------------------------------------- -containers.ids: - - '8b6fe7dc9e067b58476dc57d6986dd96d7100430c5de3b109a99cd56ac655347' -------------------------------------------------------------------------------------- +include::inputs/input-redis.asciidoc[] -When using `docker` prospector type you must define `containers.ids`, these are all available settings: +include::inputs/input-udp.asciidoc[] -*`ids`*:: Required, the list of Docker container IDs to read logs from, `'*'` can be used as ID to read from all containers. +include::inputs/input-docker.asciidoc[] -*`path`*:: Base path where Docker logs are located. The default is `/var/lib/docker/containers`. +include::inputs/input-tcp.asciidoc[] -*`stream`*:: Only read the given stream, this can be: `all`, `stdout` or `stderr`. The default is `all`. +include::inputs/input-syslog.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc index c1748e4b..68677d2f 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc @@ -1,15 +1,7 @@ [[filebeat-getting-started]] == Getting Started With Filebeat -To get started with your own Filebeat setup, install and configure these related products: - - * Elasticsearch for storage and indexing the data. - * Kibana for the UI. - * Logstash (optional) for inserting data into Elasticsearch. - -See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack] for more information. - -After installing the Elastic Stack, read the following topics to learn how to install, configure, and run Filebeat: +include::../../libbeat/docs/shared-getting-started-intro.asciidoc[] * <> * <> @@ -24,9 +16,6 @@ After installing the Elastic Stack, read the following topics to learn how to in [[filebeat-installation]] === Step 1: Install Filebeat -Before running Filebeat, you need to install and configure the Elastic stack. See -{libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack]. - include::../../libbeat/docs/shared-download-and-install.asciidoc[] [[deb]] @@ -122,7 +111,7 @@ https://www.elastic.co/downloads/beats/filebeat[downloads page]. . Rename the `filebeat--windows` directory to `Filebeat`. -. 
Open a PowerShell prompt as an Administrator (right-click the PowerShell icon and select *Run As Administrator*). If you are running Windows XP, you may need to download and install PowerShell. +. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon and select *Run As Administrator*). . From the PowerShell prompt, run the following commands to install Filebeat as a Windows service: @@ -154,7 +143,7 @@ default values for most configuration options. [source,yaml] ------------------------------------------------------------------------------------- -filebeat.prospectors: +filebeat.inputs: - type: log enabled: true paths: @@ -166,18 +155,18 @@ To configure Filebeat: . Define the path (or paths) to your log files. + -For the most basic Filebeat configuration, you can define a single prospector with a single path. For example: +For the most basic Filebeat configuration, you can define a single input with a single path. For example: + [source,yaml] ------------------------------------------------------------------------------------- -filebeat.prospectors: +filebeat.inputs: - type: log enabled: true paths: - /var/log/*.log ------------------------------------------------------------------------------------- + -The prospector in this example harvests all files in the path `/var/log/*.log`, which means +The input in this example harvests all files in the path `/var/log/*.log`, which means that Filebeat will harvest all files in the directory `/var/log/` that end with `.log`. All patterns supported by https://golang.org/pkg/path/filepath/#Glob[Golang Glob] are also supported here. + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/how-filebeat-works.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/how-filebeat-works.asciidoc index 21a6673d..6cd53431 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/how-filebeat-works.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/how-filebeat-works.asciidoc @@ -1,81 +1,81 @@ -[[how-filebeat-works]] -== How Filebeat works +[id="how-{beatname_lc}-works"] +== How {beatname_uc} works -In this topic, you learn about the key building blocks of Filebeat and how they work together. Understanding these concepts will help you make informed decisions about configuring Filebeat for specific use cases. +//TODO: Make this topic more generic and move harvester-specific content to +//the topic about the log input -Filebeat consists of two main components: <> and <>. These components work together to tail files and send event data to the output that you specify. +In this topic, you learn about the key building blocks of {beatname_uc} and how they work together. Understanding these concepts will help you make informed decisions about configuring {beatname_uc} for specific use cases. + +{beatname_uc} consists of two main components: <> and <>. These components work together to tail files and send event data to the output that you specify. [float] [[harvester]] === What is a harvester? -A harvester is responsible for reading the content of a single file. The harvester reads each file, line by line, and sends the content to the output. One harvester is started for each file. The harvester is responsible for opening and closing the file, which means that the file descriptor remains open while the harvester is running. If a file is removed or renamed while it's being harvested, Filebeat continues to read the file. This has the side effect that the space on your disk is reserved until the harvester closes. 
By default, Filebeat keeps the file open until <> is reached. +A harvester is responsible for reading the content of a single file. The harvester reads each file, line by line, and sends the content to the output. One harvester is started for each file. The harvester is responsible for opening and closing the file, which means that the file descriptor remains open while the harvester is running. If a file is removed or renamed while it's being harvested, {beatname_uc} continues to read the file. This has the side effect that the space on your disk is reserved until the harvester closes. By default, {beatname_uc} keeps the file open until <<{beatname_lc}-input-log-close-inactive,`close_inactive`>> is reached. Closing a harvester has the following consequences: * The file handler is closed, freeing up the underlying resources if the file was deleted while the harvester was still reading the file. -* The harvesting of the file will only be started again after <> has elapsed. +* The harvesting of the file will only be started again after <<{beatname_lc}-input-log-scan-frequency,`scan_frequency`>> has elapsed. * If the file is moved or removed while the harvester is closed, harvesting of the file will not continue. -To control when a harvester is closed, use the <> configuration options. +To control when a harvester is closed, use the <<{beatname_lc}-input-log-close-options,`close_*`>> configuration options. [float] -[[prospector]] -=== What is a prospector? +[[input]] +=== What is an input? -A prospector is responsible for managing the harvesters and finding all sources to read from. +An input is responsible for managing the harvesters and finding all sources to read from. -If the input type is `log`, the prospector finds all files on the drive that match the defined glob paths and starts a harvester for each file. Each prospector runs in its own Go routine. +If the input type is `log`, the input finds all files on the drive that match the defined glob paths and starts a harvester for each file. Each input runs in its own Go routine. -The following example configures Filebeat to harvest lines from all log files that match the specified glob patterns: +The following example configures {beatname_uc} to harvest lines from all log files that match the specified glob patterns: -[source,yaml] +["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- -filebeat.prospectors: +{beatname_lc}.inputs: - type: log paths: - /var/log/*.log - /var/path2/*.log ------------------------------------------------------------------------------------- -Filebeat currently supports two `prospector` types: `log` and `stdin`. Each prospector type can be defined multiple times. The `log` prospector checks each file to see whether a harvester needs to be started, whether one is already running, or whether the file can be ignored (see <>). New lines are only picked up if the size of the file has changed since the harvester was closed. - -NOTE: Filebeat prospectors can only read local files. There is no functionality to connect to remote hosts to read stored files or logs. +{beatname_uc} currently supports <<{beatname_lc}-input-types,several `input` types>>. Each input type can be defined multiple times. The `log` input checks each file to see whether a harvester needs to be started, whether one is already running, or whether the file can be ignored (see <<{beatname_lc}-input-log-ignore-older,`ignore_older`>>). 
New lines are only picked up if the size of the file has changed since the harvester was closed. [float] -=== How does Filebeat keep the state of files? +=== How does {beatname_uc} keep the state of files? -Filebeat keeps the state of each file and frequently flushes the state to disk in the registry file. The state is used to remember the last offset a harvester was reading from and to ensure all log lines are sent. If the output, such as Elasticsearch or Logstash, is not reachable, Filebeat keeps track of the last lines sent and will continue reading the files as soon as the output becomes available again. While Filebeat is running, the state information is also kept in memory by each prospector. When Filebeat is restarted, data from the registry file is used to rebuild the state, and Filebeat continues each harvester at the last known position. +{beatname_uc} keeps the state of each file and frequently flushes the state to disk in the registry file. The state is used to remember the last offset a harvester was reading from and to ensure all log lines are sent. If the output, such as Elasticsearch or Logstash, is not reachable, {beatname_uc} keeps track of the last lines sent and will continue reading the files as soon as the output becomes available again. While {beatname_uc} is running, the state information is also kept in memory for each input. When {beatname_uc} is restarted, data from the registry file is used to rebuild the state, and {beatname_uc} continues each harvester at the last known position. -Each prospector keeps a state for each file it finds. Because files can be renamed or moved, the filename and path are not enough to identify a file. For each file, Filebeat stores unique identifiers to detect whether a file was harvested previously. +For each input, {beatname_uc} keeps a state of each file it finds. Because files can be renamed or moved, the filename and path are not enough to identify a file. For each file, {beatname_uc} stores unique identifiers to detect whether a file was harvested previously. If your use case involves creating a large number of new files every day, you might find that the registry file grows to be too large. See <> for details about configuration options that you can set to resolve this issue. [float] [[at-least-once-delivery]] -=== How does Filebeat ensure at-least-once delivery? +=== How does {beatname_uc} ensure at-least-once delivery? -Filebeat guarantees that events will be delivered to the configured output at -least once and with no data loss. Filebeat is able to achieve this behavior +{beatname_uc} guarantees that events will be delivered to the configured output at +least once and with no data loss. {beatname_uc} is able to achieve this behavior because it stores the delivery state of each event in the registry file. In situations where the defined output is blocked and has not confirmed all -events, Filebeat will keep trying to send events until the output acknowledges +events, {beatname_uc} will keep trying to send events until the output acknowledges that it has received the events. -If Filebeat shuts down while it's in the process of sending events, it does not +If {beatname_uc} shuts down while it's in the process of sending events, it does not wait for the output to acknowledge all events before shutting down. Any events -that are sent to the output, but not acknowledged before Filebeat shuts down, -are sent again when Filebeat is restarted. 
This ensures that each event is sent +that are sent to the output, but not acknowledged before {beatname_uc} shuts down, +are sent again when {beatname_uc} is restarted. This ensures that each event is sent at least once, but you can end up with duplicate events being sent to the -output. You can configure Filebeat to wait a specific amount of time before +output. You can configure {beatname_uc} to wait a specific amount of time before shutting down by setting the <> option. -NOTE: There is a limitation to Filebeat's at-least-once delivery guarantee +NOTE: There is a limitation to {beatname_uc}'s at-least-once delivery guarantee involving log rotation and the deletion of old files. If log files are written -to disk and rotated faster than they can be processed by Filebeat, or if files +to disk and rotated faster than they can be processed by {beatname_uc}, or if files are deleted while the output is unavailable, data might be lost. On Linux, it's -also possible for Filebeat to skip lines as the result of inode reuse. See +also possible for {beatname_uc} to skip lines as the result of inode reuse. See <> for more details about the inode reuse issue. - diff --git a/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat-mongodb-overview.png b/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat-mongodb-overview.png new file mode 100644 index 00000000..c77c37d5 Binary files /dev/null and b/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat-mongodb-overview.png differ diff --git a/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat.png b/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat.png index 5e937e70..4463aead 100644 Binary files a/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat.png and b/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat.png differ diff --git a/vendor/github.com/elastic/beats/filebeat/docs/images/kibana-iis.png b/vendor/github.com/elastic/beats/filebeat/docs/images/kibana-iis.png new file mode 100644 index 00000000..eec61a94 Binary files /dev/null and b/vendor/github.com/elastic/beats/filebeat/docs/images/kibana-iis.png differ diff --git a/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc index 8ba8c2a1..3839beee 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc @@ -2,19 +2,21 @@ include::../../libbeat/docs/version.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes62.asciidoc[] +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :version: {stack-version} :beatname_lc: filebeat :beatname_uc: Filebeat :beatname_pkg: {beatname_lc} +:github_repo_name: beats +:discuss_forum: beats/{beatname_lc} +:beat_default_index_prefix: {beatname_lc} +:has_ml_jobs: yes include::../../libbeat/docs/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] -include::../../libbeat/docs/contributing-to-beats.asciidoc[] - include::./getting-started.asciidoc[] include::./modules-getting-started.asciidoc[] @@ -44,3 +46,6 @@ include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] include::./migration.asciidoc[] + +include::../../libbeat/docs/contributing-to-beats.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-file-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-file-options.asciidoc new file mode 100644 index 00000000..bebdbfca --- /dev/null +++ 
b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-file-options.asciidoc @@ -0,0 +1,387 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by Filebeat inputs that use the prospector +//// to process files on disk (includes options for managing physical files) +//// If you add IDs to sections, make sure you use attributes to create +//// unique IDs for each input that includes this file. Use the format: +//// [id="{beatname_lc}-input-{type}-option-name"] +////////////////////////////////////////////////////////////////////////// + +[float] +[id="{beatname_lc}-input-{type}-exclude-files"] +===== `exclude_files` + +A list of regular expressions to match the files that you want {beatname_uc} to +ignore. By default no files are excluded. + +The following example configures {beatname_uc} to ignore all the files that have +a `gz` extension: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: {type} + ... + exclude_files: ['\.gz$'] +---- + +See <> for a list of supported regexp patterns. + +[float] +[id="{beatname_lc}-input-{type}-ignore-older"] +===== `ignore_older` + +If this option is enabled, {beatname_uc} ignores any files that were modified +before the specified timespan. Configuring `ignore_older` can be especially +useful if you keep log files for a long time. For example, if you want to start +{beatname_uc}, but only want to send the newest files and files from last week, +you can configure this option. + +You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is 0, +which disables the setting. Commenting out the config has the same effect as +setting it to 0. + +IMPORTANT: You must set `ignore_older` to be greater than `close_inactive`. + +The files affected by this setting fall into two categories: + +* Files that were never harvested +* Files that were harvested but weren't updated for longer than `ignore_older` + +For files which were never seen before, the offset state is set to the end of +the file. If a state already exist, the offset is not changed. In case a file is +updated again later, reading continues at the set offset position. + +The `ignore_older` setting relies on the modification time of the file to +determine if a file is ignored. If the modification time of the file is not +updated when lines are written to a file (which can happen on Windows), the +`ignore_older` setting may cause {beatname_uc} to ignore files even though +content was added at a later time. + +To remove the state of previously harvested files from the registry file, use +the `clean_inactive` configuration option. + +Before a file can be ignored by {beatname_uc}, the file must be closed. To +ensure a file is no longer being harvested when it is ignored, you must set +`ignore_older` to a longer duration than `close_inactive`. + +If a file that's currently being harvested falls under `ignore_older`, the +harvester will first finish reading the file and close it after `close_inactive` +is reached. Then, after that, the file will be ignored. + +[float] +[id="{beatname_lc}-input-{type}-close-options"] +===== `close_*` + +The `close_*` configuration options are used to close the harvester after a +certain criteria or time. Closing the harvester means closing the file handler. +If a file is updated after the harvester is closed, the file will be picked up +again after `scan_frequency` has elapsed. 
However, if the file is moved or +deleted while the harvester is closed, {beatname_uc} will not be able to pick up +the file again, and any data that the harvester hasn't read will be lost. + + +[float] +[id="{beatname_lc}-input-{type}-close-inactive"] +===== `close_inactive` + +When this option is enabled, {beatname_uc} closes the file handle if a file has +not been harvested for the specified duration. The counter for the defined +period starts when the last log line was read by the harvester. It is not based +on the modification time of the file. If the closed file changes again, a new +harvester is started and the latest changes will be picked up after +`scan_frequency` has elapsed. + +We recommend that you set `close_inactive` to a value that is larger than the +least frequent updates to your log files. For example, if your log files get +updated every few seconds, you can safely set `close_inactive` to `1m`. If there +are log files with very different update rates, you can use multiple +configurations with different values. + +Setting `close_inactive` to a lower value means that file handles are closed +sooner. However, this has the side effect that new log lines are not sent in near +real time if the harvester is closed. + +The timestamp for closing a file does not depend on the modification time of the +file. Instead, {beatname_uc} uses an internal timestamp that reflects when the +file was last harvested. For example, if `close_inactive` is set to 5 minutes, +the countdown for the 5 minutes starts after the harvester reads the last line +of the file. + +You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is +5m. + +[float] +[id="{beatname_lc}-input-{type}-close-renamed"] +===== `close_renamed` + +WARNING: Only use this option if you understand that data loss is a potential +side effect. + +When this option is enabled, {beatname_uc} closes the file handler when a file +is renamed. This happens, for example, when rotating files. By default, the +harvester stays open and keeps reading the file because the file handler does +not depend on the file name. If the `close_renamed` option is enabled and the +file is renamed or moved in such a way that it's no longer matched by the file +patterns specified for the input, the file will not be picked up again. +{beatname_uc} will not finish reading the file. + +WINDOWS: If your Windows log rotation system shows errors because it can't +rotate the files, you should enable this option. + +[float] +[id="{beatname_lc}-input-{type}-close-removed"] +===== `close_removed` + +When this option is enabled, {beatname_uc} closes the harvester when a file is +removed. Normally a file should only be removed after it's inactive for the +duration specified by `close_inactive`. However, if a file is removed early and +you don't enable `close_removed`, {beatname_uc} keeps the file open to make sure +the harvester has completed. If this setting results in files that are not +completely read because they are removed from disk too early, disable this +option. + +This option is enabled by default. If you disable this option, you must also +disable `clean_removed`. + +WINDOWS: If your Windows log rotation system shows errors because it can't +rotate files, make sure this option is enabled. + +[float] +[id="{beatname_lc}-input-{type}-close-eof"] +===== `close_eof` + +WARNING: Only use this option if you understand that data loss is a potential +side effect.
+ +When this option is enabled, {beatname_uc} closes a file as soon as the end of a +file is reached. This is useful when your files are only written once and not +updated from time to time. For example, this happens when you are writing every +single log event to a new file. This option is disabled by default. + +[float] +[id="{beatname_lc}-input-{type}-close-timeout"] +===== `close_timeout` + +WARNING: Only use this option if you understand that data loss is a potential +side effect. Another side effect is that multiline events might not be +completely sent before the timeout expires. + +When this option is enabled, {beatname_uc} gives every harvester a predefined +lifetime. Regardless of where the reader is in the file, reading will stop after +the `close_timeout` period has elapsed. This option can be useful for older log +files when you want to spend only a predefined amount of time on the files. +While `close_timeout` will close the file after the predefined timeout, if the +file is still being updated, {beatname_uc} will start a new harvester again per +the defined `scan_frequency`. And the close_timeout for this harvester will +start again with the countdown for the timeout. + +This option is particularly useful in case the output is blocked, which makes +{beatname_uc} keep open file handlers even for files that were deleted from the +disk. Setting `close_timeout` to `5m` ensures that the files are periodically +closed so they can be freed up by the operating system. + +If you set `close_timeout` to equal `ignore_older`, the file will not be picked +up if it's modified while the harvester is closed. This combination of settings +normally leads to data loss, and the complete file is not sent. + +When you use `close_timeout` for logs that contain multiline events, the +harvester might stop in the middle of a multiline event, which means that only +parts of the event will be sent. If the harvester is started again and the file +still exists, only the second part of the event will be sent. + +This option is set to 0 by default which means it is disabled. + + +[float] +[id="{beatname_lc}-input-{type}-clean-options"] +===== `clean_*` + +The `clean_*` options are used to clean up the state entries in the registry +file. These settings help to reduce the size of the registry file and can +prevent a potential <>. + +[float] +[id="{beatname_lc}-input-{type}-clean-inactive"] +===== `clean_inactive` + +WARNING: Only use this option if you understand that data loss is a potential +side effect. + +When this option is enabled, {beatname_uc} removes the state of a file after the +specified period of inactivity has elapsed. The state can only be removed if +the file is already ignored by {beatname_uc} (the file is older than +`ignore_older`). The `clean_inactive` setting must be greater than `ignore_older + +scan_frequency` to make sure that no states are removed while a file is still +being harvested. Otherwise, the setting could result in {beatname_uc} resending +the full content constantly because `clean_inactive` removes state for files +that are still detected by {beatname_uc}. If a file is updated or appears +again, the file is read from the beginning. + +The `clean_inactive` configuration option is useful to reduce the size of the +registry file, especially if a large amount of new files are generated every +day. + +This config option is also useful to prevent {beatname_uc} problems resulting +from inode reuse on Linux. For more information, see <>. 
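+For illustration only, here is a minimal sketch of how these settings can be
+combined. The durations are assumptions, not recommendations; the important
+point is that `clean_inactive` stays greater than `ignore_older` plus
+`scan_frequency`:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  ...
+  ignore_older: 48h
+  scan_frequency: 10s
+  clean_inactive: 72h
+----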
+ +NOTE: Every time a file is renamed, the file state is updated and the counter +for `clean_inactive` starts at 0 again. + +[float] +[id="{beatname_lc}-input-{type}-clean-removed"] +===== `clean_removed` + +When this option is enabled, {beatname_uc} cleans files from the registry if +they cannot be found on disk anymore under the last known name. This also means +that files which were renamed after the harvester finished will be removed. This +option is enabled by default. + +If a shared drive disappears for a short period and appears again, all files +will be read again from the beginning because the states were removed from the +registry file. In such cases, we recommend that you disable the `clean_removed` +option. + +You must disable this option if you also disable `close_removed`. + +[float] +[id="{beatname_lc}-input-{type}-scan-frequency"] +===== `scan_frequency` + +How often {beatname_uc} checks for new files in the paths that are specified +for harvesting. For example, if you specify a glob like `/var/log/*`, the +directory is scanned for files using the frequency specified by +`scan_frequency`. Specify 1s to scan the directory as frequently as possible +without causing {beatname_uc} to scan too frequently. We do not recommend setting +this value to less than `1s`. + +If you require log lines to be sent in near real time, do not use a very low +`scan_frequency`. Instead, adjust `close_inactive` so that the file handler stays open and +constantly polls your files. + +The default setting is 10s. + +[float] +[id="{beatname_lc}-input-{type}-scan-sort"] +===== `scan.sort` + +experimental[] + +If you specify a value other than the empty string for this setting, files are +sorted before they are scanned. Possible values are `modtime` and `filename`. +To sort by file modification time, use `modtime`; otherwise, use `filename`. +Leave this option empty to disable it. + +If you specify a value for this setting, you can use `scan.order` to configure +whether files are scanned in ascending or descending order. + +The default setting is disabled. + +[float] +[id="{beatname_lc}-input-{type}-scan-order"] +===== `scan.order` + +experimental[] + +Specifies whether to use ascending or descending order when `scan.sort` is set to a value other than none. Possible values are `asc` or `desc`. + +The default setting is `asc`. + +[float] +===== `tail_files` + +If this option is set to true, {beatname_uc} starts reading new files at the end +of each file instead of the beginning. When this option is used in combination +with log rotation, it's possible that the first log entries in a new file might +be skipped. The default setting is false. + +This option applies to files that {beatname_uc} has not already processed. If +you ran {beatname_uc} previously and the state of the file was already +persisted, `tail_files` will not apply. Harvesting will continue at the previous +offset. To apply `tail_files` to all files, you must stop {beatname_uc} and +remove the registry file. Be aware that doing this removes ALL previous states. + +NOTE: You can use this setting to avoid indexing old log lines when you run +{beatname_uc} on a set of log files for the first time. After the first run, we +recommend disabling this option, or you risk losing lines during file rotation. + +[float] +===== `symlinks` + +The `symlinks` option allows {beatname_uc} to harvest symlinks in addition to +regular files.
When harvesting symlinks, {beatname_uc} opens and reads the +original file even though it reports the path of the symlink. + +When you configure a symlink for harvesting, make sure the original path is +excluded. If a single input is configured to harvest both the symlink and +the original file, {beatname_uc} will detect the problem and only process the +first file it finds. However, if two different inputs are configured (one +to read the symlink and the other the original path), both paths will be +harvested, causing {beatname_uc} to send duplicate data and the inputs to +overwrite each other's state. + +The `symlinks` option can be useful if symlinks to the log files have additional +metadata in the file name, and you want to process the metadata in Logstash. +This is, for example, the case for Kubernetes log files. + +Because this option may lead to data loss, it is disabled by default. + +[float] +===== `backoff` + +The backoff options specify how aggressively {beatname_uc} crawls open files for +updates. You can use the default values in most cases. + +The `backoff` option defines how long {beatname_uc} waits before checking a file +again after EOF is reached. The default is 1s, which means the file is checked +every second if new lines were added. This enables near real-time crawling. +Every time a new line appears in the file, the `backoff` value is reset to the +initial value. + +[float] +===== `max_backoff` + +The maximum time for {beatname_uc} to wait before checking a file again after +EOF is reached. After having backed off multiple times from checking the file, +the wait time will never exceed `max_backoff` regardless of what is specified +for `backoff_factor`. Specifying 10s for `max_backoff` means that, at the worst, +a new line added to the log file could wait up to 10 seconds before it is read +if {beatname_uc} has backed off multiple times. The +default is 10s. + +Requirement: `max_backoff` should always be set so that `max_backoff <= +scan_frequency`. If `max_backoff` needs to be bigger, it is recommended to +close the file handler instead and let {beatname_uc} pick up the file again. + +[float] +===== `backoff_factor` + +This option specifies how fast the waiting time is increased. The bigger the +backoff factor, the faster the `max_backoff` value is reached. The backoff +factor increments exponentially. The minimum value allowed is 1. If this value +is set to 1, the backoff algorithm is disabled, and the `backoff` value is used +for waiting for new lines. The `backoff` value is multiplied by `backoff_factor` +each time until `max_backoff` is reached. The default is 2. + +[float] +[id="{beatname_lc}-input-{type}-harvester-limit"] +===== `harvester_limit` + +The `harvester_limit` option limits the number of harvesters that are started in +parallel for one input. This directly relates to the maximum number of file +handlers that are opened. The default for `harvester_limit` is 0, which means +there is no limit. This configuration is useful if the number of files to be +harvested exceeds the open file handler limit of the operating system. + +Setting a limit on the number of harvesters means that potentially not all files +are opened in parallel. Therefore, we recommend that you use this option in +combination with the `close_*` options to make sure harvesters are stopped more +often so that new files can be picked up. + +Currently, if a new harvester can be started again, the harvester is picked +randomly.
This means it's possible that the harvester for a file that was just +closed and then updated again might be started instead of the harvester for a +file that hasn't been harvested for a longer period of time. + +This configuration option applies per input. You can use this option to +indirectly set higher priorities on certain inputs by assigning a higher +limit of harvesters. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-harvester-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-harvester-options.asciidoc new file mode 100644 index 00000000..c8722ff1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-harvester-options.asciidoc @@ -0,0 +1,157 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by Filebeat inputs that use the prospector +//// but do not process files (the options for managing files +//// on disk are not relevant) +//// If you add IDs to sections, make sure you use attributes to create +//// unique IDs for each input that includes this file. Use the format: +//// [id="{beatname_lc}-input-{type}-option-name"] +////////////////////////////////////////////////////////////////////////// + +[float] +===== `encoding` + +The file encoding to use for reading data that contains international +characters. See the encoding names http://www.w3.org/TR/encoding/[recommended by +the W3C for use in HTML5]. + +Here are some sample encodings from W3C recommendation: + + * plain, latin1, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, + gbk, hz-gb-2312, + * euc-kr, euc-jp, iso-2022-jp, shift-jis, and so on + +The `plain` encoding is special, because it does not validate or transform any input. + +[float] +[id="{beatname_lc}-input-{type}-exclude-lines"] +===== `exclude_lines` + +A list of regular expressions to match the lines that you want {beatname_uc} to +exclude. {beatname_uc} drops any lines that match a regular expression in the +list. By default, no lines are dropped. Empty lines are ignored. + +If <> settings are also specified, each multiline message +is combined into a single line before the lines are filtered by `exclude_lines`. + +The following example configures {beatname_uc} to drop any lines that start with +`DBG`. + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: {type} + ... + exclude_lines: ['^DBG'] +---- + +See <> for a list of supported regexp patterns. + +[float] +[id="{beatname_lc}-input-{type}-include-lines"] +===== `include_lines` + +A list of regular expressions to match the lines that you want {beatname_uc} to +include. {beatname_uc} exports only the lines that match a regular expression in +the list. By default, all lines are exported. Empty lines are ignored. + +If <> settings also specified, each multiline message is +combined into a single line before the lines are filtered by `include_lines`. + +The following example configures {beatname_uc} to export any lines that start +with `ERR` or `WARN`: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: {type} + ... + include_lines: ['^ERR', '^WARN'] +---- + +NOTE: If both `include_lines` and `exclude_lines` are defined, {beatname_uc} +executes `include_lines` first and then executes `exclude_lines`. The order in +which the two options are defined doesn't matter. 
The `include_lines` option +will always be executed before the `exclude_lines` option, even if +`exclude_lines` appears before `include_lines` in the config file. + +The following example exports all log lines that contain `sometext`, +except for lines that begin with `DBG` (debug messages): + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: {type} + ... + include_lines: ['sometext'] + exclude_lines: ['^DBG'] +---- + +See <> for a list of supported regexp patterns. + +[float] +===== `harvester_buffer_size` + +The size in bytes of the buffer that each harvester uses when fetching a file. +The default is 16384. + +[float] +===== `max_bytes` + +The maximum number of bytes that a single log message can have. All bytes after +`max_bytes` are discarded and not sent. This setting is especially useful for +multiline log messages, which can get large. The default is 10MB (10485760). + +[float] +[id="{beatname_lc}-input-{type}-config-json"] +===== `json` + +These options make it possible for {beatname_uc} to decode logs structured as +JSON messages. {beatname_uc} processes the logs line by line, so the JSON +decoding only works if there is one JSON object per line. + +The decoding happens before line filtering and multiline. You can combine JSON +decoding with filtering and multiline if you set the `message_key` option. This +can be helpful in situations where the application logs are wrapped in JSON +objects, as happens, for example, with Docker. + +Example configuration: + +[source,yaml] +---- +json.keys_under_root: true +json.add_error_key: true +json.message_key: log +---- + +You must specify at least one of the following settings to enable JSON parsing +mode: + +*`keys_under_root`*:: By default, the decoded JSON is placed under a "json" key +in the output document. If you enable this setting, the keys are copied to the top +level of the output document. The default is false. + +*`overwrite_keys`*:: If `keys_under_root` and this setting are enabled, then the +values from the decoded JSON object overwrite the fields that {beatname_uc} +normally adds (type, source, offset, etc.) in case of conflicts. + +*`add_error_key`*:: If this setting is enabled, {beatname_uc} adds +"error.message" and "error.type: json" keys in case of JSON unmarshalling errors +or when a `message_key` is defined in the configuration but cannot be used. + +*`message_key`*:: An optional configuration setting that specifies a JSON key on +which to apply the line filtering and multiline settings. If specified, the key +must be at the top level in the JSON object and the value associated with the +key must be a string; otherwise, no filtering or multiline aggregation will +occur. + +*`ignore_decoding_error`*:: An optional configuration setting that specifies whether +JSON decoding errors should be logged or not. If set to true, errors will not +be logged. The default is false. + +[float] +===== `multiline` + +Options that control how {beatname_uc} deals with log messages that span +multiple lines. See <> for more information about +configuring multiline options.
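+As a quick illustration, the following sketch joins indented continuation lines
+to the line before them. The pattern is an assumption for that particular log
+layout, not a general recommendation:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  ...
+  multiline.pattern: '^\s'
+  multiline.negate: false
+  multiline.match: after
+----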
+ + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-options.asciidoc new file mode 100644 index 00000000..53d745e0 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-options.asciidoc @@ -0,0 +1,86 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by all Filebeat inputs +//// If you add IDs to sections, make sure you use attributes to create +//// unique IDs for each input that includes this file. Use the format: +//// [id="{beatname_lc}-input-{type}-option-name"] +////////////////////////////////////////////////////////////////////////// + +==== Common options + +The following configuration options are supported by all inputs. + +[float] +===== `enabled` + +Use the `enabled` option to enable and disable inputs. By default, enabled is +set to true. + +[float] +===== `tags` + +A list of tags that {beatname_uc} includes in the `tags` field of each published +event. Tags make it easy to select specific events in Kibana or apply +conditional filtering in Logstash. These tags will be appended to the list of +tags specified in the general configuration. + +Example: + +["source","yaml",subs="attributes"] +----- +{beatname_lc}.inputs: +- type: {type} + . . . + tags: ["json"] +----- + + +[float] +[id="{beatname_lc}-input-{type}-fields"] +===== `fields` + +Optional fields that you can specify to add additional information to the +output. For example, you might add fields that you can use for filtering log +data. Fields can be scalar values, arrays, dictionaries, or any nested +combination of these. By default, the fields that you specify here will be +grouped under a `fields` sub-dictionary in the output document. To store the +custom fields as top-level fields, set the `fields_under_root` option to true. +If a duplicate field is declared in the general configuration, then its value +will be overwritten by the value declared here. + +["source","yaml",subs="attributes"] +----- +{beatname_lc}.inputs: +- type: {type} + . . . + fields: + app_id: query_engine_12 +----- + +[float] +[id="fields-under-root-{type}"] +===== `fields_under_root` + +If this option is set to true, the custom +<<{beatname_lc}-input-{type}-fields,fields>> are stored as top-level fields in +the output document instead of being grouped under a `fields` sub-dictionary. If +the custom field names conflict with other field names added by {beatname_uc}, +then the custom fields overwrite the other fields. + +[float] +===== `processors` + +A list of processors to apply to the input data. + +See <> for information about specifying +processors in your config. + +[float] +===== `pipeline` + +The Ingest Node pipeline ID to set for the events generated by this input. + +NOTE: The pipeline ID can also be configured in the Elasticsearch output, but +this option usually results in simpler configuration files. If the pipeline is +configured both in the input and output, the option from the +input is used. 
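+To show how these common options fit together, here is a sketch. The tag, field
+values, and pipeline ID are made-up placeholders for illustration:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  ...
+  tags: ["web"]
+  fields:
+    env: staging
+  fields_under_root: true
+  pipeline: my_pipeline_id
+----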
+ diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-tcp-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-tcp-options.asciidoc new file mode 100644 index 00000000..c82dd7ee --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-tcp-options.asciidoc @@ -0,0 +1,29 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by Filebeat inputs that use the TCP inputsource +//// If you add IDs to sections, make sure you use attributes to create +//// unique IDs for each input that includes this file. Use the format: +//// [id="{beatname_lc}-input-{type}-option-name"] +////////////////////////////////////////////////////////////////////////// +[float] +[id="{beatname_lc}-input-{type}-tcp-max-message-size"] +==== `max_message_size` + +The maximum size of the message received over TCP. The default is `20MiB`. + +[float] +[id="{beatname_lc}-input-{type}-tcp-host"] +==== `host` + +The host and TCP port to listen on for event streams. + +[float] +[id="{beatname_lc}-input-{type}-tcp-line-delimiter"] +==== `line_delimiter` + +Specify the characters used to split the incoming events. The default is '\n'. + +[float] +[id="{beatname_lc}-input-{type}-tcp-timeout"] +==== `timeout` + +The number of seconds of inactivity before a remote connection is closed. The default is `300s`. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-udp-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-udp-options.asciidoc new file mode 100644 index 00000000..27068d53 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-udp-options.asciidoc @@ -0,0 +1,17 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by Filebeat inputs that use the UDP inputsource +//// If you add IDs to sections, make sure you use attributes to create +//// unique IDs for each input that includes this file. Use the format: +//// [id="{beatname_lc}-input-{type}-option-name"] +////////////////////////////////////////////////////////////////////////// +[float] +[id="{beatname_lc}-input-{type}-udp-max-message-size"] +==== `max_message_size` + +The maximum size of the message received over UDP. The default is `10KiB`. + +[float] +[id="{beatname_lc}-input-{type}-udp-host"] +==== `host` + +The host and UDP port to listen on for event streams. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-docker.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-docker.asciidoc new file mode 100644 index 00000000..5a447bc6 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-docker.asciidoc @@ -0,0 +1,72 @@ +:type: docker + +[id="{beatname_lc}-input-{type}"] +=== Docker input + +++++ +Docker +++++ + +experimental[] + +Use the `docker` input to read logs from Docker containers. + +This input searches for container logs under its path, and parse them into +common message lines, extracting timestamps too. Everything happens before line +filtering, multiline, and JSON decoding, so this input can be used in +combination with those settings. + +Example configuration: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: docker + containers.ids: <1> + - '8b6fe7dc9e067b58476dc57d6986dd96d7100430c5de3b109a99cd56ac655347' +---- + +<1> `containers.ids` is required. All other settings are optional. 
+ +==== Configuration options + +The `docker` input supports the following configuration options plus the +<<{beatname_lc}-input-{type}-common-options>> described later. + +[[config-container-ids]] +===== `containers.ids` + +The list of Docker container IDs to read logs from. Specify +`containers.ids: '*'` to read from all containers. + +===== `containers.path` + +The base path where Docker logs are located. The default +is `/var/lib/docker/containers`. + +===== `containers.stream` + +Reads from the specified streams only: `all`, `stdout` or `stderr`. The default +is `all`. + +The following input configures {beatname_uc} to read the `stdout` stream from +all containers under the default Docker containers path: + +[source,yaml] +---- +- type: docker + containers: + path: "/var/lib/docker/containers" + stream: "stdout" + ids: + - "*" +---- + +include::../inputs/input-common-harvester-options.asciidoc[] + +include::../inputs/input-common-file-options.asciidoc[] + +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-log.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-log.asciidoc new file mode 100644 index 00000000..7c73c63d --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-log.asciidoc @@ -0,0 +1,102 @@ +:type: log + +[id="{beatname_lc}-input-{type}"] +=== Log input + +++++ +Log +++++ + +Use the `log` input to read lines from log files. + +To configure this input, specify a list of glob-based <> +that must be crawled to locate and fetch the log lines. + +Example configuration: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: log + paths: + - /var/log/messages + - /var/log/*.log +---- + + +You can apply additional +<<{beatname_lc}-input-{type}-options,configuration settings>> (such as `fields`, +`include_lines`, `exclude_lines`, `multiline`, and so on) to the lines harvested +from these files. The options that you specify are applied to all the files +harvested by this input. + +To apply different configuration settings to different files, you need to define +multiple input sections: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: log <1> + paths: + - /var/log/system.log + - /var/log/wifi.log +- type: log <2> + paths: + - "/var/log/apache2/*" + fields: + apache: true + fields_under_root: true +---- + +<1> Harvests lines from two files: `system.log` and +`wifi.log`. +<2> Harvests lines from every file in the `apache2` directory, and uses the +`fields` configuration option to add a field called `apache` to the output. + + +IMPORTANT: Make sure a file is not defined more than once across all inputs +because this can lead to unexpected behaviour. + + +[id="{beatname_lc}-input-{type}-options"] +==== Configuration options + +The `log` input supports the following configuration options plus the +<<{beatname_lc}-input-{type}-common-options>> described later. + +[float] +[[input-paths]] +===== `paths` + +A list of glob-based paths that will be crawled and fetched. All patterns +supported by https://golang.org/pkg/path/filepath/#Glob[Golang Glob] are also +supported here. For example, to fetch all files from a predefined level of +subdirectories, the following pattern can be used: `/var/log/*/*.log`. This +fetches all `.log` files from the subfolders of `/var/log`. It does not +fetch log files from the `/var/log` folder itself. 
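+For example, a short sketch of that pattern in a configuration (the path is an
+assumption):
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: log
+  paths:
+    - /var/log/*/*.log
+----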
+It is possible to recursively fetch all files in all subdirectories of a directory +using the optional <> settings. + +{beatname_uc} starts a harvester for each file that it finds under the specified +paths. You can specify one path per line. Each line begins with a dash (-). + +[float] +[[recursive_glob]] +===== `recursive_glob.enabled` + +Enable expanding `**` into recursive glob patterns. With this feature enabled, +the rightmost `**` in each path is expanded into a fixed number of glob +patterns. For example: `/foo/**` expands to `/foo`, `/foo/*`, `/foo/*/*`, and so +on. If enabled it expands a single `**` into a 8-level deep `*` pattern. + +This feature is enabled by default. Set `recursive_glob.enabled` to false to +disable it. + +include::../inputs/input-common-harvester-options.asciidoc[] + +include::../inputs/input-common-file-options.asciidoc[] + +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-redis.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-redis.asciidoc new file mode 100644 index 00000000..05d3f9ec --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-redis.asciidoc @@ -0,0 +1,81 @@ +:type: redis + +[id="{beatname_lc}-input-{type}"] +=== Redis input + +++++ +Redis +++++ + +experimental[] + +Use the `redis` input to read entries from Redis slowlogs. + +Example configuration: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: redis + hosts: ["localhost:6379"] + password: "$\{redis_pwd\}" +---- + + +==== Configuration options + +The `redis` input supports the following configuration options plus the +<<{beatname_lc}-input-{type}-common-options>> described later. + +[float] +[[redis-hosts]] +===== `hosts` + +The list of Redis hosts to connect to. + +[float] +[[redis-password]] +===== `password` + +The password to use when connecting to Redis. + +[float] +[[redis-scan_frequency]] +===== `scan_frequency` + +How often {beatname_uc} reads entries from Redis slowlogs. Specify `1s` to scan +Redis as frequently as possible without causing {beatname_uc} to scan too +frequently. Do not set this value to less than `1s`. + +The default is `10s`. + +IMPORTANT: Redis slowlogs are not permanent. To ensure that all slowlog entries +are collected, set `scan_frequency` to a value that allows {beatname_uc} +sufficient time to connect to Redis, query the logs, and buffer them to the +output within the specified interval. + +[float] +[[redis-timeout]] +===== `timeout` + +How long to wait for a response from Redis before the input returns an error. +The default is `1s`. + +[float] +[[redis-network]] +===== `network` + +The network type to use for the Redis connection. Valid settings include: `tcp`, +`tcp4`, `tcp6`, and `unix`. The default is `tcp`. + +[float] +[[redis-maxconn]] +===== `maxconn` + +The maximum number of concurrent connections. The default is `10`. 
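+Putting the options above together, here is a sketch of a fuller configuration.
+The host, password variable, and values are illustrative assumptions:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: redis
+  hosts: ["localhost:6379"]
+  password: "$\{redis_pwd\}"
+  scan_frequency: 10s
+  timeout: 1s
+  network: tcp
+  maxconn: 10
+----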
+ +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-stdin.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-stdin.asciidoc new file mode 100644 index 00000000..1e9b6aca --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-stdin.asciidoc @@ -0,0 +1,34 @@ +:type: stdin + +[id="{beatname_lc}-input-{type}"] +=== Stdin input + +++++ +Stdin +++++ + +Use the `stdin` input to read events from standard input. + +Note: This input cannot be run at the same time as other input types. + +Example configuration: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: stdin +---- + +[[stdin-input-options]] +==== Configuration options + +The `stdin` input supports the following configuration options plus the +<<{beatname_lc}-input-{type}-common-options>> described later. + +include::../inputs/input-common-harvester-options.asciidoc[] + +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-syslog.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-syslog.asciidoc new file mode 100644 index 00000000..36a45bbd --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-syslog.asciidoc @@ -0,0 +1,47 @@ +:type: syslog + +[id="{beatname_lc}-input-{type}"] +=== Syslog input + +++++ +Syslog +++++ + +Use the `syslog` input to read events over TCP or UDP. This input parses BSD +(RFC 3164) events and some variants. + +Example configurations: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: syslog + protocol.udp: + host: "localhost:9000" +---- + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: syslog + protocol.tcp: + host: "localhost:9000" +---- + +==== Configuration options + +The `syslog` input supports protocol-specific configuration options plus the +<<{beatname_lc}-input-{type}-common-options>> described later. + +Protocol `udp`: + +include::../inputs/input-common-udp-options.asciidoc[] + +Protocol `tcp`: + +include::../inputs/input-common-tcp-options.asciidoc[] + +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-tcp.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-tcp.asciidoc new file mode 100644 index 00000000..f2051990 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-tcp.asciidoc @@ -0,0 +1,35 @@ +:type: tcp + +[id="{beatname_lc}-input-{type}"] +=== TCP input + +++++ +TCP +++++ + +experimental[] + +Use the `tcp` input to read events over TCP. + +Example configuration: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: tcp + max_message_size: 10MiB + host: "localhost:9000" +---- + + +==== Configuration options + +The `tcp` input supports the following configuration options plus the +<<{beatname_lc}-input-{type}-common-options>> described later.
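+For reference, a sketch that also sets the line delimiter and connection
+timeout described below (the values are assumptions):
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: tcp
+  host: "localhost:9000"
+  max_message_size: 20MiB
+  line_delimiter: "\n"
+  timeout: 300s
+----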
+ +include::../inputs/input-common-tcp-options.asciidoc[] + +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-udp.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-udp.asciidoc new file mode 100644 index 00000000..cb29bbda --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-udp.asciidoc @@ -0,0 +1,35 @@ +:type: udp + +[id="{beatname_lc}-input-{type}"] +=== UDP input + +++++ +UDP +++++ + +experimental[] + +Use the `udp` input to read events over UDP. + +Example configuration: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: udp + max_message_size: 10KiB + host: "localhost:8080" +---- + + +==== Configuration options + +The `udp` input supports the following configuration options plus the +<<{beatname_lc}-input-{type}-common-options>> described later. + +include::../inputs/input-common-udp-options.asciidoc[] + +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc index 2f0b8b8f..d196ea97 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/load-balancing.asciidoc @@ -26,7 +26,7 @@ Example: [source,yaml] ------------------------------------------------------------------------------- -filebeat.prospectors: +filebeat.inputs: - type: log paths: - /var/log/*.log diff --git a/vendor/github.com/elastic/beats/filebeat/docs/migration.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/migration.asciidoc index aa268542..01b87846 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/migration.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/migration.asciidoc @@ -1,22 +1,22 @@ [[migrating-from-logstash-forwarder]] -= Migrating from Logstash Forwarder to Filebeat += Migrating from Logstash Forwarder to {beatname_uc} [partintro] -- -Filebeat is based on the Logstash Forwarder source code and replaces Logstash Forwarder as the method +{beatname_uc} is based on the Logstash Forwarder source code and replaces Logstash Forwarder as the method to use for tailing log files and forwarding them to Logstash. -Filebeat introduces the following major changes: +{beatname_uc} introduces the following major changes: * The config file was restructured and converted from JSON to YAML. * The registry file, which stores the state of the currently read files, was changed. * Command line options were removed and moved to the configuration file. * Configuration options for outputs are now inherited from libbeat. For details, see the {libbeat}/index.html[Beats Platform Reference]. -* The https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[Beats input plugin for Logstash] is required. +* The {logstash-ref}/plugins-inputs-beats.html[Beats input plugin for Logstash] is required. 
The following topics describe how to migrate from -https://github.com/elastic/logstash-forwarder[Logstash Forwarder] to Filebeat: +https://github.com/elastic/logstash-forwarder[Logstash Forwarder] to {beatname_uc}: * <> * <> @@ -29,8 +29,10 @@ https://github.com/elastic/logstash-forwarder[Logstash Forwarder] to Filebeat: [[migration-input-plugin]] == Migrate to the Beats input plugin for Logstash -Filebeat requires the https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[Beats input plugin for Logstash]. -For information about getting started with this plugin, see {libbeat}/logstash-installation.html#logstash-setup[Setting up Logstash]. +{beatname_uc} requires the {logstash-ref}/plugins-inputs-beats.html[Beats input +plugin for Logstash]. For information about getting started with this plugin, +see {stack-ov}/get-started-elastic-stack.html#logstash-setup[Configure Logstash to +listen for Beats input] in the {stack} getting started tutorial. In both the 1.5.x and 2.x versions of Logstash, this plugin can be loaded in parallel with the @@ -38,65 +40,65 @@ https://github.com/logstash-plugins/logstash-input-lumberjack[Lumberjack] plugin used by the Logstash Forwarder. If you have a large number of servers that you want to migrate from -Logstash Forwarder to Filebeat, we recommend that you keep the Lumberjack plugin and load the +Logstash Forwarder to {beatname_uc}, we recommend that you keep the Lumberjack plugin and load the Beats input plugin on the same Logstash instances, but set up the Beats input plugin to use a different port. After you have migrated -all the machines to Filebeat, you can remove the Lumberjack plugin. +all the machines to {beatname_uc}, you can remove the Lumberjack plugin. We realize that opening additional ports may not be feasible in your organization. Another option for phased migration -to Filebeat is to ship data from Logstash Forwarder directly to the Beats input plugin. +to {beatname_uc} is to ship data from Logstash Forwarder directly to the Beats input plugin. -IMPORTANT: This data shipping path is only supported for migrating to Filebeat and will no longer be supported when Logstash Forwarder reaches https://www.elastic.co/support/eol[End of Life]. +IMPORTANT: This data shipping path is only supported for migrating to {beatname_uc} and will no longer be supported when Logstash Forwarder reaches https://www.elastic.co/support/eol[End of Life]. What's required? * The https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[Beats input plugin for Logstash] version 2.2.8 or later. -* SSL must be explicitly enabled in the Beats input plugin (`ssl => true`) because SSL is on by default with Logstash Forwarder. The SSL/TLS configs should be the same for both the Logstash Forwarder and Filebeat instances. +* SSL must be explicitly enabled in the Beats input plugin (`ssl => true`) because SSL is on by default with Logstash Forwarder. The SSL/TLS configs should be the same for both the Logstash Forwarder and {beatname_uc} instances. [[migration-registry-file]] == Update the registry file -The registry file stores the state and location information that Filebeat uses to track -where it was last reading. Under Logstash Forwarder, this file was called `.logstash-fowarder`. For Filebeat, +The registry file stores the state and location information that {beatname_uc} uses to track +where it was last reading. Under Logstash Forwarder, this file was called `.logstash-forwarder`. For {beatname_uc}, the file was renamed. 
The name varies depending on the package type: * `data/registry` for `.tar.gz` and `.tgz` archives - * `/var/lib/filebeat/registry` for DEB and RPM packages - * `c:\ProgramData\filebeat\registry` for the Windows zip file + * +/var/lib/{beatname_lc}/registry+ for DEB and RPM packages + * +c:\ProgramData{backslash}{beatname_lc}{backslash}registry+ for the Windows zip file For enhancement reasons, especially for Windows, the structure of the registry file has changed. This makes migrating the file complex and leads to potential errors. -Instead of migrating the registry file, we recommend that you start Filebeat on +Instead of migrating the registry file, we recommend that you start {beatname_uc} on the same host where Logstash Forwarder is running, and send the log files to a different index. This will start indexing from scratch. If you want to start reading at the end of all files, you can set the `tail_files` option in the -Filebeat configuration file to true. +{beatname_uc} configuration file to true. Using this approach allows you to keep the old Logstash Forwarder running and then -slowly migrate over to Filebeat. +slowly migrate over to {beatname_uc}. [[migration-configuration]] == Migrate your configuration -Although Filebeat is based on Logstash Forwarder, Filebeat uses YAML for its configuration +Although {beatname_uc} is based on Logstash Forwarder, {beatname_uc} uses YAML for its configuration file, rather than the JSON+comments language used by Logstash Forwarder. This means that you -will need to migrate your existing configuration files to use the YAML syntax. Filebeat has a main -configuration file called `filebeat.yml`, but Filebeat also accepts reading +will need to migrate your existing configuration files to use the YAML syntax. {beatname_uc} has a main +configuration file called +{beatname_lc}.yml+, but {beatname_uc} also accepts reading multiple configuration files from a `conf.d` directory and has similar restrictions to Logstash Forwarder. If you specify additional config files, you need to place them in a directory other than the directory -where the main Filebeat config file resides. You specify the location of the config files by using the +where the main {beatname_uc} config file resides. You specify the location of the config files by using the `config_dir` option to configure the path to the directory. In most cases, you can do a one-to-one -conversion to create a Filebeat config file for each Logstash Forwarder config file. +conversion to create a {beatname_uc} config file for each Logstash Forwarder config file. -Before migrating your config files, we recommend that you first read the <> -section to understand the Filebeat options. +Before migrating your config files, we recommend that you first read the <> +section to understand the {beatname_uc} options. [float] === Migrate the "files" section -To migrate the `files` section from the Logstash Forwarder configuration, create a `prospectors` section in the Filebeat config file. For example, assuming that you start +To migrate the `files` section from the Logstash Forwarder configuration, create an `inputs` section in the {beatname_uc} config file. 
For example, assuming that you start with this configuration in Logstash Forwarder: [source,json] @@ -130,11 +132,11 @@ with this configuration in Logstash Forwarder: ] ------------------------------------------------------------------------------------- -The equivalent `prospectors` section would look like this: +The equivalent `inputs` section would look like this: -[source,yaml] +["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- -filebeat.prospectors: +{beatname_lc}.inputs: - type: log paths: - /var/log/messages @@ -156,15 +158,15 @@ As you can see, apart from the new `type` options, which were before implicitly defined via the `type` custom field, the remaining options can be migrated mechanically. -The Filebeat configuration gives you more control over how each prospector behaves +The {beatname_uc} configuration gives you more control over how each input behaves by allowing you to configure options that were previously global in Logstash Forwarder -and set them separately for each prospector. See <>. +and set them separately for each input. See <>. [float] === Migrate the "network" section -Like Logstash Forwarder, Filebeat can communicate directly with Logstash. -Filebeat can also insert log entries directly +Like Logstash Forwarder, {beatname_uc} can communicate directly with Logstash. +{beatname_uc} can also insert log entries directly into Elasticsearch. This results in an `output` section that is a bit more complex, as you can see in the following example. You'll find, however, that you can easily translate the Logstash part of the configuration from the equivalent Logstash Forwarder @@ -199,7 +201,7 @@ The following snippet shows the `network` section of the Logstash Forwarder conf } ------------------------------------------------------------------------------------- -The equivalent in Filebeat would look like this: +The equivalent in {beatname_uc} would look like this: [source,yaml] @@ -214,9 +216,9 @@ output.logstash: ssl.key: ./logstash-forwarder.key ------------------------------------------------------------------------------------- -<1> When multiple hosts are defined, the default behavior in Filebeat is to +<1> When multiple hosts are defined, the default behavior in {beatname_uc} is to pick a random host for new connections, similar to the Logstash Forwarder - behavior. Filebeat can optionally do load balancing. For more details, see the + behavior. {beatname_uc} can optionally do load balancing. For more details, see the <> configuration option. <2> Note that if the `ssl` settings are missing, then SSL is disabled. SSL is automatically enabled when you add any of the `ssl` options. For more information about @@ -235,7 +237,7 @@ With the refactoring of the configuration file, the following options were remov |Action |`deadTime` -|`deadTime` was renamed to `ignore_older`. Filebeat keeps the files that it’s reading open until they are older than the timespan specified by `ignore_older`. If a file is changed, Filebeat reopens it. +|`deadTime` was renamed to `ignore_older`. {beatname_uc} keeps the files that it’s reading open until they are older than the timespan specified by `ignore_older`. If a file is changed, {beatname_uc} reopens it. |`netTimeout` |`netTimeout` was removed and is replaced by the `timeout` option in libbeat. @@ -244,13 +246,13 @@ With the refactoring of the configuration file, the following options were remov |Both options were removed and replaced by logging options in libbeat. 
|=== -For more information about these options, see <>. +For more information about these options, see <>. [float] === A complete example Let's see a simple, but complete example of a Logstash Forwarder configuration -and its equivalent for Filebeat. +and its equivalent for {beatname_uc}. Logstash Forwarder configuration: @@ -274,11 +276,11 @@ Logstash Forwarder configuration: } ------------------------------------------------------------------------------------- -Filebeat configuration: +{beatname_uc} configuration: -[source,yaml] +["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- -filebeat.prospectors: +{beatname_lc}.inputs: - type: log paths: - /var/log/*.log @@ -293,11 +295,11 @@ output.elasticsearch: Most command line options available in Logstash Forwarder have been removed and migrated to config file options. The only mandatory command line option for -running Filebeat is `-c` followed by the path to the config file. If you used command line +running {beatname_uc} is `-c` followed by the path to the config file. If you used command line options with Logstash Forwarder, make sure that you add your options to the configuration file. For naming changes, see <>. -Filebeat does provide command line options that are common to all Beats. For more details about +{beatname_uc} does provide command line options that are common to all Beats. For more details about these options, see <>. [[renamed-options]] @@ -316,9 +318,9 @@ replaced by options specified in libbeat. |`-config` |`-c` command line option and `config_dir` -|The config option was split into two parts. You use the `-c` command line option to specify the location of the base (required) config file when you start Filebeat. To use additional config files, you specify the `config_dir` configuration option. +|The config option was split into two parts. You use the `-c` command line option to specify the location of the base (required) config file when you start {beatname_uc}. To use additional config files, you specify the `config_dir` configuration option. -The `config_dir` option specifies the path to the directory that contains additional configuration files. This option MUST point to a directory other than the directory where the main Filebeat config file resides. +The `config_dir` option specifies the path to the directory that contains additional configuration files. This option MUST point to a directory other than the directory where the main {beatname_uc} config file resides. |`-idle-timeout` | @@ -334,7 +336,7 @@ The `config_dir` option specifies the path to the directory that contains additi |`-tail` |`tail_files` -|`tail_files` was moved to the config file and removed as a flag. You can now configure this option separately for each prospector. +|`tail_files` was moved to the config file and removed as a flag. You can now configure this option separately for each input. |`-cpuProfileFile` | @@ -350,7 +352,7 @@ The `config_dir` option specifies the path to the directory that contains additi [[migration-changed-fields]] == Changes to the output fields -In the default configuration, Filebeat structures its output documents a little +In the default configuration, {beatname_uc} structures its output documents a little differently from the Logstash Forwarder. This section discusses the differences and the options you have in case you want compatibility with the Logstash Forwarder. @@ -360,12 +362,12 @@ Forwarder. 
The custom fields (added from the configuration file) are set as top-level fields in Logstash Forwarder but are grouped together under a `fields` -dictionary in Filebeat. If you need the old behavior during the migration phase, -you can use the <> configuration option: +dictionary in {beatname_uc}. If you need the old behavior during the migration phase, +you can use the <> configuration option: -[source,yaml] +["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- -filebeat.prospectors: +{beatname_lc}.inputs: - type: log paths: - /var/log/*.log @@ -375,10 +377,10 @@ filebeat.prospectors: ------------------------------------------------------------------------------------- [float] -=== Filebeat uses "beat.hostname" for sending the hostname of the server +=== {beatname_uc} uses "beat.hostname" for sending the hostname of the server While the Logstash Forwarder sends the hostname of the server it's running on in -the `host` field, Filebeat uses the `beat.hostname` field for the same purpose. +the `host` field, {beatname_uc} uses the `beat.hostname` field for the same purpose. Because `host` is commonly used in the Logstash plugin ecosystem, the Beats input plugin automatically copies `beat.hostname` into `host`. @@ -417,7 +419,7 @@ if you notice any regressions from Logstash Forwarder. [float] === Packaging -The packaging process for Filebeat uses the Beats infrastructure, so some +The packaging process for {beatname_uc} uses the Beats infrastructure, so some aspects of packaging, such as the init scripts, are different from Logstash Forwarder. Please post GitHub issues if you hit any issues with the new packages. @@ -426,13 +428,13 @@ One notable change is the name of the registry file. The name varies depending o type: * `registry` for `.tar.gz` and `.tgz` archives - * `/usr/lib/filebeat/registry` for DEB and RPM packages - * `c:\ProgramData\filebeat\registry` for the Windows zip file + * +/usr/lib/{beatname_lc}/registry+ for DEB and RPM packages + * +c:\ProgramData{backslash}{beatname_lc}{backslash}registry+ for the Windows zip file [float] === Publisher improvements -Behind the scenes, Filebeat uses a sightly improved protocol for communicating +Behind the scenes, {beatname_uc} uses a slightly improved protocol for communicating with Logstash. [float] @@ -440,9 +442,9 @@ with Logstash. If you follow the section on migrating the configuration, you will have SSL enabled. However, you must be aware that if the `ssl` section is missing from the -configuration file, Filebeat uses an unencrypted connection to talk to Logstash. +configuration file, {beatname_uc} uses an unencrypted connection to talk to Logstash. [float] === Logging -Filebeat uses libbeat logging and can also log to rotating files instead of syslog. +{beatname_uc} uses libbeat logging and can also log to rotating files instead of syslog. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc index b8a2bd37..97cc016e 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc @@ -20,7 +20,7 @@ following the numbered steps under <>. Before running Filebeat modules, you need to: * Install and configure the Elastic stack. See -{libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack]. 
+{stack-ov}/get-started-elastic-stack.html[Getting started with the {stack}]. * Complete the Filebeat installation instructions described in <>. After installing Filebeat, return to this diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules-overview.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules-overview.asciidoc index 3834d8e7..e53fe9ea 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules-overview.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules-overview.asciidoc @@ -1,16 +1,16 @@ [[filebeat-modules-overview]] == Modules overview -Filebeat modules simplify the collection, parsing, and visualization of common +{beatname_uc} modules simplify the collection, parsing, and visualization of common log formats. A typical module (say, for the Nginx logs) is composed of one or more filesets (in the case of Nginx, `access` and `error`). A fileset contains the following: -* Filebeat prospector configurations, which contain the default paths where to +* {beatname_uc} input configurations, which contain the default paths where to look or the log files. These default paths depend on the operating system. - The Filebeat configuration is also responsible with stitching together + The {beatname_uc} configuration is also responsible with stitching together multiline events when needed. * Elasticsearch {elasticsearch}/ingest.html[Ingest Node] pipeline definition, @@ -22,23 +22,23 @@ the following: * Sample Kibana dashboards, which can be used to visualize the log files. -Filebeat automatically adjusts these configurations based on your environment +{beatname_uc} automatically adjusts these configurations based on your environment and loads them to the respective Elastic stack components. -NOTE: At the moment, Filebeat modules require using the Elasticsearch -{elasticsearch}/ingest.html[Ingest Node]. In the future, Filebeat Modules will +NOTE: At the moment, {beatname_uc} modules require using the Elasticsearch +{elasticsearch}/ingest.html[Ingest Node]. In the future, {beatname_uc} modules will be able to also configure Logstash as a more powerful alternative to Ingest Node. For now, if you want to use Logstash, you can follow the steps described in the section called -{logstashdoc}/filebeat-modules.html[Working with Filebeat Modules] in the +{logstashdoc}/filebeat-modules.html[Working with {beatname_uc} Modules] in the Logstash Reference. -Filebeat modules require Elasticsearch 5.2 or later. +{beatname_uc} modules require Elasticsearch 5.2 or later. [float] === Get started -To learn how to configure and run Filebeat modules: +To learn how to configure and run {beatname_uc} modules: * Get started by reading <>. * Learn about the different ways to enable modules in <>. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/iis.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/iis.asciidoc new file mode 100644 index 00000000..b17940de --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/iis.asciidoc @@ -0,0 +1,79 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[filebeat-module-iis]] +:modulename: iis + +== IIS module + +The +{modulename}+ module parses access and error logs created by the +Internet Information Services (IIS) HTTP server. 
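For orientation before the details below, here is a minimal sketch (not part of the generated module reference) of enabling both filesets directly in +{beatname_lc}.yml+; it mirrors the commented-out IIS block in +{beatname_lc}.reference.yml+, and the default log paths are chosen by the module based on your OS:

["source","yaml",subs="attributes"]
-----
{beatname_lc}.modules:
- module: iis
  # Both filesets use the default, OS-dependent paths unless var.paths is set.
  access:
    enabled: true
  error:
    enabled: true
-----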
+ +include::../include/what-happens.asciidoc[] + +[float] +=== Compatibility + +This module requires the +{elasticsearch-plugins}/ingest-user-agent.html[ingest-user-agent] and +{elasticsearch-plugins}/ingest-geoip.html[ingest-geoip] Elasticsearch plugins. + +The IIS module was tested with logs from version 10. + +include::../include/running-modules.asciidoc[] + +[float] +=== Example dashboard + +This module comes with a sample dashboard. For example: + +[role="screenshot"] +image::./images/kibana-iis.png[] + +include::../include/configuring-intro.asciidoc[] + +The following example shows how to set paths in the +modules.d/{modulename}.yml+ +file to override the default paths for IIS access logs and error logs: + +["source","yaml",subs="attributes"] +----- +- module: iis + access: + enabled: true + var.paths: ["C:/inetpub/logs/LogFiles/*/*.log"] + error: + enabled: true + var.paths: ["C:/Windows/System32/LogFiles/HTTPERR/*.log"] +----- + +To specify the same settings at the command line, you use: + +["source","sh",subs="attributes"] +----- +./{beatname_lc} --modules {modulename} -M "iis.access.var.paths=[C:/inetpub/logs/LogFiles/*/*.log]" -M "iis.error.var.paths=[C:/Windows/System32/LogFiles/HTTPERR/*.log]" +----- + + +//set the fileset name used in the included example +:fileset_ex: access + +include::../include/config-option-intro.asciidoc[] + +[float] +==== `access` log fileset settings + +include::../include/var-paths.asciidoc[] + +[float] +==== `error` log fileset settings + +include::../include/var-paths.asciidoc[] + + +[float] +=== Fields + +For a description of each field in the module, see the +<> section. + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc index 1c1e4dd4..d13069f0 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc @@ -15,7 +15,7 @@ include::../include/what-happens.asciidoc[] [float] === Compatibility -The +{modulename}+ module was tested with logs from versions 2.11. +The +{modulename}+ module was tested with logs from versions 0.9. include::../include/running-modules.asciidoc[] @@ -45,11 +45,6 @@ file to override the default paths for logs: ----- -// REVIEWERS: I must be doing something wrong with the config settings. The -// above config works, but when I try to specify var.kafka_home, it doesn't -// seem to have any effect. - - To specify the same settings at the command line, you use: ["source","sh",subs="attributes"] @@ -66,14 +61,6 @@ include::../include/config-option-intro.asciidoc[] [float] ==== `log` fileset settings -// REVIEWERS: I've added a description because this variable appears in the -// kafka.yml file. However, I don't understand how it works. - -//*`var.kafka_home`*:: - -//The home path for Kafka. If this variable is not set, {beatname_uc} looks under -//`/opt`. - include::../include/var-paths.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/mongodb.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/mongodb.asciidoc new file mode 100644 index 00000000..7d0ef958 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/mongodb.asciidoc @@ -0,0 +1,69 @@ +//// +This file is generated! 
See scripts/docs_collector.py +//// + +[[filebeat-module-mongodb]] +:modulename: mongodb + +== MongoDB module + +The +{modulename}+ module collects and parses logs created by +https://www.mongodb.com/[MongoDB]. + +include::../include/what-happens.asciidoc[] + +[float] +=== Compatibility + +The +{modulename}+ module was tested with logs from versions v3.2.11 on Debian. + +include::../include/running-modules.asciidoc[] + +[float] +=== Example dashboard + +This module comes with one sample dashboard including error and regular logs. + +[role="screenshot"] +image::./images/filebeat-mongodb-overview.png[] + +include::../include/configuring-intro.asciidoc[] + +The following example shows how to set paths in the +modules.d/{modulename}.yml+ +file to override the default paths for MongoDB logs: + + +["source","yaml",subs="attributes"] +----- +- module: mongodb + log: + enabled: true + var.paths: ["/path/to/log/mongodb/*.log*"] +----- + + +To specify the same settings at the command line, you use: + +["source","sh",subs="attributes"] +----- +./{beatname_lc} --modules {modulename} -M "mongodb.log.var.paths=[/path/to/log/mongodb/*.log*]" +----- + + +:fileset_ex: log + +include::../include/config-option-intro.asciidoc[] + + +[float] +==== `log` log fileset settings + +include::../include/var-paths.asciidoc[] + + +[float] +=== Fields + +For a description of each field in the module, see the +<> section. + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/traefik.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/traefik.asciidoc index aecee8ac..ad56271a 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules/traefik.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/traefik.asciidoc @@ -19,8 +19,6 @@ This module requires the {elasticsearch-plugins}/ingest-user-agent.html[ingest-user-agent] and {elasticsearch-plugins}/ingest-geoip.html[ingest-geoip] Elasticsearch plugins. -//REVIEWERS: Do we need to say anything else about compatibility here? - include::../include/running-modules.asciidoc[] [float] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc index fd34e2f1..d1aac5c2 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc @@ -6,8 +6,10 @@ This file is generated! See scripts/docs_collector.py * <> * <> * <> + * <> * <> * <> + * <> * <> * <> * <> @@ -23,8 +25,10 @@ include::modules-overview.asciidoc[] include::modules/apache2.asciidoc[] include::modules/auditd.asciidoc[] include::modules/icinga.asciidoc[] +include::modules/iis.asciidoc[] include::modules/kafka.asciidoc[] include::modules/logstash.asciidoc[] +include::modules/mongodb.asciidoc[] include::modules/mysql.asciidoc[] include::modules/nginx.asciidoc[] include::modules/osquery.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/multiline.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/multiline.asciidoc index 7c980f74..3e0b8231 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/multiline.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/multiline.asciidoc @@ -17,11 +17,11 @@ Also read <> and <> to avoid common mistakes. 
[[multiline]] === Configuration options -You can specify the following options in the `filebeat.prospectors` section of -the +{beatname_lc}.yml+ config file to control how Filebeat deals with messages +You can specify the following options in the +{beatname_lc}.inputs+ section of +the +{beatname_lc}.yml+ config file to control how {beatname_uc} deals with messages that span multiple lines. -The following example shows how to configure Filebeat to handle a multiline message where the first line of the message begins with a bracket (`[`). +The following example shows how to configure {beatname_uc} to handle a multiline message where the first line of the message begins with a bracket (`[`). [source,yaml] ------------------------------------------------------------------------------------- @@ -31,7 +31,7 @@ multiline.match: after ------------------------------------------------------------------------------------- -Filebeat takes all the lines that do not start with `[` and combines them with the previous line that does. For example, you could use this configuration to join the following lines of a multiline message into a single event: +{beatname_uc} takes all the lines that do not start with `[` and combines them with the previous line that does. For example, you could use this configuration to join the following lines of a multiline message into a single event: ["source","sh",subs="attributes,callouts"] ------------------------------------------------------------------------------------- @@ -43,7 +43,7 @@ Filebeat takes all the lines that do not start with `[` and combines them with t ------------------------------------------------------------------------------------- -*`multiline.pattern`*:: Specifies the regular expression pattern to match. Note that the regexp patterns supported by Filebeat +*`multiline.pattern`*:: Specifies the regular expression pattern to match. Note that the regexp patterns supported by {beatname_uc} differ somewhat from the patterns supported by Logstash. See <> for a list of supported regexp patterns. Depending on how you configure other multiline options, lines that match the specified regular expression are considered either continuations of a previous line or the start of a new multiline event. You can set the `negate` option to negate @@ -51,7 +51,7 @@ the pattern. *`multiline.negate`*:: Defines whether the pattern is negated. The default is `false`. -*`multiline.match`*:: Specifies how Filebeat combines matching lines into an event. The settings are `after` or `before`. The behavior of these settings depends on what you specify for `negate`: +*`multiline.match`*:: Specifies how {beatname_uc} combines matching lines into an event. The settings are `after` or `before`. The behavior of these settings depends on what you specify for `negate`: + [options="header"] |======================= @@ -70,7 +70,7 @@ NOTE: The `after` setting is equivalent to `previous` in https://www.elastic.co/ the multiline message contains more than `max_lines`, any additional lines are discarded. The default is 500. -*`multiline.timeout`*:: After the specified timeout, Filebeat sends the multiline event even if no new pattern is found to start a new event. The default is 5s. +*`multiline.timeout`*:: After the specified timeout, {beatname_uc} sends the multiline event even if no new pattern is found to start a new event. The default is 5s. 
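Before the worked examples below, the options above can be combined as follows (a minimal sketch, assuming log lines that begin with an ISO-style date; the pattern is illustrative and not taken from this document):

[source,yaml]
-------------------------------------------------------------------------------------
multiline.pattern: '^\d{4}-\d{2}-\d{2}'  # a new event starts with a date
multiline.negate: true                   # lines that do NOT match are continuations
multiline.match: after                   # append continuations to the preceding matching line
multiline.max_lines: 500                 # defaults, shown here for completeness
multiline.timeout: 5s
-------------------------------------------------------------------------------------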
=== Examples of multiline configuration @@ -95,7 +95,7 @@ Exception in thread "main" java.lang.NullPointerException at com.example.myproject.Bootstrap.main(Bootstrap.java:14) ------------------------------------------------------------------------------------- -To consolidate these lines into a single event in Filebeat, use the following multiline configuration: +To consolidate these lines into a single event in {beatname_uc}, use the following multiline configuration: [source,yaml] ------------------------------------------------------------------------------------- @@ -119,7 +119,7 @@ Caused by: java.lang.NullPointerException ... 1 more ------------------------------------------------------------------------------------- -To consolidate these lines into a single event in Filebeat, use the following multiline configuration: +To consolidate these lines into a single event in {beatname_uc}, use the following multiline configuration: [source,yaml] ------------------------------------------------------------------------------------- @@ -145,7 +145,7 @@ printf ("%10.10ld \t %10.10ld \t %s\ %f", w, x, y, z ); ------------------------------------------------------------------------------------- -To consolidate these lines into a single event in Filebeat, use the following multiline configuration: +To consolidate these lines into a single event in {beatname_uc}, use the following multiline configuration: [source,yaml] ------------------------------------------------------------------------------------- @@ -168,7 +168,7 @@ specific activity, as in this example: (/dev/disk1)]], net usable_space [34.5gb], net total_space [118.9gb], types [hfs] ------------------------------------------------------------------------------------- -To consolidate these lines into a single event in Filebeat, use the following multiline configuration: +To consolidate these lines into a single event in {beatname_uc}, use the following multiline configuration: [source,yaml] ------------------------------------------------------------------------------------- @@ -192,7 +192,7 @@ Sometimes your application logs contain events, that begin and end with custom m [2015-08-24 11:49:14,399] End event ------------------------------------------------------------------------------------- -To consolidate this as a single event in Filebeat, use the following multiline configuration: +To consolidate this as a single event in {beatname_uc}, use the following multiline configuration: [source,yaml] ------------------------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/filebeat/docs/multiple-prospectors.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/multiple-prospectors.asciidoc deleted file mode 100644 index 11b9f387..00000000 --- a/vendor/github.com/elastic/beats/filebeat/docs/multiple-prospectors.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[multiple-prospectors]] -=== Specify multiple prospectors - -When you need to collect lines from multiple files, you can simply configure a single prospector and specify multiple -paths to start a harvester for each file. However, if you want to apply additional prospector-specific -<> (such as `fields`, `include_lines`, `exclude_lines`, `multiline`, and so on) -to the lines harvested from specific files, you need to define multiple prospectors in the Filebeat config file. - -Within the config file, you can specify multiple prospectors, and each prospector can define multiple paths to crawl, as -shown in the following example. 
- -NOTE: Make sure a file is not defined more than once across all prospectors because this can lead -to unexpected behaviour. - -[source,yaml] -------------------------------------------------------------------------------------- -filebeat.prospectors: -- type: log - paths: - - /var/log/system.log - - /var/log/wifi.log -- type: log - paths: - - "/var/log/apache2/*" - fields: - apache: true - fields_under_root: true -------------------------------------------------------------------------------------- - -The config file in the example starts two prospectors (the list of prospectors is a http://yaml.org/[YAML] -array, so each prospector begins with a `-`). The first prospector has two harvesters, -one harvesting the `system.log` file, and the other harvesting `wifi.log`. The second prospector -starts a harvester for each file in the `apache2` directory and uses the `fields` configuration -option to add a field called `apache` to the output. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc index 37e98845..de7de04b 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc @@ -5,14 +5,25 @@ Overview ++++ -Filebeat is a log data shipper for local files. Installed as an agent on your servers, Filebeat monitors the log directories or specific log files, tails the files, -and forwards them either to https://www.elastic.co/products/elasticsearch[Elasticsearch] or https://www.elastic.co/products/logstash[Logstash] for indexing. +{beatname_uc} is a lightweight shipper for forwarding and centralizing log data. +Installed as an agent on your servers, {beatname_uc} monitors the log +files or locations that you specify, collects log events, and forwards them +to either to https://www.elastic.co/products/elasticsearch[Elasticsearch] or +https://www.elastic.co/products/logstash[Logstash] for indexing. -Here's how Filebeat works: When you start Filebeat, it starts one or more prospectors that look in the local paths you've specified for log files. For each log file that the prospector locates, Filebeat starts a harvester. Each harvester reads a single log file for new content and sends the new log data to libbeat, which aggregates the events and sends the aggregated data to the output that you've configured for Filebeat. +Here's how {beatname_uc} works: When you start {beatname_uc}, it starts one or +more inputs that look in the locations you've specified for log data. For +each log that {beatname_uc} locates, {beatname_uc} starts a harvester. Each +harvester reads a single log for new content and sends the new log data to +libbeat, which aggregates the events and sends the aggregated data to the output +that you've configured for {beatname_uc}. image:./images/filebeat.png[Beats design] -For more information about prospectors and harvesters, see <>. +For more information about inputs and harvesters, see +<>. -Filebeat is a https://www.elastic.co/products/beats[Beat], and it is based on the libbeat framework. -General information about libbeat and setting up Elasticsearch, Logstash, and Kibana are covered in the {libbeat}/index.html[Beats Platform Reference]. +{beatname_uc} is a https://www.elastic.co/products/beats[Beat], and it is based on +the libbeat framework. General information about libbeat and setting up +Elasticsearch, Logstash, and Kibana are covered in the +{libbeat}/index.html[Beats Platform Reference]. 
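To make the input/harvester relationship concrete, here is a minimal sketch (the paths are examples reused from elsewhere in this document, not defaults) of a single `log` input; {beatname_uc} starts one harvester for each file that matches the configured globs:

["source","yaml",subs="attributes"]
-------------------------------------------------------------------------------------
{beatname_lc}.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages   # one harvester for this file
    - /var/log/*.log      # one harvester per matching file
-------------------------------------------------------------------------------------

Each harvester forwards new lines to libbeat, which aggregates the events and sends them to the configured output.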
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc index 5428a68a..5aacc9ba 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc @@ -1,35 +1,35 @@ [[filebeat-configuration-reloading]] == Load external configuration files -Filebeat can load external configuration files for prospectors and modules, -which allows you to separate your configuration into multiple smaller -configuration files. See the <> and the +{beatname_uc} can load external configuration files for inputs and modules, +allowing you to separate your configuration into multiple smaller +configuration files. See the <> and the <> sections for details. include::../../libbeat/docs/shared-note-file-permissions.asciidoc[] [float] -[[load-prospector-config]] -=== Prospector config +[[load-input-config]] +=== Input config -For prospector configurations, you specify the `path` option in the -`filebeat.config.prospectors` section of the +{beatname_lc}.yml+ file. For +For input configurations, you specify the `path` option in the ++{beatname_lc}.config.inputs+ section of the +{beatname_lc}.yml+ file. For example: -[source,yaml] +["source","sh",subs="attributes,callouts"] ------------------------------------------------------------------------------ -filebeat.config.prospectors: +{beatname_lc}.config.inputs: enabled: true path: configs/*.yml ------------------------------------------------------------------------------ -Each file found by the `path` Glob must contain a list of one or more prospector +Each file found by the `path` Glob must contain a list of one or more input definitions. -TIP: The first line of each external configuration file must be a prospector +TIP: The first line of each external configuration file must be an input definition that starts with `- type`. Make sure you omit the line -`filebeat.config.prospectors` from this file. ++{beatname_lc}.config.inputs+ from this file. For example: @@ -47,8 +47,8 @@ For example: ------------------------------------------------------------------------------ -WARNING: It is critical that two running prospectors DO NOT have overlapping -file paths defined. If more than one prospector harvests the same file at the +WARNING: It is critical that two running inputs DO NOT have overlapping +file paths defined. If more than one input harvests the same file at the same time, it can lead to unexpected behavior. [float] @@ -56,13 +56,13 @@ same time, it can lead to unexpected behavior. === Module config For module configurations, you specify the `path` option in the -`filebeat.config.modules` section of the +{beatname_lc}.yml+ file. By default, -Filebeat loads the module configurations enabled in the ++{beatname_lc}.config.modules+ section of the +{beatname_lc}.yml+ file. By default, +{beatname_uc} loads the module configurations enabled in the <> directory. For example: -[source,yaml] +["source","sh",subs="attributes"] ------------------------------------------------------------------------------ -filebeat.config.modules: +{beatname_lc}.config.modules: enabled: true path: ${path.config}/modules.d/*.yml ------------------------------------------------------------------------------ @@ -77,7 +77,7 @@ definitions. TIP: The first line of each external configuration file must be a module definition that starts with `- module`. 
Make sure you omit the line -`filebeat.config.modules` from this file. ++{beatname_lc}.config.modules+ from this file. For example: @@ -95,27 +95,27 @@ For example: === Live reloading -You can configure Filebeat to dynamically reload external configuration files -when there are changes. This feature is available for prospector and module +You can configure {beatname_uc} to dynamically reload external configuration files +when there are changes. This feature is available for input and module configurations that are loaded as -<>. You cannot +<<{beatname_lc}-configuration-reloading,external configuration files>>. You cannot use this feature to reload the main +{beatname_lc}.yml+ configuration file. To configure this feature, you specify a path (https://golang.org/pkg/path/filepath/#Glob[Glob]) to watch for configuration -changes. When the files found by the Glob change, new prospectors and/or +changes. When the files found by the Glob change, new inputs and/or modules are started and stopped according to changes in the configuration files. This feature is especially useful in container environments where one container is used to tail logs for services running in other containers on the same host. To enable dynamic config reloading, you specify the `path` and `reload` options -under `filebeat.config.prospectors` or `filebeat.config.modules` sections. For -example: +under +{beatname_lc}.config.inputs+ or +{beatname_lc}.config.modules+ sections. +For example: -[source,yaml] +["source","sh",subs="attributes"] ------------------------------------------------------------------------------ -filebeat.config.prospectors: +{beatname_lc}.config.inputs: enabled: true path: configs/*.yml reload.enabled: true diff --git a/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc index ce00f879..a5f46106 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc @@ -19,7 +19,7 @@ By deploying Filebeat as a https://kubernetes.io/docs/concepts/workloads/control we ensure we get a running instance on each node of the cluster. Docker logs host folder (`/var/lib/docker/containers`) is mounted on the Filebeat -container. Filebeat will start a prospector for these files and start harvesting +container. Filebeat will start an input for these files and start harvesting them as they appear. Everything is deployed under `kube-system` namespace, you can change that by diff --git a/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc index 618bb3a4..f240ffff 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/securing-filebeat.asciidoc @@ -8,6 +8,7 @@ The following topics describe how to secure communication between Filebeat and o * <> * <> +* <> //sets block macro for https.asciidoc included in next section diff --git a/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc index 8225809b..df46ab50 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc @@ -4,7 +4,7 @@ // that is unique to each beat. 
///// -[[seting-up-and-running]] +[[setting-up-and-running]] == Setting up and running {beatname_uc} Before reading this section, see the @@ -34,3 +34,5 @@ include::../../libbeat/docs/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] include::./running-on-kubernetes.asciidoc[] + +include::../../libbeat/docs/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml b/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml index 96cd9844..715aa914 100644 --- a/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml @@ -24,9 +24,9 @@ filebeat.modules: # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. #var.convert_timezone: false - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Authorization logs #auth: @@ -39,9 +39,9 @@ filebeat.modules: # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. #var.convert_timezone: false - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: #------------------------------- Apache2 Module ------------------------------ #- module: apache2 @@ -53,9 +53,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Error logs #error: @@ -65,9 +65,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: #------------------------------- Auditd Module ------------------------------- #- module: auditd @@ -78,9 +78,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: #------------------------------- Icinga Module ------------------------------- #- module: icinga @@ -92,9 +92,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Debug logs #debug: @@ -104,9 +104,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Startup logs #startup: @@ -116,9 +116,35 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. 
- #prospector: + #input: + +#--------------------------------- IIS Module -------------------------------- +#- module: iis + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: #-------------------------------- Kafka Module ------------------------------- - module: kafka @@ -152,6 +178,20 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: +#------------------------------- mongodb Module ------------------------------ +#- module: mongodb + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + #-------------------------------- MySQL Module ------------------------------- #- module: mysql # Error logs @@ -162,9 +202,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Slow logs #slowlog: @@ -174,9 +214,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: #-------------------------------- Nginx Module ------------------------------- #- module: nginx @@ -188,9 +228,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Error logs #error: @@ -200,9 +240,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: #------------------------------- Osquery Module ------------------------------ - module: osquery @@ -228,9 +268,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: #-------------------------------- Redis Module ------------------------------- #- module: redis @@ -262,30 +302,30 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. 
- #prospector: + #input: -#=========================== Filebeat prospectors ============================= +#=========================== Filebeat inputs ============================= -# List of prospectors to fetch data. -filebeat.prospectors: -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. +# List of inputs to fetch data. +filebeat.inputs: +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. # Type of the files. Based on this the way the file is read is decided. -# The different types cannot be mixed in one prospector +# The different types cannot be mixed in one input # # Possible options are: # * log: Reads every line of the log file (default) # * stdin: Reads the standard in -#------------------------------ Log prospector -------------------------------- +#------------------------------ Log input -------------------------------- - type: log - # Change to true to enable this prospector configuration. + # Change to true to enable this input configuration. enabled: false # Paths that should be crawled and fetched. Glob based paths. @@ -336,7 +376,7 @@ filebeat.prospectors: # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #ignore_older: 0 - # How often the prospector checks for new files in the paths that are specified + # How often the input checks for new files in the paths that are specified # for harvesting. Specify 1s to scan the directory as frequently as possible # without causing Filebeat to scan too frequently. Default: 10s. #scan_frequency: 10s @@ -406,7 +446,7 @@ filebeat.prospectors: # this can mean that the first entries of a new file are skipped. #tail_files: false - # The Ingest Node pipeline ID associated with this prospector. If this is set, it + # The Ingest Node pipeline ID associated with this input. If this is set, it # overwrites the pipeline option from the Elasticsearch output. #pipeline: @@ -472,23 +512,25 @@ filebeat.prospectors: # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_timeout: 0 - # Defines if prospectors is enabled + # Defines if inputs is enabled #enabled: true -#----------------------------- Stdin prospector ------------------------------- +#----------------------------- Stdin input ------------------------------- # Configuration to use stdin input #- type: stdin -#------------------------- Redis slowlog prospector --------------------------- -# Experimental: Config options for the redis slow log prospector +#------------------------- Redis slowlog input --------------------------- +# Experimental: Config options for the redis slow log input #- type: redis - #hosts: ["localhost:6379"] - #username: - #password: #enabled: false + + # List of hosts to pool to retrieve the slow log information. + #hosts: ["localhost:6379"] + + # How often the input checks for redis slow log. #scan_frequency: 10s - # Timeout after which time the prospector should return an error + # Timeout after which time the input should return an error #timeout: 1s # Network type to be used for redis connection. Default: tcp @@ -500,17 +542,64 @@ filebeat.prospectors: # Redis AUTH password. Empty by default. 
#password: foobared -#------------------------------ Udp prospector -------------------------------- -# Experimental: Config options for the udp prospector +#------------------------------ Udp input -------------------------------- +# Experimental: Config options for the udp input #- type: udp + #enabled: false # Maximum size of the message received over UDP - #max_message_size: 10240 + #max_message_size: 10KiB + +#------------------------------ TCP input -------------------------------- +# Experimental: Config options for the TCP input +#- type: tcp + #enabled: false + + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + +#------------------------------ Syslog input -------------------------------- +# Experimental: Config options for the Syslog input +# Accept RFC3164 formatted syslog event via UDP. +#- type: syslog + #enabled: false + #protocol.udp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + +# Accept RFC3164 formatted syslog event via TCP. +#- type: syslog + #enabled: false + + #protocol.tcp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s #========================== Filebeat autodiscover ============================== # Autodiscover allows you to detect changes in the system and spawn new modules -# or prospectors as they happen. +# or inputs as they happen. #filebeat.autodiscover: # List of enabled autodiscover providers @@ -530,10 +619,15 @@ filebeat.prospectors: # data path. #filebeat.registry_file: ${path.data}/registry -# These config files must have the full filebeat config part inside, but only -# the prospector part is processed. All global options like spool_size are ignored. -# The config_dir MUST point to a different directory then where the main filebeat config file is in. -#filebeat.config_dir: +# The permissions mask to apply on registry file. The default value is 0600. +# Must be a valid Unix-style file permissions mask expressed in octal notation. +# This option is not supported on Windows. +#filebeat.registry_file_permissions: 0600 + +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Filebeat overwrites pipelines +# everytime a new Elasticsearch connection is established. +#filebeat.overwrite_pipelines: false # How long filebeat waits on shutdown for the publisher to finish. # Default is 0, not waiting. @@ -541,9 +635,9 @@ filebeat.prospectors: # Enable filebeat config reloading #filebeat.config: - #prospectors: + #inputs: #enabled: false - #path: prospectors.d/*.yml + #path: inputs.d/*.yml #reload.enabled: true #reload.period: 10s #modules: @@ -587,7 +681,8 @@ filebeat.prospectors: # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. - # A value of 0 (the default) ensures events are immediately available + # The default value is set to 2048. 
+ # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 @@ -595,6 +690,66 @@ filebeat.prospectors: # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -629,6 +784,14 @@ filebeat.prospectors: # equals: # http.code: 200 # +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. @@ -653,6 +816,7 @@ filebeat.prospectors: # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 +# match_short_id: false # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. 
# #ssl: @@ -666,6 +830,7 @@ filebeat.prospectors: # #processors: #- add_docker_metadata: ~ +#- add_host_metadata: ~ #============================= Elastic Cloud ================================== @@ -738,7 +903,18 @@ output.elasticsearch: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -802,7 +978,7 @@ output.elasticsearch: # Optional load balance the events between the Logstash hosts. Default is false. #loadbalance: false - # Number of batches to be sent asynchronously to logstash while processing + # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 @@ -811,6 +987,17 @@ output.elasticsearch: # if no error is encountered. #slow_start: false + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + # Optional index name. The default index name is set to filebeat # in all lowercase. #index: 'filebeat' @@ -1155,6 +1342,10 @@ output.elasticsearch: # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + #============================== Dashboards ===================================== # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the @@ -1189,6 +1380,17 @@ output.elasticsearch: # how to install the dashboards by first querying Elasticsearch. #setup.dashboards.always_kibana: false +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. 
+#setup.dashboards.retry.maximum: 0 + + #============================== Template ===================================== # A template is used to set the mapping in Elasticsearch diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.yml b/vendor/github.com/elastic/beats/filebeat/filebeat.yml index 21163d7e..354d0ba1 100644 --- a/vendor/github.com/elastic/beats/filebeat/filebeat.yml +++ b/vendor/github.com/elastic/beats/filebeat/filebeat.yml @@ -10,17 +10,17 @@ # For more available modules and options, please see the filebeat.reference.yml sample # configuration file. -#=========================== Filebeat prospectors ============================= +#=========================== Filebeat inputs ============================= -filebeat.prospectors: +filebeat.inputs: -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. - type: log - # Change to true to enable this prospector configuration. + # Change to true to enable this input configuration. enabled: false # Paths that should be crawled and fetched. Glob based paths. diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/config.go b/vendor/github.com/elastic/beats/filebeat/fileset/config.go index 7a20fa15..ac6d46e5 100644 --- a/vendor/github.com/elastic/beats/filebeat/fileset/config.go +++ b/vendor/github.com/elastic/beats/filebeat/fileset/config.go @@ -1,5 +1,12 @@ package fileset +import ( + "fmt" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" +) + // ModuleConfig contains the configuration file options for a module type ModuleConfig struct { Module string `config:"module" validate:"required"` @@ -13,5 +20,24 @@ type ModuleConfig struct { type FilesetConfig struct { Enabled *bool `config:"enabled"` Var map[string]interface{} `config:"var"` + Input map[string]interface{} `config:"input"` Prospector map[string]interface{} `config:"prospector"` } + +// NewFilesetConfig creates a new FilesetConfig from a common.Config. +func NewFilesetConfig(cfg *common.Config) (*FilesetConfig, error) { + var fcfg FilesetConfig + err := cfg.Unpack(&fcfg) + if err != nil { + return nil, fmt.Errorf("error unpacking configuration") + } + + if len(fcfg.Prospector) > 0 { + cfgwarn.Deprecate("7.0.0", "prospector is deprecated. 
Use `input` instead.") + if len(fcfg.Input) > 0 { + return nil, fmt.Errorf("error prospector and input are defined in the fileset, use only input") + } + fcfg.Input = fcfg.Prospector + } + return &fcfg, nil +} diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/config_test.go b/vendor/github.com/elastic/beats/filebeat/fileset/config_test.go new file mode 100644 index 00000000..f96a345c --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/fileset/config_test.go @@ -0,0 +1,62 @@ +package fileset + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" +) + +func TestProspectorDeprecation(t *testing.T) { + cfg := map[string]interface{}{ + "enabled": true, + "prospector": map[string]interface{}{ + "close_eof": true, + }, + } + + c, err := common.NewConfigFrom(cfg) + assert.NoError(t, err) + + f, err := NewFilesetConfig(c) + if assert.NoError(t, err) { + assert.Equal(t, f.Input["close_eof"], true) + } +} + +func TestInputSettings(t *testing.T) { + cfg := map[string]interface{}{ + "enabled": true, + "input": map[string]interface{}{ + "close_eof": true, + }, + } + + c, err := common.NewConfigFrom(cfg) + assert.NoError(t, err) + + f, err := NewFilesetConfig(c) + if assert.NoError(t, err) { + assert.Equal(t, f.Input["close_eof"], true) + assert.Nil(t, f.Prospector) + } +} + +func TestProspectorDeprecationWhenInputIsAlsoDefined(t *testing.T) { + cfg := map[string]interface{}{ + "enabled": true, + "input": map[string]interface{}{ + "close_eof": true, + }, + "prospector": map[string]interface{}{ + "close_eof": true, + }, + } + + c, err := common.NewConfigFrom(cfg) + assert.NoError(t, err) + + _, err = NewFilesetConfig(c) + assert.Error(t, err, "error prospector and input are defined in the fileset, use only input") +} diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/factory.go b/vendor/github.com/elastic/beats/filebeat/fileset/factory.go index d946578b..473877fb 100644 --- a/vendor/github.com/elastic/beats/filebeat/fileset/factory.go +++ b/vendor/github.com/elastic/beats/filebeat/fileset/factory.go @@ -4,7 +4,7 @@ import ( "github.com/mitchellh/hashstructure" "github.com/elastic/beats/filebeat/channel" - "github.com/elastic/beats/filebeat/prospector" + input "github.com/elastic/beats/filebeat/prospector" "github.com/elastic/beats/filebeat/registrar" "github.com/elastic/beats/libbeat/cfgfile" "github.com/elastic/beats/libbeat/common" @@ -18,26 +18,29 @@ type Factory struct { registrar *registrar.Registrar beatVersion string pipelineLoaderFactory PipelineLoaderFactory + overwritePipelines bool beatDone chan struct{} } -// Wrap an array of prospectors and implements cfgfile.Runner interface -type prospectorsRunner struct { +// Wrap an array of inputs and implements cfgfile.Runner interface +type inputsRunner struct { id uint64 moduleRegistry *ModuleRegistry - prospectors []*prospector.Prospector + inputs []*input.Runner pipelineLoaderFactory PipelineLoaderFactory + overwritePipelines bool } // NewFactory instantiates a new Factory func NewFactory(outlet channel.Factory, registrar *registrar.Registrar, beatVersion string, - pipelineLoaderFactory PipelineLoaderFactory, beatDone chan struct{}) *Factory { + pipelineLoaderFactory PipelineLoaderFactory, overwritePipelines bool, beatDone chan struct{}) *Factory { return &Factory{ outlet: outlet, registrar: registrar, beatVersion: beatVersion, beatDone: beatDone, pipelineLoaderFactory: pipelineLoaderFactory, + overwritePipelines: overwritePipelines, } } @@ -49,7 +52,7 @@ func (f 
*Factory) Create(c *common.Config, meta *common.MapStrPointer) (cfgfile. return nil, err } - pConfigs, err := m.GetProspectorConfigs() + pConfigs, err := m.GetInputConfigs() if err != nil { return nil, err } @@ -62,24 +65,25 @@ func (f *Factory) Create(c *common.Config, meta *common.MapStrPointer) (cfgfile. return nil, err } - prospectors := make([]*prospector.Prospector, len(pConfigs)) + inputs := make([]*input.Runner, len(pConfigs)) for i, pConfig := range pConfigs { - prospectors[i], err = prospector.New(pConfig, f.outlet, f.beatDone, f.registrar.GetStates(), meta) + inputs[i], err = input.New(pConfig, f.outlet, f.beatDone, f.registrar.GetStates(), meta) if err != nil { - logp.Err("Error creating prospector: %s", err) + logp.Err("Error creating input: %s", err) return nil, err } } - return &prospectorsRunner{ + return &inputsRunner{ id: id, moduleRegistry: m, - prospectors: prospectors, + inputs: inputs, pipelineLoaderFactory: f.pipelineLoaderFactory, + overwritePipelines: f.overwritePipelines, }, nil } -func (p *prospectorsRunner) Start() { +func (p *inputsRunner) Start() { // Load pipelines if p.pipelineLoaderFactory != nil { // Load pipelines instantly and then setup a callback for reconnections: @@ -87,7 +91,7 @@ func (p *prospectorsRunner) Start() { if err != nil { logp.Err("Error loading pipeline: %s", err) } else { - err := p.moduleRegistry.LoadPipelines(pipelineLoader) + err := p.moduleRegistry.LoadPipelines(pipelineLoader, p.overwritePipelines) if err != nil { // Log error and continue logp.Err("Error loading pipeline: %s", err) @@ -96,21 +100,21 @@ func (p *prospectorsRunner) Start() { // Callback: callback := func(esClient *elasticsearch.Client) error { - return p.moduleRegistry.LoadPipelines(esClient) + return p.moduleRegistry.LoadPipelines(esClient, p.overwritePipelines) } elasticsearch.RegisterConnectCallback(callback) } - for _, prospector := range p.prospectors { - prospector.Start() + for _, input := range p.inputs { + input.Start() } } -func (p *prospectorsRunner) Stop() { - for _, prospector := range p.prospectors { - prospector.Stop() +func (p *inputsRunner) Stop() { + for _, input := range p.inputs { + input.Stop() } } -func (p *prospectorsRunner) String() string { +func (p *inputsRunner) String() string { return p.moduleRegistry.InfoString() } diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/fileset.go b/vendor/github.com/elastic/beats/filebeat/fileset/fileset.go index 519ada7e..8992b29a 100644 --- a/vendor/github.com/elastic/beats/filebeat/fileset/fileset.go +++ b/vendor/github.com/elastic/beats/filebeat/fileset/fileset.go @@ -84,6 +84,7 @@ type manifest struct { ModuleVersion string `config:"module_version"` Vars []map[string]interface{} `config:"var"` IngestPipeline string `config:"ingest_pipeline"` + Input string `config:"input"` Prospector string `config:"prospector"` MachineLearning []struct { Name string `config:"name"` @@ -96,6 +97,18 @@ type manifest struct { } `config:"requires"` } +func newManifest(cfg *common.Config) (*manifest, error) { + var manifest manifest + err := cfg.Unpack(&manifest) + if err != nil { + return nil, err + } + if manifest.Prospector != "" { + manifest.Input = manifest.Prospector + } + return &manifest, nil +} + // ProcessorRequirement represents the declaration of a dependency to a particular // Ingest Node processor / plugin. 
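Editor's note: `newManifest` above keeps existing module manifests working by mirroring the deprecated `prospector` key onto the new `Input` field. A minimal sketch of that behavior as a package-internal test; the test name and the manifest YAML literal are illustrative only, and the imports are the same `testing`, `assert`, and `common` packages used by the surrounding fileset tests:

func TestNewManifestProspectorAlias(t *testing.T) {
	// Old-style manifest that still uses the deprecated key.
	yaml := []byte("module_version: \"1.0\"\nprospector: config/nginx-access.yml\n")

	cfg, err := common.NewConfigWithYAML(yaml, "")
	assert.NoError(t, err)

	m, err := newManifest(cfg)
	if assert.NoError(t, err) {
		// The deprecated value is copied onto the new field.
		assert.Equal(t, "config/nginx-access.yml", m.Input)
	}
}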
type ProcessorRequirement struct { @@ -109,12 +122,11 @@ func (fs *Fileset) readManifest() (*manifest, error) { if err != nil { return nil, fmt.Errorf("Error reading manifest file: %v", err) } - var manifest manifest - err = cfg.Unpack(&manifest) + manifest, err := newManifest(cfg) if err != nil { return nil, fmt.Errorf("Error unpacking manifest: %v", err) } - return &manifest, nil + return manifest, nil } // evaluateVars resolves the fileset variables. @@ -265,31 +277,31 @@ func (fs *Fileset) getBuiltinVars() (map[string]interface{}, error) { }, nil } -func (fs *Fileset) getProspectorConfig() (*common.Config, error) { - path, err := applyTemplate(fs.vars, fs.manifest.Prospector, false) +func (fs *Fileset) getInputConfig() (*common.Config, error) { + path, err := applyTemplate(fs.vars, fs.manifest.Input, false) if err != nil { - return nil, fmt.Errorf("Error expanding vars on the prospector path: %v", err) + return nil, fmt.Errorf("Error expanding vars on the input path: %v", err) } contents, err := ioutil.ReadFile(filepath.Join(fs.modulePath, fs.name, path)) if err != nil { - return nil, fmt.Errorf("Error reading prospector file %s: %v", path, err) + return nil, fmt.Errorf("Error reading input file %s: %v", path, err) } yaml, err := applyTemplate(fs.vars, string(contents), false) if err != nil { - return nil, fmt.Errorf("Error interpreting the template of the prospector: %v", err) + return nil, fmt.Errorf("Error interpreting the template of the input: %v", err) } cfg, err := common.NewConfigWithYAML([]byte(yaml), "") if err != nil { - return nil, fmt.Errorf("Error reading prospector config: %v", err) + return nil, fmt.Errorf("Error reading input config: %v", err) } // overrides - if len(fs.fcfg.Prospector) > 0 { - overrides, err := common.NewConfigFrom(fs.fcfg.Prospector) + if len(fs.fcfg.Input) > 0 { + overrides, err := common.NewConfigFrom(fs.fcfg.Input) if err != nil { - return nil, fmt.Errorf("Error creating config from prospector overrides: %v", err) + return nil, fmt.Errorf("Error creating config from input overrides: %v", err) } cfg, err = common.MergeConfigs(cfg, overrides) if err != nil { @@ -300,20 +312,20 @@ func (fs *Fileset) getProspectorConfig() (*common.Config, error) { // force our pipeline ID err = cfg.SetString("pipeline", -1, fs.pipelineID) if err != nil { - return nil, fmt.Errorf("Error setting the pipeline ID in the prospector config: %v", err) + return nil, fmt.Errorf("Error setting the pipeline ID in the input config: %v", err) } // force our the module/fileset name err = cfg.SetString("_module_name", -1, fs.mcfg.Module) if err != nil { - return nil, fmt.Errorf("Error setting the _module_name cfg in the prospector config: %v", err) + return nil, fmt.Errorf("Error setting the _module_name cfg in the input config: %v", err) } err = cfg.SetString("_fileset_name", -1, fs.name) if err != nil { - return nil, fmt.Errorf("Error setting the _fileset_name cfg in the prospector config: %v", err) + return nil, fmt.Errorf("Error setting the _fileset_name cfg in the input config: %v", err) } - cfg.PrintDebugf("Merged prospector config for fileset %s/%s", fs.mcfg.Module, fs.name) + cfg.PrintDebugf("Merged input config for fileset %s/%s", fs.mcfg.Module, fs.name) return cfg, nil } diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/fileset_test.go b/vendor/github.com/elastic/beats/filebeat/fileset/fileset_test.go index da63567b..55355218 100644 --- a/vendor/github.com/elastic/beats/filebeat/fileset/fileset_test.go +++ 
b/vendor/github.com/elastic/beats/filebeat/fileset/fileset_test.go @@ -30,7 +30,7 @@ func TestLoadManifestNginx(t *testing.T) { assert.NoError(t, err) assert.Equal(t, manifest.ModuleVersion, "1.0") assert.Equal(t, manifest.IngestPipeline, "ingest/default.json") - assert.Equal(t, manifest.Prospector, "config/nginx-access.yml") + assert.Equal(t, manifest.Input, "config/nginx-access.yml") vars := manifest.Vars assert.Equal(t, "paths", vars[0]["name"]) @@ -145,11 +145,11 @@ func TestResolveVariable(t *testing.T) { } } -func TestGetProspectorConfigNginx(t *testing.T) { +func TestGetInputConfigNginx(t *testing.T) { fs := getModuleForTesting(t, "nginx", "access") assert.NoError(t, fs.Read("5.2.0")) - cfg, err := fs.getProspectorConfig() + cfg, err := fs.getInputConfig() assert.NoError(t, err) assert.True(t, cfg.HasField("paths")) @@ -160,11 +160,11 @@ func TestGetProspectorConfigNginx(t *testing.T) { assert.Equal(t, "filebeat-5.2.0-nginx-access-default", pipelineID) } -func TestGetProspectorConfigNginxOverrides(t *testing.T) { +func TestGetInputConfigNginxOverrides(t *testing.T) { modulesPath, err := filepath.Abs("../module") assert.NoError(t, err) fs, err := New(modulesPath, "access", &ModuleConfig{Module: "nginx"}, &FilesetConfig{ - Prospector: map[string]interface{}{ + Input: map[string]interface{}{ "close_eof": true, }, }) @@ -172,7 +172,7 @@ func TestGetProspectorConfigNginxOverrides(t *testing.T) { assert.NoError(t, fs.Read("5.2.0")) - cfg, err := fs.getProspectorConfig() + cfg, err := fs.getInputConfig() assert.NoError(t, err) assert.True(t, cfg.HasField("paths")) diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/modules.go b/vendor/github.com/elastic/beats/filebeat/fileset/modules.go index 550a3042..35fcc225 100644 --- a/vendor/github.com/elastic/beats/filebeat/fileset/modules.go +++ b/vendor/github.com/elastic/beats/filebeat/fileset/modules.go @@ -14,8 +14,14 @@ import ( "github.com/elastic/beats/libbeat/logp" mlimporter "github.com/elastic/beats/libbeat/ml-importer" "github.com/elastic/beats/libbeat/paths" + "github.com/elastic/beats/libbeat/setup/kibana" ) +var availableMLModules = map[string]string{ + "apache2": "access", + "nginx": "access", +} + type ModuleRegistry struct { registry map[string]map[string]*Fileset // module -> fileset -> Fileset } @@ -133,7 +139,7 @@ func mcfgFromConfig(cfg *common.Config) (*ModuleConfig, error) { err = cfg.Unpack(&dict) if err != nil { - return nil, fmt.Errorf("Error unpacking module %s in a dict: %v", mcfg.Module, err) + return nil, fmt.Errorf("error unpacking module %s in a dict: %v", mcfg.Module, err) } mcfg.Filesets = map[string]*FilesetConfig{} @@ -142,17 +148,16 @@ func mcfgFromConfig(cfg *common.Config) (*ModuleConfig, error) { continue } - var fcfg FilesetConfig tmpCfg, err := common.NewConfigFrom(filesetConfig) if err != nil { - return nil, fmt.Errorf("Error creating config from fileset %s/%s: %v", mcfg.Module, name, err) + return nil, fmt.Errorf("error creating config from fileset %s/%s: %v", mcfg.Module, name, err) } - err = tmpCfg.Unpack(&fcfg) + + fcfg, err := NewFilesetConfig(tmpCfg) if err != nil { - return nil, fmt.Errorf("Error unpacking fileset %s/%s: %v", mcfg.Module, name, err) + return nil, fmt.Errorf("error creating config from fileset %s/%s: %v", mcfg.Module, name, err) } - mcfg.Filesets[name] = &fcfg - + mcfg.Filesets[name] = fcfg } return &mcfg, nil @@ -204,13 +209,12 @@ func applyOverrides(fcfg *FilesetConfig, return nil, fmt.Errorf("Error merging configs: %v", err) } - var res FilesetConfig - err = 
resultConfig.Unpack(&res) + res, err := NewFilesetConfig(resultConfig) if err != nil { return nil, fmt.Errorf("Error unpacking configs: %v", err) } - return &res, nil + return res, nil } // appendWithoutDuplicates appends basic module configuration for each module in the @@ -238,13 +242,13 @@ func appendWithoutDuplicates(moduleConfigs []*ModuleConfig, modules []string) ([ return moduleConfigs, nil } -func (reg *ModuleRegistry) GetProspectorConfigs() ([]*common.Config, error) { +func (reg *ModuleRegistry) GetInputConfigs() ([]*common.Config, error) { result := []*common.Config{} for module, filesets := range reg.registry { for name, fileset := range filesets { - fcfg, err := fileset.getProspectorConfig() + fcfg, err := fileset.getInputConfig() if err != nil { - return result, fmt.Errorf("Error getting config for fielset %s/%s: %v", + return result, fmt.Errorf("Error getting config for fileset %s/%s: %v", module, name, err) } result = append(result, fcfg) @@ -253,51 +257,13 @@ func (reg *ModuleRegistry) GetProspectorConfigs() ([]*common.Config, error) { return result, nil } -// PipelineLoader factory builds and returns a PipelineLoader -type PipelineLoaderFactory func() (PipelineLoader, error) - -// PipelineLoader is a subset of the Elasticsearch client API capable of loading -// the pipelines. -type PipelineLoader interface { - LoadJSON(path string, json map[string]interface{}) ([]byte, error) - Request(method, path string, pipeline string, params map[string]string, body interface{}) (int, []byte, error) - GetVersion() string -} - -// LoadPipelines loads the pipelines for each configured fileset. -func (reg *ModuleRegistry) LoadPipelines(esClient PipelineLoader) error { - for module, filesets := range reg.registry { - for name, fileset := range filesets { - // check that all the required Ingest Node plugins are available - requiredProcessors := fileset.GetRequiredProcessors() - logp.Debug("modules", "Required processors: %s", requiredProcessors) - if len(requiredProcessors) > 0 { - err := checkAvailableProcessors(esClient, requiredProcessors) - if err != nil { - return fmt.Errorf("Error loading pipeline for fileset %s/%s: %v", module, name, err) - } - } - - pipelineID, content, err := fileset.GetPipeline(esClient.GetVersion()) - if err != nil { - return fmt.Errorf("Error getting pipeline for fileset %s/%s: %v", module, name, err) - } - err = loadPipeline(esClient, pipelineID, content) - if err != nil { - return fmt.Errorf("Error loading pipeline for fileset %s/%s: %v", module, name, err) - } - } - } - return nil -} - // InfoString returns the enabled modules and filesets in a single string, ready to // be shown to the user func (reg *ModuleRegistry) InfoString() string { var result string for module, filesets := range reg.registry { var filesetNames string - for name, _ := range filesets { + for name := range filesets { if filesetNames != "" { filesetNames += ", " } @@ -370,107 +336,60 @@ func checkAvailableProcessors(esClient PipelineLoader, requiredProcessors []Proc return nil } -func loadPipeline(esClient PipelineLoader, pipelineID string, content map[string]interface{}) error { - path := "/_ingest/pipeline/" + pipelineID - status, _, _ := esClient.Request("GET", path, "", nil, nil) - if status == 200 { - logp.Debug("modules", "Pipeline %s already loaded", pipelineID) - return nil - } - body, err := esClient.LoadJSON(path, content) +// LoadML loads the machine-learning configurations into Elasticsearch, if X-Pack is available +func (reg *ModuleRegistry) LoadML(esClient PipelineLoader) 
error { + haveXpack, err := mlimporter.HaveXpackML(esClient) if err != nil { - return interpretError(err, body) + return errors.Errorf("error checking if xpack is available: %v", err) } - logp.Info("Elasticsearch pipeline with ID '%s' loaded", pipelineID) - return nil -} - -func interpretError(initialErr error, body []byte) error { - var response struct { - Error struct { - RootCause []struct { - Type string `json:"type"` - Reason string `json:"reason"` - Header struct { - ProcessorType string `json:"processor_type"` - } `json:"header"` - Index string `json:"index"` - } `json:"root_cause"` - } `json:"error"` - } - err := json.Unmarshal(body, &response) - if err != nil { - // this might be ES < 2.0. Do a best effort to check for ES 1.x - var response1x struct { - Error string `json:"error"` - } - err1x := json.Unmarshal(body, &response1x) - if err1x == nil && response1x.Error != "" { - return fmt.Errorf("The Filebeat modules require Elasticsearch >= 5.0. "+ - "This is the response I got from Elasticsearch: %s", body) - } - - return fmt.Errorf("couldn't load pipeline: %v. Additionally, error decoding response body: %s", - initialErr, body) + if !haveXpack { + logp.Warn("X-Pack Machine Learning is not enabled") + return nil } - // missing plugins? - if len(response.Error.RootCause) > 0 && - response.Error.RootCause[0].Type == "parse_exception" && - strings.HasPrefix(response.Error.RootCause[0].Reason, "No processor type exists with name") && - response.Error.RootCause[0].Header.ProcessorType != "" { - - plugins := map[string]string{ - "geoip": "ingest-geoip", - "user_agent": "ingest-user-agent", - } - plugin, ok := plugins[response.Error.RootCause[0].Header.ProcessorType] - if !ok { - return fmt.Errorf("This module requires an Elasticsearch plugin that provides the %s processor. "+ - "Please visit the Elasticsearch documentation for instructions on how to install this plugin. "+ - "Response body: %s", response.Error.RootCause[0].Header.ProcessorType, body) + for module, filesets := range reg.registry { + for name, fileset := range filesets { + for _, mlConfig := range fileset.GetMLConfigs() { + err := mlimporter.ImportMachineLearningJob(esClient, &mlConfig) + if err != nil { + return errors.Errorf("error loading ML config from %s/%s: %v", module, name, err) + } + } } - - return fmt.Errorf("This module requires the %s plugin to be installed in Elasticsearch. "+ - "You can install it using the following command in the Elasticsearch home directory:\n"+ - " sudo bin/elasticsearch-plugin install %s", plugin, plugin) - } - - // older ES version? - if len(response.Error.RootCause) > 0 && - response.Error.RootCause[0].Type == "invalid_index_name_exception" && - response.Error.RootCause[0].Index == "_ingest" { - - return fmt.Errorf("The Ingest Node functionality seems to be missing from Elasticsearch. "+ - "The Filebeat modules require Elasticsearch >= 5.0. "+ - "This is the response I got from Elasticsearch: %s", body) } - return fmt.Errorf("couldn't load pipeline: %v. 
Response body: %s", initialErr, body) + return nil } -// LoadML loads the machine-learning configurations into Elasticsearch, if Xpack is available -func (reg *ModuleRegistry) LoadML(esClient PipelineLoader) error { +// SetupML sets up the machine-learning configurations into Elasticsearch using Kibana, if X-Pack is available +func (reg *ModuleRegistry) SetupML(esClient PipelineLoader, kibanaClient *kibana.Client) error { haveXpack, err := mlimporter.HaveXpackML(esClient) if err != nil { return errors.Errorf("Error checking if xpack is available: %v", err) } if !haveXpack { - logp.Warn("Xpack Machine Learning is not enabled") + logp.Warn("X-Pack Machine Learning is not enabled") return nil } - for module, filesets := range reg.registry { - for name, fileset := range filesets { - for _, mlConfig := range fileset.GetMLConfigs() { - err = mlimporter.ImportMachineLearningJob(esClient, &mlConfig) - if err != nil { - return errors.Errorf("Error loading ML config from %s/%s: %v", module, name, err) - } + modules := make(map[string]string) + if reg.Empty() { + modules = availableMLModules + } else { + for _, module := range reg.ModuleNames() { + if fileset, ok := availableMLModules[module]; ok { + modules[module] = fileset } } } + for module, fileset := range modules { + prefix := fmt.Sprintf("filebeat-%s-%s-", module, fileset) + err := mlimporter.SetupModule(kibanaClient, module, prefix) + if err != nil { + return errors.Errorf("Error setting up ML for %s: %v", module, err) + } + } return nil } @@ -481,3 +400,19 @@ func (reg *ModuleRegistry) Empty() bool { } return count == 0 } + +// ModuleNames returns the names of modules in the ModuleRegistry. +func (reg *ModuleRegistry) ModuleNames() []string { + var modules []string + for m := range reg.registry { + modules = append(modules, m) + } + return modules +} + +// ModuleFilesets return the list of available filesets for the given module +// it returns an empty list if the module doesn't exist +func (reg *ModuleRegistry) ModuleFilesets(module string) ([]string, error) { + modulesPath := paths.Resolve(paths.Home, "module") + return getModuleFilesets(modulesPath, module) +} diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/modules_integration_test.go b/vendor/github.com/elastic/beats/filebeat/fileset/modules_integration_test.go index 947078e1..e15e4427 100644 --- a/vendor/github.com/elastic/beats/filebeat/fileset/modules_integration_test.go +++ b/vendor/github.com/elastic/beats/filebeat/fileset/modules_integration_test.go @@ -34,7 +34,7 @@ func TestLoadPipeline(t *testing.T) { }, } - err := loadPipeline(client, "my-pipeline-id", content) + err := loadPipeline(client, "my-pipeline-id", content, false) assert.NoError(t, err) status, _, err := client.Request("GET", "/_ingest/pipeline/my-pipeline-id", "", nil, nil) @@ -43,9 +43,17 @@ func TestLoadPipeline(t *testing.T) { // loading again shouldn't actually update the pipeline content["description"] = "describe pipeline 2" - err = loadPipeline(client, "my-pipeline-id", content) + err = loadPipeline(client, "my-pipeline-id", content, false) assert.NoError(t, err) + checkUploadedPipeline(t, client, "describe pipeline") + // loading again updates the pipeline + err = loadPipeline(client, "my-pipeline-id", content, true) + assert.NoError(t, err) + checkUploadedPipeline(t, client, "describe pipeline 2") +} + +func checkUploadedPipeline(t *testing.T, client *elasticsearch.Client, expectedDescription string) { status, response, err := client.Request("GET", "/_ingest/pipeline/my-pipeline-id", "", nil, 
nil) assert.NoError(t, err) assert.Equal(t, 200, status) @@ -53,7 +61,7 @@ func TestLoadPipeline(t *testing.T) { var res map[string]interface{} err = json.Unmarshal(response, &res) if assert.NoError(t, err) { - assert.Equal(t, "describe pipeline", res["my-pipeline-id"].(map[string]interface{})["description"], string(response)) + assert.Equal(t, expectedDescription, res["my-pipeline-id"].(map[string]interface{})["description"], string(response)) } } @@ -78,7 +86,7 @@ func TestSetupNginx(t *testing.T) { t.Fatal(err) } - err = reg.LoadPipelines(client) + err = reg.LoadPipelines(client, false) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/modules_test.go b/vendor/github.com/elastic/beats/filebeat/fileset/modules_test.go index feed6e6c..7be00906 100644 --- a/vendor/github.com/elastic/beats/filebeat/fileset/modules_test.go +++ b/vendor/github.com/elastic/beats/filebeat/fileset/modules_test.go @@ -58,7 +58,7 @@ func TestNewModuleRegistry(t *testing.T) { for module, filesets := range reg.registry { for name, fileset := range filesets { - cfg, err := fileset.getProspectorConfig() + cfg, err := fileset.getInputConfig() assert.NoError(t, err, fmt.Sprintf("module: %s, fileset: %s", module, name)) moduleName, err := cfg.String("_module_name", -1) @@ -181,11 +181,32 @@ func TestApplyOverrides(t *testing.T) { }, }, expected: FilesetConfig{ + Input: map[string]interface{}{ + "close_eof": true, + }, Prospector: map[string]interface{}{ "close_eof": true, }, }, }, + { + name: "input overrides", + fcfg: FilesetConfig{}, + module: "nginx", + fileset: "access", + overrides: &ModuleOverrides{ + "nginx": map[string]*common.Config{ + "access": load(t, map[string]interface{}{ + "input.close_eof": true, + }), + }, + }, + expected: FilesetConfig{ + Input: map[string]interface{}{ + "close_eof": true, + }, + }, + }, } for _, test := range tests { @@ -351,9 +372,9 @@ func TestMissingModuleFolder(t *testing.T) { assert.NotNil(t, reg) // this should return an empty list, but no error - prospectors, err := reg.GetProspectorConfigs() + inputs, err := reg.GetInputConfigs() assert.NoError(t, err) - assert.Equal(t, 0, len(prospectors)) + assert.Equal(t, 0, len(inputs)) } func TestInterpretError(t *testing.T) { diff --git a/vendor/github.com/elastic/beats/filebeat/fileset/pipelines.go b/vendor/github.com/elastic/beats/filebeat/fileset/pipelines.go new file mode 100644 index 00000000..e56d6824 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/fileset/pipelines.go @@ -0,0 +1,128 @@ +package fileset + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/elastic/beats/libbeat/logp" +) + +// PipelineLoaderFactory builds and returns a PipelineLoader +type PipelineLoaderFactory func() (PipelineLoader, error) + +// PipelineLoader is a subset of the Elasticsearch client API capable of loading +// the pipelines. +type PipelineLoader interface { + LoadJSON(path string, json map[string]interface{}) ([]byte, error) + Request(method, path string, pipeline string, params map[string]string, body interface{}) (int, []byte, error) + GetVersion() string +} + +// LoadPipelines loads the pipelines for each configured fileset. 
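Editor's note: the LoadPipelines function below (moved here from modules.go) now threads an `overwrite` flag down to loadPipeline: with overwrite=false a pipeline that already exists in Elasticsearch is left untouched, with overwrite=true the pipeline body is always re-submitted. A hedged sketch of a call site, mirroring what inputsRunner.Start does in factory.go; the helper name is illustrative and it is assumed to live in the fileset package with an *elasticsearch.Client at hand:

func setupPipelines(reg *ModuleRegistry, esClient *elasticsearch.Client, overwrite bool) error {
	// Load the pipelines once now ...
	if err := reg.LoadPipelines(esClient, overwrite); err != nil {
		return err
	}
	// ... and reload them on every reconnection, as the input runner does.
	elasticsearch.RegisterConnectCallback(func(c *elasticsearch.Client) error {
		return reg.LoadPipelines(c, overwrite)
	})
	return nil
}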
+func (reg *ModuleRegistry) LoadPipelines(esClient PipelineLoader, overwrite bool) error { + for module, filesets := range reg.registry { + for name, fileset := range filesets { + // check that all the required Ingest Node plugins are available + requiredProcessors := fileset.GetRequiredProcessors() + logp.Debug("modules", "Required processors: %s", requiredProcessors) + if len(requiredProcessors) > 0 { + err := checkAvailableProcessors(esClient, requiredProcessors) + if err != nil { + return fmt.Errorf("Error loading pipeline for fileset %s/%s: %v", module, name, err) + } + } + + pipelineID, content, err := fileset.GetPipeline(esClient.GetVersion()) + if err != nil { + return fmt.Errorf("Error getting pipeline for fileset %s/%s: %v", module, name, err) + } + err = loadPipeline(esClient, pipelineID, content, overwrite) + if err != nil { + return fmt.Errorf("Error loading pipeline for fileset %s/%s: %v", module, name, err) + } + } + } + return nil +} + +func loadPipeline(esClient PipelineLoader, pipelineID string, content map[string]interface{}, overwrite bool) error { + path := "/_ingest/pipeline/" + pipelineID + if !overwrite { + status, _, _ := esClient.Request("GET", path, "", nil, nil) + if status == 200 { + logp.Debug("modules", "Pipeline %s already loaded", pipelineID) + return nil + } + } + body, err := esClient.LoadJSON(path, content) + if err != nil { + return interpretError(err, body) + } + logp.Info("Elasticsearch pipeline with ID '%s' loaded", pipelineID) + return nil +} + +func interpretError(initialErr error, body []byte) error { + var response struct { + Error struct { + RootCause []struct { + Type string `json:"type"` + Reason string `json:"reason"` + Header struct { + ProcessorType string `json:"processor_type"` + } `json:"header"` + Index string `json:"index"` + } `json:"root_cause"` + } `json:"error"` + } + err := json.Unmarshal(body, &response) + if err != nil { + // this might be ES < 2.0. Do a best effort to check for ES 1.x + var response1x struct { + Error string `json:"error"` + } + err1x := json.Unmarshal(body, &response1x) + if err1x == nil && response1x.Error != "" { + return fmt.Errorf("The Filebeat modules require Elasticsearch >= 5.0. "+ + "This is the response I got from Elasticsearch: %s", body) + } + + return fmt.Errorf("couldn't load pipeline: %v. Additionally, error decoding response body: %s", + initialErr, body) + } + + // missing plugins? + if len(response.Error.RootCause) > 0 && + response.Error.RootCause[0].Type == "parse_exception" && + strings.HasPrefix(response.Error.RootCause[0].Reason, "No processor type exists with name") && + response.Error.RootCause[0].Header.ProcessorType != "" { + + plugins := map[string]string{ + "geoip": "ingest-geoip", + "user_agent": "ingest-user-agent", + } + plugin, ok := plugins[response.Error.RootCause[0].Header.ProcessorType] + if !ok { + return fmt.Errorf("This module requires an Elasticsearch plugin that provides the %s processor. "+ + "Please visit the Elasticsearch documentation for instructions on how to install this plugin. "+ + "Response body: %s", response.Error.RootCause[0].Header.ProcessorType, body) + } + + return fmt.Errorf("This module requires the %s plugin to be installed in Elasticsearch. "+ + "You can install it using the following command in the Elasticsearch home directory:\n"+ + " sudo bin/elasticsearch-plugin install %s", plugin, plugin) + } + + // older ES version? 
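// Clusters without the Ingest Node API (pre-5.0) reject the request with an
// invalid_index_name_exception for the "_ingest" index; the check below turns
// that into a clearer "Elasticsearch >= 5.0 is required" message.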
+ if len(response.Error.RootCause) > 0 && + response.Error.RootCause[0].Type == "invalid_index_name_exception" && + response.Error.RootCause[0].Index == "_ingest" { + + return fmt.Errorf("The Ingest Node functionality seems to be missing from Elasticsearch. "+ + "The Filebeat modules require Elasticsearch >= 5.0. "+ + "This is the response I got from Elasticsearch: %s", body) + } + + return fmt.Errorf("couldn't load pipeline: %v. Response body: %s", initialErr, body) +} diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/forwarder.go b/vendor/github.com/elastic/beats/filebeat/harvester/forwarder.go index 3bc813f9..d89812b0 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/forwarder.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/forwarder.go @@ -27,13 +27,13 @@ func NewForwarder(outlet Outlet) *Forwarder { return &Forwarder{Outlet: outlet} } -// Send updates the prospector state and sends the event to the spooler -// All state updates done by the prospector itself are synchronous to make sure no states are overwritten +// Send updates the input state and sends the event to the spooler +// All state updates done by the input itself are synchronous to make sure no states are overwritten func (f *Forwarder) Send(data *util.Data) error { ok := f.Outlet.OnEvent(data) if !ok { - logp.Info("Prospector outlet closed") - return errors.New("prospector outlet closed") + logp.Info("Input outlet closed") + return errors.New("input outlet closed") } return nil diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/harvester.go b/vendor/github.com/elastic/beats/filebeat/harvester/harvester.go index 50d06169..6931b467 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/harvester.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/harvester.go @@ -5,7 +5,7 @@ import ( ) // Harvester contains all methods which must be supported by each harvester -// so the registry can be used by the prospector. +// so the registry can be used by the input type Harvester interface { ID() uuid.UUID Run() error diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json.go b/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json.go index b65cf45f..96307ef4 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json.go @@ -3,6 +3,7 @@ package reader import ( "bytes" "encoding/json" + "strings" "time" "github.com/elastic/beats/libbeat/common" @@ -23,6 +24,12 @@ type dockerLog struct { Stream string `json:"stream"` } +type crioLog struct { + Timestamp time.Time + Stream string + Log []byte +} + // NewDockerJSON creates a new reader renaming a field func NewDockerJSON(r Reader, stream string) *DockerJSON { return &DockerJSON{ @@ -31,6 +38,55 @@ func NewDockerJSON(r Reader, stream string) *DockerJSON { } } +// parseCRILog parses logs in CRI log format. 
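// Next decides which parser to apply by peeking at the first byte of the raw
// message: content starting with '{' is handed to parseDockerJSONLog, anything
// else is assumed to be CRI-formatted and parsed here.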
+// CRI log format example : +// 2017-09-12T22:32:21.212861448Z stdout 2017-09-12 22:32:21.212 [INFO][88] table.go 710: Invalidating dataplane cache +func parseCRILog(message Message, msg *crioLog) (Message, error) { + log := strings.SplitN(string(message.Content), " ", 3) + if len(log) < 3 { + return message, errors.New("invalid CRI log") + } + ts, err := time.Parse(time.RFC3339, log[0]) + if err != nil { + return message, errors.Wrap(err, "parsing CRI timestamp") + } + + msg.Timestamp = ts + msg.Stream = log[1] + msg.Log = []byte(log[2]) + message.AddFields(common.MapStr{ + "stream": msg.Stream, + }) + message.Content = msg.Log + message.Ts = ts + + return message, nil +} + +// parseDockerJSONLog parses logs in Docker JSON log format. +// Docker JSON log format example: +// {"log":"1:M 09 Nov 13:27:36.276 # User requested shutdown...\n","stream":"stdout"} +func parseDockerJSONLog(message Message, msg *dockerLog) (Message, error) { + dec := json.NewDecoder(bytes.NewReader(message.Content)) + if err := dec.Decode(&msg); err != nil { + return message, errors.Wrap(err, "decoding docker JSON") + } + + // Parse timestamp + ts, err := time.Parse(time.RFC3339, msg.Timestamp) + if err != nil { + return message, errors.Wrap(err, "parsing docker timestamp") + } + + message.AddFields(common.MapStr{ + "stream": msg.Stream, + }) + message.Content = []byte(msg.Log) + message.Ts = ts + + return message, nil +} + // Next returns the next line. func (p *DockerJSON) Next() (Message, error) { for { @@ -39,27 +95,19 @@ func (p *DockerJSON) Next() (Message, error) { return message, err } - var line dockerLog - dec := json.NewDecoder(bytes.NewReader(message.Content)) - if err = dec.Decode(&line); err != nil { - return message, errors.Wrap(err, "decoding docker JSON") - } + var dockerLine dockerLog + var crioLine crioLog - if p.stream != "all" && p.stream != line.Stream { - continue + if strings.HasPrefix(string(message.Content), "{") { + message, err = parseDockerJSONLog(message, &dockerLine) + } else { + message, err = parseCRILog(message, &crioLine) } - // Parse timestamp - ts, err := time.Parse(time.RFC3339, line.Timestamp) - if err != nil { - return message, errors.Wrap(err, "parsing docker timestamp") + if p.stream != "all" && p.stream != dockerLine.Stream && p.stream != crioLine.Stream { + continue } - message.AddFields(common.MapStr{ - "stream": line.Stream, - }) - message.Content = []byte(line.Log) - message.Ts = ts - return message, nil + return message, err } } diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json_test.go b/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json_test.go index e6b58be1..db811dd3 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json_test.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/reader/docker_json_test.go @@ -32,12 +32,34 @@ func TestDockerJSON(t *testing.T) { stream: "all", expectedError: true, }, + // Wrong CRI + { + input: [][]byte{[]byte(`2017-09-12T22:32:21.212861448Z stdout`)}, + stream: "all", + expectedError: true, + }, + // Wrong CRI + { + input: [][]byte{[]byte(`{this is not JSON nor CRI`)}, + stream: "all", + expectedError: true, + }, // Missing time { input: [][]byte{[]byte(`{"log":"1:M 09 Nov 13:27:36.276 # User requested shutdown...\n","stream":"stdout"}`)}, stream: "all", expectedError: true, }, + // CRI log + { + input: [][]byte{[]byte(`2017-09-12T22:32:21.212861448Z stdout 2017-09-12 22:32:21.212 [INFO][88] table.go 710: Invalidating dataplane cache`)}, + stream: 
"all", + expectedMessage: Message{ + Content: []byte("2017-09-12 22:32:21.212 [INFO][88] table.go 710: Invalidating dataplane cache"), + Fields: common.MapStr{"stream": "stdout"}, + Ts: time.Date(2017, 9, 12, 22, 32, 21, 212861448, time.UTC), + }, + }, // Filtering stream { input: [][]byte{ @@ -52,6 +74,20 @@ func TestDockerJSON(t *testing.T) { Ts: time.Date(2017, 11, 9, 13, 27, 36, 277747246, time.UTC), }, }, + // Filtering stream + { + input: [][]byte{ + []byte(`2017-10-12T13:32:21.232861448Z stdout 2017-10-12 13:32:21.212 [INFO][88] table.go 710: Invalidating dataplane cache`), + []byte(`2017-11-12T23:32:21.212771448Z stderr 2017-11-12 23:32:21.212 [ERROR][77] table.go 111: error`), + []byte(`2017-12-12T10:32:21.212864448Z stdout 2017-12-12 10:32:21.212 [WARN][88] table.go 222: Warn`), + }, + stream: "stderr", + expectedMessage: Message{ + Content: []byte("2017-11-12 23:32:21.212 [ERROR][77] table.go 111: error"), + Fields: common.MapStr{"stream": "stderr"}, + Ts: time.Date(2017, 11, 12, 23, 32, 21, 212771448, time.UTC), + }, + }, } for _, test := range tests { diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/reader/json.go b/vendor/github.com/elastic/beats/filebeat/harvester/reader/json.go index e84a90c8..756fe4a3 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/reader/json.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/reader/json.go @@ -29,7 +29,9 @@ func (r *JSON) decodeJSON(text []byte) ([]byte, common.MapStr) { err := unmarshal(text, &jsonFields) if err != nil || jsonFields == nil { - logp.Err("Error decoding JSON: %v", err) + if !r.cfg.IgnoreDecodingError { + logp.Err("Error decoding JSON: %v", err) + } if r.cfg.AddErrorKey { jsonFields = common.MapStr{"error": createJSONError(fmt.Sprintf("Error decoding JSON: %v", err))} } @@ -99,6 +101,13 @@ func MergeJSONFields(data common.MapStr, jsonFields common.MapStr, text *string, jsonFields[config.MessageKey] = *text } + // handle the case in which r.cfg.AddErrorKey is set and len(jsonFields) == 1 + // and only thing it contains is `error` key due to error in json decoding + // which results in loss of message key in the main beat event + if len(jsonFields) == 1 && jsonFields["error"] != nil { + data["message"] = *text + } + if config.KeysUnderRoot { // Delete existing json key delete(data, "json") diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/reader/json_config.go b/vendor/github.com/elastic/beats/filebeat/harvester/reader/json_config.go index ed993fa1..41e0911d 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/reader/json_config.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/reader/json_config.go @@ -1,10 +1,11 @@ package reader type JSONConfig struct { - MessageKey string `config:"message_key"` - KeysUnderRoot bool `config:"keys_under_root"` - OverwriteKeys bool `config:"overwrite_keys"` - AddErrorKey bool `config:"add_error_key"` + MessageKey string `config:"message_key"` + KeysUnderRoot bool `config:"keys_under_root"` + OverwriteKeys bool `config:"overwrite_keys"` + AddErrorKey bool `config:"add_error_key"` + IgnoreDecodingError bool `config:"ignore_decoding_error"` } func (c *JSONConfig) Validate() error { diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/registry.go b/vendor/github.com/elastic/beats/filebeat/harvester/registry.go index 9b78ee0b..5d0bdce0 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/registry.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/registry.go @@ -80,7 +80,7 @@ func (r 
*Registry) Start(h Harvester) error { // Starts harvester and picks the right type. In case type is not set, set it to default (log) err := h.Run() if err != nil { - logp.Err("Error running prospector: %v", err) + logp.Err("Error running input: %v", err) } }() return nil diff --git a/vendor/github.com/elastic/beats/filebeat/harvester/util.go b/vendor/github.com/elastic/beats/filebeat/harvester/util.go index 640d5bb2..c144f2d3 100644 --- a/vendor/github.com/elastic/beats/filebeat/harvester/util.go +++ b/vendor/github.com/elastic/beats/filebeat/harvester/util.go @@ -2,7 +2,7 @@ package harvester import "github.com/elastic/beats/libbeat/common/match" -// Contains available prospector types +// Contains available input types const ( LogType = "log" StdinType = "stdin" diff --git a/vendor/github.com/elastic/beats/filebeat/include/list.go b/vendor/github.com/elastic/beats/filebeat/include/list.go index 3ac8060e..96e26850 100644 --- a/vendor/github.com/elastic/beats/filebeat/include/list.go +++ b/vendor/github.com/elastic/beats/filebeat/include/list.go @@ -1,16 +1,18 @@ /* -Package include imports all prospector packages so that they register +Package include imports all input packages so that they register their factories with the global registry. This package can be imported in the -main package to automatically register all of the standard supported prospectors +main package to automatically register all of the standard supported inputs modules. */ package include import ( // This list is automatically generated by `make imports` - _ "github.com/elastic/beats/filebeat/prospector/docker" - _ "github.com/elastic/beats/filebeat/prospector/log" - _ "github.com/elastic/beats/filebeat/prospector/redis" - _ "github.com/elastic/beats/filebeat/prospector/stdin" - _ "github.com/elastic/beats/filebeat/prospector/udp" + _ "github.com/elastic/beats/filebeat/input/docker" + _ "github.com/elastic/beats/filebeat/input/log" + _ "github.com/elastic/beats/filebeat/input/redis" + _ "github.com/elastic/beats/filebeat/input/stdin" + _ "github.com/elastic/beats/filebeat/input/syslog" + _ "github.com/elastic/beats/filebeat/input/tcp" + _ "github.com/elastic/beats/filebeat/input/udp" ) diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/config.go b/vendor/github.com/elastic/beats/filebeat/input/config.go similarity index 66% rename from vendor/github.com/elastic/beats/filebeat/prospector/config.go rename to vendor/github.com/elastic/beats/filebeat/input/config.go index e4eadc2d..6f64678b 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/config.go +++ b/vendor/github.com/elastic/beats/filebeat/input/config.go @@ -1,4 +1,4 @@ -package prospector +package input import ( "time" @@ -8,21 +8,21 @@ import ( ) var ( - defaultConfig = prospectorConfig{ + defaultConfig = inputConfig{ ScanFrequency: 10 * time.Second, Type: cfg.DefaultType, } ) -type prospectorConfig struct { +type inputConfig struct { ScanFrequency time.Duration `config:"scan_frequency" validate:"min=0,nonzero"` Type string `config:"type"` InputType string `config:"input_type"` } -func (c *prospectorConfig) Validate() error { +func (c *inputConfig) Validate() error { if c.InputType != "" { - cfgwarn.Deprecate("6.0.0", "input_type prospector config is deprecated. Use type instead.") + cfgwarn.Deprecate("6.0.0", "input_type input config is deprecated. 
Use type instead.") c.Type = c.InputType } return nil diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/docker/config.go b/vendor/github.com/elastic/beats/filebeat/input/docker/config.go similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/docker/config.go rename to vendor/github.com/elastic/beats/filebeat/input/docker/config.go diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/docker/prospector.go b/vendor/github.com/elastic/beats/filebeat/input/docker/input.go similarity index 51% rename from vendor/github.com/elastic/beats/filebeat/prospector/docker/prospector.go rename to vendor/github.com/elastic/beats/filebeat/input/docker/input.go index 49b451d8..835f50bb 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/docker/prospector.go +++ b/vendor/github.com/elastic/beats/filebeat/input/docker/input.go @@ -5,8 +5,8 @@ import ( "path" "github.com/elastic/beats/filebeat/channel" - "github.com/elastic/beats/filebeat/prospector" - "github.com/elastic/beats/filebeat/prospector/log" + "github.com/elastic/beats/filebeat/input" + "github.com/elastic/beats/filebeat/input/log" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/cfgwarn" @@ -14,24 +14,28 @@ import ( ) func init() { - err := prospector.Register("docker", NewProspector) + err := input.Register("docker", NewInput) if err != nil { panic(err) } } -// NewProspector creates a new docker prospector -func NewProspector(cfg *common.Config, outletFactory channel.Factory, context prospector.Context) (prospector.Prospectorer, error) { - cfgwarn.Experimental("Docker prospector is enabled.") +// NewInput creates a new docker input +func NewInput( + cfg *common.Config, + outletFactory channel.Factory, + context input.Context, +) (input.Input, error) { + cfgwarn.Experimental("Docker input is enabled.") config := defaultConfig if err := cfg.Unpack(&config); err != nil { - return nil, errors.Wrap(err, "reading docker prospector config") + return nil, errors.Wrap(err, "reading docker input config") } - // Wrap log prospector with custom docker settings + // Wrap log input with custom docker settings if len(config.Containers.IDs) == 0 { - return nil, errors.New("Docker prospector requires at least one entry under 'containers.ids'") + return nil, errors.New("Docker input requires at least one entry under 'containers.ids'") } for idx, containerID := range config.Containers.IDs { @@ -43,9 +47,18 @@ func NewProspector(cfg *common.Config, outletFactory channel.Factory, context pr } if err := cfg.SetString("docker-json", -1, config.Containers.Stream); err != nil { - return nil, errors.Wrap(err, "update prospector config") + return nil, errors.Wrap(err, "update input config") } - return log.NewProspector(cfg, outletFactory, context) + + // Add stream to meta to ensure different state per stream + if config.Containers.Stream != "all" { + if context.Meta == nil { + context.Meta = map[string]string{} + } + context.Meta["stream"] = config.Containers.Stream + } + + return log.NewInput(cfg, outletFactory, context) } func checkStream(val string) error { diff --git a/vendor/github.com/elastic/beats/filebeat/input/file/state.go b/vendor/github.com/elastic/beats/filebeat/input/file/state.go index 06dcd323..1628ab9a 100644 --- a/vendor/github.com/elastic/beats/filebeat/input/file/state.go +++ b/vendor/github.com/elastic/beats/filebeat/input/file/state.go @@ -2,28 +2,33 @@ package file import ( "os" - "sync" + "strconv" "time" + "github.com/mitchellh/hashstructure" + 
"github.com/elastic/beats/libbeat/common/file" - "github.com/elastic/beats/libbeat/logp" ) // State is used to communicate the reading state of a file type State struct { - Id string `json:"-"` // local unique id to make comparison more efficient - Finished bool `json:"-"` // harvester state - Fileinfo os.FileInfo `json:"-"` // the file info - Source string `json:"source"` - Offset int64 `json:"offset"` - Timestamp time.Time `json:"timestamp"` - TTL time.Duration `json:"ttl"` - Type string `json:"type"` + Id string `json:"-"` // local unique id to make comparison more efficient + Finished bool `json:"-"` // harvester state + Fileinfo os.FileInfo `json:"-"` // the file info + Source string `json:"source"` + Offset int64 `json:"offset"` + Timestamp time.Time `json:"timestamp"` + TTL time.Duration `json:"ttl"` + Type string `json:"type"` + Meta map[string]string `json:"meta"` FileStateOS file.StateOS } // NewState creates a new file state -func NewState(fileInfo os.FileInfo, path string, t string) State { +func NewState(fileInfo os.FileInfo, path string, t string, meta map[string]string) State { + if len(meta) == 0 { + meta = nil + } return State{ Fileinfo: fileInfo, Source: path, @@ -32,6 +37,7 @@ func NewState(fileInfo os.FileInfo, path string, t string) State { Timestamp: time.Now(), TTL: -1, // By default, state does have an infinite ttl Type: t, + Meta: meta, } } @@ -39,8 +45,18 @@ func NewState(fileInfo os.FileInfo, path string, t string) State { func (s *State) ID() string { // Generate id on first request. This is needed as id is not set when converting back from json if s.Id == "" { - s.Id = s.FileStateOS.String() + if len(s.Meta) == 0 { + s.Id = s.FileStateOS.String() + } else { + hashValue, _ := hashstructure.Hash(s.Meta, nil) + var hashBuf [17]byte + hash := strconv.AppendUint(hashBuf[:0], hashValue, 16) + hash = append(hash, '-') + + s.Id = string(hash) + s.FileStateOS.String() + } } + return s.Id } @@ -51,119 +67,8 @@ func (s *State) IsEqual(c *State) bool { // IsEmpty returns true if the state is empty func (s *State) IsEmpty() bool { - return *s == State{} -} - -// States handles list of FileState -type States struct { - states []State - sync.RWMutex -} - -func NewStates() *States { - return &States{ - states: []State{}, - } -} - -// Update updates a state. If previous state didn't exist, new one is created -func (s *States) Update(newState State) { - s.Lock() - defer s.Unlock() - - index, _ := s.findPrevious(newState) - newState.Timestamp = time.Now() - - if index >= 0 { - s.states[index] = newState - } else { - // No existing state found, add new one - s.states = append(s.states, newState) - logp.Debug("prospector", "New state added for %s", newState.Source) - } -} - -func (s *States) FindPrevious(newState State) State { - s.RLock() - defer s.RUnlock() - _, state := s.findPrevious(newState) - return state -} - -// findPreviousState returns the previous state fo the file -// In case no previous state exists, index -1 is returned -func (s *States) findPrevious(newState State) (int, State) { - // TODO: This could be made potentially more performance by using an index (harvester id) and only use iteration as fall back - for index, oldState := range s.states { - // This is using the FileStateOS for comparison as FileInfo identifiers can only be fetched for existing files - if oldState.IsEqual(&newState) { - return index, oldState - } - } - - return -1, State{} -} - -// Cleanup cleans up the state array. 
All states which are older then `older` are removed -// The number of states that were cleaned up is returned -func (s *States) Cleanup() int { - s.Lock() - defer s.Unlock() - - statesBefore := len(s.states) - - currentTime := time.Now() - states := s.states[:0] - - for _, state := range s.states { - - expired := (state.TTL > 0 && currentTime.Sub(state.Timestamp) > state.TTL) - - if state.TTL == 0 || expired { - if state.Finished { - logp.Debug("state", "State removed for %v because of older: %v", state.Source, state.TTL) - continue // drop state - } else { - logp.Err("State for %s should have been dropped, but couldn't as state is not finished.", state.Source) - } - } - - states = append(states, state) // in-place copy old state - } - s.states = states - - return statesBefore - len(s.states) -} - -// Count returns number of states -func (s *States) Count() int { - s.RLock() - defer s.RUnlock() - - return len(s.states) -} - -// Returns a copy of the file states -func (s *States) GetStates() []State { - s.RLock() - defer s.RUnlock() - - newStates := make([]State, len(s.states)) - copy(newStates, s.states) - - return newStates -} - -// SetStates overwrites all internal states with the given states array -func (s *States) SetStates(states []State) { - s.Lock() - defer s.Unlock() - s.states = states -} - -// Copy create a new copy of the states object -func (s *States) Copy() *States { - states := NewStates() - states.states = s.GetStates() - return states + return s.FileStateOS == file.StateOS{} && + s.Source == "" && + len(s.Meta) == 0 && + s.Timestamp.IsZero() } diff --git a/vendor/github.com/elastic/beats/filebeat/input/file/states.go b/vendor/github.com/elastic/beats/filebeat/input/file/states.go new file mode 100644 index 00000000..b72d0953 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/file/states.go @@ -0,0 +1,157 @@ +package file + +import ( + "sync" + "time" + + "github.com/elastic/beats/libbeat/logp" +) + +// States handles list of FileState. One must use NewStates to instantiate a +// file states regisry. Using the zero-value is not safe. +type States struct { + sync.RWMutex + + // states store + states []State + + // idx maps state IDs to state indexes for fast lookup and modifications. + idx map[string]int +} + +// NewStates generates a new states registry. +func NewStates() *States { + return &States{ + states: nil, + idx: map[string]int{}, + } +} + +// Update updates a state. If previous state didn't exist, new one is created +func (s *States) Update(newState State) { + s.UpdateWithTs(newState, time.Now()) +} + +// UpdateWithTs updates a state, assigning the given timestamp. +// If previous state didn't exist, new one is created +func (s *States) UpdateWithTs(newState State, ts time.Time) { + s.Lock() + defer s.Unlock() + + id := newState.ID() + index := s.findPrevious(id) + newState.Timestamp = ts + + if index >= 0 { + s.states[index] = newState + } else { + // No existing state found, add new one + s.idx[id] = len(s.states) + s.states = append(s.states, newState) + logp.Debug("input", "New state added for %s", newState.Source) + } +} + +// FindPrevious lookups a registered state, that matching the new state. +// Returns a zero-state if no match is found. +func (s *States) FindPrevious(newState State) State { + s.RLock() + defer s.RUnlock() + i := s.findPrevious(newState.ID()) + if i < 0 { + return State{} + } + return s.states[i] +} + +// findPrevious returns the previous state for the file. 
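// The lookup goes through the idx map keyed by State.ID, so it is a constant-
// time map access instead of the linear scan used by the old implementation.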
+// In case no previous state exists, index -1 is returned +func (s *States) findPrevious(id string) int { + if i, exists := s.idx[id]; exists { + return i + } + return -1 +} + +// Cleanup cleans up the state array. All states which are older then `older` are removed +// The number of states that were cleaned up and number of states that can be +// cleaned up in the future is returned. +func (s *States) Cleanup() (int, int) { + s.Lock() + defer s.Unlock() + + currentTime := time.Now() + statesBefore := len(s.states) + numCanExpire := 0 + + L := len(s.states) + for i := 0; i < L; { + state := &s.states[i] + canExpire := state.TTL > 0 + expired := (canExpire && currentTime.Sub(state.Timestamp) > state.TTL) + + if state.TTL == 0 || expired { + if !state.Finished { + logp.Err("State for %s should have been dropped, but couldn't as state is not finished.", state.Source) + i++ + continue + } + + delete(s.idx, state.ID()) + logp.Debug("state", "State removed for %v because of older: %v", state.Source, state.TTL) + + L-- + if L != i { + s.states[i] = s.states[L] + s.idx[s.states[i].ID()] = i + } + } else { + i++ + if canExpire { + numCanExpire++ + } + } + } + + s.states = s.states[:L] + return statesBefore - L, numCanExpire +} + +// Count returns number of states +func (s *States) Count() int { + s.RLock() + defer s.RUnlock() + + return len(s.states) +} + +// GetStates creates copy of the file states. +func (s *States) GetStates() []State { + s.RLock() + defer s.RUnlock() + + newStates := make([]State, len(s.states)) + copy(newStates, s.states) + + return newStates +} + +// SetStates overwrites all internal states with the given states array +func (s *States) SetStates(states []State) { + s.Lock() + defer s.Unlock() + s.states = states + + // create new index + s.idx = map[string]int{} + for i := range states { + s.idx[states[i].ID()] = i + } +} + +// Copy create a new copy of the states object +func (s *States) Copy() *States { + new := NewStates() + new.SetStates(s.GetStates()) + return new +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/file/state_test.go b/vendor/github.com/elastic/beats/filebeat/input/file/states_test.go similarity index 60% rename from vendor/github.com/elastic/beats/filebeat/input/file/state_test.go rename to vendor/github.com/elastic/beats/filebeat/input/file/states_test.go index 495c60ea..43934658 100644 --- a/vendor/github.com/elastic/beats/filebeat/input/file/state_test.go +++ b/vendor/github.com/elastic/beats/filebeat/input/file/states_test.go @@ -10,34 +10,35 @@ import ( ) var cleanupTests = []struct { + title string state State countBefore int cleanupCount int countAfter int }{ { - // Finished and TTL set to 0 + "Finished and TTL set to 0", State{ TTL: 0, Finished: true, }, 1, 1, 0, }, { - // Unfinished but TTL set to 0 + "Unfinished but TTL set to 0", State{ TTL: 0, Finished: false, }, 1, 0, 1, }, { - // TTL = -1 means not expiring + "TTL = -1 means not expiring", State{ TTL: -1, Finished: true, }, 1, 0, 1, }, { - // Expired and finished + "Expired and finished", State{ TTL: 1 * time.Second, Timestamp: time.Now().Add(-2 * time.Second), @@ -45,7 +46,7 @@ var cleanupTests = []struct { }, 1, 1, 0, }, { - // Expired but unfinished + "Expired but unfinished", State{ TTL: 1 * time.Second, Timestamp: time.Now().Add(-2 * time.Second), @@ -56,11 +57,15 @@ var cleanupTests = []struct { func TestCleanup(t *testing.T) { for _, test := range cleanupTests { - states := NewStates() - states.states = append(states.states, test.state) + test := test + 
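// Rebind the range variable so the closure passed to t.Run captures this
// iteration's test case rather than the loop variable shared across iterations.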
t.Run(test.title, func(t *testing.T) { + states := NewStates() + states.SetStates([]State{test.state}) - assert.Equal(t, test.countBefore, states.Count()) - assert.Equal(t, test.cleanupCount, states.Cleanup()) - assert.Equal(t, test.countAfter, states.Count()) + assert.Equal(t, test.countBefore, states.Count()) + cleanupCount, _ := states.Cleanup() + assert.Equal(t, test.cleanupCount, cleanupCount) + assert.Equal(t, test.countAfter, states.Count()) + }) } } diff --git a/vendor/github.com/elastic/beats/filebeat/input/input.go b/vendor/github.com/elastic/beats/filebeat/input/input.go new file mode 100644 index 00000000..1d4ec5ae --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/input.go @@ -0,0 +1,151 @@ +package input + +import ( + "fmt" + "sync" + "time" + + "github.com/mitchellh/hashstructure" + + "github.com/elastic/beats/filebeat/channel" + "github.com/elastic/beats/filebeat/input/file" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +// Input is the interface common to all input +type Input interface { + Run() + Stop() + Wait() +} + +// Runner encapsulate the lifecycle of the input +type Runner struct { + config inputConfig + input Input + done chan struct{} + wg *sync.WaitGroup + ID uint64 + Once bool + beatDone chan struct{} +} + +// New instantiates a new Runner +func New( + conf *common.Config, + outlet channel.Factory, + beatDone chan struct{}, + states []file.State, + dynFields *common.MapStrPointer, +) (*Runner, error) { + input := &Runner{ + config: defaultConfig, + wg: &sync.WaitGroup{}, + done: make(chan struct{}), + Once: false, + beatDone: beatDone, + } + + var err error + if err = conf.Unpack(&input.config); err != nil { + return nil, err + } + + var h map[string]interface{} + conf.Unpack(&h) + input.ID, err = hashstructure.Hash(h, nil) + if err != nil { + return nil, err + } + + var f Factory + f, err = GetFactory(input.config.Type) + if err != nil { + return input, err + } + + context := Context{ + States: states, + Done: input.done, + BeatDone: input.beatDone, + DynamicFields: dynFields, + Meta: nil, + } + var ipt Input + ipt, err = f(conf, outlet, context) + if err != nil { + return input, err + } + input.input = ipt + + return input, nil +} + +// Start starts the input +func (p *Runner) Start() { + p.wg.Add(1) + logp.Info("Starting input of type: %v; ID: %d ", p.config.Type, p.ID) + + onceWg := sync.WaitGroup{} + if p.Once { + // Make sure start is only completed when Run did a complete first scan + defer onceWg.Wait() + } + + onceWg.Add(1) + // Add waitgroup to make sure input is finished + go func() { + defer func() { + onceWg.Done() + p.stop() + p.wg.Done() + }() + + p.Run() + }() +} + +// Run starts scanning through all the file paths and fetch the related files. 
Start a harvester for each file +func (p *Runner) Run() { + // Initial input run + p.input.Run() + + // Shuts down after the first complete run of all input + if p.Once { + return + } + + for { + select { + case <-p.done: + logp.Info("input ticker stopped") + return + case <-time.After(p.config.ScanFrequency): + logp.Debug("input", "Run input") + p.input.Run() + } + } +} + +// Stop stops the input and with it all harvesters +func (p *Runner) Stop() { + // Stop scanning and wait for completion + close(p.done) + p.wg.Wait() +} + +func (p *Runner) stop() { + logp.Info("Stopping Input: %d", p.ID) + + // In case of once, it will be waited until harvesters close itself + if p.Once { + p.input.Wait() + } else { + p.input.Stop() + } +} + +func (p *Runner) String() string { + return fmt.Sprintf("input [type=%s, ID=%d]", p.config.Type, p.ID) +} diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/prospector_test.go b/vendor/github.com/elastic/beats/filebeat/input/input_test.go similarity index 55% rename from vendor/github.com/elastic/beats/filebeat/prospector/prospector_test.go rename to vendor/github.com/elastic/beats/filebeat/input/input_test.go index c28447a1..db0f7562 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/prospector_test.go +++ b/vendor/github.com/elastic/beats/filebeat/input/input_test.go @@ -1,3 +1,3 @@ // +build !integration -package prospector +package input diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/config.go b/vendor/github.com/elastic/beats/filebeat/input/log/config.go similarity index 94% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/config.go rename to vendor/github.com/elastic/beats/filebeat/input/log/config.go index e0a7284e..b7a5f46c 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/log/config.go +++ b/vendor/github.com/elastic/beats/filebeat/input/log/config.go @@ -24,7 +24,7 @@ var ( }, CleanInactive: 0, - // Prospector + // Input Enabled: true, IgnoreOlder: 0, ScanFrequency: 10 * time.Second, @@ -60,7 +60,7 @@ type config struct { InputType string `config:"input_type"` CleanInactive time.Duration `config:"clean_inactive" validate:"min=0"` - // Prospector + // Input Enabled bool `config:"enabled"` ExcludeFiles []match.Matcher `config:"exclude_files"` IgnoreOlder time.Duration `config:"ignore_older"` @@ -84,7 +84,7 @@ type config struct { Multiline *reader.MultilineConfig `config:"multiline"` JSON *reader.JSONConfig `config:"json"` - // Hidden on purpose, used by the docker prospector: + // Hidden on purpose, used by the docker input: DockerJSON string `config:"docker-json"` } @@ -127,9 +127,9 @@ func (c *config) Validate() error { c.Type = c.InputType } - // Prospector + // Input if c.Type == harvester.LogType && len(c.Paths) == 0 { - return fmt.Errorf("No paths were defined for prospector") + return fmt.Errorf("No paths were defined for input") } if c.CleanInactive != 0 && c.IgnoreOlder == 0 { @@ -171,11 +171,11 @@ func (c *config) Validate() error { // resolveRecursiveGlobs expands `**` from the globs in multiple patterns func (c *config) resolveRecursiveGlobs() error { if !c.RecursiveGlob { - logp.Debug("prospector", "recursive glob disabled") + logp.Debug("input", "recursive glob disabled") return nil } - logp.Debug("prospector", "recursive glob enabled") + logp.Debug("input", "recursive glob enabled") var paths []string for _, path := range c.Paths { patterns, err := file.GlobPatterns(path, recursiveGlobDepth) @@ -183,7 +183,7 @@ func (c *config) resolveRecursiveGlobs() error 
{ return err } if len(patterns) > 1 { - logp.Debug("prospector", "%q expanded to %#v", path, patterns) + logp.Debug("input", "%q expanded to %#v", path, patterns) } paths = append(paths, patterns...) } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/config_test.go b/vendor/github.com/elastic/beats/filebeat/input/log/config_test.go similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/config_test.go rename to vendor/github.com/elastic/beats/filebeat/input/log/config_test.go diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/file.go b/vendor/github.com/elastic/beats/filebeat/input/log/file.go similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/file.go rename to vendor/github.com/elastic/beats/filebeat/input/log/file.go diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/harvester.go b/vendor/github.com/elastic/beats/filebeat/input/log/harvester.go similarity index 91% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/harvester.go rename to vendor/github.com/elastic/beats/filebeat/input/log/harvester.go index 6634f5f9..e619e323 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/log/harvester.go +++ b/vendor/github.com/elastic/beats/filebeat/input/log/harvester.go @@ -52,6 +52,9 @@ var ( ErrClosed = errors.New("reader closed") ) +// OutletFactory provides an outlet for the harvester +type OutletFactory func() channel.Outleter + // Harvester contains all harvester related data type Harvester struct { id uuid.UUID @@ -62,6 +65,7 @@ type Harvester struct { done chan struct{} stopOnce sync.Once stopWg *sync.WaitGroup + stopLock sync.Mutex // internal harvester state state file.State @@ -74,8 +78,8 @@ type Harvester struct { encoding encoding.Encoding // event/state publishing - forwarder *harvester.Forwarder - publishState func(*util.Data) bool + outletFactory OutletFactory + publishState func(*util.Data) bool onTerminate func() } @@ -86,17 +90,18 @@ func NewHarvester( state file.State, states *file.States, publishState func(*util.Data) bool, - outlet channel.Outleter, + outletFactory OutletFactory, ) (*Harvester, error) { h := &Harvester{ - config: defaultConfig, - state: state, - states: states, - publishState: publishState, - done: make(chan struct{}), - stopWg: &sync.WaitGroup{}, - id: uuid.NewV4(), + config: defaultConfig, + state: state, + states: states, + publishState: publishState, + done: make(chan struct{}), + stopWg: &sync.WaitGroup{}, + id: uuid.NewV4(), + outletFactory: outletFactory, } if err := config.Unpack(&h.config); err != nil { @@ -115,8 +120,6 @@ func NewHarvester( } // Add outlet signal so harvester can also stop itself - outlet = channel.CloseOnSignal(outlet, h.done) - h.forwarder = harvester.NewForwarder(outlet) return h, nil } @@ -163,11 +166,20 @@ func (h *Harvester) Run() error { if h.onTerminate != nil { defer h.onTerminate() } + + outlet := channel.CloseOnSignal(h.outletFactory(), h.done) + forwarder := harvester.NewForwarder(outlet) + // This is to make sure a harvester is not started anymore if stop was already // called before the harvester was started. The waitgroup is not incremented afterwards // as otherwise it could happened that between checking for the close channel and incrementing // the waitgroup, the harvester could be stopped. 
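Illustrative aside (not part of the upstream patch): the stopLock added below exists because sync.WaitGroup forbids a positive Add that runs concurrently with Wait while the counter is zero, which is exactly the window between a harvester starting late and Stop already waiting. A stripped-down sketch of the same guard, with invented names:

    package sketch

    import "sync"

    type worker struct {
    	mu   sync.Mutex
    	wg   sync.WaitGroup
    	done chan struct{}
    }

    // begin registers the worker unless stop was already requested.
    func (w *worker) begin() bool {
    	w.mu.Lock()
    	w.wg.Add(1) // cannot race with Wait: both sides hold mu
    	w.mu.Unlock()
    	select {
    	case <-w.done:
    		w.wg.Done()
    		return false
    	default:
    		return true
    	}
    }

    // end marks the worker as finished; pair it with a successful begin.
    func (w *worker) end() { w.wg.Done() }

    // stopAndWait signals shutdown and waits for registered workers.
    func (w *worker) stopAndWait() {
    	close(w.done)
    	w.mu.Lock()
    	w.wg.Wait()
    	w.mu.Unlock()
    }
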
+ // Here stopLock is used to prevent a data race where stopWg.Add(1) below is called + // while stopWg.Wait() is executing in a different goroutine, which is forbidden + // according to sync.WaitGroup docs. + h.stopLock.Lock() h.stopWg.Add(1) + h.stopLock.Unlock() select { case <-h.done: h.stopWg.Done() @@ -193,7 +205,7 @@ func (h *Harvester) Run() error { // Closes reader after timeout or when done channel is closed // This routine is also responsible to properly stop the reader - go func() { + go func(source string) { closeTimeout := make(<-chan time.Time) // starts close_timeout timer @@ -204,14 +216,14 @@ func (h *Harvester) Run() error { select { // Applies when timeout is reached case <-closeTimeout: - logp.Info("Closing harvester because close_timeout was reached.") + logp.Info("Closing harvester because close_timeout was reached: %s", source) // Required when reader loop returns and reader finished case <-h.done: } h.stop() h.log.Close() - }() + }(h.state.Source) logp.Info("Harvester started for file: %s", h.state.Source) @@ -240,7 +252,7 @@ func (h *Harvester) Run() error { case ErrInactive: logp.Info("File is inactive: %s. Closing because close_inactive of %v reached.", h.state.Source, h.config.CloseInactive) default: - logp.Err("Read line error: %s; File: ", err, h.state.Source) + logp.Err("Read line error: %v; File: %v", err, h.state.Source) } return nil } @@ -255,6 +267,7 @@ func (h *Harvester) Run() error { // This is important in case sending is not successful so on shutdown // the old offset is reported state := h.getState() + startingOffset := state.Offset state.Offset += int64(message.Bytes) // Create state event @@ -269,7 +282,7 @@ func (h *Harvester) Run() error { if !message.IsEmpty() && h.shouldExportLine(text) { fields := common.MapStr{ "source": state.Source, - "offset": state.Offset, // Offset here is the offset before the starting char. + "offset": startingOffset, // Offset here is the offset before the starting char. } fields.DeepUpdate(message.Fields) @@ -302,7 +315,7 @@ func (h *Harvester) Run() error { // Always send event to update state, also if lines was skipped // Stop harvester in case of an error - if !h.sendEvent(data) { + if !h.sendEvent(data, forwarder) { return nil } @@ -321,17 +334,20 @@ func (h *Harvester) stop() { // Stop stops harvester and waits for completion func (h *Harvester) Stop() { h.stop() + // Prevent stopWg.Wait() to be called at the same time as stopWg.Add(1) + h.stopLock.Lock() h.stopWg.Wait() + h.stopLock.Unlock() } // sendEvent sends event to the spooler channel // Return false if event was not sent -func (h *Harvester) sendEvent(data *util.Data) bool { +func (h *Harvester) sendEvent(data *util.Data, forwarder *harvester.Forwarder) bool { if h.source.HasState() { h.states.Update(data.GetState()) } - err := h.forwarder.Send(data) + err := forwarder.Send(data) return err == nil } @@ -408,7 +424,7 @@ func (h *Harvester) validateFile(f *os.File) error { return fmt.Errorf("Tried to open non regular file: %q %s", info.Mode(), info.Name()) } - // Compares the stat of the opened file to the state given by the prospector. Abort if not match. + // Compares the stat of the opened file to the state given by the input. Abort if not match. if !os.SameFile(h.state.Fileinfo, info) { return errors.New("file info is not identical with opened file. 
Aborting harvesting and retrying file later again") } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/harvester_test.go b/vendor/github.com/elastic/beats/filebeat/input/log/harvester_test.go similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/harvester_test.go rename to vendor/github.com/elastic/beats/filebeat/input/log/harvester_test.go diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector.go b/vendor/github.com/elastic/beats/filebeat/input/log/input.go similarity index 71% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/prospector.go rename to vendor/github.com/elastic/beats/filebeat/input/log/input.go index 0ae11eed..10a118df 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector.go +++ b/vendor/github.com/elastic/beats/filebeat/input/log/input.go @@ -11,8 +11,8 @@ import ( "github.com/elastic/beats/filebeat/channel" "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/input" "github.com/elastic/beats/filebeat/input/file" - "github.com/elastic/beats/filebeat/prospector" "github.com/elastic/beats/filebeat/util" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/atomic" @@ -25,20 +25,20 @@ const ( ) var ( - filesRenamed = monitoring.NewInt(nil, "filebeat.prospector.log.files.renamed") - filesTruncated = monitoring.NewInt(nil, "filebeat.prospector.log.files.truncated") + filesRenamed = monitoring.NewInt(nil, "filebeat.input.log.files.renamed") + filesTruncated = monitoring.NewInt(nil, "filebeat.input.log.files.truncated") harvesterSkipped = monitoring.NewInt(nil, "filebeat.harvester.skipped") ) func init() { - err := prospector.Register("log", NewProspector) + err := input.Register("log", NewInput) if err != nil { panic(err) } } -// Prospector contains the prospector and its config -type Prospector struct { +// Input contains the input and its config +type Input struct { cfg *common.Config config config states *file.States @@ -47,17 +47,18 @@ type Prospector struct { stateOutlet channel.Outleter done chan struct{} numHarvesters atomic.Uint32 + meta map[string]string } -// NewProspector instantiates a new Log -func NewProspector( +// NewInput instantiates a new Log +func NewInput( cfg *common.Config, outlet channel.Factory, - context prospector.Context, -) (prospector.Prospectorer, error) { + context input.Context, +) (input.Input, error) { // Note: underlying output. - // The prospector and harvester do have different requirements + // The input and harvester do have different requirements // on the timings the outlets must be closed/unblocked. // The outlet generated here is the underlying outlet, only closed // once all workers have been shut down. @@ -72,14 +73,20 @@ func NewProspector( // can be forwarded correctly to the registrar. 
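Illustrative aside (not part of the upstream patch): the wiring just below layers two decorators from the filebeat/channel package, SubOutlet gives the state path its own closable handle on the shared outlet, and CloseOnSignal ties that handle to the beat shutdown channel so, as the surrounding comment explains, pending state updates can still reach the registrar. The decorator idea, reduced to a self-contained sketch around a deliberately simplified Outleter interface (the real channel package has more to it):

    package sketch

    import "sync"

    type Outleter interface {
    	OnEvent(event string) bool
    	Close() error
    }

    type closeOnSignal struct {
    	Outleter
    	once sync.Once
    }

    // CloseOnSignal returns an outlet that is closed as soon as sig fires,
    // no matter what the caller does with it afterwards.
    func CloseOnSignal(out Outleter, sig <-chan struct{}) Outleter {
    	c := &closeOnSignal{Outleter: out}
    	go func() {
    		<-sig
    		c.once.Do(func() { _ = c.Outleter.Close() })
    	}()
    	return c
    }
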
stateOut := channel.CloseOnSignal(channel.SubOutlet(out), context.BeatDone) - p := &Prospector{ + meta := context.Meta + if len(meta) == 0 { + meta = nil + } + + p := &Input{ config: defaultConfig, cfg: cfg, harvesters: harvester.NewRegistry(), outlet: out, stateOutlet: stateOut, - states: &file.States{}, + states: file.NewStates(), done: context.Done, + meta: meta, } if err := cfg.Unpack(&p.config); err != nil { @@ -100,7 +107,7 @@ func NewProspector( } if len(p.config.Paths) == 0 { - return nil, fmt.Errorf("each prospector must have at least one path defined") + return nil, fmt.Errorf("each input must have at least one path defined") } err = p.loadStates(context.States) @@ -113,23 +120,23 @@ func NewProspector( return p, nil } -// LoadStates loads states into prospector +// LoadStates loads states into input // It goes through all states coming from the registry. Only the states which match the glob patterns of -// the prospector will be loaded and updated. All other states will not be touched. -func (p *Prospector) loadStates(states []file.State) error { - logp.Debug("prospector", "exclude_files: %s. Number of stats: %d", p.config.ExcludeFiles, len(states)) +// the input will be loaded and updated. All other states will not be touched. +func (p *Input) loadStates(states []file.State) error { + logp.Debug("input", "exclude_files: %s. Number of stats: %d", p.config.ExcludeFiles, len(states)) for _, state := range states { - // Check if state source belongs to this prospector. If yes, update the state. - if p.matchesFile(state.Source) { + // Check if state source belongs to this input. If yes, update the state. + if p.matchesFile(state.Source) && p.matchesMeta(state.Meta) { state.TTL = -1 - // In case a prospector is tried to be started with an unfinished state matching the glob pattern + // In case a input is tried to be started with an unfinished state matching the glob pattern if !state.Finished { - return fmt.Errorf("Can only start a prospector when all related states are finished: %+v", state) + return fmt.Errorf("Can only start an input when all related states are finished: %+v", state) } - // Update prospector states and send new states to registry + // Update input states and send new states to registry err := p.updateState(state) if err != nil { logp.Err("Problem putting initial state: %+v", err) @@ -138,13 +145,13 @@ func (p *Prospector) loadStates(states []file.State) error { } } - logp.Debug("prospector", "Prospector with previous states loaded: %v", p.states.Count()) + logp.Debug("input", "input with previous states loaded: %v", p.states.Count()) return nil } -// Run runs the prospector -func (p *Prospector) Run() { - logp.Debug("prospector", "Start next scan") +// Run runs the input +func (p *Input) Run() { + logp.Debug("input", "Start next scan") // TailFiles is like ignore_older = 1ns and only on startup if p.config.TailFiles { @@ -164,8 +171,9 @@ func (p *Prospector) Run() { // It is important that a first scan is run before cleanup to make sure all new states are read first if p.config.CleanInactive > 0 || p.config.CleanRemoved { beforeCount := p.states.Count() - cleanedStates := p.states.Cleanup() - logp.Debug("prospector", "Prospector states cleaned up. Before: %d, After: %d", beforeCount, beforeCount-cleanedStates) + cleanedStates, pendingClean := p.states.Cleanup() + logp.Debug("input", "input states cleaned up. Before: %d, After: %d, Pending: %d", + beforeCount, beforeCount-cleanedStates, pendingClean) } // Marking removed files to be cleaned up. 
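Illustrative aside (not part of the upstream patch): Cleanup now returns two counters, the states dropped in this pass and the ones merely waiting for their TTL to run out (the pending count logged above). The removal itself, shown earlier in this patch in States.Cleanup, is an in-place swap-with-last compaction that keeps the id-to-index map consistent. A compact sketch of that technique over an invented entry type:

    package sketch

    import "time"

    type entry struct {
    	ID      string
    	TTL     time.Duration // 0: drop now, <0: keep forever, >0: drop after TTL
    	Updated time.Time
    }

    // expire drops entries whose TTL elapsed, swapping the last element into the
    // freed slot so no extra allocation is needed. It returns the compacted slice,
    // the number removed, and the number that can still expire later.
    func expire(entries []entry, idx map[string]int, now time.Time) ([]entry, int, int) {
    	removed, pending := 0, 0
    	for i := 0; i < len(entries); {
    		e := entries[i]
    		if e.TTL == 0 || (e.TTL > 0 && now.Sub(e.Updated) > e.TTL) {
    			delete(idx, e.ID)
    			last := len(entries) - 1
    			if i != last {
    				entries[i] = entries[last]
    				idx[entries[i].ID] = i
    			}
    			entries = entries[:last]
    			removed++
    			continue
    		}
    		if e.TTL > 0 {
    			pending++
    		}
    		i++
    	}
    	return entries, removed, pending
    }
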
Cleanup happens after next scan to make sure all states are updated first @@ -176,26 +184,26 @@ func (p *Prospector) Run() { if err != nil { if os.IsNotExist(err) { p.removeState(state) - logp.Debug("prospector", "Remove state for file as file removed: %s", state.Source) + logp.Debug("input", "Remove state for file as file removed: %s", state.Source) } else { - logp.Err("Prospector state for %s was not removed: %s", state.Source, err) + logp.Err("input state for %s was not removed: %s", state.Source, err) } } else { // Check if existing source on disk and state are the same. Remove if not the case. - newState := file.NewState(stat, state.Source, p.config.Type) + newState := file.NewState(stat, state.Source, p.config.Type, p.meta) if !newState.FileStateOS.IsSame(state.FileStateOS) { p.removeState(state) - logp.Debug("prospector", "Remove state for file as file removed or renamed: %s", state.Source) + logp.Debug("input", "Remove state for file as file removed or renamed: %s", state.Source) } } } } } -func (p *Prospector) removeState(state file.State) { +func (p *Input) removeState(state file.State) { // Only clean up files where state is Finished if !state.Finished { - logp.Debug("prospector", "State for file not removed because harvester not finished: %s", state.Source) + logp.Debug("input", "State for file not removed because harvester not finished: %s", state.Source) return } @@ -208,7 +216,7 @@ func (p *Prospector) removeState(state file.State) { // getFiles returns all files which have to be harvested // All globs are expanded and then directory and excluded files are removed -func (p *Prospector) getFiles() map[string]os.FileInfo { +func (p *Input) getFiles() map[string]os.FileInfo { paths := map[string]os.FileInfo{} for _, path := range p.config.Paths { @@ -224,37 +232,37 @@ func (p *Prospector) getFiles() map[string]os.FileInfo { // check if the file is in the exclude_files list if p.isFileExcluded(file) { - logp.Debug("prospector", "Exclude file: %s", file) + logp.Debug("input", "Exclude file: %s", file) continue } // Fetch Lstat File info to detected also symlinks fileInfo, err := os.Lstat(file) if err != nil { - logp.Debug("prospector", "lstat(%s) failed: %s", file, err) + logp.Debug("input", "lstat(%s) failed: %s", file, err) continue } if fileInfo.IsDir() { - logp.Debug("prospector", "Skipping directory: %s", file) + logp.Debug("input", "Skipping directory: %s", file) continue } isSymlink := fileInfo.Mode()&os.ModeSymlink > 0 if isSymlink && !p.config.Symlinks { - logp.Debug("prospector", "File %s skipped as it is a symlink.", file) + logp.Debug("input", "File %s skipped as it is a symlink.", file) continue } // Fetch Stat file info which fetches the inode. 
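Illustrative aside (not part of the upstream patch): getFiles stats each candidate twice on purpose. Lstat describes the path itself, so symlinks can be detected and skipped unless the symlinks option is enabled, while Stat follows the link to its target, whose FileInfo is what os.SameFile later compares so a link and its target are not harvested as two separate states. A small, self-contained illustration:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	path := "/var/log/syslog" // any existing path works for the demo
    	linkInfo, err := os.Lstat(path)
    	if err != nil {
    		fmt.Println("lstat:", err)
    		return
    	}
    	fmt.Println("is symlink:", linkInfo.Mode()&os.ModeSymlink != 0)

    	targetInfo, err := os.Stat(path) // follows the link: describes the target
    	if err != nil {
    		fmt.Println("stat:", err)
    		return
    	}
    	// False when path is a symlink (two different inodes), true otherwise.
    	fmt.Println("same underlying file:", os.SameFile(linkInfo, targetInfo))
    }
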
In case of a symlink, the original inode is fetched fileInfo, err = os.Stat(file) if err != nil { - logp.Debug("prospector", "stat(%s) failed: %s", file, err) + logp.Debug("input", "stat(%s) failed: %s", file, err) continue } - // If symlink is enabled, it is checked that original is not part of same prospector - // It original is harvested by other prospector, states will potentially overwrite each other + // If symlink is enabled, it is checked that original is not part of same input + // It original is harvested by other input, states will potentially overwrite each other if p.config.Symlinks { for _, finfo := range paths { if os.SameFile(finfo, fileInfo) { @@ -271,8 +279,8 @@ func (p *Prospector) getFiles() map[string]os.FileInfo { return paths } -// matchesFile returns true in case the given filePath is part of this prospector, means matches its glob patterns -func (p *Prospector) matchesFile(filePath string) bool { +// matchesFile returns true in case the given filePath is part of this input, means matches its glob patterns +func (p *Input) matchesFile(filePath string) bool { // Path is cleaned to ensure we always compare clean paths filePath = filepath.Clean(filePath) @@ -284,7 +292,7 @@ func (p *Prospector) matchesFile(filePath string) bool { // Evaluate if glob matches filePath match, err := filepath.Match(glob, filePath) if err != nil { - logp.Debug("prospector", "Error matching glob: %s", err) + logp.Debug("input", "Error matching glob: %s", err) continue } @@ -296,6 +304,21 @@ func (p *Prospector) matchesFile(filePath string) bool { return false } +// matchesMeta returns true in case the given meta is equal to the one of this input, false if not +func (p *Input) matchesMeta(meta map[string]string) bool { + if len(meta) != len(p.meta) { + return false + } + + for k, v := range p.meta { + if meta[k] != v { + return false + } + } + + return true +} + type FileSortInfo struct { info os.FileInfo path string @@ -351,16 +374,16 @@ func getSortedFiles(scanOrder string, scanSort string, sortInfos []FileSortInfo) return sortInfos, nil } -func getFileState(path string, info os.FileInfo, p *Prospector) (file.State, error) { +func getFileState(path string, info os.FileInfo, p *Input) (file.State, error) { var err error var absolutePath string absolutePath, err = filepath.Abs(path) if err != nil { return file.State{}, fmt.Errorf("could not fetch abs path for file %s: %s", absolutePath, err) } - logp.Debug("prospector", "Check file for harvesting: %s", absolutePath) + logp.Debug("input", "Check file for harvesting: %s", absolutePath) // Create new state for comparison - newState := file.NewState(info, absolutePath, p.config.Type) + newState := file.NewState(info, absolutePath, p.config.Type, p.meta) return newState, nil } @@ -373,7 +396,7 @@ func getKeys(paths map[string]os.FileInfo) []string { } // Scan starts a scanGlob for each provided path/glob -func (p *Prospector) scan() { +func (p *Input) scan() { var sortInfos []FileSortInfo var files []string @@ -407,7 +430,7 @@ func (p *Prospector) scan() { select { case <-p.done: - logp.Info("Scan aborted because prospector stopped.") + logp.Info("Scan aborted because input stopped.") return default: } @@ -431,7 +454,7 @@ func (p *Prospector) scan() { // Decides if previous state exists if lastState.IsEmpty() { - logp.Debug("prospector", "Start harvester for new file: %s", newState.Source) + logp.Debug("input", "Start harvester for new file: %s", newState.Source) err := p.startHarvester(newState, 0) if err != nil { logp.Err("Harvester could not be 
started on new file: %s, Err: %s", newState.Source, err) @@ -443,8 +466,8 @@ func (p *Prospector) scan() { } // harvestExistingFile continues harvesting a file with a known state if needed -func (p *Prospector) harvestExistingFile(newState file.State, oldState file.State) { - logp.Debug("prospector", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset) +func (p *Input) harvestExistingFile(newState file.State, oldState file.State) { + logp.Debug("input", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset) // No harvester is running for the file, start a new harvester // It is important here that only the size is checked and not modification time, as modification time could be incorrect on windows @@ -453,7 +476,7 @@ func (p *Prospector) harvestExistingFile(newState file.State, oldState file.Stat // Resume harvesting of an old file we've stopped harvesting from // This could also be an issue with force_close_older that a new harvester is started after each scan but not needed? // One problem with comparing modTime is that it is in seconds, and scans can happen more then once a second - logp.Debug("prospector", "Resuming harvesting of file: %s, offset: %d, new size: %d", newState.Source, oldState.Offset, newState.Fileinfo.Size()) + logp.Debug("input", "Resuming harvesting of file: %s, offset: %d, new size: %d", newState.Source, oldState.Offset, newState.Fileinfo.Size()) err := p.startHarvester(newState, oldState.Offset) if err != nil { logp.Err("Harvester could not be started on existing file: %s, Err: %s", newState.Source, err) @@ -463,7 +486,7 @@ func (p *Prospector) harvestExistingFile(newState file.State, oldState file.Stat // File size was reduced -> truncated file if oldState.Finished && newState.Fileinfo.Size() < oldState.Offset { - logp.Debug("prospector", "Old file was truncated. Starting from the beginning: %s, offset: %d, new size: %d ", newState.Source, newState.Fileinfo.Size()) + logp.Debug("input", "Old file was truncated. Starting from the beginning: %s, offset: %d, new size: %d ", newState.Source, newState.Fileinfo.Size()) err := p.startHarvester(newState, 0) if err != nil { logp.Err("Harvester could not be started on truncated file: %s, Err: %s", newState.Source, err) @@ -477,10 +500,10 @@ func (p *Prospector) harvestExistingFile(newState file.State, oldState file.Stat if oldState.Source != "" && oldState.Source != newState.Source { // This does not start a new harvester as it is assume that the older harvester is still running // or no new lines were detected. It sends only an event status update to make sure the new name is persisted. 
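Illustrative aside (not part of the upstream patch): the new matchesMeta above complements matchesFile. Registry states now carry the meta map of the input that wrote them, so on reload an input only re-adopts states whose meta exactly equals its own. Map equality has to be spelled out by hand in Go; a sketch with a hypothetical "container" key:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    func sameMeta(a, b map[string]string) bool {
    	if len(a) != len(b) {
    		return false
    	}
    	for k, v := range a {
    		if b[k] != v {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(sameMeta(
    		map[string]string{"container": "nginx"}, // hypothetical meta on a stored state
    		map[string]string{"container": "nginx"}, // meta of this input: it claims the state
    	)) // true
    	fmt.Println(sameMeta(map[string]string{"container": "nginx"}, nil)) // false
    	// Unlike reflect.DeepEqual, the length-based check treats empty and nil as equal:
    	fmt.Println(sameMeta(map[string]string{}, nil))                             // true
    	fmt.Println(reflect.DeepEqual(map[string]string{}, map[string]string(nil))) // false
    }

That nil/empty equivalence lines up with updateState later in this file, which normalizes an empty Meta to nil before states are persisted.
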
- logp.Debug("prospector", "File rename was detected: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset) + logp.Debug("input", "File rename was detected: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset) if oldState.Finished { - logp.Debug("prospector", "Updating state for renamed file: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset) + logp.Debug("input", "Updating state for renamed file: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset) // Update state because of file rotation oldState.Source = newState.Source err := p.updateState(oldState) @@ -490,22 +513,22 @@ func (p *Prospector) harvestExistingFile(newState file.State, oldState file.Stat filesRenamed.Add(1) } else { - logp.Debug("prospector", "File rename detected but harvester not finished yet.") + logp.Debug("input", "File rename detected but harvester not finished yet.") } } if !oldState.Finished { // Nothing to do. Harvester is still running and file was not renamed - logp.Debug("prospector", "Harvester for file is still running: %s", newState.Source) + logp.Debug("input", "Harvester for file is still running: %s", newState.Source) } else { - logp.Debug("prospector", "File didn't change: %s", newState.Source) + logp.Debug("input", "File didn't change: %s", newState.Source) } } // handleIgnoreOlder handles states which fall under ignore older // Based on the state information it is decided if the state information has to be updated or not -func (p *Prospector) handleIgnoreOlder(lastState, newState file.State) error { - logp.Debug("prospector", "Ignore file because ignore_older reached: %s", newState.Source) +func (p *Input) handleIgnoreOlder(lastState, newState file.State) error { + logp.Debug("input", "Ignore file because ignore_older reached: %s", newState.Source) if !lastState.IsEmpty() { if !lastState.Finished { @@ -517,7 +540,7 @@ func (p *Prospector) handleIgnoreOlder(lastState, newState file.State) error { // Make sure file is not falling under clean_inactive yet if p.isCleanInactive(newState) { - logp.Debug("prospector", "Do not write state for ignore_older because clean_inactive reached") + logp.Debug("input", "Do not write state for ignore_older because clean_inactive reached") return nil } @@ -536,13 +559,13 @@ func (p *Prospector) handleIgnoreOlder(lastState, newState file.State) error { } // isFileExcluded checks if the given path should be excluded -func (p *Prospector) isFileExcluded(file string) bool { +func (p *Input) isFileExcluded(file string) bool { patterns := p.config.ExcludeFiles return len(patterns) > 0 && harvester.MatchAny(patterns, file) } // isIgnoreOlder checks if the given state reached ignore_older -func (p *Prospector) isIgnoreOlder(state file.State) bool { +func (p *Input) isIgnoreOlder(state file.State) bool { // ignore_older is disable if p.config.IgnoreOlder == 0 { return false @@ -557,7 +580,7 @@ func (p *Prospector) isIgnoreOlder(state file.State) bool { } // isCleanInactive checks if the given state false under clean_inactive -func (p *Prospector) isCleanInactive(state file.State) bool { +func (p *Input) isCleanInactive(state file.State) bool { // clean_inactive is disable if p.config.CleanInactive <= 0 { return false @@ -571,10 +594,21 @@ func (p *Prospector) isCleanInactive(state file.State) bool { return false } +// subOutletWrap returns a factory method that will wrap the passed outlet +// in a SubOutlet and memoize the result so the wrapping is done only once. 
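Illustrative aside (not part of the upstream patch): subOutletWrap below is a one-shot memoized constructor. The harvester now requests its outlet lazily through an OutletFactory, and memoizing guarantees that repeated calls return the same SubOutlet rather than wrapping p.outlet again. The closure is not goroutine-safe, which appears fine here since the factory is invoked from the harvester's Run. The shape of the idiom, with an invented type:

    package sketch

    type resource struct{ name string }

    // lazyResource builds the resource on first use and then reuses it.
    func lazyResource(build func() *resource) func() *resource {
    	var r *resource
    	return func() *resource {
    		if r == nil {
    			r = build()
    		}
    		return r
    	}
    }
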
+func subOutletWrap(outlet channel.Outleter) func() channel.Outleter { + var subOutlet channel.Outleter + return func() channel.Outleter { + if subOutlet == nil { + subOutlet = channel.SubOutlet(outlet) + } + return subOutlet + } +} + // createHarvester creates a new harvester instance from the given state -func (p *Prospector) createHarvester(state file.State, onTerminate func()) (*Harvester, error) { +func (p *Input) createHarvester(state file.State, onTerminate func()) (*Harvester, error) { // Each wraps the outlet, for closing the outlet individually - outlet := channel.SubOutlet(p.outlet) h, err := NewHarvester( p.cfg, state, @@ -582,15 +616,17 @@ func (p *Prospector) createHarvester(state file.State, onTerminate func()) (*Har func(d *util.Data) bool { return p.stateOutlet.OnEvent(d) }, - outlet, + subOutletWrap(p.outlet), ) - h.onTerminate = onTerminate + if err == nil { + h.onTerminate = onTerminate + } return h, err } // startHarvester starts a new harvester with the given offset // In case the HarvesterLimit is reached, an error is returned -func (p *Prospector) startHarvester(state file.State, offset int64) error { +func (p *Input) startHarvester(state file.State, offset int64) error { if p.numHarvesters.Inc() > p.config.HarvesterLimit && p.config.HarvesterLimit > 0 { p.numHarvesters.Dec() harvesterSkipped.Add(1) @@ -624,14 +660,18 @@ func (p *Prospector) startHarvester(state file.State, offset int64) error { return err } -// updateState updates the prospector state and forwards the event to the spooler -// All state updates done by the prospector itself are synchronous to make sure not states are overwritten -func (p *Prospector) updateState(state file.State) error { +// updateState updates the input state and forwards the event to the spooler +// All state updates done by the input itself are synchronous to make sure not states are overwritten +func (p *Input) updateState(state file.State) error { // Add ttl if cleanOlder is enabled and TTL is not already 0 if p.config.CleanInactive > 0 && state.TTL != 0 { state.TTL = p.config.CleanInactive } + if len(state.Meta) == 0 { + state.Meta = nil + } + // Update first internal state p.states.Update(state) @@ -639,21 +679,21 @@ func (p *Prospector) updateState(state file.State) error { data.SetState(state) ok := p.outlet.OnEvent(data) if !ok { - logp.Info("Prospector outlet closed") - return errors.New("prospector outlet closed") + logp.Info("input outlet closed") + return errors.New("input outlet closed") } return nil } // Wait waits for the all harvesters to complete and only then call stop -func (p *Prospector) Wait() { +func (p *Input) Wait() { p.harvesters.WaitForCompletion() p.Stop() } -// Stop stops all harvesters and then stops the prospector -func (p *Prospector) Stop() { +// Stop stops all harvesters and then stops the input +func (p *Input) Stop() { // Stop all harvesters // In case the beatDone channel is closed, this will not wait for completion // Otherwise Stop will wait until output is complete diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_other_test.go b/vendor/github.com/elastic/beats/filebeat/input/log/input_other_test.go similarity index 91% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_other_test.go rename to vendor/github.com/elastic/beats/filebeat/input/log/input_other_test.go index 813b1b70..3b8a3b72 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_other_test.go +++ 
b/vendor/github.com/elastic/beats/filebeat/input/log/input_other_test.go @@ -59,7 +59,7 @@ var matchTests = []struct { func TestMatchFile(t *testing.T) { for _, test := range matchTests { - p := Prospector{ + p := Input{ config: config{ Paths: test.paths, ExcludeFiles: test.excludeFiles, @@ -72,8 +72,8 @@ func TestMatchFile(t *testing.T) { var initStateTests = []struct { states []file.State // list of states - paths []string // prospector glob - count int // expected states in prospector + paths []string // input glob + count int // expected states in input }{ { []file.State{ @@ -123,15 +123,15 @@ var initStateTests = []struct { }, } -// TestInit checks that the correct states are in a prospector after the init phase +// TestInit checks that the correct states are in an input after the init phase // This means only the ones that match the glob and not exclude files func TestInit(t *testing.T) { for _, test := range initStateTests { - p := Prospector{ + p := Input{ config: config{ Paths: test.paths, }, - states: &file.States{}, + states: file.NewStates(), outlet: TestOutlet{}, } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_test.go b/vendor/github.com/elastic/beats/filebeat/input/log/input_test.go similarity index 61% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_test.go rename to vendor/github.com/elastic/beats/filebeat/input/log/input_test.go index 6a4c81c6..1e879941 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_test.go +++ b/vendor/github.com/elastic/beats/filebeat/input/log/input_test.go @@ -13,8 +13,8 @@ import ( "github.com/elastic/beats/libbeat/common/match" ) -func TestProspectorFileExclude(t *testing.T) { - p := Prospector{ +func TestInputFileExclude(t *testing.T) { + p := Input{ config: config{ ExcludeFiles: []match.Matcher{match.MustCompile(`\.gz$`)}, }, @@ -49,7 +49,7 @@ var cleanInactiveTests = []struct { func TestIsCleanInactive(t *testing.T) { for _, test := range cleanInactiveTests { - l := Prospector{ + l := Input{ config: config{ CleanInactive: test.cleanInactive, }, @@ -64,6 +64,61 @@ func TestIsCleanInactive(t *testing.T) { } } +func TestMatchesMeta(t *testing.T) { + tests := []struct { + Input *Input + Meta map[string]string + Result bool + }{ + { + Input: &Input{ + meta: map[string]string{ + "it": "matches", + }, + }, + Meta: map[string]string{ + "it": "matches", + }, + Result: true, + }, + { + Input: &Input{ + meta: map[string]string{ + "it": "doesnt", + "doesnt": "match", + }, + }, + Meta: map[string]string{ + "it": "doesnt", + }, + Result: false, + }, + { + Input: &Input{ + meta: map[string]string{ + "it": "doesnt", + }, + }, + Meta: map[string]string{ + "it": "doesnt", + "doesnt": "match", + }, + Result: false, + }, + { + Input: &Input{ + meta: map[string]string{}, + }, + Meta: map[string]string{}, + Result: true, + }, + } + + for _, test := range tests { + assert.Equal(t, test.Result, test.Input.matchesMeta(test.Meta)) + } +} + type TestFileInfo struct { time time.Time } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/log.go b/vendor/github.com/elastic/beats/filebeat/input/log/log.go similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/log.go rename to vendor/github.com/elastic/beats/filebeat/input/log/log.go diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_windows_test.go b/vendor/github.com/elastic/beats/filebeat/input/log/prospector_windows_test.go similarity index 98% 
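Illustrative aside (not part of the upstream patch): the matching exercised by the tests above boils down to two filters, a path must match at least one configured glob and must not match any exclude_files pattern. Filebeat uses its own match.Matcher type; the sketch below substitutes the standard library's regexp and filepath.Match just to show the flow:

    package main

    import (
    	"fmt"
    	"path/filepath"
    	"regexp"
    )

    func main() {
    	glob := "/var/log/*.log"
    	exclude := regexp.MustCompile(`\.gz$`)

    	for _, p := range []string{"/var/log/syslog.log", "/var/log/old.log.gz", "/tmp/other.log"} {
    		included, _ := filepath.Match(glob, filepath.Clean(p))
    		fmt.Printf("%-22s harvest=%v\n", p, included && !exclude.MatchString(p))
    	}
    	// Only /var/log/syslog.log is harvested; the others are unmatched or excluded.
    }
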
rename from vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_windows_test.go rename to vendor/github.com/elastic/beats/filebeat/input/log/prospector_windows_test.go index b990125a..8971c8f9 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/log/prospector_windows_test.go +++ b/vendor/github.com/elastic/beats/filebeat/input/log/prospector_windows_test.go @@ -59,7 +59,7 @@ var matchTestsWindows = []struct { func TestMatchFileWindows(t *testing.T) { for _, test := range matchTestsWindows { - p := Prospector{ + p := Input{ config: config{ Paths: test.paths, ExcludeFiles: test.excludeFiles, diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/log/stdin.go b/vendor/github.com/elastic/beats/filebeat/input/log/stdin.go similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/log/stdin.go rename to vendor/github.com/elastic/beats/filebeat/input/log/stdin.go diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/plugin.go b/vendor/github.com/elastic/beats/filebeat/input/plugin.go similarity index 59% rename from vendor/github.com/elastic/beats/filebeat/prospector/plugin.go rename to vendor/github.com/elastic/beats/filebeat/input/plugin.go index 04677360..251c8baa 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/plugin.go +++ b/vendor/github.com/elastic/beats/filebeat/input/plugin.go @@ -1,4 +1,4 @@ -package prospector +package input import ( "errors" @@ -6,18 +6,18 @@ import ( "github.com/elastic/beats/libbeat/plugin" ) -type prospectorPlugin struct { +type inputPlugin struct { name string factory Factory } -const pluginKey = "filebeat.prospector" +const pluginKey = "filebeat.input" func init() { plugin.MustRegisterLoader(pluginKey, func(ifc interface{}) error { - p, ok := ifc.(prospectorPlugin) + p, ok := ifc.(inputPlugin) if !ok { - return errors.New("plugin does not match filebeat prospector plugin type") + return errors.New("plugin does not match filebeat input plugin type") } if p.factory != nil { @@ -34,5 +34,5 @@ func Plugin( module string, factory Factory, ) map[string][]interface{} { - return plugin.MakePlugin(pluginKey, prospectorPlugin{module, factory}) + return plugin.MakePlugin(pluginKey, inputPlugin{module, factory}) } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/redis/_meta/Dockerfile b/vendor/github.com/elastic/beats/filebeat/input/redis/_meta/Dockerfile similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/redis/_meta/Dockerfile rename to vendor/github.com/elastic/beats/filebeat/input/redis/_meta/Dockerfile diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/redis/_meta/env b/vendor/github.com/elastic/beats/filebeat/input/redis/_meta/env similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/redis/_meta/env rename to vendor/github.com/elastic/beats/filebeat/input/redis/_meta/env diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/redis/config.go b/vendor/github.com/elastic/beats/filebeat/input/redis/config.go similarity index 99% rename from vendor/github.com/elastic/beats/filebeat/prospector/redis/config.go rename to vendor/github.com/elastic/beats/filebeat/input/redis/config.go index 5b43551d..11874702 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/redis/config.go +++ b/vendor/github.com/elastic/beats/filebeat/input/redis/config.go @@ -7,7 +7,6 @@ import ( ) var defaultConfig = config{ - ForwarderConfig: harvester.ForwarderConfig{ Type: "redis", }, diff --git 
a/vendor/github.com/elastic/beats/filebeat/prospector/redis/doc.go b/vendor/github.com/elastic/beats/filebeat/input/redis/doc.go similarity index 86% rename from vendor/github.com/elastic/beats/filebeat/prospector/redis/doc.go rename to vendor/github.com/elastic/beats/filebeat/input/redis/doc.go index 7487c48c..1f1082e0 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/redis/doc.go +++ b/vendor/github.com/elastic/beats/filebeat/input/redis/doc.go @@ -1,4 +1,4 @@ -// Package redis package contains prospector and harvester to read the redis slow log +// Package redis package contains input and harvester to read the redis slow log // // The redis slow log is stored in memory. The slow log can be activate on the redis command line as following: // diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/redis/harvester.go b/vendor/github.com/elastic/beats/filebeat/input/redis/harvester.go similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/prospector/redis/harvester.go rename to vendor/github.com/elastic/beats/filebeat/input/redis/harvester.go diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/redis/prospector.go b/vendor/github.com/elastic/beats/filebeat/input/redis/input.go similarity index 72% rename from vendor/github.com/elastic/beats/filebeat/prospector/redis/prospector.go rename to vendor/github.com/elastic/beats/filebeat/input/redis/input.go index 751a666b..dad011fe 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/redis/prospector.go +++ b/vendor/github.com/elastic/beats/filebeat/input/redis/input.go @@ -7,22 +7,22 @@ import ( "github.com/elastic/beats/filebeat/channel" "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/input" "github.com/elastic/beats/filebeat/input/file" - "github.com/elastic/beats/filebeat/prospector" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" ) func init() { - err := prospector.Register("redis", NewProspector) + err := input.Register("redis", NewInput) if err != nil { panic(err) } } -// Prospector is a prospector for redis -type Prospector struct { +// Input is a input for redis +type Input struct { started bool outlet channel.Outleter config config @@ -30,9 +30,9 @@ type Prospector struct { registry *harvester.Registry } -// NewProspector creates a new redis prospector -func NewProspector(cfg *common.Config, outletFactory channel.Factory, context prospector.Context) (prospector.Prospectorer, error) { - cfgwarn.Experimental("Redis slowlog prospector is enabled.") +// NewInput creates a new redis input +func NewInput(cfg *common.Config, outletFactory channel.Factory, context input.Context) (input.Input, error) { + cfgwarn.Experimental("Redis slowlog input is enabled.") config := defaultConfig @@ -46,7 +46,7 @@ func NewProspector(cfg *common.Config, outletFactory channel.Factory, context pr return nil, err } - p := &Prospector{ + p := &Input{ started: false, outlet: outlet, config: config, @@ -58,13 +58,13 @@ func NewProspector(cfg *common.Config, outletFactory channel.Factory, context pr } // LoadStates loads the states -func (p *Prospector) LoadStates(states []file.State) error { +func (p *Input) LoadStates(states []file.State) error { return nil } -// Run runs the prospector -func (p *Prospector) Run() { - logp.Debug("redis", "Run redis prospector with hosts: %+v", p.config.Hosts) +// Run runs the input +func (p *Input) Run() { + logp.Debug("redis", "Run redis input with 
hosts: %+v", p.config.Hosts) if len(p.config.Hosts) == 0 { logp.Err("No redis hosts configured") @@ -85,14 +85,14 @@ func (p *Prospector) Run() { } } -// Stop stopps the prospector and all its harvesters -func (p *Prospector) Stop() { +// Stop stops the input and all its harvesters +func (p *Input) Stop() { p.registry.Stop() p.outlet.Close() } -// Wait waits for the propsector to be completed. Not implemented. -func (p *Prospector) Wait() {} +// Wait waits for the input to be completed. Not implemented. +func (p *Input) Wait() {} // CreatePool creates a redis connection pool // NOTE: This code is copied from the redis pool handling in metricbeat diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/registry.go b/vendor/github.com/elastic/beats/filebeat/input/registry.go similarity index 55% rename from vendor/github.com/elastic/beats/filebeat/prospector/registry.go rename to vendor/github.com/elastic/beats/filebeat/input/registry.go index e1192885..c4c11577 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/registry.go +++ b/vendor/github.com/elastic/beats/filebeat/input/registry.go @@ -1,4 +1,4 @@ -package prospector +package input import ( "fmt" @@ -14,33 +14,34 @@ type Context struct { Done chan struct{} BeatDone chan struct{} DynamicFields *common.MapStrPointer + Meta map[string]string } -type Factory func(config *common.Config, outletFactory channel.Factory, context Context) (Prospectorer, error) +type Factory = func(config *common.Config, outletFactory channel.Factory, context Context) (Input, error) var registry = make(map[string]Factory) func Register(name string, factory Factory) error { - logp.Info("Registering prospector factory") + logp.Info("Registering input factory") if name == "" { - return fmt.Errorf("Error registering prospector: name cannot be empty") + return fmt.Errorf("Error registering input: name cannot be empty") } if factory == nil { - return fmt.Errorf("Error registering prospector '%v': factory cannot be empty", name) + return fmt.Errorf("Error registering input '%v': factory cannot be empty", name) } if _, exists := registry[name]; exists { - return fmt.Errorf("Error registering prospector '%v': already registered", name) + return fmt.Errorf("Error registering input '%v': already registered", name) } registry[name] = factory - logp.Info("Successfully registered prospector") + logp.Info("Successfully registered input") return nil } func GetFactory(name string) (Factory, error) { if _, exists := registry[name]; !exists { - return nil, fmt.Errorf("Error creating prospector. No such prospector type exist: '%v'", name) + return nil, fmt.Errorf("Error creating input. 
No such input type exist: '%v'", name) } return registry[name], nil } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/registry_test.go b/vendor/github.com/elastic/beats/filebeat/input/registry_test.go similarity index 66% rename from vendor/github.com/elastic/beats/filebeat/prospector/registry_test.go rename to vendor/github.com/elastic/beats/filebeat/input/registry_test.go index 52179b5e..a1247f72 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/registry_test.go +++ b/vendor/github.com/elastic/beats/filebeat/input/registry_test.go @@ -1,4 +1,4 @@ -package prospector +package input import ( "testing" @@ -9,21 +9,21 @@ import ( "github.com/elastic/beats/libbeat/common" ) -var fakeFactory = func(config *common.Config, outletFactory channel.Factory, context Context) (Prospectorer, error) { +var fakeFactory = func(config *common.Config, outletFactory channel.Factory, context Context) (Input, error) { return nil, nil } func TestAddFactoryEmptyName(t *testing.T) { err := Register("", nil) if assert.Error(t, err) { - assert.Equal(t, "Error registering prospector: name cannot be empty", err.Error()) + assert.Equal(t, "Error registering input: name cannot be empty", err.Error()) } } func TestAddNilFactory(t *testing.T) { err := Register("name", nil) if assert.Error(t, err) { - assert.Equal(t, "Error registering prospector 'name': factory cannot be empty", err.Error()) + assert.Equal(t, "Error registering input 'name': factory cannot be empty", err.Error()) } } @@ -36,7 +36,7 @@ func TestAddFactoryTwice(t *testing.T) { err = Register("name", fakeFactory) if assert.Error(t, err) { - assert.Equal(t, "Error registering prospector 'name': already registered", err.Error()) + assert.Equal(t, "Error registering input 'name': already registered", err.Error()) } } @@ -52,6 +52,6 @@ func TestGetNonExistentFactory(t *testing.T) { f, err := GetFactory("noSuchFactory") assert.Nil(t, f) if assert.Error(t, err) { - assert.Equal(t, "Error creating prospector. No such prospector type exist: 'noSuchFactory'", err.Error()) + assert.Equal(t, "Error creating input. 
No such input type exist: 'noSuchFactory'", err.Error()) } } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/runnerfactory.go b/vendor/github.com/elastic/beats/filebeat/input/runnerfactory.go similarity index 85% rename from vendor/github.com/elastic/beats/filebeat/prospector/runnerfactory.go rename to vendor/github.com/elastic/beats/filebeat/input/runnerfactory.go index 641de1fa..ae557f8e 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/runnerfactory.go +++ b/vendor/github.com/elastic/beats/filebeat/input/runnerfactory.go @@ -1,4 +1,4 @@ -package prospector +package input import ( "github.com/elastic/beats/filebeat/channel" @@ -23,11 +23,11 @@ func NewRunnerFactory(outlet channel.Factory, registrar *registrar.Registrar, be } } -// Create creates a prospector based on a config +// Create creates a input based on a config func (r *RunnerFactory) Create(c *common.Config, meta *common.MapStrPointer) (cfgfile.Runner, error) { p, err := New(c, r.outlet, r.beatDone, r.registrar.GetStates(), meta) if err != nil { - // In case of error with loading state, prospector is still returned + // In case of error with loading state, input is still returned return p, err } diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/stdin/prospector.go b/vendor/github.com/elastic/beats/filebeat/input/stdin/input.go similarity index 63% rename from vendor/github.com/elastic/beats/filebeat/prospector/stdin/prospector.go rename to vendor/github.com/elastic/beats/filebeat/input/stdin/input.go index a1e81ef5..025eb064 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/stdin/prospector.go +++ b/vendor/github.com/elastic/beats/filebeat/input/stdin/input.go @@ -5,22 +5,22 @@ import ( "github.com/elastic/beats/filebeat/channel" "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/input" "github.com/elastic/beats/filebeat/input/file" - "github.com/elastic/beats/filebeat/prospector" - "github.com/elastic/beats/filebeat/prospector/log" + "github.com/elastic/beats/filebeat/input/log" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" ) func init() { - err := prospector.Register("stdin", NewProspector) + err := input.Register("stdin", NewInput) if err != nil { panic(err) } } -// Prospector is a prospector for stdin -type Prospector struct { +// Input is an input for stdin +type Input struct { harvester *log.Harvester started bool cfg *common.Config @@ -28,15 +28,15 @@ type Prospector struct { registry *harvester.Registry } -// NewStdin creates a new stdin prospector -// This prospector contains one harvester which is reading from stdin -func NewProspector(cfg *common.Config, outlet channel.Factory, context prospector.Context) (prospector.Prospectorer, error) { +// NewInput creates a new stdin input +// This input contains one harvester which is reading from stdin +func NewInput(cfg *common.Config, outlet channel.Factory, context input.Context) (input.Input, error) { out, err := outlet(cfg, context.DynamicFields) if err != nil { return nil, err } - p := &Prospector{ + p := &Input{ started: false, cfg: cfg, outlet: out, @@ -51,8 +51,8 @@ func NewProspector(cfg *common.Config, outlet channel.Factory, context prospecto return p, nil } -// Run runs the prospector -func (p *Prospector) Run() { +// Run runs the input +func (p *Input) Run() { // Make sure stdin harvester is only started once if !p.started { err := p.harvester.Setup() @@ -68,21 +68,23 @@ func (p *Prospector) Run() { } // createHarvester creates a new 
harvester instance from the given state -func (p *Prospector) createHarvester(state file.State) (*log.Harvester, error) { +func (p *Input) createHarvester(state file.State) (*log.Harvester, error) { // Each harvester gets its own copy of the outlet h, err := log.NewHarvester( p.cfg, state, nil, nil, - p.outlet, + func() channel.Outleter { + return p.outlet + }, ) return h, err } -// Wait waits for completion of the prospector. -func (p *Prospector) Wait() {} +// Wait waits for completion of the input. +func (p *Input) Wait() {} -// Stop stops the prospector. -func (p *Prospector) Stop() { +// Stop stops the input +func (p *Input) Stop() { p.outlet.Close() } diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/config.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/config.go new file mode 100644 index 00000000..1a3b12a1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/config.go @@ -0,0 +1,60 @@ +package syslog + +import ( + "fmt" + "time" + + "github.com/dustin/go-humanize" + + "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/filebeat/inputsource/tcp" + "github.com/elastic/beats/filebeat/inputsource/udp" + "github.com/elastic/beats/libbeat/common" +) + +type config struct { + harvester.ForwarderConfig `config:",inline"` + Protocol common.ConfigNamespace `config:"protocol"` +} + +var defaultConfig = config{ + ForwarderConfig: harvester.ForwarderConfig{ + Type: "syslog", + }, +} + +var defaultTCP = tcp.Config{ + LineDelimiter: "\n", + Timeout: time.Minute * 5, + MaxMessageSize: 20 * humanize.MiByte, +} + +var defaultUDP = udp.Config{ + MaxMessageSize: 10 * humanize.KiByte, + Timeout: time.Minute * 5, +} + +func factory( + cb inputsource.NetworkFunc, + config common.ConfigNamespace, +) (inputsource.Network, error) { + n, cfg := config.Name(), config.Config() + + switch n { + case tcp.Name: + config := defaultTCP + if err := cfg.Unpack(&config); err != nil { + return nil, err + } + return tcp.New(&config, cb) + case udp.Name: + config := defaultUDP + if err := cfg.Unpack(&config); err != nil { + return nil, err + } + return udp.New(&config, cb), nil + default: + return nil, fmt.Errorf("you must choose between TCP or UDP") + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/event.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/event.go new file mode 100644 index 00000000..cee05988 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/event.go @@ -0,0 +1,312 @@ +package syslog + +import ( + "math" + "time" +) + +const severityMask = 7 +const facilityShift = 3 + +var month = map[string]time.Month{ + "Jan": time.January, + "Feb": time.February, + "Mar": time.March, + "Apr": time.April, + "May": time.May, + "Jun": time.June, + "Jul": time.July, + "Aug": time.August, + "Sep": time.September, + "Oct": time.October, + "Nov": time.November, + "Dec": time.December, +} + +var monthIndexed = []time.Month{ + 0, + time.January, + time.February, + time.March, + time.April, + time.May, + time.June, + time.July, + time.August, + time.September, + time.October, + time.November, + time.December, +} + +// event is a parsed syslog event, validation of the format is done at the parser level. 
+type event struct { + message string + hostname string //x + priority int + program string //x + pid int + month time.Month + day int + hour int + minute int + second int + nanosecond int + year int + loc *time.Location +} + +// newEvent() return a new event. +func newEvent() *event { + return &event{ + priority: -1, + pid: -1, + month: -1, + day: -1, + hour: -1, + minute: -1, + second: -1, + year: time.Now().Year(), + } +} + +// SetTimeZone set the timezone offset from the string. +func (s *event) SetTimeZone(b []byte) { + // We assume that we are in utc and ignore any other bytes after. + // This can be followed by others bytes +00, +00:00 or +0000. + if b[0] == 'Z' || b[0] == 'z' { + s.loc = time.UTC + return + } + + d := 1 + if b[0] == '-' { + d = -1 + } + + // +00 +00:00 or +0000 + var h, m int + switch len(b[1:]) { + case 2: + h = int(time.Hour * time.Duration(bytesToInt(skipLeadZero(b[1:])))) + s.loc = time.FixedZone("", d*h) + case 4: + h = int(time.Hour * time.Duration(bytesToInt(skipLeadZero(b[1:3])))) + m = int(time.Minute * time.Duration(bytesToInt(skipLeadZero(b[3:5])))) + s.loc = time.FixedZone("", d*(h+m)) + case 5: + h = int(time.Hour * time.Duration(bytesToInt(skipLeadZero(b[1:3])))) + m = int(time.Minute * time.Duration(bytesToInt(skipLeadZero(b[4:6])))) + s.loc = time.FixedZone("", d*(h+m)) + } +} + +// SetMonthNumeric sets the month with a number. +func (s *event) SetMonthNumeric(b []byte) { + s.month = monthIndexed[bytesToInt(skipLeadZero(b))] +} + +// SetMonth sets the month. +func (s *event) SetMonth(b []byte) { + var k string + if len(b) > 3 { + k = string(b[0:3]) + } else { + k = string(b) + } + v, ok := month[k] + if ok { + s.month = v + } +} + +// Month returns the month. +func (s *event) Month() time.Month { + return s.month +} + +// SetDay sets the day as. +func (s *event) SetDay(b []byte) { + s.day = bytesToInt(skipLeadZero(b)) +} + +// Day returns the day. +func (s *event) Day() int { + return s.day +} + +// SetHour sets the hour. +func (s *event) SetHour(b []byte) { + s.hour = bytesToInt(skipLeadZero(b)) +} + +// Hour returns the hour. +func (s *event) Hour() int { + return s.hour +} + +// SetMinute sets the minute. +func (s *event) SetMinute(b []byte) { + s.minute = bytesToInt(skipLeadZero(b)) +} + +// Minute return the minutes. +func (s *event) Minute() int { + return s.minute +} + +// SetSecond sets the second. +func (s *event) SetSecond(b []byte) { + s.second = bytesToInt(skipLeadZero(b)) +} + +// Second returns the second. +func (s *event) Second() int { + return s.second +} + +// SetYear sets the current year. +func (s *event) SetYear(b []byte) { + s.year = bytesToInt(b) +} + +// Year returns the current year, since syslog events don't include that. +func (s *event) Year() int { + return s.year +} + +// SetMessage sets the message. +func (s *event) SetMessage(b []byte) { + s.message = string(b) +} + +// Message returns the message. +func (s *event) Message() string { + return s.message +} + +// SetPriority sets the priority. +func (s *event) SetPriority(priority []byte) { + s.priority = bytesToInt(priority) +} + +// Priority returns the priority. +func (s *event) Priority() int { + return s.priority +} + +// HasPriority returns if the priority was in original event. +func (s *event) HasPriority() bool { + return s.priority > 0 +} + +// Severity returns the severity, will return -1 if priority is not set. 
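Illustrative aside (not part of the upstream patch): a syslog PRI value encodes facility and severity as facility*8 + severity, so the two accessors that follow only need the mask and shift defined near the top of this file (severityMask is 7, facilityShift is 3). For example, the PRI 13 used in the tests later in this patch decomposes into severity 5 and facility 1:

    package main

    import "fmt"

    func main() {
    	pri := 13
    	severity := pri & 7  // 13 = 0b1101 -> low three bits: 5
    	facility := pri >> 3 // remaining bits: 1
    	fmt.Println(severity, facility) // 5 1
    }
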
+func (s *event) Severity() int { + if !s.HasPriority() { + return -1 + } + return s.Priority() & severityMask +} + +// Facility returns the facility, will return -1 if priority is not set. +func (s *event) Facility() int { + if !s.HasPriority() { + return -1 + } + return s.Priority() >> facilityShift +} + +// SetHostname sets the hostname. +func (s *event) SetHostname(b []byte) { + s.hostname = string(b) +} + +// Hostname returns the hostname. +func (s *event) Hostname() string { + return string(s.hostname) +} + +// SetProgram sets the programs as a byte slice. +func (s *event) SetProgram(b []byte) { + s.program = string(b) +} + +// Program returns the program name. +func (s *event) Program() string { + return s.program +} + +func (s *event) SetPid(b []byte) { + s.pid = bytesToInt(b) +} + +// Pid returns the pid. +func (s *event) Pid() int { + return s.pid +} + +// HasPid returns true if a pid is set. +func (s *event) HasPid() bool { + return s.pid > 0 +} + +// SetNanoSecond sets the nanosecond. +func (s *event) SetNanosecond(b []byte) { + // We assume that we receive a byte array representing a nanosecond, this might not be + // always the case, so we have to pad it. + if len(b) < 7 { + s.nanosecond = bytesToInt(skipLeadZero(b)) * int(math.Pow10((7 - len(b)))) + } else { + s.nanosecond = bytesToInt(skipLeadZero(b)) + } +} + +// NanoSecond returns the nanosecond. +func (s *event) Nanosecond() int { + return s.nanosecond +} + +// Timestamp return the timestamp in UTC. +func (s *event) Timestamp(timezone *time.Location) time.Time { + var t *time.Location + if s.loc == nil { + t = timezone + } else { + t = s.loc + } + + return time.Date( + s.Year(), + s.Month(), + s.Day(), + s.Hour(), + s.Minute(), + s.Second(), + s.Nanosecond(), + t, + ).UTC() +} + +// IsValid returns true if the date and the message are present. +func (s *event) IsValid() bool { + return s.day != -1 && s.hour != -1 && s.minute != -1 && s.second != -1 && s.message != "" +} + +// BytesToInt takes a variable length of bytes and assume ascii chars and convert it to int, this is +// a simplified implementation of strconv.Atoi's fast path without error handling and remove the +// need to convert the byte array to string, we also assume that any errors are taken care at +// the parsing level. 
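+// For example, bytesToInt([]byte("123")) accumulates 1*100 + 2*10 + 3 = 123,
+// each ASCII digit contributing its value through x - '0'.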
+func bytesToInt(b []byte) int { + var i int + for _, x := range b { + i = i*10 + int(x-'0') + } + return i +} + +func skipLeadZero(b []byte) []byte { + if len(b) > 1 && b[0] == '0' { + return b[1:len(b)] + } + return b +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/event_test.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/event_test.go new file mode 100644 index 00000000..25a58253 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/event_test.go @@ -0,0 +1,91 @@ +package syslog + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestSeverity(t *testing.T) { + e := newEvent() + e.SetPriority([]byte("13")) + assert.Equal(t, 5, e.Severity()) +} + +func TestFacility(t *testing.T) { + e := newEvent() + e.SetPriority([]byte("13")) + assert.Equal(t, 1, e.Facility()) +} + +func TestHasPriority(t *testing.T) { + e := newEvent() + e.SetPriority([]byte("13")) + assert.True(t, e.HasPriority()) + assert.Equal(t, 13, e.Priority()) + assert.Equal(t, 5, e.Severity()) + assert.Equal(t, 1, e.Facility()) +} + +func TestNoPrioritySet(t *testing.T) { + e := newEvent() + assert.False(t, e.HasPriority()) + assert.Equal(t, -1, e.Priority()) + assert.Equal(t, -1, e.Severity()) + assert.Equal(t, -1, e.Facility()) +} + +func TestHasPid(t *testing.T) { + e := newEvent() + assert.False(t, e.HasPid()) + e.SetPid([]byte(strconv.Itoa(20))) + assert.True(t, e.HasPid()) +} + +func TestDateParsing(t *testing.T) { + now := time.Now() + e := newEvent() + e.SetDay(itb(now.Day())) + e.SetMonth([]byte(now.Month().String())) + e.SetHour(itb(now.Hour())) + e.SetMinute(itb(now.Minute())) + e.SetSecond(itb(now.Second())) + e.SetNanosecond(itb(now.Nanosecond())) + new := e.Timestamp(time.Local) + assert.Equal(t, now.UTC(), new) +} + +func TestIsValid(t *testing.T) { + e := newEvent() + assert.False(t, e.IsValid()) + + now := time.Now() + + e.SetDay(itb(now.Day())) + assert.False(t, e.IsValid()) + + e.SetMonth([]byte(now.Month().String())) + assert.False(t, e.IsValid()) + + e.SetHour(itb(now.Hour())) + assert.False(t, e.IsValid()) + + e.SetMinute(itb(now.Minute())) + assert.False(t, e.IsValid()) + + e.SetSecond(itb(now.Second())) + assert.False(t, e.IsValid()) + + e.SetMessage([]byte("hello world")) + assert.True(t, e.IsValid()) +} + +func itb(i int) []byte { + if i < 10 { + return []byte(fmt.Sprintf("0%d", i)) + } + return []byte(strconv.Itoa(i)) +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/input.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/input.go new file mode 100644 index 00000000..bdff0c8a --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/input.go @@ -0,0 +1,241 @@ +package syslog + +import ( + "strings" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/elastic/beats/filebeat/channel" + "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/input" + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/filebeat/util" + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" +) + +// Parser is generated from a ragel state machine using the following command: +//go:generate ragel -Z -G2 parser.rl -o parser.go + +// Severity and Facility are derived from the priority, theses are the human readable terms +// defined in https://tools.ietf.org/html/rfc3164#section-4.1.1. 
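+// The numeric value is used as an index into the label slices below through
+// mapValueToName, so an out-of-range value simply produces no label.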
+// +// Example: +// 2 => "Critial" +type mapper []string + +var ( + severityLabels = mapper{ + "Emergency", + "Alert", + "Critical", + "Error", + "Warning", + "Notice", + "Informational", + "Debug", + } + + facilityLabels = mapper{ + "kernel", + "user-level", + "mail", + "system", + "security/authorization", + "syslogd", + "line printer", + "network news", + "UUCP", + "clock", + "security/authorization", + "FTP", + "NTP", + "log audit", + "log alert", + "clock", + "local0", + "local1", + "local2", + "local3", + "local4", + "local5", + "local6", + "local7", + } +) + +func init() { + err := input.Register("syslog", NewInput) + if err != nil { + panic(err) + } +} + +// Input define a syslog input +type Input struct { + sync.Mutex + started bool + outlet channel.Outleter + server inputsource.Network + config *config + log *logp.Logger +} + +// NewInput creates a new syslog input +func NewInput( + cfg *common.Config, + outlet channel.Factory, + context input.Context, +) (input.Input, error) { + cfgwarn.Experimental("Syslog input type is used") + + log := logp.NewLogger("syslog") + + out, err := outlet(cfg, context.DynamicFields) + if err != nil { + return nil, err + } + + config := defaultConfig + if err = cfg.Unpack(&config); err != nil { + return nil, err + } + + forwarder := harvester.NewForwarder(out) + cb := func(data []byte, metadata inputsource.NetworkMetadata) { + ev := newEvent() + Parse(data, ev) + var d *util.Data + if !ev.IsValid() { + log.Errorw("can't not parse event as syslog rfc3164", "message", string(data)) + // On error revert to the raw bytes content, we need a better way to communicate this kind of + // error upstream this should be a global effort. + d = &util.Data{ + Event: beat.Event{ + Timestamp: time.Now(), + Meta: common.MapStr{ + "truncated": metadata.Truncated, + }, + Fields: common.MapStr{ + "message": string(data), + }, + }, + } + } else { + event := createEvent(ev, metadata, time.Local, log) + d = &util.Data{Event: *event} + } + + forwarder.Send(d) + } + + server, err := factory(cb, config.Protocol) + if err != nil { + return nil, err + } + + return &Input{ + outlet: out, + started: false, + server: server, + config: &config, + log: log, + }, nil +} + +// Run starts listening for Syslog events over the network. +func (p *Input) Run() { + p.Lock() + defer p.Unlock() + + if !p.started { + p.log.Infow("Starting Syslog input", "protocol", p.config.Protocol.Name()) + err := p.server.Start() + if err != nil { + p.log.Error("Error starting the server", "error", err) + } + p.started = true + } +} + +// Stop stops the syslog input. +func (p *Input) Stop() { + defer p.outlet.Close() + p.Lock() + defer p.Unlock() + + p.log.Info("Stopping Syslog input") + p.server.Stop() + p.started = false +} + +// Wait stops the syslog input. 
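+// Closing the outlet and stopping the network server is all the shutdown work
+// this input has, so Wait simply delegates to Stop.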
+func (p *Input) Wait() { + p.Stop() +} + +func createEvent(ev *event, metadata inputsource.NetworkMetadata, timezone *time.Location, log *logp.Logger) *beat.Event { + f := common.MapStr{ + "message": strings.TrimRight(ev.Message(), "\n"), + "source": metadata.RemoteAddr.String(), + } + + syslog := common.MapStr{} + event := common.MapStr{} + process := common.MapStr{} + + if ev.Hostname() != "" { + f["hostname"] = ev.Hostname() + } + + if ev.HasPid() { + process["pid"] = ev.Pid() + } + + if ev.Program() != "" { + process["program"] = ev.Program() + } + + if ev.HasPriority() { + syslog["priority"] = ev.Priority() + + event["severity"] = ev.Severity() + v, err := mapValueToName(ev.Severity(), severityLabels) + if err != nil { + log.Debugw("could not find severity label", "error", err) + } else { + syslog["severity_label"] = v + } + + syslog["facility"] = ev.Facility() + v, err = mapValueToName(ev.Facility(), facilityLabels) + if err != nil { + log.Debugw("could not find facility label", "error", err) + } else { + syslog["facility_label"] = v + } + } + + f["syslog"] = syslog + f["event"] = event + f["process"] = process + + return &beat.Event{ + Timestamp: ev.Timestamp(timezone), + Meta: common.MapStr{ + "truncated": metadata.Truncated, + }, + Fields: f, + } +} + +func mapValueToName(v int, m mapper) (string, error) { + if v < 0 || v >= len(m) { + return "", errors.Errorf("value out of bound: %d", v) + } + return m[v], nil +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/input_test.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/input_test.go new file mode 100644 index 00000000..be58cdab --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/input_test.go @@ -0,0 +1,157 @@ +package syslog + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +func TestWhenPriorityIsSet(t *testing.T) { + e := newEvent() + e.SetPriority([]byte("13")) + e.SetMessage([]byte("hello world")) + e.SetHostname([]byte("wopr")) + e.SetPid([]byte("123")) + + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + + expected := common.MapStr{ + "source": "127.0.0.1", + "message": "hello world", + "hostname": "wopr", + "process": common.MapStr{ + "pid": 123, + }, + "event": common.MapStr{ + "severity": 5, + }, + "syslog": common.MapStr{ + "facility": 1, + "severity_label": "Notice", + "facility_label": "user-level", + "priority": 13, + }, + } + + assert.Equal(t, expected, event.Fields) +} + +func TestWhenPriorityIsNotSet(t *testing.T) { + e := newEvent() + e.SetMessage([]byte("hello world")) + e.SetHostname([]byte("wopr")) + e.SetPid([]byte("123")) + + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + expected := common.MapStr{ + "source": "127.0.0.1", + "message": "hello world", + "hostname": "wopr", + "process": common.MapStr{ + "pid": 123, + }, + "event": common.MapStr{}, + "syslog": common.MapStr{}, + } + + assert.Equal(t, expected, event.Fields) +} + +func TestPid(t *testing.T) { + t.Run("is set", func(t *testing.T) { + e := newEvent() + e.SetMessage([]byte("hello world")) + e.SetPid([]byte("123")) + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + v, err := event.GetValue("process") + if !assert.NoError(t, err) { + return + } + assert.Equal(t, common.MapStr{"pid": 123}, v) + }) + + 
t.Run("is not set", func(t *testing.T) { + e := newEvent() + e.SetMessage([]byte("hello world")) + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + + v, err := event.GetValue("process") + if !assert.NoError(t, err) { + return + } + assert.Equal(t, common.MapStr{}, v) + }) +} + +func TestHostname(t *testing.T) { + t.Run("is set", func(t *testing.T) { + e := newEvent() + e.SetMessage([]byte("hello world")) + e.SetHostname([]byte("wopr")) + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + v, err := event.GetValue("hostname") + if !assert.NoError(t, err) { + return + } + assert.Equal(t, "wopr", v) + }) + + t.Run("is not set", func(t *testing.T) { + e := newEvent() + e.SetMessage([]byte("hello world")) + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + + _, err := event.GetValue("hostname") + if !assert.Error(t, err) { + return + } + }) +} + +func TestProgram(t *testing.T) { + t.Run("is set", func(t *testing.T) { + e := newEvent() + e.SetMessage([]byte("hello world")) + e.SetProgram([]byte("sudo")) + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + v, err := event.GetValue("process") + if !assert.NoError(t, err) { + return + } + assert.Equal(t, common.MapStr{"program": "sudo"}, v) + }) + + t.Run("is not set", func(t *testing.T) { + e := newEvent() + e.SetMessage([]byte("hello world")) + m := dummyMetadata() + event := createEvent(e, m, time.Local, logp.NewLogger("syslog")) + + v, err := event.GetValue("process") + if !assert.NoError(t, err) { + return + } + + assert.Equal(t, common.MapStr{}, v) + }) +} + +func dummyMetadata() inputsource.NetworkMetadata { + ip := "127.0.0.1" + parsedIP := net.ParseIP(ip) + addr := &net.IPAddr{IP: parsedIP, Zone: ""} + return inputsource.NetworkMetadata{RemoteAddr: addr} +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/parser.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/parser.go new file mode 100644 index 00000000..179fcc02 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/parser.go @@ -0,0 +1,2207 @@ +//line parser.rl:1 + +// Code generated by ragel DO NOT EDIT. +package syslog + +//line parser.go:8 +const syslog_start int = 0 +const syslog_first_final int = 1 +const syslog_error int = -1 + +const syslog_en_main int = 0 + +//line parser.rl:9 + +// syslog +//<34>Oct 11 22:14:15 wopr su: 'su root' failed for foobar +//<13>Feb 5 17:32:18 10.0.0.99 Use the quad dmg. 
+func Parse(data []byte, event *event) { + var p, cs int + pe := len(data) + tok := 0 + eof := len(data) + + //line parser.go:28 + { + cs = syslog_start + } + + //line parser.go:33 + { + if (p) == (pe) { + goto _test_eof + } + switch cs { + case 0: + goto st_case_0 + case 1: + goto st_case_1 + case 2: + goto st_case_2 + case 3: + goto st_case_3 + case 4: + goto st_case_4 + case 5: + goto st_case_5 + case 6: + goto st_case_6 + case 7: + goto st_case_7 + case 8: + goto st_case_8 + case 9: + goto st_case_9 + case 10: + goto st_case_10 + case 11: + goto st_case_11 + case 12: + goto st_case_12 + case 13: + goto st_case_13 + case 14: + goto st_case_14 + case 15: + goto st_case_15 + case 16: + goto st_case_16 + case 17: + goto st_case_17 + case 18: + goto st_case_18 + case 19: + goto st_case_19 + case 20: + goto st_case_20 + case 21: + goto st_case_21 + case 22: + goto st_case_22 + case 23: + goto st_case_23 + case 24: + goto st_case_24 + case 25: + goto st_case_25 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 + case 29: + goto st_case_29 + case 30: + goto st_case_30 + case 31: + goto st_case_31 + case 32: + goto st_case_32 + case 33: + goto st_case_33 + case 34: + goto st_case_34 + case 35: + goto st_case_35 + case 36: + goto st_case_36 + case 37: + goto st_case_37 + case 38: + goto st_case_38 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 + case 42: + goto st_case_42 + case 43: + goto st_case_43 + case 44: + goto st_case_44 + case 45: + goto st_case_45 + case 46: + goto st_case_46 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 + case 53: + goto st_case_53 + case 54: + goto st_case_54 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 + case 59: + goto st_case_59 + case 60: + goto st_case_60 + case 61: + goto st_case_61 + case 62: + goto st_case_62 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 + case 68: + goto st_case_68 + case 69: + goto st_case_69 + case 70: + goto st_case_70 + case 71: + goto st_case_71 + case 72: + goto st_case_72 + case 73: + goto st_case_73 + case 74: + goto st_case_74 + case 75: + goto st_case_75 + case 76: + goto st_case_76 + case 77: + goto st_case_77 + case 78: + goto st_case_78 + case 79: + goto st_case_79 + case 80: + goto st_case_80 + case 81: + goto st_case_81 + case 82: + goto st_case_82 + case 83: + goto st_case_83 + case 84: + goto st_case_84 + case 85: + goto st_case_85 + case 86: + goto st_case_86 + case 87: + goto st_case_87 + case 88: + goto st_case_88 + case 89: + goto st_case_89 + case 90: + goto st_case_90 + case 91: + goto st_case_91 + case 92: + goto st_case_92 + case 93: + goto st_case_93 + case 94: + goto st_case_94 + case 95: + goto st_case_95 + case 96: + goto st_case_96 + case 97: + goto st_case_97 + case 98: + goto st_case_98 + case 99: + goto st_case_99 + case 100: + goto st_case_100 + case 101: + goto st_case_101 + case 102: + goto st_case_102 + case 103: + goto st_case_103 + case 104: + goto st_case_104 + case 105: + goto st_case_105 + } + goto st_out + st_case_0: + switch data[(p)] { + case 60: + goto tr2 + case 65: + goto tr3 + case 70: + goto tr4 + case 74: + goto tr5 + case 77: + goto tr6 + case 78: + goto tr7 + case 79: + goto tr8 + case 83: + goto tr9 + case 101: + goto 
tr10 + } + if 48 <= data[(p)] && data[(p)] <= 57 { + goto tr1 + } + goto tr0 + tr0: + //line parser.rl:20 + + tok = p + + goto st1 + st1: + if (p)++; (p) == (pe) { + goto _test_eof1 + } + st_case_1: + //line parser.go:289 + goto st1 + tr1: + //line parser.rl:20 + + tok = p + + goto st2 + st2: + if (p)++; (p) == (pe) { + goto _test_eof2 + } + st_case_2: + //line parser.go:302 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st3 + } + goto st1 + st3: + if (p)++; (p) == (pe) { + goto _test_eof3 + } + st_case_3: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st4 + } + goto st1 + st4: + if (p)++; (p) == (pe) { + goto _test_eof4 + } + st_case_4: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st5 + } + goto st1 + st5: + if (p)++; (p) == (pe) { + goto _test_eof5 + } + st_case_5: + if data[(p)] == 45 { + goto tr15 + } + goto st1 + tr15: + //line parser.rl:36 + + event.SetYear(data[tok:p]) + + goto st6 + st6: + if (p)++; (p) == (pe) { + goto _test_eof6 + } + st_case_6: + //line parser.go:345 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto tr16 + } + goto st1 + tr16: + //line parser.rl:20 + + tok = p + + goto st7 + st7: + if (p)++; (p) == (pe) { + goto _test_eof7 + } + st_case_7: + //line parser.go:361 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st8 + } + goto st1 + st8: + if (p)++; (p) == (pe) { + goto _test_eof8 + } + st_case_8: + if data[(p)] == 45 { + goto tr18 + } + goto st1 + tr18: + //line parser.rl:40 + + event.SetMonthNumeric(data[tok:p]) + + goto st9 + st9: + if (p)++; (p) == (pe) { + goto _test_eof9 + } + st_case_9: + //line parser.go:386 + if 48 <= data[(p)] && data[(p)] <= 51 { + goto tr19 + } + goto st1 + tr19: + //line parser.rl:20 + + tok = p + + goto st10 + st10: + if (p)++; (p) == (pe) { + goto _test_eof10 + } + st_case_10: + //line parser.go:402 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st11 + } + goto st1 + st11: + if (p)++; (p) == (pe) { + goto _test_eof11 + } + st_case_11: + switch data[(p)] { + case 32: + goto tr21 + case 84: + goto tr21 + case 116: + goto tr21 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr21 + } + goto st1 + tr21: + //line parser.rl:44 + + event.SetDay(data[tok:p]) + + goto st12 + st12: + if (p)++; (p) == (pe) { + goto _test_eof12 + } + st_case_12: + //line parser.go:435 + if data[(p)] == 50 { + goto tr23 + } + if 48 <= data[(p)] && data[(p)] <= 49 { + goto tr22 + } + goto st1 + tr22: + //line parser.rl:20 + + tok = p + + goto st13 + st13: + if (p)++; (p) == (pe) { + goto _test_eof13 + } + st_case_13: + //line parser.go:454 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st14 + } + goto st1 + st14: + if (p)++; (p) == (pe) { + goto _test_eof14 + } + st_case_14: + if data[(p)] == 58 { + goto tr25 + } + goto st1 + tr25: + //line parser.rl:48 + + event.SetHour(data[tok:p]) + + goto st15 + st15: + if (p)++; (p) == (pe) { + goto _test_eof15 + } + st_case_15: + //line parser.go:479 + if 48 <= data[(p)] && data[(p)] <= 53 { + goto tr26 + } + goto st1 + tr26: + //line parser.rl:20 + + tok = p + + goto st16 + st16: + if (p)++; (p) == (pe) { + goto _test_eof16 + } + st_case_16: + //line parser.go:495 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st17 + } + goto st1 + st17: + if (p)++; (p) == (pe) { + goto _test_eof17 + } + st_case_17: + if data[(p)] == 58 { + goto tr28 + } + goto st1 + tr28: + //line parser.rl:52 + + event.SetMinute(data[tok:p]) + + goto st18 + st18: + if (p)++; (p) == (pe) { + goto _test_eof18 + } + st_case_18: + //line parser.go:520 + if 48 <= data[(p)] && data[(p)] <= 53 { + goto tr29 + } + goto st1 + tr29: + //line 
parser.rl:20 + + tok = p + + goto st19 + st19: + if (p)++; (p) == (pe) { + goto _test_eof19 + } + st_case_19: + //line parser.go:536 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st20 + } + goto st1 + st20: + if (p)++; (p) == (pe) { + goto _test_eof20 + } + st_case_20: + switch data[(p)] { + case 32: + goto tr31 + case 43: + goto tr32 + case 45: + goto tr32 + case 46: + goto tr33 + case 90: + goto tr34 + case 122: + goto tr34 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr31 + } + goto st1 + tr31: + //line parser.rl:56 + + event.SetSecond(data[tok:p]) + + goto st21 + tr49: + //line parser.rl:76 + + event.SetTimeZone(data[tok:p]) + + goto st21 + tr54: + //line parser.rl:60 + + event.SetNanosecond(data[tok:p]) + + goto st21 + st21: + if (p)++; (p) == (pe) { + goto _test_eof21 + } + st_case_21: + //line parser.go:587 + switch { + case data[(p)] > 95: + if 97 <= data[(p)] && data[(p)] <= 122 { + goto tr35 + } + case data[(p)] >= 46: + goto tr35 + } + goto tr0 + tr35: + //line parser.rl:20 + + tok = p + + goto st22 + st22: + if (p)++; (p) == (pe) { + goto _test_eof22 + } + st_case_22: + //line parser.go:608 + if data[(p)] == 32 { + goto tr36 + } + switch { + case data[(p)] < 46: + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr36 + } + case data[(p)] > 95: + if 97 <= data[(p)] && data[(p)] <= 122 { + goto st22 + } + default: + goto st22 + } + goto st1 + tr36: + //line parser.rl:64 + + event.SetHostname(data[tok:p]) + + goto st23 + st23: + if (p)++; (p) == (pe) { + goto _test_eof23 + } + st_case_23: + //line parser.go:636 + switch data[(p)] { + case 32: + goto tr0 + case 91: + goto tr0 + case 93: + goto tr0 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr0 + } + goto tr38 + tr38: + //line parser.rl:20 + + tok = p + + goto st24 + st24: + if (p)++; (p) == (pe) { + goto _test_eof24 + } + st_case_24: + //line parser.go:660 + switch data[(p)] { + case 32: + goto st1 + case 58: + goto tr40 + case 91: + goto tr41 + case 93: + goto st1 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto st1 + } + goto st24 + tr40: + //line parser.rl:68 + + event.SetProgram(data[tok:p]) + + goto st25 + st25: + if (p)++; (p) == (pe) { + goto _test_eof25 + } + st_case_25: + //line parser.go:686 + switch data[(p)] { + case 32: + goto st26 + case 58: + goto tr40 + case 91: + goto tr41 + case 93: + goto st1 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto st26 + } + goto st24 + st26: + if (p)++; (p) == (pe) { + goto _test_eof26 + } + st_case_26: + goto tr0 + tr41: + //line parser.rl:68 + + event.SetProgram(data[tok:p]) + + goto st27 + st27: + if (p)++; (p) == (pe) { + goto _test_eof27 + } + st_case_27: + //line parser.go:718 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto tr43 + } + goto st1 + tr43: + //line parser.rl:20 + + tok = p + + goto st28 + st28: + if (p)++; (p) == (pe) { + goto _test_eof28 + } + st_case_28: + //line parser.go:734 + if data[(p)] == 93 { + goto tr45 + } + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st28 + } + goto st1 + tr45: + //line parser.rl:72 + + event.SetPid(data[tok:p]) + + goto st29 + st29: + if (p)++; (p) == (pe) { + goto _test_eof29 + } + st_case_29: + //line parser.go:753 + if data[(p)] == 58 { + goto st30 + } + goto st1 + st30: + if (p)++; (p) == (pe) { + goto _test_eof30 + } + st_case_30: + if data[(p)] == 32 { + goto st26 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto st26 + } + goto st1 + tr32: + //line parser.rl:56 + + event.SetSecond(data[tok:p]) + + //line parser.rl:20 + + tok = p + + goto st31 + tr55: + //line parser.rl:60 + + 
event.SetNanosecond(data[tok:p]) + + //line parser.rl:20 + + tok = p + + goto st31 + st31: + if (p)++; (p) == (pe) { + goto _test_eof31 + } + st_case_31: + //line parser.go:795 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st32 + } + goto st1 + st32: + if (p)++; (p) == (pe) { + goto _test_eof32 + } + st_case_32: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st33 + } + goto st1 + st33: + if (p)++; (p) == (pe) { + goto _test_eof33 + } + st_case_33: + switch data[(p)] { + case 32: + goto tr49 + case 58: + goto st36 + } + switch { + case data[(p)] > 13: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st34 + } + case data[(p)] >= 9: + goto tr49 + } + goto st1 + st34: + if (p)++; (p) == (pe) { + goto _test_eof34 + } + st_case_34: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st35 + } + goto st1 + st35: + if (p)++; (p) == (pe) { + goto _test_eof35 + } + st_case_35: + if data[(p)] == 32 { + goto tr49 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr49 + } + goto st1 + st36: + if (p)++; (p) == (pe) { + goto _test_eof36 + } + st_case_36: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st34 + } + goto st1 + tr33: + //line parser.rl:56 + + event.SetSecond(data[tok:p]) + + goto st37 + st37: + if (p)++; (p) == (pe) { + goto _test_eof37 + } + st_case_37: + //line parser.go:870 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto tr53 + } + goto st1 + tr53: + //line parser.rl:20 + + tok = p + + goto st38 + st38: + if (p)++; (p) == (pe) { + goto _test_eof38 + } + st_case_38: + //line parser.go:886 + switch data[(p)] { + case 32: + goto tr54 + case 43: + goto tr55 + case 45: + goto tr55 + case 90: + goto tr57 + case 122: + goto tr57 + } + switch { + case data[(p)] > 13: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st38 + } + case data[(p)] >= 9: + goto tr54 + } + goto st1 + tr34: + //line parser.rl:56 + + event.SetSecond(data[tok:p]) + + //line parser.rl:20 + + tok = p + + goto st39 + tr57: + //line parser.rl:60 + + event.SetNanosecond(data[tok:p]) + + //line parser.rl:20 + + tok = p + + goto st39 + st39: + if (p)++; (p) == (pe) { + goto _test_eof39 + } + st_case_39: + //line parser.go:933 + switch data[(p)] { + case 32: + goto tr49 + case 43: + goto st31 + case 45: + goto st31 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr49 + } + goto st1 + tr23: + //line parser.rl:20 + + tok = p + + goto st40 + st40: + if (p)++; (p) == (pe) { + goto _test_eof40 + } + st_case_40: + //line parser.go:957 + if 48 <= data[(p)] && data[(p)] <= 51 { + goto st14 + } + goto st1 + tr2: + //line parser.rl:20 + + tok = p + + goto st41 + st41: + if (p)++; (p) == (pe) { + goto _test_eof41 + } + st_case_41: + //line parser.go:973 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto tr59 + } + goto st1 + tr59: + //line parser.rl:20 + + tok = p + + goto st42 + st42: + if (p)++; (p) == (pe) { + goto _test_eof42 + } + st_case_42: + //line parser.go:989 + if data[(p)] == 62 { + goto tr61 + } + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st43 + } + goto st1 + st43: + if (p)++; (p) == (pe) { + goto _test_eof43 + } + st_case_43: + if data[(p)] == 62 { + goto tr61 + } + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st44 + } + goto st1 + st44: + if (p)++; (p) == (pe) { + goto _test_eof44 + } + st_case_44: + if data[(p)] == 62 { + goto tr61 + } + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st45 + } + goto st1 + st45: + if (p)++; (p) == (pe) { + goto _test_eof45 + } + st_case_45: + if data[(p)] == 62 { + goto tr61 + } + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st46 + } + goto st1 + st46: + if (p)++; (p) == (pe) { + 
goto _test_eof46 + } + st_case_46: + if data[(p)] == 62 { + goto tr61 + } + goto st1 + tr61: + //line parser.rl:24 + + event.SetPriority(data[tok:p]) + + goto st47 + st47: + if (p)++; (p) == (pe) { + goto _test_eof47 + } + st_case_47: + //line parser.go:1053 + switch data[(p)] { + case 65: + goto tr3 + case 70: + goto tr4 + case 74: + goto tr5 + case 77: + goto tr6 + case 78: + goto tr7 + case 79: + goto tr8 + case 83: + goto tr9 + case 101: + goto tr10 + } + if 48 <= data[(p)] && data[(p)] <= 57 { + goto tr1 + } + goto tr0 + tr3: + //line parser.rl:20 + + tok = p + + goto st48 + st48: + if (p)++; (p) == (pe) { + goto _test_eof48 + } + st_case_48: + //line parser.go:1087 + switch data[(p)] { + case 112: + goto st49 + case 117: + goto st70 + } + goto st1 + st49: + if (p)++; (p) == (pe) { + goto _test_eof49 + } + st_case_49: + if data[(p)] == 114 { + goto st50 + } + goto st1 + st50: + if (p)++; (p) == (pe) { + goto _test_eof50 + } + st_case_50: + switch data[(p)] { + case 32: + goto tr68 + case 105: + goto st68 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + tr68: + //line parser.rl:32 + + event.SetMonth(data[tok:p]) + + goto st51 + st51: + if (p)++; (p) == (pe) { + goto _test_eof51 + } + st_case_51: + //line parser.go:1130 + switch data[(p)] { + case 32: + goto st52 + case 51: + goto tr72 + } + switch { + case data[(p)] < 49: + if 9 <= data[(p)] && data[(p)] <= 13 { + goto st52 + } + case data[(p)] > 50: + if 52 <= data[(p)] && data[(p)] <= 57 { + goto tr73 + } + default: + goto tr71 + } + goto st1 + st52: + if (p)++; (p) == (pe) { + goto _test_eof52 + } + st_case_52: + if 49 <= data[(p)] && data[(p)] <= 57 { + goto tr73 + } + goto st1 + tr73: + //line parser.rl:20 + + tok = p + + goto st53 + st53: + if (p)++; (p) == (pe) { + goto _test_eof53 + } + st_case_53: + //line parser.go:1170 + if data[(p)] == 32 { + goto tr74 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr74 + } + goto st1 + tr74: + //line parser.rl:44 + + event.SetDay(data[tok:p]) + + goto st54 + st54: + if (p)++; (p) == (pe) { + goto _test_eof54 + } + st_case_54: + //line parser.go:1189 + if data[(p)] == 50 { + goto tr76 + } + if 48 <= data[(p)] && data[(p)] <= 49 { + goto tr75 + } + goto st1 + tr75: + //line parser.rl:20 + + tok = p + + goto st55 + st55: + if (p)++; (p) == (pe) { + goto _test_eof55 + } + st_case_55: + //line parser.go:1208 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st56 + } + goto st1 + st56: + if (p)++; (p) == (pe) { + goto _test_eof56 + } + st_case_56: + if data[(p)] == 58 { + goto tr78 + } + goto st1 + tr78: + //line parser.rl:48 + + event.SetHour(data[tok:p]) + + goto st57 + st57: + if (p)++; (p) == (pe) { + goto _test_eof57 + } + st_case_57: + //line parser.go:1233 + if 48 <= data[(p)] && data[(p)] <= 53 { + goto tr79 + } + goto st1 + tr79: + //line parser.rl:20 + + tok = p + + goto st58 + st58: + if (p)++; (p) == (pe) { + goto _test_eof58 + } + st_case_58: + //line parser.go:1249 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st59 + } + goto st1 + st59: + if (p)++; (p) == (pe) { + goto _test_eof59 + } + st_case_59: + if data[(p)] == 58 { + goto tr81 + } + goto st1 + tr81: + //line parser.rl:52 + + event.SetMinute(data[tok:p]) + + goto st60 + st60: + if (p)++; (p) == (pe) { + goto _test_eof60 + } + st_case_60: + //line parser.go:1274 + if 48 <= data[(p)] && data[(p)] <= 53 { + goto tr82 + } + goto st1 + tr82: + //line parser.rl:20 + + tok = p + + goto st61 + st61: + if (p)++; (p) == (pe) { + goto _test_eof61 + } + st_case_61: + //line parser.go:1290 + if 48 <= 
data[(p)] && data[(p)] <= 57 { + goto st62 + } + goto st1 + st62: + if (p)++; (p) == (pe) { + goto _test_eof62 + } + st_case_62: + switch data[(p)] { + case 32: + goto tr31 + case 46: + goto tr84 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr31 + } + goto st1 + tr84: + //line parser.rl:56 + + event.SetSecond(data[tok:p]) + + goto st63 + st63: + if (p)++; (p) == (pe) { + goto _test_eof63 + } + st_case_63: + //line parser.go:1321 + if 48 <= data[(p)] && data[(p)] <= 57 { + goto tr85 + } + goto st1 + tr85: + //line parser.rl:20 + + tok = p + + goto st64 + st64: + if (p)++; (p) == (pe) { + goto _test_eof64 + } + st_case_64: + //line parser.go:1337 + if data[(p)] == 32 { + goto tr54 + } + switch { + case data[(p)] > 13: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st64 + } + case data[(p)] >= 9: + goto tr54 + } + goto st1 + tr76: + //line parser.rl:20 + + tok = p + + goto st65 + st65: + if (p)++; (p) == (pe) { + goto _test_eof65 + } + st_case_65: + //line parser.go:1361 + if 48 <= data[(p)] && data[(p)] <= 51 { + goto st56 + } + goto st1 + tr71: + //line parser.rl:20 + + tok = p + + goto st66 + st66: + if (p)++; (p) == (pe) { + goto _test_eof66 + } + st_case_66: + //line parser.go:1377 + if data[(p)] == 32 { + goto tr74 + } + switch { + case data[(p)] > 13: + if 48 <= data[(p)] && data[(p)] <= 57 { + goto st53 + } + case data[(p)] >= 9: + goto tr74 + } + goto st1 + tr72: + //line parser.rl:20 + + tok = p + + goto st67 + st67: + if (p)++; (p) == (pe) { + goto _test_eof67 + } + st_case_67: + //line parser.go:1401 + if data[(p)] == 32 { + goto tr74 + } + switch { + case data[(p)] > 13: + if 48 <= data[(p)] && data[(p)] <= 49 { + goto st53 + } + case data[(p)] >= 9: + goto tr74 + } + goto st1 + st68: + if (p)++; (p) == (pe) { + goto _test_eof68 + } + st_case_68: + if data[(p)] == 108 { + goto st69 + } + goto st1 + st69: + if (p)++; (p) == (pe) { + goto _test_eof69 + } + st_case_69: + if data[(p)] == 32 { + goto tr68 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st70: + if (p)++; (p) == (pe) { + goto _test_eof70 + } + st_case_70: + if data[(p)] == 103 { + goto st71 + } + goto st1 + st71: + if (p)++; (p) == (pe) { + goto _test_eof71 + } + st_case_71: + switch data[(p)] { + case 32: + goto tr68 + case 117: + goto st72 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st72: + if (p)++; (p) == (pe) { + goto _test_eof72 + } + st_case_72: + if data[(p)] == 115 { + goto st73 + } + goto st1 + st73: + if (p)++; (p) == (pe) { + goto _test_eof73 + } + st_case_73: + if data[(p)] == 116 { + goto st69 + } + goto st1 + tr4: + //line parser.rl:20 + + tok = p + + goto st74 + st74: + if (p)++; (p) == (pe) { + goto _test_eof74 + } + st_case_74: + //line parser.go:1488 + if data[(p)] == 101 { + goto st75 + } + goto st1 + st75: + if (p)++; (p) == (pe) { + goto _test_eof75 + } + st_case_75: + if data[(p)] == 98 { + goto st76 + } + goto st1 + st76: + if (p)++; (p) == (pe) { + goto _test_eof76 + } + st_case_76: + switch data[(p)] { + case 32: + goto tr68 + case 114: + goto st77 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st77: + if (p)++; (p) == (pe) { + goto _test_eof77 + } + st_case_77: + if data[(p)] == 117 { + goto st78 + } + goto st1 + st78: + if (p)++; (p) == (pe) { + goto _test_eof78 + } + st_case_78: + if data[(p)] == 97 { + goto st79 + } + goto st1 + st79: + if (p)++; (p) == (pe) { + goto _test_eof79 + } + st_case_79: + if data[(p)] == 114 { + goto st80 + } + goto st1 + st80: + if (p)++; (p) == (pe) { + goto _test_eof80 + 
} + st_case_80: + if data[(p)] == 121 { + goto st69 + } + goto st1 + tr5: + //line parser.rl:20 + + tok = p + + goto st81 + st81: + if (p)++; (p) == (pe) { + goto _test_eof81 + } + st_case_81: + //line parser.go:1564 + switch data[(p)] { + case 97: + goto st82 + case 117: + goto st84 + } + goto st1 + st82: + if (p)++; (p) == (pe) { + goto _test_eof82 + } + st_case_82: + if data[(p)] == 110 { + goto st83 + } + goto st1 + st83: + if (p)++; (p) == (pe) { + goto _test_eof83 + } + st_case_83: + switch data[(p)] { + case 32: + goto tr68 + case 117: + goto st78 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st84: + if (p)++; (p) == (pe) { + goto _test_eof84 + } + st_case_84: + switch data[(p)] { + case 108: + goto st85 + case 110: + goto st86 + } + goto st1 + st85: + if (p)++; (p) == (pe) { + goto _test_eof85 + } + st_case_85: + switch data[(p)] { + case 32: + goto tr68 + case 121: + goto st69 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st86: + if (p)++; (p) == (pe) { + goto _test_eof86 + } + st_case_86: + switch data[(p)] { + case 32: + goto tr68 + case 101: + goto st69 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + tr6: + //line parser.rl:20 + + tok = p + + goto st87 + st87: + if (p)++; (p) == (pe) { + goto _test_eof87 + } + st_case_87: + //line parser.go:1649 + if data[(p)] == 97 { + goto st88 + } + goto st1 + st88: + if (p)++; (p) == (pe) { + goto _test_eof88 + } + st_case_88: + switch data[(p)] { + case 32: + goto tr68 + case 114: + goto st89 + case 121: + goto st69 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st89: + if (p)++; (p) == (pe) { + goto _test_eof89 + } + st_case_89: + switch data[(p)] { + case 32: + goto tr68 + case 99: + goto st90 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st90: + if (p)++; (p) == (pe) { + goto _test_eof90 + } + st_case_90: + if data[(p)] == 104 { + goto st69 + } + goto st1 + tr7: + //line parser.rl:20 + + tok = p + + goto st91 + st91: + if (p)++; (p) == (pe) { + goto _test_eof91 + } + st_case_91: + //line parser.go:1706 + if data[(p)] == 111 { + goto st92 + } + goto st1 + st92: + if (p)++; (p) == (pe) { + goto _test_eof92 + } + st_case_92: + if data[(p)] == 118 { + goto st93 + } + goto st1 + st93: + if (p)++; (p) == (pe) { + goto _test_eof93 + } + st_case_93: + switch data[(p)] { + case 32: + goto tr68 + case 101: + goto st94 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st94: + if (p)++; (p) == (pe) { + goto _test_eof94 + } + st_case_94: + if data[(p)] == 109 { + goto st95 + } + goto st1 + st95: + if (p)++; (p) == (pe) { + goto _test_eof95 + } + st_case_95: + if data[(p)] == 98 { + goto st96 + } + goto st1 + st96: + if (p)++; (p) == (pe) { + goto _test_eof96 + } + st_case_96: + if data[(p)] == 101 { + goto st97 + } + goto st1 + st97: + if (p)++; (p) == (pe) { + goto _test_eof97 + } + st_case_97: + if data[(p)] == 114 { + goto st69 + } + goto st1 + tr8: + //line parser.rl:20 + + tok = p + + goto st98 + st98: + if (p)++; (p) == (pe) { + goto _test_eof98 + } + st_case_98: + //line parser.go:1782 + if data[(p)] == 99 { + goto st99 + } + goto st1 + st99: + if (p)++; (p) == (pe) { + goto _test_eof99 + } + st_case_99: + if data[(p)] == 116 { + goto st100 + } + goto st1 + st100: + if (p)++; (p) == (pe) { + goto _test_eof100 + } + st_case_100: + switch data[(p)] { + case 32: + goto tr68 + case 111: + goto st95 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + tr9: + //line 
parser.rl:20 + + tok = p + + goto st101 + st101: + if (p)++; (p) == (pe) { + goto _test_eof101 + } + st_case_101: + //line parser.go:1822 + if data[(p)] == 101 { + goto st102 + } + goto st1 + st102: + if (p)++; (p) == (pe) { + goto _test_eof102 + } + st_case_102: + if data[(p)] == 112 { + goto st103 + } + goto st1 + st103: + if (p)++; (p) == (pe) { + goto _test_eof103 + } + st_case_103: + switch data[(p)] { + case 32: + goto tr68 + case 116: + goto st104 + } + if 9 <= data[(p)] && data[(p)] <= 13 { + goto tr68 + } + goto st1 + st104: + if (p)++; (p) == (pe) { + goto _test_eof104 + } + st_case_104: + if data[(p)] == 101 { + goto st94 + } + goto st1 + tr10: + //line parser.rl:20 + + tok = p + + goto st105 + st105: + if (p)++; (p) == (pe) { + goto _test_eof105 + } + st_case_105: + //line parser.go:1871 + if data[(p)] == 99 { + goto st93 + } + goto st1 + st_out: + _test_eof1: + cs = 1 + goto _test_eof + _test_eof2: + cs = 2 + goto _test_eof + _test_eof3: + cs = 3 + goto _test_eof + _test_eof4: + cs = 4 + goto _test_eof + _test_eof5: + cs = 5 + goto _test_eof + _test_eof6: + cs = 6 + goto _test_eof + _test_eof7: + cs = 7 + goto _test_eof + _test_eof8: + cs = 8 + goto _test_eof + _test_eof9: + cs = 9 + goto _test_eof + _test_eof10: + cs = 10 + goto _test_eof + _test_eof11: + cs = 11 + goto _test_eof + _test_eof12: + cs = 12 + goto _test_eof + _test_eof13: + cs = 13 + goto _test_eof + _test_eof14: + cs = 14 + goto _test_eof + _test_eof15: + cs = 15 + goto _test_eof + _test_eof16: + cs = 16 + goto _test_eof + _test_eof17: + cs = 17 + goto _test_eof + _test_eof18: + cs = 18 + goto _test_eof + _test_eof19: + cs = 19 + goto _test_eof + _test_eof20: + cs = 20 + goto _test_eof + _test_eof21: + cs = 21 + goto _test_eof + _test_eof22: + cs = 22 + goto _test_eof + _test_eof23: + cs = 23 + goto _test_eof + _test_eof24: + cs = 24 + goto _test_eof + _test_eof25: + cs = 25 + goto _test_eof + _test_eof26: + cs = 26 + goto _test_eof + _test_eof27: + cs = 27 + goto _test_eof + _test_eof28: + cs = 28 + goto _test_eof + _test_eof29: + cs = 29 + goto _test_eof + _test_eof30: + cs = 30 + goto _test_eof + _test_eof31: + cs = 31 + goto _test_eof + _test_eof32: + cs = 32 + goto _test_eof + _test_eof33: + cs = 33 + goto _test_eof + _test_eof34: + cs = 34 + goto _test_eof + _test_eof35: + cs = 35 + goto _test_eof + _test_eof36: + cs = 36 + goto _test_eof + _test_eof37: + cs = 37 + goto _test_eof + _test_eof38: + cs = 38 + goto _test_eof + _test_eof39: + cs = 39 + goto _test_eof + _test_eof40: + cs = 40 + goto _test_eof + _test_eof41: + cs = 41 + goto _test_eof + _test_eof42: + cs = 42 + goto _test_eof + _test_eof43: + cs = 43 + goto _test_eof + _test_eof44: + cs = 44 + goto _test_eof + _test_eof45: + cs = 45 + goto _test_eof + _test_eof46: + cs = 46 + goto _test_eof + _test_eof47: + cs = 47 + goto _test_eof + _test_eof48: + cs = 48 + goto _test_eof + _test_eof49: + cs = 49 + goto _test_eof + _test_eof50: + cs = 50 + goto _test_eof + _test_eof51: + cs = 51 + goto _test_eof + _test_eof52: + cs = 52 + goto _test_eof + _test_eof53: + cs = 53 + goto _test_eof + _test_eof54: + cs = 54 + goto _test_eof + _test_eof55: + cs = 55 + goto _test_eof + _test_eof56: + cs = 56 + goto _test_eof + _test_eof57: + cs = 57 + goto _test_eof + _test_eof58: + cs = 58 + goto _test_eof + _test_eof59: + cs = 59 + goto _test_eof + _test_eof60: + cs = 60 + goto _test_eof + _test_eof61: + cs = 61 + goto _test_eof + _test_eof62: + cs = 62 + goto _test_eof + _test_eof63: + cs = 63 + goto _test_eof + _test_eof64: + cs = 64 + goto _test_eof + 
_test_eof65: + cs = 65 + goto _test_eof + _test_eof66: + cs = 66 + goto _test_eof + _test_eof67: + cs = 67 + goto _test_eof + _test_eof68: + cs = 68 + goto _test_eof + _test_eof69: + cs = 69 + goto _test_eof + _test_eof70: + cs = 70 + goto _test_eof + _test_eof71: + cs = 71 + goto _test_eof + _test_eof72: + cs = 72 + goto _test_eof + _test_eof73: + cs = 73 + goto _test_eof + _test_eof74: + cs = 74 + goto _test_eof + _test_eof75: + cs = 75 + goto _test_eof + _test_eof76: + cs = 76 + goto _test_eof + _test_eof77: + cs = 77 + goto _test_eof + _test_eof78: + cs = 78 + goto _test_eof + _test_eof79: + cs = 79 + goto _test_eof + _test_eof80: + cs = 80 + goto _test_eof + _test_eof81: + cs = 81 + goto _test_eof + _test_eof82: + cs = 82 + goto _test_eof + _test_eof83: + cs = 83 + goto _test_eof + _test_eof84: + cs = 84 + goto _test_eof + _test_eof85: + cs = 85 + goto _test_eof + _test_eof86: + cs = 86 + goto _test_eof + _test_eof87: + cs = 87 + goto _test_eof + _test_eof88: + cs = 88 + goto _test_eof + _test_eof89: + cs = 89 + goto _test_eof + _test_eof90: + cs = 90 + goto _test_eof + _test_eof91: + cs = 91 + goto _test_eof + _test_eof92: + cs = 92 + goto _test_eof + _test_eof93: + cs = 93 + goto _test_eof + _test_eof94: + cs = 94 + goto _test_eof + _test_eof95: + cs = 95 + goto _test_eof + _test_eof96: + cs = 96 + goto _test_eof + _test_eof97: + cs = 97 + goto _test_eof + _test_eof98: + cs = 98 + goto _test_eof + _test_eof99: + cs = 99 + goto _test_eof + _test_eof100: + cs = 100 + goto _test_eof + _test_eof101: + cs = 101 + goto _test_eof + _test_eof102: + cs = 102 + goto _test_eof + _test_eof103: + cs = 103 + goto _test_eof + _test_eof104: + cs = 104 + goto _test_eof + _test_eof105: + cs = 105 + goto _test_eof + + _test_eof: + { + } + if (p) == eof { + switch cs { + case 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105: + //line parser.rl:28 + + event.SetMessage(data[tok:p]) + + //line parser.go:1991 + } + } + + } + + //line parser.rl:84 + +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/parser.rl b/vendor/github.com/elastic/beats/filebeat/input/syslog/parser.rl new file mode 100644 index 00000000..cbfd3236 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/parser.rl @@ -0,0 +1,85 @@ +// Code generated by ragel DO NOT EDIT. +package syslog + +%%{ + machine syslog; + write data; + variable p p; + variable pe pe; +}%% + +// syslog +//<34>Oct 11 22:14:15 wopr su: 'su root' failed for foobar +//<13>Feb 5 17:32:18 10.0.0.99 Use the quad dmg. 
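+// After editing this file or syslog_rfc3164.rl, regenerate parser.go via the
+// //go:generate directive declared in input.go:
+//
+//	ragel -Z -G2 parser.rl -o parser.go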
+func Parse(data []byte, event *event) { + var p, cs int + pe := len(data) + tok := 0 + eof := len(data) + %%{ + action tok { + tok = p + } + + action priority { + event.SetPriority(data[tok:p]) + } + + action message { + event.SetMessage(data[tok:p]) + } + + action month { + event.SetMonth(data[tok:p]) + } + + action year{ + event.SetYear(data[tok:p]) + } + + action month_numeric { + event.SetMonthNumeric(data[tok:p]) + } + + action day { + event.SetDay(data[tok:p]) + } + + action hour { + event.SetHour(data[tok:p]) + } + + action minute { + event.SetMinute(data[tok:p]) + } + + action second { + event.SetSecond(data[tok:p]) + } + + action nanosecond{ + event.SetNanosecond(data[tok:p]) + } + + action hostname { + event.SetHostname(data[tok:p]) + } + + action program { + event.SetProgram(data[tok:p]) + } + + action pid { + event.SetPid(data[tok:p]) + } + + action timezone { + event.SetTimeZone(data[tok:p]) + } + + include syslog_rfc3164 "syslog_rfc3164.rl"; + + write init; + write exec; + }%% +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/parser_test.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/parser_test.go new file mode 100644 index 00000000..3445a39e --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/parser_test.go @@ -0,0 +1,612 @@ +package syslog + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestParseSyslog(t *testing.T) { + tests := []struct { + title string + log []byte + syslog event + }{ + { + title: "no timezone in date", + log: []byte("<190>2018-06-19 02:13:38 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + }, + }, + { + title: "no timezone in date with nanoseconds", + log: []byte("<190>2018-06-19 02:13:38.0004 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 4000, + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322-07:00 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.FixedZone("", int(-7*time.Hour)), + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322-0700 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.FixedZone("", int(-7*time.Hour)), + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322-0730 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.FixedZone("", int(-7*time.Hour)+int(-30*time.Minute)), + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322-07:10 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + 
hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.FixedZone("", int(-7*time.Hour)+int(-10*time.Minute)), + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322-07 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.FixedZone("", int(-7*time.Hour)), + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322Z super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.UTC, + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322Z+0000 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.UTC, + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322Z+00:00 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.UTC, + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38.635322Z+00 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + nanosecond: 6353220, + loc: time.UTC, + }, + }, + { + title: "time in ISO8601 format", + log: []byte("<190>2018-06-19T02:13:38Z+00 super mon message"), + syslog: event{ + priority: 190, + message: "mon message", + hostname: "super", + program: "", + month: 6, + pid: -1, + day: 19, + year: 2018, + hour: 2, + minute: 13, + second: 38, + loc: time.UTC, + }, + }, + { + title: "priority and timestamp defined as 2018-05-08T10:31:24 (rfc3339)", + log: []byte("<38>2018-05-08T10:31:24 localhost prg00000[1234]: seq: 0000000000, thread: 0000, runid: 1525768284, stamp: 2018-05-08T10:31:24 PADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPAD DPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADD"), + syslog: event{ + priority: 38, + message: "seq: 0000000000, thread: 0000, runid: 1525768284, stamp: 2018-05-08T10:31:24 PADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPAD DPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADDPADD", + hostname: "localhost", + program: "prg00000", + pid: 1234, + month: 5, + day: 8, + hour: 10, + minute: 31, + second: 24, + year: 2016, + }, + }, + { + title: "timestamp defined as 2018-05-08T10:31:24 (rfc3339)", + log: []byte("2016-05-08T10:31:24 localhost prg00000[1234]: seq: 0000000000, thread: 0000, runid: 1525768284"), + syslog: event{ + priority: -1, + message: "seq: 0000000000, thread: 0000, runid: 1525768284", + hostname: "localhost", + program: "prg00000", + pid: 1234, + month: 5, + day: 8, + hour: 10, + minute: 31, + second: 24, + year: 2016, + }, + }, + { + title: "timestamp with nanosecond defined as 2018-05-08T10:31:24.0004 (rfc3339)", + log: []byte("2016-05-08T10:31:24.0004 localhost prg00000[1234]: seq: 0000000000, 
thread: 0000, runid: 1525768284"), + syslog: event{ + priority: -1, + message: "seq: 0000000000, thread: 0000, runid: 1525768284", + hostname: "localhost", + program: "prg00000", + pid: 1234, + month: 5, + day: 8, + hour: 10, + minute: 31, + second: 24, + year: 2016, + nanosecond: 4000, + }, + }, + { + title: "message only", + log: []byte("--- last message repeated 1 time ---"), + syslog: event{ + priority: -1, + message: "--- last message repeated 1 time ---", + hostname: "", + program: "", + pid: -1, + month: -1, + day: -1, + hour: -1, + minute: -1, + second: -1, + }, + }, + { + title: "time and message only", + log: []byte("Oct 11 22:14:15 --- last message repeated 1 time ---"), + syslog: event{ + priority: -1, + message: "--- last message repeated 1 time ---", + hostname: "", + program: "", + pid: -1, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + }, + }, + { + title: "time with nanosecond", + log: []byte("Oct 11 22:14:15.0000005 --- last message repeated 1 time ---"), + syslog: event{ + priority: -1, + message: "--- last message repeated 1 time ---", + hostname: "", + program: "", + pid: -1, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + nanosecond: 5, + }, + }, + { + title: "No priority defined", + log: []byte("Oct 11 22:14:15 mymachine su[230]: 'su root' failed for lonvick on /dev/pts/8"), + syslog: event{ + priority: -1, + message: "'su root' failed for lonvick on /dev/pts/8", + hostname: "mymachine", + program: "su", + pid: 230, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + }, + }, + { + log: []byte("<34>Oct 11 22:14:15 mymachine su[230]: 'su root' failed for lonvick on /dev/pts/8"), + syslog: event{ + priority: 34, + message: "'su root' failed for lonvick on /dev/pts/8", + hostname: "mymachine", + program: "su", + pid: 230, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + }, + }, + { + log: []byte("<34>Oct 11 22:14:15.57643 mymachine su: 'su root' failed for lonvick on /dev/pts/8"), + syslog: event{ + priority: 34, + message: "'su root' failed for lonvick on /dev/pts/8", + hostname: "mymachine", + program: "su", + pid: -1, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + nanosecond: 5764300, + }, + }, + { + log: []byte("<34>Oct 11 22:14:15 mymachine su: 'su root' failed for lonvick on /dev/pts/8"), + syslog: event{ + priority: 34, + message: "'su root' failed for lonvick on /dev/pts/8", + hostname: "mymachine", + program: "su", + pid: -1, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + }, + }, + { + log: []byte("<34>Oct 11 22:14:15 mymachine postfix/smtpd[2000]: 'su root' failed for lonvick on /dev/pts/8"), + syslog: event{ + priority: 34, + message: "'su root' failed for lonvick on /dev/pts/8", + hostname: "mymachine", + program: "postfix/smtpd", + pid: 2000, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + }, + }, + { + log: []byte("<34>Oct 11 22:14:15 wopr.mymachine.co postfix/smtpd[2000]: 'su root' failed for lonvick on /dev/pts/8"), + syslog: event{ + priority: 34, + message: "'su root' failed for lonvick on /dev/pts/8", + hostname: "wopr.mymachine.co", + program: "postfix/smtpd", + pid: 2000, + month: 10, + day: 11, + hour: 22, + minute: 14, + second: 15, + }, + }, + { + log: []byte("<13>Feb 25 17:32:18 10.0.0.99 Use the Force!"), + syslog: event{ + message: "Use the Force!", + hostname: "10.0.0.99", + priority: 13, + pid: -1, + month: 2, + day: 25, + hour: 17, + minute: 32, + second: 18, + }, + }, + { + title: "Check relay + hostname alpha", + log: 
[]byte("<13>Feb 25 17:32:18 wopr Use the Force!"), + syslog: event{ + message: "Use the Force!", + hostname: "wopr", + priority: 13, + pid: -1, + month: 2, + day: 25, + hour: 17, + minute: 32, + second: 18, + }, + }, + { + title: "Check relay + ipv6", + log: []byte("<13>Feb 25 17:32:18 2607:f0d0:1002:51::4 Use the Force!"), + syslog: event{ + message: "Use the Force!", + hostname: "2607:f0d0:1002:51::4", + priority: 13, + pid: -1, + month: 2, + day: 25, + hour: 17, + minute: 32, + second: 18, + }, + }, + { + title: "Check relay + ipv6", + log: []byte("<13>Feb 25 17:32:18 2607:f0d0:1002:0051:0000:0000:0000:0004 Use the Force!"), + syslog: event{ + message: "Use the Force!", + hostname: "2607:f0d0:1002:0051:0000:0000:0000:0004", + priority: 13, + pid: -1, + month: 2, + day: 25, + hour: 17, + minute: 32, + second: 18, + }, + }, + { + title: "Number inf the host", + log: []byte("<164>Oct 26 15:19:25 1.2.3.4 ASA1-2: Deny udp src DRAC:10.1.2.3/43434 dst outside:192.168.0.1/53 by access-group \"acl_drac\" [0x0, 0x0]"), + syslog: event{ + message: "Deny udp src DRAC:10.1.2.3/43434 dst outside:192.168.0.1/53 by access-group \"acl_drac\" [0x0, 0x0]", + hostname: "1.2.3.4", + program: "ASA1-2", + priority: 164, + pid: -1, + month: 10, + day: 26, + hour: 15, + minute: 19, + second: 25, + }, + }, + { + log: []byte("<164>Oct 26 15:19:25 1.2.3.4 %ASA1-120: Deny udp src DRAC:10.1.2.3/43434 dst outside:192.168.0.1/53 by access-group \"acl_drac\" [0x0, 0x0]"), + syslog: event{ + message: "Deny udp src DRAC:10.1.2.3/43434 dst outside:192.168.0.1/53 by access-group \"acl_drac\" [0x0, 0x0]", + hostname: "1.2.3.4", + program: "%ASA1-120", + priority: 164, + pid: -1, + month: 10, + day: 26, + hour: 15, + minute: 19, + second: 25, + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s:%s", test.title, string(test.log)), func(t *testing.T) { + l := newEvent() + Parse(test.log, l) + assert.Equal(t, test.syslog.Message(), l.Message()) + assert.Equal(t, test.syslog.Hostname(), l.Hostname()) + assert.Equal(t, test.syslog.Priority(), l.Priority()) + assert.Equal(t, test.syslog.Pid(), l.Pid()) + assert.Equal(t, test.syslog.Program(), l.Program()) + assert.Equal(t, test.syslog.Month(), l.Month()) + assert.Equal(t, test.syslog.Day(), l.Day()) + assert.Equal(t, test.syslog.Hour(), l.Hour()) + assert.Equal(t, test.syslog.Minute(), l.Minute()) + assert.Equal(t, test.syslog.Second(), l.Second()) + assert.Equal(t, test.syslog.Nanosecond(), l.Nanosecond()) + assert.Equal(t, test.syslog.loc, l.loc) + }) + } +} + +func TestDay(t *testing.T) { + for d := 1; d <= 31; d++ { + t.Run(fmt.Sprintf("Day %d", d), func(t *testing.T) { + log := fmt.Sprintf("<34>Oct %2d 22:14:15 mymachine postfix/smtpd[2000]: 'su root' failed for lonvick on /dev/pts/8", d) + l := newEvent() + Parse([]byte(log), l) + assert.Equal(t, d, l.Day()) + }) + } +} + +func TestHour(t *testing.T) { + for d := 0; d <= 23; d++ { + t.Run(fmt.Sprintf("Hour %d", d), func(t *testing.T) { + log := fmt.Sprintf("<34>Oct 11 %02d:14:15 mymachine postfix/smtpd[2000]: 'su root' failed for lonvick on /dev/pts/8", d) + l := newEvent() + Parse([]byte(log), l) + assert.Equal(t, d, l.Hour()) + }) + } +} + +func TestMinute(t *testing.T) { + for d := 0; d <= 59; d++ { + t.Run(fmt.Sprintf("Minute %d", d), func(t *testing.T) { + log := fmt.Sprintf("<34>Oct 11 10:%02d:15 mymachine postfix/smtpd[2000]: 'su root' failed for lonvick on /dev/pts/8", d) + l := newEvent() + Parse([]byte(log), l) + assert.Equal(t, d, l.Minute()) + }) + } +} + +func TestSecond(t *testing.T) { + for d 
:= 0; d <= 59; d++ { + t.Run(fmt.Sprintf("Second %d", d), func(t *testing.T) { + log := fmt.Sprintf("<34>Oct 11 10:15:%02d mymachine postfix/smtpd[2000]: 'su root' failed for lonvick on /dev/pts/8", d) + l := newEvent() + Parse([]byte(log), l) + assert.Equal(t, d, l.Second()) + }) + } +} + +func TestPriority(t *testing.T) { + for d := 1; d <= 120; d++ { + t.Run(fmt.Sprintf("Priority %d", d), func(t *testing.T) { + log := fmt.Sprintf("<%d>Oct 11 10:15:15 mymachine postfix/smtpd[2000]: 'su root' failed for lonvick on /dev/pts/8", d) + l := newEvent() + Parse([]byte(log), l) + assert.Equal(t, d, l.Priority()) + }) + return + } +} + +var e *event + +func BenchmarkParser(b *testing.B) { + b.ReportAllocs() + l := newEvent() + log := []byte("<34>Oct 11 22:14:15 mymachine su: 'su root' failed for lonvick on /dev/pts/8") + for n := 0; n < b.N; n++ { + Parse(log, l) + e = l + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/syslog_rfc3164.rl b/vendor/github.com/elastic/beats/filebeat/input/syslog/syslog_rfc3164.rl new file mode 100644 index 00000000..4f7650ff --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/syslog_rfc3164.rl @@ -0,0 +1,60 @@ +%%{ + machine syslog_rfc3164; + + # General + brackets = "[" | "]"; + + # Priority + # Ref: https://tools.ietf.org/html/rfc3164#section-4.1.1 + # Match: "<123>" + priority = digit{1,5}>tok %priority; + prio = "<" priority ">"; + + # Header + # Timestamp + # https://tools.ietf.org/html/rfc3164#section-4.1.2 + # Match: "Jan" and "January" + month = ( "Jan" ("uary")? | "Feb" "ruary"? | "Mar" "ch"? | "Apr" "il"? | "Ma" "y"? | "Jun" "e"? | "Jul" "y"? | "Aug" "ust"? | "Sep" ("tember")? | "Oct" "ober"? | "Nov" "ember"? | "Dec" "ember"?) >tok %month; + + # Match: " 5" and "10" as the day + multiple_digits_day = (([12][0-9]) | ("3"[01]))>tok %day; + single_digit_day = [1-9]>tok %day; + day = (space? single_digit_day | multiple_digits_day); + + # Match: hh:mm:ss (24 hr format) + hour = ([01][0-9]|"2"[0-3])>tok %hour; + minute = ([0-5][0-9])>tok %minute; + second = ([0-5][0-9])>tok %second; + nanosecond = digit+>tok %nanosecond; + time = hour ":" minute ":" second ("." nanosecond)?; + offset_marker = "Z" | "z"; + offset_direction = "-" | "+"; + offset_hour = digit{2}; + offset_minute = digit{2}; + timezone = (offset_marker | offset_marker? offset_direction offset_hour (":"? offset_minute)?)>tok %timezone; + + # Some BSD style actually uses rfc3339 formatted date. + year = digit{4}>tok %year; + month_numeric = digit{2}>tok %month_numeric; + day_two_digits = ([0-3][0-9])>tok %day; + + # common timestamp format + timestamp_rfc3164 = month space day space time; + time_separator = "T" | "t"; + timestamp_rfc3339 = year "-" month_numeric "-" day_two_digits (time_separator | space) time timezone?; + timestamp = timestamp_rfc3339 | timestamp_rfc3164; + + hostname = [a-zA-Z0-9.-_:]+>tok %hostname; + header = timestamp space hostname space; + + # MSG + # https://tools.ietf.org/html/rfc3164#section-4.1.3 + program = (extend -space -brackets)+>tok %program; + pid = digit+>tok %pid; + syslogprog = program ("[" pid "]")? ":" space; + message = any+>tok %message; + msg = syslogprog? message>tok %message; + + main := (prio)?
(header msg | timestamp space message | message); + +}%% diff --git a/vendor/github.com/elastic/beats/filebeat/input/tcp/config.go b/vendor/github.com/elastic/beats/filebeat/input/tcp/config.go new file mode 100644 index 00000000..7b30b2a9 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/tcp/config.go @@ -0,0 +1,26 @@ +package tcp + +import ( + "time" + + "github.com/dustin/go-humanize" + + "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/inputsource/tcp" +) + +type config struct { + tcp.Config `config:",inline"` + harvester.ForwarderConfig `config:",inline"` +} + +var defaultConfig = config{ + ForwarderConfig: harvester.ForwarderConfig{ + Type: "tcp", + }, + Config: tcp.Config{ + LineDelimiter: "\n", + Timeout: time.Minute * 5, + MaxMessageSize: 20 * humanize.MiByte, + }, +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/tcp/input.go b/vendor/github.com/elastic/beats/filebeat/input/tcp/input.go new file mode 100644 index 00000000..96f66ed4 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/tcp/input.go @@ -0,0 +1,117 @@ +package tcp + +import ( + "sync" + "time" + + "github.com/elastic/beats/filebeat/channel" + "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/input" + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/filebeat/inputsource/tcp" + "github.com/elastic/beats/filebeat/util" + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" +) + +func init() { + err := input.Register("tcp", NewInput) + if err != nil { + panic(err) + } +} + +// Input for TCP connection +type Input struct { + sync.Mutex + server *tcp.Server + started bool + outlet channel.Outleter + config *config + log *logp.Logger +} + +// NewInput creates a new TCP input +func NewInput( + cfg *common.Config, + outlet channel.Factory, + context input.Context, +) (input.Input, error) { + cfgwarn.Experimental("TCP input type is used") + + out, err := outlet(cfg, context.DynamicFields) + if err != nil { + return nil, err + } + + forwarder := harvester.NewForwarder(out) + + config := defaultConfig + err = cfg.Unpack(&config) + if err != nil { + return nil, err + } + + cb := func(data []byte, metadata inputsource.NetworkMetadata) { + event := createEvent(data, metadata) + forwarder.Send(event) + } + + server, err := tcp.New(&config.Config, cb) + if err != nil { + return nil, err + } + + return &Input{ + server: server, + started: false, + outlet: out, + config: &config, + log: logp.NewLogger("tcp input").With(config.Config.Host), + }, nil +} + +// Run start a TCP input +func (p *Input) Run() { + p.Lock() + defer p.Unlock() + + if !p.started { + p.log.Info("Starting TCP input") + err := p.server.Start() + if err != nil { + p.log.Errorw("Error starting the TCP server", "error", err) + } + p.started = true + } +} + +// Stop stops TCP server +func (p *Input) Stop() { + defer p.outlet.Close() + p.Lock() + defer p.Unlock() + + p.log.Info("Stopping TCP input") + p.server.Stop() + p.started = false +} + +// Wait stop the current server +func (p *Input) Wait() { + p.Stop() +} + +func createEvent(raw []byte, metadata inputsource.NetworkMetadata) *util.Data { + data := util.NewData() + data.Event = beat.Event{ + Timestamp: time.Now(), + Fields: common.MapStr{ + "message": string(raw), + "source": metadata.RemoteAddr.String(), + }, + } + return data +} diff --git 
a/vendor/github.com/elastic/beats/filebeat/input/tcp/input_test.go b/vendor/github.com/elastic/beats/filebeat/input/tcp/input_test.go new file mode 100644 index 00000000..fde5100a --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/tcp/input_test.go @@ -0,0 +1,30 @@ +package tcp + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/filebeat/inputsource" +) + +func TestCreateEvent(t *testing.T) { + hello := "hello world" + ip := "127.0.0.1" + parsedIP := net.ParseIP(ip) + addr := &net.IPAddr{IP: parsedIP, Zone: ""} + + message := []byte(hello) + mt := inputsource.NetworkMetadata{RemoteAddr: addr} + + data := createEvent(message, mt) + event := data.GetEvent() + + m, err := event.GetValue("message") + assert.NoError(t, err) + assert.Equal(t, string(message), m) + + from, _ := event.GetValue("source") + assert.Equal(t, ip, from) +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/udp/config.go b/vendor/github.com/elastic/beats/filebeat/input/udp/config.go new file mode 100644 index 00000000..6f1651e9 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/udp/config.go @@ -0,0 +1,28 @@ +package udp + +import ( + "time" + + "github.com/dustin/go-humanize" + + "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/inputsource/udp" +) + +var defaultConfig = config{ + ForwarderConfig: harvester.ForwarderConfig{ + Type: "udp", + }, + Config: udp.Config{ + MaxMessageSize: 10 * humanize.KiByte, + // TODO: What should be default port? + Host: "localhost:8080", + // TODO: What should be the default timeout? + Timeout: time.Minute * 5, + }, +} + +type config struct { + udp.Config `config:",inline"` + harvester.ForwarderConfig `config:",inline"` +} diff --git a/vendor/github.com/elastic/beats/filebeat/input/udp/input.go b/vendor/github.com/elastic/beats/filebeat/input/udp/input.go new file mode 100644 index 00000000..0f6ff183 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/input/udp/input.go @@ -0,0 +1,106 @@ +package udp + +import ( + "sync" + "time" + + "github.com/elastic/beats/filebeat/channel" + "github.com/elastic/beats/filebeat/harvester" + "github.com/elastic/beats/filebeat/input" + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/filebeat/inputsource/udp" + "github.com/elastic/beats/filebeat/util" + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" +) + +func init() { + err := input.Register("udp", NewInput) + if err != nil { + panic(err) + } +} + +// Input defines a udp input to receive event on a specific host:port. 
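Editor's note: the UDP defaults above are still marked with TODOs, so the concrete host and timeout values should be read as provisional. As a minimal sketch of how those defaults are meant to combine with user settings, assuming the vendored import paths from this patch and a purely illustrative localhost:10514 override, the exported inputsource-level udp.Config (shown later in this patch, and embedded inline by this input's config struct) can be unpacked the same way the inputs here do it:

package main

import (
	"fmt"
	"time"

	"github.com/dustin/go-humanize"

	"github.com/elastic/beats/filebeat/inputsource/udp"
	"github.com/elastic/beats/libbeat/common"
)

func main() {
	// Hypothetical user settings; the keys follow the `config:` struct tags above.
	raw := map[string]interface{}{
		"host": "localhost:10514",
	}
	cfg, err := common.NewConfigFrom(raw)
	if err != nil {
		panic(err)
	}

	// Start from defaults and let Unpack overwrite what the user provided,
	// mirroring the defaultConfig-then-Unpack pattern used by NewInput in this patch.
	config := udp.Config{
		Host:           "localhost:8080",
		MaxMessageSize: 10 * humanize.KiByte,
		Timeout:        time.Minute * 5,
	}
	if err := cfg.Unpack(&config); err != nil {
		panic(err)
	}

	fmt.Printf("resolved UDP config: %+v\n", config)
}

Unpack only overwrites the keys the user actually provided, which is why NewInput can start from defaultConfig and still honour partial configurations.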
+type Input struct { + sync.Mutex + udp *udp.Server + started bool + outlet channel.Outleter +} + +// NewInput creates a new udp input +func NewInput( + cfg *common.Config, + outlet channel.Factory, + context input.Context, +) (input.Input, error) { + cfgwarn.Experimental("UDP input type is used") + + out, err := outlet(cfg, context.DynamicFields) + if err != nil { + return nil, err + } + + config := defaultConfig + if err = cfg.Unpack(&config); err != nil { + return nil, err + } + + forwarder := harvester.NewForwarder(out) + callback := func(data []byte, metadata inputsource.NetworkMetadata) { + e := util.NewData() + e.Event = beat.Event{ + Timestamp: time.Now(), + Meta: common.MapStr{ + "truncated": metadata.Truncated, + }, + Fields: common.MapStr{ + "message": string(data), + "source": metadata.RemoteAddr.String(), + }, + } + forwarder.Send(e) + } + + udp := udp.New(&config.Config, callback) + + return &Input{ + outlet: out, + udp: udp, + started: false, + }, nil +} + +// Run starts and start the UDP server and read events from the socket +func (p *Input) Run() { + p.Lock() + defer p.Unlock() + + if !p.started { + logp.Info("Starting UDP input") + err := p.udp.Start() + if err != nil { + logp.Err("Error running harvester: %v", err) + } + p.started = true + } +} + +// Stop stops the UDP input +func (p *Input) Stop() { + defer p.outlet.Close() + p.Lock() + defer p.Unlock() + + logp.Info("Stopping UDP input") + p.udp.Stop() + p.started = false +} + +// Wait suspends the UDP input +func (p *Input) Wait() { + p.Stop() +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/inputsource.go b/vendor/github.com/elastic/beats/filebeat/inputsource/inputsource.go new file mode 100644 index 00000000..e283490e --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/inputsource.go @@ -0,0 +1 @@ +package inputsource diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/network.go b/vendor/github.com/elastic/beats/filebeat/inputsource/network.go new file mode 100644 index 00000000..699a26f2 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/network.go @@ -0,0 +1,20 @@ +package inputsource + +import ( + "net" +) + +// Network interface implemented by TCP and UDP input source. +type Network interface { + Start() error + Stop() +} + +// NetworkMetadata defines common information that we can retrieve from a remote connection. +type NetworkMetadata struct { + RemoteAddr net.Addr + Truncated bool +} + +// NetworkFunc defines callback executed when a new event is received from a network source. +type NetworkFunc = func(data []byte, metadata NetworkMetadata) diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/network_metadata.go b/vendor/github.com/elastic/beats/filebeat/inputsource/network_metadata.go new file mode 100644 index 00000000..e283490e --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/network_metadata.go @@ -0,0 +1 @@ +package inputsource diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/client.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/client.go new file mode 100644 index 00000000..7d2e0aa1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/client.go @@ -0,0 +1,79 @@ +package tcp + +import ( + "bufio" + "net" + "time" + + "github.com/pkg/errors" + + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/libbeat/logp" +) + +// Client is a remote client. 
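Editor's note: everything the TCP and UDP servers hand back to filebeat flows through the small inputsource contract above, a NetworkFunc callback plus a NetworkMetadata value. A minimal, self-contained sketch of that seam (the callback body and the 127.0.0.1 address are purely illustrative):

package main

import (
	"fmt"
	"net"

	"github.com/elastic/beats/filebeat/inputsource"
)

// logPacket is a toy NetworkFunc: the TCP and UDP servers in this patch hand
// every decoded message to a callback with this exact shape.
func logPacket(data []byte, metadata inputsource.NetworkMetadata) {
	fmt.Printf("from=%v truncated=%v message=%q\n",
		metadata.RemoteAddr, metadata.Truncated, string(data))
}

func main() {
	// Exercise the callback directly with a hand-built metadata value,
	// the same way input_test.go builds one for createEvent above.
	addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")}
	var cb inputsource.NetworkFunc = logPacket
	cb([]byte("hello"), inputsource.NetworkMetadata{RemoteAddr: addr})
}

In the real inputs the callback is where createEvent and forwarder.Send are wired in, as shown earlier in this patch.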
+type client struct { + conn net.Conn + log *logp.Logger + callback inputsource.NetworkFunc + done chan struct{} + metadata inputsource.NetworkMetadata + splitFunc bufio.SplitFunc + maxMessageSize uint64 + timeout time.Duration +} + +func newClient( + conn net.Conn, + log *logp.Logger, + callback inputsource.NetworkFunc, + splitFunc bufio.SplitFunc, + maxReadMessage uint64, + timeout time.Duration, +) *client { + client := &client{ + conn: conn, + log: log.With("address", conn.RemoteAddr()), + callback: callback, + done: make(chan struct{}), + splitFunc: splitFunc, + maxMessageSize: maxReadMessage, + timeout: timeout, + metadata: inputsource.NetworkMetadata{ + RemoteAddr: conn.RemoteAddr(), + }, + } + return client +} + +func (c *client) handle() error { + r := NewResetableLimitedReader(NewDeadlineReader(c.conn, c.timeout), c.maxMessageSize) + buf := bufio.NewReader(r) + scanner := bufio.NewScanner(buf) + scanner.Split(c.splitFunc) + + for scanner.Scan() { + err := scanner.Err() + if err != nil { + // we are forcing a close on the socket, lets ignore any error that could happen. + select { + case <-c.done: + break + default: + } + // This is a user defined limit and we should notify the user. + if IsMaxReadBufferErr(err) { + c.log.Errorw("client errors", "error", err) + } + return errors.Wrap(err, "tcp client error") + } + r.Reset() + c.callback(scanner.Bytes(), c.metadata) + } + return nil +} + +func (c *client) close() { + close(c.done) + c.conn.Close() +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/config.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/config.go new file mode 100644 index 00000000..afce4d19 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/config.go @@ -0,0 +1,29 @@ +package tcp + +import ( + "fmt" + "time" + + "github.com/elastic/beats/libbeat/common/cfgtype" +) + +// Name is the human readable name and identifier. +const Name = "tcp" + +type size uint64 + +// Config exposes the tcp configuration. +type Config struct { + Host string `config:"host"` + LineDelimiter string `config:"line_delimiter" validate:"nonzero"` + Timeout time.Duration `config:"timeout" validate:"nonzero,positive"` + MaxMessageSize cfgtype.ByteSize `config:"max_message_size" validate:"nonzero,positive"` +} + +// Validate validates the Config option for the tcp input. +func (c *Config) Validate() error { + if len(c.Host) == 0 { + return fmt.Errorf("need to specify the host using the `host:port` syntax") + } + return nil +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/conn.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/conn.go new file mode 100644 index 00000000..a2a40bb0 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/conn.go @@ -0,0 +1,72 @@ +package tcp + +import ( + "io" + "net" + "time" + + "github.com/pkg/errors" +) + +// ErrMaxReadBuffer returns when too many bytes was read on the io.Reader +var ErrMaxReadBuffer = errors.New("max read buffer reached") + +// ResetableLimitedReader is based on LimitedReader but allow to reset the byte read and return a specific +// error when we reach the limit. 
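Editor's note: the TCP Config shown just above validates in two layers: the struct tags reject zero or negative values during unpacking, and Config.Validate additionally insists on a host:port value. A small sketch of how both failures surface from Unpack, assuming the vendored import paths in this patch and an arbitrary localhost:9000 endpoint:

package main

import (
	"fmt"
	"time"

	"github.com/dustin/go-humanize"

	"github.com/elastic/beats/filebeat/inputsource/tcp"
	"github.com/elastic/beats/libbeat/common"
)

func main() {
	// Same defaults the filebeat tcp input in this patch starts from.
	defaults := tcp.Config{
		LineDelimiter:  "\n",
		Timeout:        time.Minute * 5,
		MaxMessageSize: 20 * humanize.MiByte,
	}

	// No "host" key: Config.Validate is expected to reject this during Unpack.
	empty, _ := common.NewConfigFrom(map[string]interface{}{})
	cfg := defaults
	if err := empty.Unpack(&cfg); err != nil {
		fmt.Println("rejected:", err)
	}

	// With a host, the same defaults should pass both validation layers.
	withHost, _ := common.NewConfigFrom(map[string]interface{}{"host": "localhost:9000"})
	cfg = defaults
	if err := withHost.Unpack(&cfg); err == nil {
		fmt.Printf("accepted: %+v\n", cfg)
	}
}

This is the same defaultConfig-then-Unpack flow the tcp input's NewInput uses, so misconfigurations fail before a listener is ever opened.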
+type ResetableLimitedReader struct { + reader io.Reader + maxReadBuffer uint64 + byteRead uint64 +} + +// NewResetableLimitedReader returns a new ResetableLimitedReader +func NewResetableLimitedReader(reader io.Reader, maxReadBuffer uint64) *ResetableLimitedReader { + return &ResetableLimitedReader{ + reader: reader, + maxReadBuffer: maxReadBuffer, + } +} + +// Read reads the specified amount of byte +func (m *ResetableLimitedReader) Read(p []byte) (n int, err error) { + if m.byteRead >= m.maxReadBuffer { + return 0, ErrMaxReadBuffer + } + n, err = m.reader.Read(p) + m.byteRead += uint64(n) + return +} + +// Reset resets the number of byte read +func (m *ResetableLimitedReader) Reset() { + m.byteRead = 0 +} + +// IsMaxReadBufferErr returns true when the error is ErrMaxReadBuffer +func IsMaxReadBufferErr(err error) bool { + return err == ErrMaxReadBuffer +} + +// DeadlineReader allow read to a io.Reader to timeout, the timeout is refreshed on every read. +type DeadlineReader struct { + conn net.Conn + timeout time.Duration +} + +// NewDeadlineReader returns a new DeadlineReader +func NewDeadlineReader(c net.Conn, timeout time.Duration) *DeadlineReader { + return &DeadlineReader{ + conn: c, + timeout: timeout, + } +} + +// Read reads the number of bytes from the reader +func (d *DeadlineReader) Read(p []byte) (n int, err error) { + d.refresh() + return d.conn.Read(p) +} + +func (d *DeadlineReader) refresh() { + d.conn.SetDeadline(time.Now().Add(d.timeout)) +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/conn_test.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/conn_test.go new file mode 100644 index 00000000..aaf2bea6 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/conn_test.go @@ -0,0 +1,43 @@ +package tcp + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResetableLimitedReader(t *testing.T) { + maxReadBuffer := 400 + + t.Run("WhenMaxReadIsReachedInMultipleRead", func(t *testing.T) { + r := strings.NewReader(randomString(maxReadBuffer * 2)) + m := NewResetableLimitedReader(r, uint64(maxReadBuffer)) + toRead := make([]byte, maxReadBuffer) + _, err := m.Read(toRead) + assert.NoError(t, err) + toRead = make([]byte, 300) + _, err = m.Read(toRead) + assert.Equal(t, ErrMaxReadBuffer, err) + }) + + t.Run("WhenMaxReadIsNotReached", func(t *testing.T) { + r := strings.NewReader(randomString(maxReadBuffer * 2)) + m := NewResetableLimitedReader(r, uint64(maxReadBuffer)) + toRead := make([]byte, maxReadBuffer) + _, err := m.Read(toRead) + assert.NoError(t, err) + }) + + t.Run("WhenResetIsCalled", func(t *testing.T) { + r := strings.NewReader(randomString(maxReadBuffer * 2)) + m := NewResetableLimitedReader(r, uint64(maxReadBuffer)) + toRead := make([]byte, maxReadBuffer) + _, err := m.Read(toRead) + assert.NoError(t, err) + m.Reset() + toRead = make([]byte, 300) + _, err = m.Read(toRead) + assert.NoError(t, err) + }) +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/scan.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/scan.go new file mode 100644 index 00000000..4e948158 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/scan.go @@ -0,0 +1,31 @@ +package tcp + +import ( + "bufio" + "bytes" +) + +// factoryDelimiter return a function to split line using a custom delimiter supporting multibytes +// delimiter, the delimiter is stripped from the returned value. 
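Editor's note: factoryDelimiter, defined next, is essentially a delimiter-aware bufio.SplitFunc. For readers unfamiliar with that interface, here is a self-contained, standard-library-only sketch of the same idea; the "<END>" delimiter is an arbitrary example value, comparable to a custom line_delimiter setting:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// delimSplit emits tokens separated by an arbitrary multi-byte delimiter and
// strips the delimiter from the returned token, in the spirit of factoryDelimiter.
func delimSplit(delimiter []byte) bufio.SplitFunc {
	return func(data []byte, atEOF bool) (int, []byte, error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := bytes.Index(data, delimiter); i >= 0 {
			return i + len(delimiter), data[:i], nil
		}
		if atEOF {
			// No trailing delimiter: flush whatever is left as a last token.
			return len(data), data, nil
		}
		return 0, nil, nil // ask the scanner for more data
	}
}

func main() {
	scanner := bufio.NewScanner(strings.NewReader("hello<END>bonjour<END>hey"))
	scanner.Split(delimSplit([]byte("<END>")))
	for scanner.Scan() {
		fmt.Println(scanner.Text()) // hello, bonjour, hey
	}
}

The real implementation below additionally guards against a trailing delimiter via dropDelimiter, and the server wires it in through splitFunc, which falls back to bufio.ScanLines for the default "\n" delimiter.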
+func factoryDelimiter(delimiter []byte) bufio.SplitFunc { + return func(data []byte, eof bool) (int, []byte, error) { + if eof && len(data) == 0 { + return 0, nil, nil + } + if i := bytes.Index(data, delimiter); i >= 0 { + return i + len(delimiter), dropDelimiter(data[0:i], delimiter), nil + } + if eof { + return len(data), dropDelimiter(data, delimiter), nil + } + return 0, nil, nil + } +} + +func dropDelimiter(data []byte, delimiter []byte) []byte { + if len(data) > len(delimiter) && + bytes.Equal(data[len(data)-len(delimiter):len(data)], delimiter) { + return data[0 : len(data)-len(delimiter)] + } + return data +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/scan_test.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/scan_test.go new file mode 100644 index 00000000..87b28431 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/scan_test.go @@ -0,0 +1,91 @@ +package tcp + +import ( + "bufio" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCustomDelimiter(t *testing.T) { + tests := []struct { + name string + text string + expected []string + delimiter []byte + }{ + { + name: "Multiple chars delimiter", + text: "hellobonjourholahey", + expected: []string{ + "hello", + "bonjour", + "hola", + "hey", + }, + delimiter: []byte(""), + }, + { + name: "Multiple chars delimiter with half starting delimiter", + text: "hellobonjourhey", + expected: []string{ + "hello", + "bonjour"), + }, + { + name: "Multiple chars delimiter with half ending delimiter", + text: "helloEND>holahey", + expected: []string{ + "hello", + "END>hola", + "hey", + }, + delimiter: []byte(""), + }, + { + name: "Delimiter end of string", + text: "hellobonjourholahey", + expected: []string{ + "hello", + "bonjour", + "hola", + "hey", + }, + delimiter: []byte(""), + }, + { + name: "Single char delimiter", + text: "hello;bonjour;hola;hey", + expected: []string{ + "hello", + "bonjour", + "hola", + "hey", + }, + delimiter: []byte(";"), + }, + { + name: "Empty string", + text: "", + expected: []string(nil), + delimiter: []byte(";"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + buf := strings.NewReader(test.text) + scanner := bufio.NewScanner(buf) + scanner.Split(factoryDelimiter(test.delimiter)) + var elements []string + for scanner.Scan() { + elements = append(elements, scanner.Text()) + } + assert.EqualValues(t, test.expected, elements) + }) + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/server.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/server.go new file mode 100644 index 00000000..66bccc1d --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/server.go @@ -0,0 +1,160 @@ +package tcp + +import ( + "bufio" + "bytes" + "fmt" + "net" + "sync" + + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/libbeat/logp" +) + +// Server represent a TCP server +type Server struct { + sync.RWMutex + callback inputsource.NetworkFunc + config *Config + Listener net.Listener + clients map[*client]struct{} + wg sync.WaitGroup + done chan struct{} + splitFunc bufio.SplitFunc + log *logp.Logger +} + +// New creates a new tcp server +func New( + config *Config, + callback inputsource.NetworkFunc, +) (*Server, error) { + + if len(config.LineDelimiter) == 0 { + return nil, fmt.Errorf("empty line delimiter") + } + + sf := splitFunc([]byte(config.LineDelimiter)) + return &Server{ + config: config, + callback: callback, + clients: 
make(map[*client]struct{}, 0), + done: make(chan struct{}), + splitFunc: sf, + log: logp.NewLogger("tcp").With("address", config.Host), + }, nil +} + +// Start listen to the TCP socket. +func (s *Server) Start() error { + var err error + s.Listener, err = net.Listen("tcp", s.config.Host) + if err != nil { + return err + } + + s.log.Info("Started listening for TCP connection") + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.run() + }() + return nil +} + +// Run start and run a new TCP listener to receive new data +func (s *Server) run() { + for { + conn, err := s.Listener.Accept() + if err != nil { + select { + case <-s.done: + return + default: + s.log.Debugw("Can not accept the connection", "error", err) + continue + } + } + + client := newClient( + conn, + s.log, + s.callback, + s.splitFunc, + uint64(s.config.MaxMessageSize), + s.config.Timeout, + ) + + s.log.Debugw("New client", "address", conn.RemoteAddr(), "total", s.clientsCount()) + s.wg.Add(1) + go func() { + defer logp.Recover("recovering from a tcp client crash") + defer s.wg.Done() + defer conn.Close() + + s.registerClient(client) + defer s.unregisterClient(client) + + err := client.handle() + if err != nil { + s.log.Debugw("Client error", "error", err) + } + + s.log.Debugw("Client disconnected", "address", conn.RemoteAddr(), "total", s.clientsCount()) + }() + } +} + +// Stop stops accepting new incoming TCP connection and close any active clients +func (s *Server) Stop() { + s.log.Info("Stopping TCP server") + close(s.done) + s.Listener.Close() + for _, client := range s.allClients() { + client.close() + } + s.wg.Wait() + s.log.Info("TCP server stopped") +} + +func (s *Server) registerClient(client *client) { + s.Lock() + defer s.Unlock() + s.clients[client] = struct{}{} +} + +func (s *Server) unregisterClient(client *client) { + s.Lock() + defer s.Unlock() + delete(s.clients, client) +} + +func (s *Server) allClients() []*client { + s.RLock() + defer s.RUnlock() + currentClients := make([]*client, len(s.clients)) + idx := 0 + for client := range s.clients { + currentClients[idx] = client + idx++ + } + return currentClients +} + +func (s *Server) clientsCount() int { + s.RLock() + defer s.RUnlock() + return len(s.clients) +} + +func splitFunc(lineDelimiter []byte) bufio.SplitFunc { + ld := []byte(lineDelimiter) + if bytes.Equal(ld, []byte("\n")) { + // This will work for most usecases and will also strip \r if present. 
+ // CustomDelimiter, need to match completely and the delimiter will be completely removed from + // the returned byte slice + return bufio.ScanLines + } + return factoryDelimiter(ld) +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/server_test.go b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/server_test.go new file mode 100644 index 00000000..19a52976 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/tcp/server_test.go @@ -0,0 +1,245 @@ +package tcp + +import ( + "fmt" + "math/rand" + "net" + "strings" + "testing" + "time" + + "github.com/dustin/go-humanize" + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/filebeat/inputsource" + "github.com/elastic/beats/libbeat/common" +) + +var defaultConfig = Config{ + LineDelimiter: "\n", + Timeout: time.Minute * 5, + MaxMessageSize: 20 * humanize.MiByte, +} + +type info struct { + message string + mt inputsource.NetworkMetadata +} + +func TestErrorOnEmptyLineDelimiter(t *testing.T) { + cfg := map[string]interface{}{ + "line_delimiter": "", + } + + c, _ := common.NewConfigFrom(cfg) + config := defaultConfig + err := c.Unpack(&config) + assert.Error(t, err) +} + +func TestReceiveEventsAndMetadata(t *testing.T) { + expectedMessages := generateMessages(5, 100) + largeMessages := generateMessages(10, 4096) + + tests := []struct { + name string + cfg map[string]interface{} + expectedMessages []string + messageSent string + }{ + { + name: "NewLine", + cfg: map[string]interface{}{}, + expectedMessages: expectedMessages, + messageSent: strings.Join(expectedMessages, "\n"), + }, + { + name: "NewLineWithCR", + cfg: map[string]interface{}{}, + expectedMessages: expectedMessages, + messageSent: strings.Join(expectedMessages, "\r\n"), + }, + { + name: "CustomDelimiter", + cfg: map[string]interface{}{ + "line_delimiter": ";", + }, + expectedMessages: expectedMessages, + messageSent: strings.Join(expectedMessages, ";"), + }, + { + name: "MultipleCharsCustomDelimiter", + cfg: map[string]interface{}{ + "line_delimiter": "", + }, + expectedMessages: expectedMessages, + messageSent: strings.Join(expectedMessages, ""), + }, + { + name: "SingleCharCustomDelimiterMessageWithoutBoudaries", + cfg: map[string]interface{}{ + "line_delimiter": ";", + }, + expectedMessages: []string{"hello"}, + messageSent: "hello", + }, + { + name: "MultipleCharCustomDelimiterMessageWithoutBoundaries", + cfg: map[string]interface{}{ + "line_delimiter": "", + }, + expectedMessages: []string{"hello"}, + messageSent: "hello", + }, + { + name: "NewLineMessageWithoutBoundaries", + cfg: map[string]interface{}{ + "line_delimiter": "\n", + }, + expectedMessages: []string{"hello"}, + messageSent: "hello", + }, + { + name: "NewLineLargeMessagePayload", + cfg: map[string]interface{}{ + "line_delimiter": "\n", + }, + expectedMessages: largeMessages, + messageSent: strings.Join(largeMessages, "\n"), + }, + { + name: "CustomLargeMessagePayload", + cfg: map[string]interface{}{ + "line_delimiter": ";", + }, + expectedMessages: largeMessages, + messageSent: strings.Join(largeMessages, ";"), + }, + { + name: "MaxReadBufferReached", + cfg: map[string]interface{}{}, + expectedMessages: []string{}, + messageSent: randomString(900000), + }, + { + name: "MaxReadBufferReachedUserConfigured", + cfg: map[string]interface{}{ + "max_read_message": 50000, + }, + expectedMessages: []string{}, + messageSent: randomString(600000), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ch := make(chan *info, 
len(test.expectedMessages)) + defer close(ch) + to := func(message []byte, mt inputsource.NetworkMetadata) { + ch <- &info{message: string(message), mt: mt} + } + test.cfg["host"] = "localhost:0" + cfg, _ := common.NewConfigFrom(test.cfg) + config := defaultConfig + err := cfg.Unpack(&config) + if !assert.NoError(t, err) { + return + } + server, err := New(&config, to) + if !assert.NoError(t, err) { + return + } + err = server.Start() + if !assert.NoError(t, err) { + return + } + defer server.Stop() + + conn, err := net.Dial("tcp", server.Listener.Addr().String()) + assert.NoError(t, err) + fmt.Fprint(conn, test.messageSent) + conn.Close() + + var events []*info + + for len(events) < len(test.expectedMessages) { + select { + case event := <-ch: + events = append(events, event) + default: + } + } + + for idx, e := range events { + assert.Equal(t, test.expectedMessages[idx], e.message) + assert.NotNil(t, e.mt.RemoteAddr) + } + }) + } +} + +func TestReceiveNewEventsConcurrently(t *testing.T) { + workers := 4 + eventsCount := 100 + ch := make(chan *info, eventsCount*workers) + defer close(ch) + to := func(message []byte, mt inputsource.NetworkMetadata) { + ch <- &info{message: string(message), mt: mt} + } + cfg, err := common.NewConfigFrom(map[string]interface{}{"host": ":0"}) + if !assert.NoError(t, err) { + return + } + config := defaultConfig + err = cfg.Unpack(&config) + if !assert.NoError(t, err) { + return + } + server, err := New(&config, to) + if !assert.NoError(t, err) { + return + } + err = server.Start() + if !assert.NoError(t, err) { + return + } + defer server.Stop() + + samples := generateMessages(eventsCount, 1024) + for w := 0; w < workers; w++ { + go func() { + conn, err := net.Dial("tcp", server.Listener.Addr().String()) + defer conn.Close() + assert.NoError(t, err) + for _, sample := range samples { + fmt.Fprintln(conn, sample) + } + }() + } + + var events []*info + for len(events) < eventsCount*workers { + select { + case event := <-ch: + events = append(events, event) + default: + } + } +} + +func randomString(l int) string { + charsets := []byte("abcdefghijklmnopqrstuvwzyzABCDEFGHIJKLMNOPQRSTUVWZYZ0123456789") + message := make([]byte, l) + for i := range message { + message[i] = charsets[rand.Intn(len(charsets))] + } + return string(message) +} + +func generateMessages(c int, l int) []string { + messages := make([]string, c) + for i := range messages { + messages[i] = randomString(l) + } + return messages +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/udp/config.go b/vendor/github.com/elastic/beats/filebeat/inputsource/udp/config.go new file mode 100644 index 00000000..740646d1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/udp/config.go @@ -0,0 +1,14 @@ +package udp + +import ( + "time" + + "github.com/elastic/beats/libbeat/common/cfgtype" +) + +// Config options for the UDPServer +type Config struct { + Host string `config:"host"` + MaxMessageSize cfgtype.ByteSize `config:"max_message_size" validate:"positive,nonzero"` + Timeout time.Duration `config:"timeout"` +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/udp/server.go b/vendor/github.com/elastic/beats/filebeat/inputsource/udp/server.go new file mode 100644 index 00000000..e5cf5e93 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/udp/server.go @@ -0,0 +1,112 @@ +package udp + +import ( + "net" + "runtime" + "strings" + "sync" + "time" + + "github.com/elastic/beats/filebeat/inputsource" + 
"github.com/elastic/beats/libbeat/logp" +) + +// Name is the human readable name and identifier. +const Name = "udp" + +const windowErrBuffer = "A message sent on a datagram socket was larger than the internal message" + + " buffer or some other network limit, or the buffer used to receive a datagram into was smaller" + + " than the datagram itself." + +// Server creates a simple UDP Server and listen to a specific host:port and will send any +// event received to the callback method. +type Server struct { + config *Config + callback inputsource.NetworkFunc + Listener net.PacketConn + log *logp.Logger + wg sync.WaitGroup + done chan struct{} +} + +// New returns a new UDPServer instance. +func New(config *Config, callback inputsource.NetworkFunc) *Server { + return &Server{ + config: config, + callback: callback, + log: logp.NewLogger("udp").With("address", config.Host), + done: make(chan struct{}), + } +} + +// Start starts the UDP Server and listen to incoming events. +func (u *Server) Start() error { + var err error + u.Listener, err = net.ListenPacket("udp", u.config.Host) + if err != nil { + return err + } + u.log.Info("Started listening for UDP connection") + u.wg.Add(1) + go func() { + defer u.wg.Done() + u.run() + }() + return nil +} + +func (u *Server) run() { + for { + select { + case <-u.done: + return + default: + } + + buffer := make([]byte, u.config.MaxMessageSize) + u.Listener.SetDeadline(time.Now().Add(u.config.Timeout)) + + // If you are using Windows and you are using a fixed buffer and you get a datagram which + // is bigger than the specified size of the buffer, it will return an `err` and the buffer will + // contains a subset of the data. + // + // On Unix based system, the buffer will be truncated but no error will be returned. + length, addr, err := u.Listener.ReadFrom(buffer) + if err != nil { + // don't log any deadline events. + e, ok := err.(net.Error) + if ok && e.Timeout() { + continue + } + + u.log.Errorw("Error reading from the socket", "error", err) + + // On Windows send the current buffer and mark it as truncated. + // The buffer will have content but length will return 0, addr will be nil. + if isLargerThanBuffer(err) { + u.callback(buffer, inputsource.NetworkMetadata{RemoteAddr: addr, Truncated: true}) + continue + } + } + + if length > 0 { + u.callback(buffer[:length], inputsource.NetworkMetadata{RemoteAddr: addr}) + } + } +} + +// Stop stops the current udp server. 
+func (u *Server) Stop() { + u.log.Info("Stopping UDP server") + u.Listener.Close() + close(u.done) + u.wg.Wait() + u.log.Info("UDP server stopped") +} + +func isLargerThanBuffer(err error) bool { + if runtime.GOOS != "windows" { + return false + } + return strings.Contains(err.Error(), windowErrBuffer) +} diff --git a/vendor/github.com/elastic/beats/filebeat/inputsource/udp/server_test.go b/vendor/github.com/elastic/beats/filebeat/inputsource/udp/server_test.go new file mode 100644 index 00000000..9c72a956 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/inputsource/udp/server_test.go @@ -0,0 +1,81 @@ +package udp + +import ( + "net" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/filebeat/inputsource" +) + +const maxMessageSize = 20 +const timeout = time.Second * 15 + +type info struct { + message []byte + mt inputsource.NetworkMetadata +} + +func TestReceiveEventFromUDP(t *testing.T) { + tests := []struct { + name string + message []byte + expected []byte + }{ + { + name: "Sending a message under the MaxMessageSize limit", + message: []byte("Hello world"), + expected: []byte("Hello world"), + }, + { + name: "Sending a message over the MaxMessageSize limit will truncate the message", + message: []byte("Hello world not so nice"), + expected: []byte("Hello world not so n"), + }, + } + + ch := make(chan info) + host := "localhost:0" + config := &Config{Host: host, MaxMessageSize: maxMessageSize, Timeout: timeout} + fn := func(message []byte, metadata inputsource.NetworkMetadata) { + ch <- info{message: message, mt: metadata} + } + s := New(config, fn) + err := s.Start() + if !assert.NoError(t, err) { + return + } + defer s.Stop() + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + conn, err := net.Dial("udp", s.Listener.LocalAddr().String()) + if !assert.NoError(t, err) { + return + } + defer conn.Close() + + _, err = conn.Write(test.message) + if !assert.NoError(t, err) { + return + } + info := <-ch + assert.Equal(t, test.expected, info.message) + if runtime.GOOS == "windows" { + if len(test.expected) < len(test.message) { + assert.Nil(t, info.mt.RemoteAddr) + assert.True(t, info.mt.Truncated) + } else { + assert.NotNil(t, info.mt.RemoteAddr) + assert.False(t, info.mt.Truncated) + } + } else { + assert.NotNil(t, info.mt.RemoteAddr) + assert.False(t, info.mt.Truncated) + } + }) + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/main.go b/vendor/github.com/elastic/beats/filebeat/main.go index c7b99619..a40fadac 100644 --- a/vendor/github.com/elastic/beats/filebeat/main.go +++ b/vendor/github.com/elastic/beats/filebeat/main.go @@ -7,12 +7,12 @@ import ( ) // The basic model of execution: -// - prospector: finds files in paths/globs to harvest, starts harvesters +// - input: finds files in paths/globs to harvest, starts harvesters // - harvester: reads a file, sends events to the spooler // - spooler: buffers events until ready to flush to the publisher // - publisher: writes to the network, notifies registrar // - registrar: records positions of files read -// Finally, prospector uses the registrar information, on restart, to +// Finally, input uses the registrar information, on restart, to // determine where in each file to restart a harvester. 
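Editor's note: the prospector-to-input rename in the comment above is not only cosmetic; the module test fixtures further down now expect events to carry both prospector.type and input.type while the old name is phased out. A hedged sketch of a consumer that prefers the new field and falls back to the old one (the inputType helper is made up for illustration; beat.Event and common.MapStr are the types used throughout this patch):

package main

import (
	"fmt"
	"time"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
)

// inputType prefers the new "input.type" field and falls back to the
// deprecated "prospector.type" field, matching the duplicated keys in the
// module test fixtures below.
func inputType(e *beat.Event) string {
	for _, key := range []string{"input", "prospector"} {
		v, err := e.GetValue(key)
		if err != nil {
			continue
		}
		if m, ok := v.(common.MapStr); ok {
			if t, ok := m["type"].(string); ok {
				return t
			}
		}
	}
	return ""
}

func main() {
	event := &beat.Event{
		Timestamp: time.Now(),
		Fields: common.MapStr{
			"prospector": common.MapStr{"type": "log"},
			"input":      common.MapStr{"type": "log"},
		},
	}
	fmt.Println(inputType(event)) // log
}

Reading both keys keeps downstream consumers working against events produced under either naming scheme during the deprecation window.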
func main() { if err := cmd.RootCmd.Execute(); err != nil { diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache2/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/apache2/_meta/config.reference.yml index 91117963..ad61cd6f 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache2/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/apache2/_meta/config.reference.yml @@ -7,9 +7,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Error logs #error: @@ -19,6 +19,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache2/access/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/apache2/access/manifest.yml index 7639a4f3..2d75da4b 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache2/access/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/apache2/access/manifest.yml @@ -12,7 +12,7 @@ var: - "C:/Program Files/Apache Software Foundation/Apache2.*/logs/access.log*" ingest_pipeline: ingest/default.json -prospector: config/access.yml +input: config/access.yml requires.processors: - name: user_agent diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache2/access/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/apache2/access/test/test.log-expected.json index dc2c9a88..426e3ec5 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache2/access/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/apache2/access/test/test.log-expected.json @@ -33,6 +33,9 @@ }, "prospector": { "type": "log" + }, + "input": { + "type": "log" } }, "fields": { @@ -89,6 +92,9 @@ }, "prospector": { "type": "log" + }, + "input": { + "type": "log" } }, "fields": { @@ -128,6 +134,9 @@ }, "prospector": { "type": "log" + }, + "input": { + "type": "log" } }, "fields": { @@ -177,6 +186,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "read_timestamp": "2017-05-29T19:34:14.378Z", "source": "/home/exekias/go/src/github.com/elastic/beats/filebeat/apache2.log" }, diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache2/error/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/apache2/error/manifest.yml index 2de0969b..0d8abdc0 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache2/error/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/apache2/error/manifest.yml @@ -11,4 +11,4 @@ var: - "C:/Program Files/Apache Software Foundation/Apache2.*/logs/error.log*" ingest_pipeline: ingest/pipeline.json -prospector: config/error.yml +input: config/error.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache2/error/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/apache2/error/test/test.log-expected.json index d72695ad..e98c5ba4 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache2/error/test/test.log-expected.json +++ 
b/vendor/github.com/elastic/beats/filebeat/module/apache2/error/test/test.log-expected.json @@ -23,6 +23,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "module/apache2/error/test/test.log", "fields": { "pipeline_id": "apache2-error-pipeline", @@ -56,6 +59,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "module/apache2/error/test/test.log", "fields": { "pipeline_id": "apache2-error-pipeline", @@ -86,6 +92,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "module/apache2/error/test/test.log", "fields": { "pipeline_id": "apache2-error-pipeline", diff --git a/vendor/github.com/elastic/beats/filebeat/module/auditd/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/auditd/_meta/config.reference.yml index 5b0d02ba..57776242 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/auditd/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/auditd/_meta/config.reference.yml @@ -6,6 +6,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/auditd/log/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/auditd/log/manifest.yml index 5c8bf0a9..99ff50e7 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/auditd/log/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/auditd/log/manifest.yml @@ -8,7 +8,7 @@ var: os.windows: [] ingest_pipeline: ingest/pipeline.json -prospector: config/log.yml +input: config/log.yml requires.processors: - name: geoip diff --git a/vendor/github.com/elastic/beats/filebeat/module/auditd/log/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/auditd/log/test/test.log-expected.json index 65590185..3aed3845 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/auditd/log/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/auditd/log/test/test.log-expected.json @@ -10,6 +10,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/me/go/src/github.com/elastic/beats/filebeat/module/auditd/log/test/test.log", "fileset": { "module": "auditd", @@ -49,6 +52,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/me/go/src/github.com/elastic/beats/filebeat/module/auditd/log/test/test.log", "fileset": { "module": "auditd", diff --git a/vendor/github.com/elastic/beats/filebeat/module/icinga/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/icinga/_meta/config.reference.yml index ed6d8c69..bbddd5bd 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/icinga/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/icinga/_meta/config.reference.yml @@ -7,9 +7,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Debug logs #debug: @@ -19,9 +19,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). 
Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Startup logs #startup: @@ -31,6 +31,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/manifest.yml index 32b3826c..50881b6d 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/manifest.yml @@ -10,4 +10,4 @@ var: - c:/programdata/icinga2/var/log/icinga2/debug.log* ingest_pipeline: ingest/pipeline.json -prospector: config/debug.yml +input: config/debug.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/test/test.log-expected.json index e2aee136..20e628b9 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/icinga/debug/test/test.log-expected.json @@ -15,6 +15,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "debug": { "severity": "debug", @@ -53,6 +56,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "debug": { "severity": "debug", @@ -91,6 +97,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "debug": { "severity": "notice", diff --git a/vendor/github.com/elastic/beats/filebeat/module/icinga/main/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/icinga/main/manifest.yml index 1ce31416..5ab760df 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/icinga/main/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/icinga/main/manifest.yml @@ -10,4 +10,4 @@ var: - c:/programdata/icinga2/var/log/icinga2/icinga2.log* ingest_pipeline: ingest/pipeline.json -prospector: config/main.yml +input: config/main.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/icinga/main/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/icinga/main/test/test.log-expected.json index 9b30dad5..e1019e29 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/icinga/main/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/icinga/main/test/test.log-expected.json @@ -15,6 +15,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "main": { "severity": "warning", @@ -53,6 +56,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "main": { "severity": "information", @@ -91,6 +97,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "main": { "severity": "information", diff --git a/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/manifest.yml index 2872ff87..b749ff46 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/manifest.yml @@ -10,4 +10,4 @@ var: - 
c:/programdata/icinga2/var/log/icinga2/startup.log ingest_pipeline: ingest/pipeline.json -prospector: config/startup.yml +input: config/startup.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/test/test.log-expected.json index b3830b74..68450a09 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/icinga/startup/test/test.log-expected.json @@ -15,6 +15,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "startup": { "severity": "information", @@ -53,6 +56,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "icinga": { "startup": { "severity": "information", diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/config.reference.yml new file mode 100644 index 00000000..aebe3e38 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/config.reference.yml @@ -0,0 +1,24 @@ +#- module: iis + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/config.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/config.yml new file mode 100644 index 00000000..0ed84f14 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/config.yml @@ -0,0 +1,17 @@ +- module: iis + # Access logs + access: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Error logs + error: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/docs.asciidoc new file mode 100644 index 00000000..bd3efcbc --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/docs.asciidoc @@ -0,0 +1,66 @@ +:modulename: iis + +== IIS module + +The +{modulename}+ module parses access and error logs created by the +Internet Information Services (IIS) HTTP server. + +include::../include/what-happens.asciidoc[] + +[float] +=== Compatibility + +This module requires the +{elasticsearch-plugins}/ingest-user-agent.html[ingest-user-agent] and +{elasticsearch-plugins}/ingest-geoip.html[ingest-geoip] Elasticsearch plugins. + +The IIS module was tested with logs from version 10. + +include::../include/running-modules.asciidoc[] + +[float] +=== Example dashboard + +This module comes with a sample dashboard. 
For example: + +[role="screenshot"] +image::./images/kibana-iis.png[] + +include::../include/configuring-intro.asciidoc[] + +The following example shows how to set paths in the +modules.d/{modulename}.yml+ +file to override the default paths for IIS access logs and error logs: + +["source","yaml",subs="attributes"] +----- +- module: iis + access: + enabled: true + var.paths: ["C:/inetpub/logs/LogFiles/*/*.log"] + error: + enabled: true + var.paths: ["C:/Windows/System32/LogFiles/HTTPERR/*.log"] +----- + +To specify the same settings at the command line, you use: + +["source","sh",subs="attributes"] +----- +./{beatname_lc} --modules {modulename} -M "iis.access.var.paths=[C:/inetpub/logs/LogFiles/*/*.log]" -M "iis.error.var.paths=[C:/Windows/System32/LogFiles/HTTPERR/*.log]" +----- + + +//set the fileset name used in the included example +:fileset_ex: access + +include::../include/config-option-intro.asciidoc[] + +[float] +==== `access` log fileset settings + +include::../include/var-paths.asciidoc[] + +[float] +==== `error` log fileset settings + +include::../include/var-paths.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/fields.yml new file mode 100644 index 00000000..686d2927 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/fields.yml @@ -0,0 +1,10 @@ +- key: iis + title: "IIS" + description: > + Module for parsing IIS log files. + fields: + - name: iis + type: group + description: > + Fields from IIS log files. + fields: diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/kibana/6/dashboard/Filebeat-iis.json b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/kibana/6/dashboard/Filebeat-iis.json new file mode 100644 index 00000000..9ca48111 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/_meta/kibana/6/dashboard/Filebeat-iis.json @@ -0,0 +1,120 @@ +{ + "version": "6.1.2", + "objects": [ + { + "id": "eb2db5b0-fe11-11e7-a3b0-d13028918f9f", + "type": "visualization", + "updated_at": "2018-01-20T18:44:17.162Z", + "version": 1, + "attributes": { + "title": "Access map [Filebeat IIS]", + "visState": "{\"title\":\"Access map [Filebeat IIS]\",\"type\":\"tile_map\",\"params\":{\"mapType\":\"Scaled Circle Markers\",\"isDesaturated\":true,\"addTooltip\":true,\"heatClusterSize\":1.5,\"legendPosition\":\"bottomright\",\"mapZoom\":2,\"mapCenter\":[0,0],\"wms\":{\"enabled\":false,\"options\":{\"format\":\"image/png\",\"transparent\":true}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"geohash_grid\",\"schema\":\"segment\",\"params\":{\"field\":\"iis.access.geoip.location\",\"autoPrecision\":true,\"isFilteredByCollar\":true,\"useGeocentroid\":true,\"precision\":2}}]}", + "uiStateJSON": "{}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}" + } + } + }, + { + "id": "f31414b0-fe14-11e7-a3b0-d13028918f9f", + "type": "visualization", + "updated_at": "2018-01-20T19:05:58.905Z", + "version": 1, + "attributes": { + "title": "Response codes over time [Filebeat IIS]", + "visState": "{\"title\":\"Response codes over time [Filebeat 
IIS]\",\"type\":\"histogram\",\"params\":{\"type\":\"histogram\",\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"iis.access.response_code\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}", + "uiStateJSON": "{}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}" + } + } + }, + { + "id": "63129c80-fe12-11e7-a3b0-d13028918f9f", + "type": "visualization", + "updated_at": "2018-01-20T18:47:38.312Z", + "version": 1, + "attributes": { + "title": "Broswers breakdown [Filebeat IIS]", + "visState": "{\"title\":\"Broswers breakdown [Filebeat IIS]\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"iis.access.user_agent.name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"iis.access.user_agent.major\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}", + "uiStateJSON": "{}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}" + } + } + }, + { + "id": "ccd3f9c0-fe12-11e7-a3b0-d13028918f9f", + "type": "visualization", + "updated_at": "2018-01-20T18:51:54.619Z", + "version": 2, + "attributes": { + "title": "Operating systems breakdown [Filebeat IIS]", + "visState": "{\"title\":\"Operating systems breakdown [Filebeat 
IIS]\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"iis.access.user_agent.os_name\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"iis.access.user_agent.os_major\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}", + "uiStateJSON": "{}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}" + } + } + }, + { + "id": "41f38230-fe17-11e7-a3b0-d13028918f9f", + "type": "visualization", + "updated_at": "2018-01-20T19:22:30.227Z", + "version": 1, + "attributes": { + "title": "Error logs over time [Filebeat IIS]", + "visState": "{\"title\":\"Error logs over time [Filebeat IIS]\",\"type\":\"histogram\",\"params\":{\"type\":\"histogram\",\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"iis.error.response_code\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}", + "uiStateJSON": "{}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}" + } + } + }, + { + "id": "c0d02cd0-fe1b-11e7-a3b0-d13028918f9f", + "type": "visualization", + "updated_at": "2018-01-20T19:58:24.005Z", + "version": 2, + "attributes": { + "title": "Top URLs by response code [Filebeat IIS]", + "visState": "{\"title\":\"Top URLs by response code [Filebeat 
IIS]\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"split\",\"params\":{\"field\":\"iis.access.url\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"URL\",\"row\":false}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"iis.access.response_code\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}", + "uiStateJSON": "{}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}" + } + } + }, + { + "id": "4278ad30-fe16-11e7-a3b0-d13028918f9f", + "type": "dashboard", + "updated_at": "2018-01-20T19:57:50.287Z", + "version": 4, + "attributes": { + "title": "[Filebeat IIS] Access and error logs", + "hits": 0, + "description": "Dashboard for the Filebeat IIS module", + "panelsJSON": "[{\"gridData\":{\"h\":3,\"i\":\"1\",\"w\":12,\"x\":0,\"y\":0},\"id\":\"eb2db5b0-fe11-11e7-a3b0-d13028918f9f\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"version\":\"6.1.2\"},{\"gridData\":{\"h\":3,\"i\":\"2\",\"w\":7,\"x\":0,\"y\":3},\"id\":\"f31414b0-fe14-11e7-a3b0-d13028918f9f\",\"panelIndex\":\"2\",\"type\":\"visualization\",\"version\":\"6.1.2\"},{\"gridData\":{\"h\":3,\"i\":\"4\",\"w\":6,\"x\":0,\"y\":9},\"id\":\"63129c80-fe12-11e7-a3b0-d13028918f9f\",\"panelIndex\":\"4\",\"type\":\"visualization\",\"version\":\"6.1.2\"},{\"gridData\":{\"h\":3,\"i\":\"5\",\"w\":6,\"x\":6,\"y\":9},\"id\":\"ccd3f9c0-fe12-11e7-a3b0-d13028918f9f\",\"panelIndex\":\"5\",\"type\":\"visualization\",\"version\":\"6.1.2\"},{\"gridData\":{\"h\":3,\"i\":\"6\",\"w\":5,\"x\":7,\"y\":3},\"id\":\"41f38230-fe17-11e7-a3b0-d13028918f9f\",\"panelIndex\":\"6\",\"type\":\"visualization\",\"version\":\"6.1.2\"},{\"gridData\":{\"h\":3,\"i\":\"7\",\"w\":12,\"x\":0,\"y\":6},\"id\":\"c0d02cd0-fe1b-11e7-a3b0-d13028918f9f\",\"panelIndex\":\"7\",\"type\":\"visualization\",\"version\":\"6.1.2\"}]", + "optionsJSON": "{\"darkTheme\":false,\"hidePanelTitles\":false,\"useMargins\":true}", + "uiStateJSON": "{}", + "version": 1, + "timeRestore": false, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" + } + } + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/access/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/access/_meta/fields.yml new file mode 100644 index 00000000..c38779f1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/access/_meta/fields.yml @@ -0,0 +1,159 @@ +- name: access + type: group + description: > + Contains fields for IIS access logs. + fields: + - name: server_ip + type: keyword + description: > + The server IP address. + - name: method + type: keyword + example: GET + description: > + The request HTTP method. + - name: url + type: keyword + description: > + The request HTTP URL. + - name: query_string + type: keyword + description: > + The request query string, if any. + - name: port + type: long + description: > + The request port number. 
+ - name: user_name + type: keyword + description: > + The user name used when basic authentication is used. + - name: remote_ip + type: keyword + description: > + The client IP address. + - name: referrer + type: keyword + description: > + The HTTP referrer. + - name: response_code + type: long + description: > + The HTTP response code. + - name: sub_status + type: long + description: > + The HTTP substatus code. + - name: win32_status + type: long + description: > + The Windows status code. + - name: request_time_ms + type: long + description: > + The request time in milliseconds. + - name: site_name + type: keyword + description: > + The site name and instance number. + - name: server_name + type: keyword + description: > + The name of the server on which the log file entry was generated. + - name: http_version + type: keyword + description: > + The HTTP version. + - name: cookie + type: keyword + description: > + The content of the cookie sent or received, if any. + - name: hostname + type: keyword + description: > + The host header name, if any. + - name: body_sent.bytes + type: long + format: bytes + description: > + The number of bytes of the server response body. + - name: body_received.bytes + type: long + format: bytes + description: > + The number of bytes of the server request body. + - name: agent + type: text + description: > + Contains the un-parsed user agent string. Only present if the user + agent Elasticsearch plugin is not available or not used. + - name: user_agent + type: group + description: > + Contains the parsed user agent field. Only present if the user + agent Elasticsearch plugin is available and used. + fields: + - name: device + type: keyword + description: > + The name of the physical device. + - name: major + type: long + description: > + The major version of the user agent. + - name: minor + type: long + description: > + The minor version of the user agent. + - name: patch + type: keyword + description: > + The patch version of the user agent. + - name: name + type: keyword + example: Chrome + description: > + The name of the user agent. + - name: os + type: keyword + description: > + The name of the operating system. + - name: os_major + type: long + description: > + The major version of the operating system. + - name: os_minor + type: long + description: > + The minor version of the operating system. + - name: os_name + type: keyword + description: > + The name of the operating system. + - name: geoip + type: group + description: > + Contains GeoIP information gathered based on the remote_ip field. + Only present if the GeoIP Elasticsearch plugin is available and + used. + fields: + - name: continent_name + type: keyword + description: > + The name of the continent. + - name: country_iso_code + type: keyword + description: > + Country ISO code. + - name: location + type: geo_point + description: > + The longitude and latitude. + - name: region_name + type: keyword + description: > + The region name. + - name: city_name + type: keyword + description: > + The city name. 
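The `user_agent` and `geoip` field groups defined above are only populated when the matching Elasticsearch ingest plugins are installed; the access fileset's manifest later in this diff lists them under `requires.processors` (ingest-user-agent and ingest-geoip). As a minimal sketch, assuming an Elasticsearch 6.x node where these processors still ship as standalone plugins rather than built-in modules, they could be installed like this:

["source","sh"]
-----
# Hypothetical commands, not part of this diff: install the ingest plugins the
# IIS filesets declare as requirements, then restart the node so the
# user_agent and geoip processors become available to the pipelines.
bin/elasticsearch-plugin install ingest-user-agent
bin/elasticsearch-plugin install ingest-geoip
-----

On later Elasticsearch releases these processors are bundled, so the install step may not be needed.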
diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/access/config/iis-access.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/access/config/iis-access.yml new file mode 100644 index 00000000..92f8c3b9 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/access/config/iis-access.yml @@ -0,0 +1,7 @@ +type: log +paths: +{{ range $i, $path := .paths }} + - {{$path}} +{{ end }} +exclude_files: [".gz$"] +exclude_lines: ["^#"] diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/access/ingest/default.json b/vendor/github.com/elastic/beats/filebeat/module/iis/access/ingest/default.json new file mode 100644 index 00000000..6f0cfd4a --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/access/ingest/default.json @@ -0,0 +1,53 @@ +{ + "description": "Pipeline for parsing IIS access logs. Requires the geoip and user_agent plugins.", + "processors": [{ + "grok": { + "field": "message", + "patterns":[ + "%{TIMESTAMP_ISO8601:iis.access.time} %{IPORHOST:iis.access.server_ip} %{WORD:iis.access.method} %{URIPATH:iis.access.url} %{NOTSPACE:iis.access.query_string} %{NUMBER:iis.access.port} %{NOTSPACE:iis.access.user_name} %{IPORHOST:iis.access.remote_ip} %{NOTSPACE:iis.access.agent} %{NOTSPACE:iis.access.referrer} %{NUMBER:iis.access.response_code} %{NUMBER:iis.access.sub_status} %{NUMBER:iis.access.win32_status} %{NUMBER:iis.access.request_time_ms}", + "%{TIMESTAMP_ISO8601:iis.access.time} %{NOTSPACE:iis.access.site_name} %{WORD:iis.access.method} %{URIPATH:iis.access.url} %{NOTSPACE:iis.access.query_string} %{NUMBER:iis.access.port} %{NOTSPACE:iis.access.user_name} %{IPORHOST:iis.access.remote_ip} %{NOTSPACE:iis.access.agent} %{NOTSPACE:iis.access.cookie} %{NOTSPACE:iis.access.referrer} %{NOTSPACE:iis.access.hostname} %{NUMBER:iis.access.response_code} %{NUMBER:iis.access.sub_status} %{NUMBER:iis.access.win32_status} %{NUMBER:iis.access.body_sent.bytes} %{NUMBER:iis.access.body_received.bytes} %{NUMBER:iis.access.request_time_ms}", + "%{TIMESTAMP_ISO8601:iis.access.time} %{NOTSPACE:iis.access.site_name} %{NOTSPACE:iis.access.server_name} %{IPORHOST:iis.access.server_ip} %{WORD:iis.access.method} %{URIPATH:iis.access.url} %{NOTSPACE:iis.access.query_string} %{NUMBER:iis.access.port} %{NOTSPACE:iis.access.user_name} %{IPORHOST:iis.access.remote_ip} HTTP/%{NUMBER:iis.access.http_version} %{NOTSPACE:iis.access.agent} %{NOTSPACE:iis.access.cookie} %{NOTSPACE:iis.access.referrer} %{NOTSPACE:iis.access.hostname} %{NUMBER:iis.access.response_code} %{NUMBER:iis.access.sub_status} %{NUMBER:iis.access.win32_status} %{NUMBER:iis.access.body_sent.bytes} %{NUMBER:iis.access.body_received.bytes} %{NUMBER:iis.access.request_time_ms}" + ], + "ignore_missing": true + } + }, { + "remove":{ + "field": "message" + } + }, { + "rename": { + "field": "@timestamp", + "target_field": "read_timestamp" + } + }, { + "date": { + "field": "iis.access.time", + "target_field": "@timestamp", + "formats": ["yyyy-MM-dd HH:mm:ss"] + } + }, { + "remove": { + "field": "iis.access.time" + } + }, { + "user_agent": { + "field": "iis.access.agent", + "target_field": "iis.access.user_agent" + } + }, { + "remove": { + "field": "iis.access.agent" + } + }, { + "geoip": { + "field": "iis.access.remote_ip", + "target_field": "iis.access.geoip" + } + }], + "on_failure" : [{ + "set" : { + "field" : "error.message", + "value" : "{{ _ingest.on_failure_message }}" + } + }] +} diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/access/manifest.yml 
b/vendor/github.com/elastic/beats/filebeat/module/iis/access/manifest.yml new file mode 100644 index 00000000..097afa5e --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/access/manifest.yml @@ -0,0 +1,19 @@ +module_version: 1.0 + +var: + - name: paths + default: + - C:/inetpub/logs/LogFiles/*/*.log + os.darwin: [""] + os.linux: [""] + os.windows: + - C:/inetpub/logs/LogFiles/*/*.log + +ingest_pipeline: ingest/default.json +input: config/iis-access.yml + +requires.processors: +- name: user_agent + plugin: ingest-user-agent +- name: geoip + plugin: ingest-geoip diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/access/test/test.log b/vendor/github.com/elastic/beats/filebeat/module/iis/access/test/test.log new file mode 100644 index 00000000..a0c21915 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/access/test/test.log @@ -0,0 +1,15 @@ +#Software: Microsoft Internet Information Services 10.0 +#Version: 1.0 +#Date: 2018-01-01 08:09:10 +#Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken +2018-01-01 08:09:10 127.0.0.1 GET / q=100 80 - 85.181.35.98 Mozilla/5.0+(Windows+NT+6.1;+Win64;+x64;+rv:57.0)+Gecko/20100101+Firefox/57.0 - 200 0 0 123 +#Software: Microsoft Internet Information Services 10.0 +#Version: 1.0 +#Date: 2018-01-01 09:10:11 +#Fields: date time s-sitename cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Cookie) cs(Referer) cs-host sc-status sc-substatus sc-win32-status sc-bytes cs-bytes time-taken +2018-01-01 09:10:11 W3SVC1 GET / - 80 - 127.0.0.1 Mozilla/5.0+(Windows+NT+6.1;+Win64;+x64;+rv:57.0)+Gecko/20100101+Firefox/57.0 - - example.com 200 0 0 123 456 789 +#Software: Microsoft Internet Information Services 10.0 +#Version: 1.0 +#Date: 2018-01-01 10:11:12 +#Fields: date time s-sitename s-computername s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs-version cs(User-Agent) cs(Cookie) cs(Referer) cs-host sc-status sc-substatus sc-win32-status sc-bytes cs-bytes time-taken +2018-01-01 10:11:12 W3SVC1 MACHINE-NAME 127.0.0.1 GET / - 80 - 85.181.35.98 HTTP/1.1 Mozilla/5.0+(Windows+NT+6.1;+Win64;+x64;+rv:57.0)+Gecko/20100101+Firefox/57.0 - - example.com 200 0 0 123 456 789 diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/access/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/iis/access/test/test.log-expected.json new file mode 100644 index 00000000..70a6d065 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/access/test/test.log-expected.json @@ -0,0 +1,197 @@ +[ + { + "_index": "filebeat-6.1.3-alpha1-2018.01.20", + "_type": "doc", + "_id": "dL-VFGEBcLJksy_1Pk15", + "_version": 1, + "_score": null, + "_source": { + "iis": { + "access": { + "response_code": "200", + "geoip": { + "continent_name": "Europe", + "city_name": "Berlin", + "location": { + "lat": 52.5167, + "lon": 13.4 + }, + "region_name": "Land Berlin", + "country_iso_code": "DE" + }, + "method": "GET", + "sub_status": "0", + "user_name": "-", + "request_time_ms": "123", + "url": "/", + "referrer": "-", + "remote_ip": "85.181.35.98", + "port": "80", + "server_ip": "127.0.0.1", + "win32_status": "0", + "query_string": "q=100", + "user_agent": { + "major": "57", + "minor": "0", + "os": "Windows", + "name": "Firefox", + "os_name": "Windows", + "device": "Other" + } + } + }, + "@timestamp": "2018-01-01T08:09:10.000Z", + "offset": 405, + "beat": { + 
"hostname": "MACHINE-NAME", + "name": "MACHINE-NAME", + "version": "6.1.3-alpha1" + }, + "prospector": { + "type": "log" + }, + "input": { + "type": "log" + }, + "read_timestamp": "2018-01-20T17:19:39.236Z", + "source": "/go/src/github.com/elastic/beats/filebeat/module/iis/access/test/test.log", + "fileset": { + "module": "iis", + "name": "access" + } + } + }, + { + "_index": "filebeat-6.1.3-alpha1-2018.01.20", + "_type": "doc", + "_id": "db-VFGEBcLJksy_1Pk15", + "_version": 1, + "_score": null, + "_source": { + "iis": { + "access": { + "response_code": "200", + "cookie": "-", + "method": "GET", + "sub_status": "0", + "user_name": "-", + "request_time_ms": "789", + "url": "/", + "site_name": "W3SVC1", + "referrer": "-", + "body_received": { + "bytes": "456" + }, + "hostname": "example.com", + "remote_ip": "127.0.0.1", + "port": "80", + "body_sent": { + "bytes": "123" + }, + "win32_status": "0", + "query_string": "-", + "user_agent": { + "major": "57", + "minor": "0", + "os": "Windows", + "name": "Firefox", + "os_name": "Windows", + "device": "Other" + } + } + }, + "@timestamp": "2018-01-01T09:10:11.000Z", + "offset": 869, + "beat": { + "hostname": "MACHINE-NAME", + "name": "MACHINE-NAME", + "version": "6.1.3-alpha1" + }, + "prospector": { + "type": "log" + }, + "input": { + "type": "log" + }, + "read_timestamp": "2018-01-20T17:19:39.237Z", + "source": "/go/src/github.com/elastic/beats/filebeat/module/iis/access/test/test.log", + "fileset": { + "module": "iis", + "name": "access" + } + } + }, + { + "_index": "filebeat-6.1.3-alpha1-2018.01.20", + "_type": "doc", + "_id": "dr-VFGEBcLJksy_1Pk15", + "_version": 1, + "_score": null, + "_source": { + "iis": { + "access": { + "server_name": "MACHINE-NAME", + "response_code": "200", + "geoip": { + "continent_name": "Europe", + "city_name": "Berlin", + "location": { + "lat": 52.5167, + "lon": 13.4 + }, + "region_name": "Land Berlin", + "country_iso_code": "DE" + }, + "cookie": "-", + "method": "GET", + "sub_status": "0", + "user_name": "-", + "request_time_ms": "789", + "http_version": "1.1", + "url": "/", + "site_name": "W3SVC1", + "referrer": "-", + "body_received": { + "bytes": "456" + }, + "hostname": "example.com", + "remote_ip": "85.181.35.98", + "port": "80", + "server_ip": "127.0.0.1", + "body_sent": { + "bytes": "123" + }, + "win32_status": "0", + "query_string": "-", + "user_agent": { + "major": "57", + "minor": "0", + "os": "Windows", + "name": "Firefox", + "os_name": "Windows", + "device": "Other" + } + } + }, + "@timestamp": "2018-01-01T10:11:12.000Z", + "offset": 1399, + "beat": { + "hostname": "MACHINE-NAME", + "name": "MACHINE-NAME", + "version": "6.1.3-alpha1" + }, + "prospector": { + "type": "log" + }, + "input": { + "type": "log" + }, + "read_timestamp": "2018-01-20T17:19:39.237Z", + "source": "/go/src/github.com/elastic/beats/filebeat/module/iis/access/test/test.log", + "fileset": { + "module": "iis", + "name": "access" + } + } + } +] diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/error/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/error/_meta/fields.yml new file mode 100644 index 00000000..ff48d740 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/error/_meta/fields.yml @@ -0,0 +1,73 @@ +- name: error + type: group + description: > + Contains fields for IIS error logs. + fields: + - name: remote_ip + type: keyword + description: > + The client IP address. + - name: remote_port + type: long + description: > + The client port number. 
+ - name: server_ip + type: keyword + description: > + The server IP address. + - name: server_port + type: long + description: > + The server port number. + - name: http_version + type: keyword + description: > + The HTTP version. + - name: method + type: keyword + example: GET + description: > + The request HTTP method. + - name: url + type: keyword + description: > + The request HTTP URL. + - name: response_code + type: long + description: > + The HTTP response code. + - name: reason_phrase + type: keyword + description: > + The HTTP reason phrase. + - name: queue_name + type: keyword + description: > + The IIS application pool name. + - name: geoip + type: group + description: > + Contains GeoIP information gathered based on the remote_ip field. + Only present if the GeoIP Elasticsearch plugin is available and + used. + fields: + - name: continent_name + type: keyword + description: > + The name of the continent. + - name: country_iso_code + type: keyword + description: > + Country ISO code. + - name: location + type: geo_point + description: > + The longitude and latitude. + - name: region_name + type: keyword + description: > + The region name. + - name: city_name + type: keyword + description: > + The city name. diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/error/config/iis-error.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/error/config/iis-error.yml new file mode 100644 index 00000000..92f8c3b9 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/error/config/iis-error.yml @@ -0,0 +1,7 @@ +type: log +paths: +{{ range $i, $path := .paths }} + - {{$path}} +{{ end }} +exclude_files: [".gz$"] +exclude_lines: ["^#"] diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/error/ingest/default.json b/vendor/github.com/elastic/beats/filebeat/module/iis/error/ingest/default.json new file mode 100644 index 00000000..632e31d7 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/error/ingest/default.json @@ -0,0 +1,42 @@ +{ + "description": "Pipeline for parsing IIS error logs. Requires the geoip plugin.", + "processors": [{ + "grok": { + "field": "message", + "patterns":[ + "%{TIMESTAMP_ISO8601:iis.error.time} %{IPORHOST:iis.error.remote_ip} %{NUMBER:iis.error.remote_port} %{IPORHOST:iis.error.server_ip} %{IPORHOST:iis.error.server_port} (?:HTTP/%{NUMBER:iis.error.http_version}|-) (?:%{WORD:iis.error.method}|-) (?:%{URIPATHPARAM:iis.error.url}|-)(?: -)? 
(?:%{NUMBER:iis.error.response_code}|-) (?:%{NUMBER}|-) (?:%{NOTSPACE:iis.error.reason_phrase}|-) (?:%{NOTSPACE:iis.error.queue_name}|-)" + ], + "ignore_missing": true + } + }, { + "remove":{ + "field": "message" + } + }, { + "rename": { + "field": "@timestamp", + "target_field": "read_timestamp" + } + }, { + "date": { + "field": "iis.error.time", + "target_field": "@timestamp", + "formats": ["yyyy-MM-dd HH:mm:ss"] + } + }, { + "remove": { + "field": "iis.error.time" + } + }, { + "geoip": { + "field": "iis.error.remote_ip", + "target_field": "iis.error.geoip" + } + }], + "on_failure" : [{ + "set" : { + "field" : "error.message", + "value" : "{{ _ingest.on_failure_message }}" + } + }] +} diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/error/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/iis/error/manifest.yml new file mode 100644 index 00000000..577742f6 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/error/manifest.yml @@ -0,0 +1,17 @@ +module_version: 1.0 + +var: + - name: paths + default: + - c:/Windows/System32/LogFiles/HTTPERR/*.log + os.darwin: [""] + os.linux: [""] + os.windows: + - c:/Windows/System32/LogFiles/HTTPERR/*.log + +ingest_pipeline: ingest/default.json +input: config/iis-error.yml + +requires.processors: +- name: geoip + plugin: ingest-geoip diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/error/test/test.log b/vendor/github.com/elastic/beats/filebeat/module/iis/error/test/test.log new file mode 100644 index 00000000..f50daaa0 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/error/test/test.log @@ -0,0 +1,8 @@ +#Software: Microsoft HTTP API 2.0 +#Version: 1.0 +#Date: 2018-01-01 08:09:10 +#Fields: date time c-ip c-port s-ip s-port cs-version cs-method cs-uri sc-status s-siteid s-reason s-queuename +2018-01-01 08:09:10 172.31.77.6 2094 172.31.77.6 80 HTTP/1.1 GET /qos/1kbfile.txt 503 - ConnLimit - +2018-01-01 09:10:11 85.181.35.98 2780 127.0.0.1 80 HTTP/1.1 GET /ThisIsMyUrl.htm 400 - Hostname - +2018-01-01 10:11:12 85.181.35.98 2894 127.0.0.1 80 HTTP/2.0 GET / 505 - Version_N/S - +2018-01-01 11:12:13 85.181.35.98 64388 127.0.0.1 80 - - - - - Timer_MinBytesPerSecond - diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/error/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/iis/error/test/test.log-expected.json new file mode 100644 index 00000000..5974b636 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/error/test/test.log-expected.json @@ -0,0 +1,195 @@ +[ + { + "_index": "filebeat-6.1.3-alpha1-2018.01.20", + "_type": "doc", + "_id": "cL-VFGEBcLJksy_1Pk15", + "_score": 1, + "_source": { + "iis": { + "error": { + "queue_name": "-", + "response_code": "503", + "remote_ip": "172.31.77.6", + "method": "GET", + "server_ip": "172.31.77.6", + "remote_port": "2094", + "http_version": "1.1", + "server_port": "80", + "reason_phrase": "ConnLimit", + "url": "/qos/1kbfile.txt" + } + }, + "@timestamp": "2018-01-01T08:09:10.000Z", + "offset": 291, + "beat": { + "hostname": "MACHINE-NAME", + "name": "MACHINE-NAME", + "version": "6.1.3-alpha1" + }, + "prospector": { + "type": "log" + }, + "input": { + "type": "log" + }, + "read_timestamp": "2018-01-20T17:19:39.229Z", + "source": "/go/src/github.com/elastic/beats/filebeat/module/iis/error/test/test.log", + "fileset": { + "module": "iis", + "name": "error" + } + } + }, + { + "_index": "filebeat-6.1.3-alpha1-2018.01.20", + "_type": "doc", + "_id": "cb-VFGEBcLJksy_1Pk15", + "_version": 
1, + "_score": null, + "_source": { + "iis": { + "error": { + "queue_name": "-", + "response_code": "400", + "remote_ip": "85.181.35.98", + "geoip": { + "continent_name": "Europe", + "city_name": "Berlin", + "location": { + "lat": 52.5167, + "lon": 13.4 + }, + "region_name": "Land Berlin", + "country_iso_code": "DE" + }, + "method": "GET", + "server_ip": "127.0.0.1", + "remote_port": "2780", + "http_version": "1.1", + "server_port": "80", + "reason_phrase": "Hostname", + "url": "/ThisIsMyUrl.htm" + } + }, + "@timestamp": "2018-01-01T09:10:11.000Z", + "offset": 390, + "beat": { + "hostname": "MACHINE-NAME", + "name": "MACHINE-NAME", + "version": "6.1.3-alpha1" + }, + "prospector": { + "type": "log" + }, + "input": { + "type": "log" + }, + "read_timestamp": "2018-01-20T17:19:39.229Z", + "source": "/go/src/github.com/elastic/beats/filebeat/module/iis/error/test/test.log", + "fileset": { + "module": "iis", + "name": "error" + } + } + }, + { + "_index": "filebeat-6.1.3-alpha1-2018.01.20", + "_type": "doc", + "_id": "cr-VFGEBcLJksy_1Pk15", + "_version": 1, + "_score": null, + "_source": { + "iis": { + "error": { + "queue_name": "-", + "response_code": "505", + "remote_ip": "85.181.35.98", + "geoip": { + "continent_name": "Europe", + "city_name": "Berlin", + "location": { + "lat": 52.5167, + "lon": 13.4 + }, + "region_name": "Land Berlin", + "country_iso_code": "DE" + }, + "method": "GET", + "server_ip": "127.0.0.1", + "remote_port": "2894", + "http_version": "2.0", + "server_port": "80", + "reason_phrase": "Version_N/S", + "url": "/" + } + }, + "@timestamp": "2018-01-01T10:11:12.000Z", + "offset": 477, + "beat": { + "hostname": "MACHINE-NAME", + "name": "MACHINE-NAME", + "version": "6.1.3-alpha1" + }, + "prospector": { + "type": "log" + }, + "input": { + "type": "log" + }, + "read_timestamp": "2018-01-20T17:19:39.229Z", + "source": "/go/src/github.com/elastic/beats/filebeat/module/iis/error/test/test.log", + "fileset": { + "module": "iis", + "name": "error" + } + } + }, + { + "_index": "filebeat-6.1.3-alpha1-2018.01.20", + "_type": "doc", + "_id": "c7-VFGEBcLJksy_1Pk15", + "_version": 1, + "_score": null, + "_source": { + "iis": { + "error": { + "queue_name": "-", + "remote_ip": "85.181.35.98", + "geoip": { + "continent_name": "Europe", + "city_name": "Berlin", + "location": { + "lat": 52.5167, + "lon": 13.4 + }, + "region_name": "Land Berlin", + "country_iso_code": "DE" + }, + "server_ip": "127.0.0.1", + "remote_port": "64388", + "server_port": "80", + "reason_phrase": "Timer_MinBytesPerSecond" + } + }, + "@timestamp": "2018-01-01T11:12:13.000Z", + "offset": 566, + "beat": { + "hostname": "MACHINE-NAME", + "name": "MACHINE-NAME", + "version": "6.1.3-alpha1" + }, + "prospector": { + "type": "log" + }, + "input": { + "type": "log" + }, + "read_timestamp": "2018-01-20T17:19:39.229Z", + "source": "/go/src/github.com/elastic/beats/filebeat/module/iis/error/test/test.log", + "fileset": { + "module": "iis", + "name": "error" + } + } + } +] diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc index 9bbaa14c..0ef83790 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc @@ -10,7 +10,7 @@ include::../include/what-happens.asciidoc[] [float] === Compatibility -The +{modulename}+ module was tested with logs from versions 2.11. 
+The +{modulename}+ module was tested with logs from versions 0.9. include::../include/running-modules.asciidoc[] @@ -40,11 +40,6 @@ file to override the default paths for logs: ----- -// REVIEWERS: I must be doing something wrong with the config settings. The -// above config works, but when I try to specify var.kafka_home, it doesn't -// seem to have any effect. - - To specify the same settings at the command line, you use: ["source","sh",subs="attributes"] @@ -61,12 +56,4 @@ include::../include/config-option-intro.asciidoc[] [float] ==== `log` fileset settings -// REVIEWERS: I've added a description because this variable appears in the -// kafka.yml file. However, I don't understand how it works. - -//*`var.kafka_home`*:: - -//The home path for Kafka. If this variable is not set, {beatname_uc} looks under -//`/opt`. - include::../include/var-paths.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/manifest.yml index fb7916d9..97ea201e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/manifest.yml @@ -11,4 +11,4 @@ var: - "{{.kafka_home}}/logs/kafka-*.log*" ingest_pipeline: ingest/pipeline.json -prospector: config/log.yml +input: config/log.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json index b3453298..02c88963 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json @@ -25,6 +25,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", @@ -46,7 +49,7 @@ 1501847065113 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -73,6 +76,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 11:44:25,112] INFO [Controller-0-to-broker-0-send-thread]: Shutting down (kafka.controller.RequestSendThread)", "fileset": { @@ -94,7 +100,7 @@ 1501847065112 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -121,6 +127,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 11:44:25,112] INFO [Controller-0-to-broker-0-send-thread]: Stopped (kafka.controller.RequestSendThread)", "fileset": { @@ -142,7 +151,7 @@ 1501847065112 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -169,6 +178,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", @@ -190,7 +202,7 @@ 1501847065111 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -217,6 +229,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", @@ -238,7 +253,7 @@ 
1501847065105 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -265,6 +280,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 11:44:25,100] DEBUG [Controller 0]: De-registering IsrChangeNotificationListener (kafka.controller.KafkaController)", "fileset": { @@ -286,7 +304,7 @@ 1501847065100 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -313,6 +331,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 11:44:25,099] DEBUG [Controller 0]: Controller resigning, broker id 0 (kafka.controller.KafkaController)", "fileset": { @@ -334,7 +355,7 @@ 1501847065099 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -361,6 +382,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", @@ -382,7 +406,7 @@ 1501847065097 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -409,6 +433,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 11:44:25,095] INFO [controller-event-thread]: Stopped (kafka.controller.ControllerEventManager$ControllerEventThread)", "fileset": { @@ -430,7 +457,7 @@ 1501847065095 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -457,6 +484,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 11:44:25,094] INFO [controller-event-thread]: Shutting down (kafka.controller.ControllerEventManager$ControllerEventThread)", "fileset": { @@ -478,7 +508,7 @@ 1501847065094 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -505,6 +535,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 11:44:22,588] DEBUG [Controller 0]: Live brokers: (kafka.controller.KafkaController)", "fileset": { @@ -526,7 +559,7 @@ 1501847062588 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -553,6 +586,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 10:48:21,165] INFO [Partition state machine on Controller 0]: Invoking state change to OnlinePartition for partitions (kafka.controller.PartitionStateMachine)", "fileset": { @@ -574,7 +610,7 @@ 1501843701165 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -601,6 +637,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", @@ -622,7 +661,7 @@ 1501843701157 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -649,6 +688,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": 
"/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 10:48:21,156] INFO [Partition state machine on Controller 0]: Started partition state machine with initial state -> Map() (kafka.controller.PartitionStateMachine)", "fileset": { @@ -670,7 +712,7 @@ 1501843701156 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -697,6 +739,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 10:48:21,154] INFO [Replica state machine on controller 0]: Started replica state machine with initial state -> Map() (kafka.controller.ReplicaStateMachine)", "fileset": { @@ -718,7 +763,7 @@ 1501843701154 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -745,6 +790,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", @@ -766,7 +814,7 @@ 1501843701085 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -793,6 +841,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 10:48:21,082] INFO [Controller 0]: Controller 0 incremented epoch to 1 (kafka.controller.KafkaController)", "fileset": { @@ -814,7 +865,7 @@ 1501843701082 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -841,6 +892,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "message": "[2017-08-04 10:48:21,064] INFO [Controller 0]: Broker 0 starting become controller state transition (kafka.controller.KafkaController)", "fileset": { @@ -862,7 +916,7 @@ 1501843701064 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -889,6 +943,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", @@ -910,7 +967,7 @@ 1501843701063 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -937,6 +994,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log", "fileset": { "module": "kafka", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json index 71394236..5d00a3b2 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json @@ -25,6 +25,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "fileset": { "module": "kafka", @@ -46,7 +49,7 @@ 1501843701167 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -73,6 +76,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": 
"/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "fileset": { "module": "kafka", @@ -94,7 +100,7 @@ 1501843701162 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -121,6 +127,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "fileset": { "module": "kafka", @@ -142,7 +151,7 @@ 1501843701127 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -169,6 +178,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:21,095] INFO [Group Metadata Manager on Broker 0]: Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)", "fileset": { @@ -190,7 +202,7 @@ 1501843701095 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -217,6 +229,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:21,063] INFO Result of znode creation is: OK (kafka.utils.ZKCheckedEphemeral)", "fileset": { @@ -238,7 +253,7 @@ 1501843701063 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -265,6 +280,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:21,062] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)", "fileset": { @@ -286,7 +304,7 @@ 1501843701062 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -313,6 +331,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,873] INFO Loading logs. 
(kafka.log.LogManager)", "fileset": { @@ -334,7 +355,7 @@ 1501843700873 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -361,6 +382,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "fileset": { "module": "kafka", @@ -382,7 +406,7 @@ 1501843700866 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -409,6 +433,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,800] INFO [ThrottledRequestReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)", "fileset": { @@ -430,7 +457,7 @@ 1501843700800 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -457,6 +484,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,748] WARN No meta.properties file under dir /tmp/kafka-logs/meta.properties (kafka.server.BrokerMetadataCheckpoint)", "fileset": { @@ -478,7 +508,7 @@ 1501843700748 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -505,6 +535,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "fileset": { "module": "kafka", @@ -526,7 +559,7 @@ 1501843700458 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -553,6 +586,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,457] INFO Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, sessionid = 0x15dabf8d4140000, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)", "fileset": { @@ -574,7 +610,7 @@ 1501843700457 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -601,6 +637,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,420] INFO Socket connection established to localhost/0:0:0:0:0:0:0:1:2181, initiating session (org.apache.zookeeper.ClientCnxn)", "fileset": { @@ -622,7 +661,7 @@ 1501843700420 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -649,6 +688,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,415] INFO Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. 
Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)", "fileset": { @@ -670,7 +712,7 @@ 1501843700415 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -697,6 +739,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,413] INFO Waiting for keeper state SyncConnected (org.I0Itec.zkclient.ZkClient)", "fileset": { @@ -718,7 +763,7 @@ 1501843700413 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -745,6 +790,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,401] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=6000 watcher=org.I0Itec.zkclient.ZkClient@5ffead27 (org.apache.zookeeper.ZooKeeper)", "fileset": { @@ -766,7 +814,7 @@ 1501843700401 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -793,6 +841,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,400] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper)", "fileset": { @@ -814,7 +865,7 @@ 1501843700400 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -841,6 +892,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,400] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)", "fileset": { @@ -862,7 +916,7 @@ 1501843700400 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -889,6 +943,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "message": "[2017-08-04 10:48:20,379] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer)", "fileset": { @@ -910,7 +967,7 @@ 1501843700379 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.11", "_type": "doc", @@ -937,6 +994,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log", "fileset": { "module": "kafka", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json index 9d02b4d8..e73b094e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json @@ -25,6 +25,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log", "message": "[2017-08-04 10:48:21,428] TRACE Controller 0 epoch 1 received response {error_code=0} for a request sent to broker baldur:9092 (id: 0 rack: null) (state.change.logger)", "fileset": { diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/manifest.yml 
b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/manifest.yml index 6ded99d0..696adeeb 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/manifest.yml @@ -10,4 +10,4 @@ var: - c:/programdata/logstash/logs/logstash-{{.format}}*.log ingest_pipeline: ingest/pipeline-{{.format}}.json -prospector: config/log.yml +input: config/log.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json index c4faeaef..e688a7d6 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json @@ -25,6 +25,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "read_timestamp": "2017-10-30T19:40:38.780Z", "source": "/tmp/logstash-plain.log" }, diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/manifest.yml index 1dab9b3f..41d534bc 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/manifest.yml @@ -10,4 +10,4 @@ var: - c:/programdata/logstash/logs/logstash-slowlog-{{.format}}*.log ingest_pipeline: ingest/pipeline-{{.format}}.json -prospector: config/slowlog.yml +input: config/slowlog.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json index ce83d138..5f8d9574 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json @@ -31,6 +31,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "read_timestamp": "2017-11-21T05:34:25.435Z", "source": "/Users/ph/go/src/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log" }, diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/config.reference.yml new file mode 100644 index 00000000..86f1511e --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/config.reference.yml @@ -0,0 +1,12 @@ +#- module: mongodb + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/config.yml b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/config.yml new file mode 100644 index 00000000..be6ea989 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/config.yml @@ -0,0 +1,8 @@ +- module: mongodb + # All logs + log: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
+ #var.paths: diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/docs.asciidoc new file mode 100755 index 00000000..9db6f633 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/docs.asciidoc @@ -0,0 +1,56 @@ +:modulename: mongodb + +== MongoDB module + +The +{modulename}+ module collects and parses logs created by +https://www.mongodb.com/[MongoDB]. + +include::../include/what-happens.asciidoc[] + +[float] +=== Compatibility + +The +{modulename}+ module was tested with logs from version 3.2.11 on Debian. + +include::../include/running-modules.asciidoc[] + +[float] +=== Example dashboard + +This module comes with a sample dashboard that includes both error and regular logs. + +[role="screenshot"] +image::./images/filebeat-mongodb-overview.png[] + +include::../include/configuring-intro.asciidoc[] + +The following example shows how to set paths in the +modules.d/{modulename}.yml+ +file to override the default paths for MongoDB logs: + + +["source","yaml",subs="attributes"] +----- +- module: mongodb + log: + enabled: true + var.paths: ["/path/to/log/mongodb/*.log*"] +----- + + +To specify the same settings at the command line, you use: + +["source","sh",subs="attributes"] +----- +./{beatname_lc} --modules {modulename} -M "mongodb.log.var.paths=[/path/to/log/mongodb/*.log*]" +----- + + +:fileset_ex: log + +include::../include/config-option-intro.asciidoc[] + + +[float] +==== `log` fileset settings + +include::../include/var-paths.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/fields.yml new file mode 100755 index 00000000..3f82c54f --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/fields.yml @@ -0,0 +1,10 @@ +- key: mongodb + title: "mongodb" + description: > + Module for parsing MongoDB log files. + fields: + - name: mongodb + type: group + description: > + Fields from MongoDB logs.
+ fields: diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/kibana/6/dashboard/Filebeat-Mongodb-overview.json b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/kibana/6/dashboard/Filebeat-Mongodb-overview.json new file mode 100644 index 00000000..ee919ab0 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/_meta/kibana/6/dashboard/Filebeat-Mongodb-overview.json @@ -0,0 +1,89 @@ +{ + "objects": [ + { + "attributes": { + "description": "", + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"filter\":[],\"query\":{\"language\":\"lucene\",\"query\":\"\"}}" + }, + "savedSearchId": "bfc96a60-0a80-11e8-bffe-ff7d4f68cf94", + "title": "Logs Severity [Filebeat MongoDB]", + "uiStateJSON": "{}", + "version": 1, + "visState": "{\"aggs\":[{\"enabled\":true,\"id\":\"1\",\"params\":{},\"schema\":\"metric\",\"type\":\"count\"},{\"enabled\":true,\"id\":\"2\",\"params\":{\"customLabel\":\"Log severity\",\"field\":\"mongodb.log.severity\",\"order\":\"desc\",\"orderBy\":\"1\",\"size\":5},\"schema\":\"segment\",\"type\":\"terms\"}],\"params\":{\"addLegend\":true,\"addTooltip\":true,\"isDonut\":true,\"legendPosition\":\"right\",\"type\":\"pie\"},\"title\":\"Logs Severity [Filebeat MongoDB]\",\"type\":\"pie\"}" + }, + "id": "0fef5710-0a82-11e8-bffe-ff7d4f68cf94", + "type": "visualization", + "version": 3 + }, + { + "attributes": { + "columns": [ + "mongodb.log.timestamp", + "mongodb.log.severity", + "mongodb.log.component", + "mongodb.log.context", + "mongodb.log.message" + ], + "description": "", + "hits": 0, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"mongodb.log.severity: F or mongodb.log.severity: W\"},\"filter\":[]}" + }, + "sort": [ + "@timestamp", + "desc" + ], + "title": "Error logs [Filebeat MongoDB]", + "version": 1 + }, + "id": "e49fe000-0a7e-11e8-bffe-ff7d4f68cf94", + "type": "search", + "version": 3 + }, + { + "attributes": { + "columns": [ + "mongodb.log.timestamp", + "mongodb.log.severity", + "mongodb.log.component", + "mongodb.log.context", + "mongodb.log.message" + ], + "description": "", + "hits": 0, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"index\":\"filebeat-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"query\":\"mongodb.log.severity: *\",\"language\":\"lucene\"},\"filter\":[]}" + }, + "sort": [ + "mongodb.log.timestamp", + "asc" + ], + "title": "All logs [Filebeat MongoDB]", + "version": 1 + }, + "id": "bfc96a60-0a80-11e8-bffe-ff7d4f68cf94", + "type": "search", + "version": 2 + }, + { + "attributes": { + "description": "Filebeat MongoDB module overview", + "hits": 0, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" + }, + "optionsJSON": "{\"darkTheme\":false}", + "panelsJSON": 
"[{\"size_x\":4,\"size_y\":3,\"panelIndex\":1,\"type\":\"visualization\",\"id\":\"0fef5710-0a82-11e8-bffe-ff7d4f68cf94\",\"col\":1,\"row\":1},{\"size_x\":8,\"size_y\":3,\"panelIndex\":2,\"type\":\"search\",\"id\":\"e49fe000-0a7e-11e8-bffe-ff7d4f68cf94\",\"col\":5,\"row\":1,\"columns\":[\"mongodb.log.timestamp\",\"mongodb.log.severity\",\"mongodb.log.component\",\"mongodb.log.context\",\"mongodb.log.message\"],\"sort\":[\"@timestamp\",\"desc\"]},{\"size_x\":12,\"size_y\":6,\"panelIndex\":3,\"type\":\"search\",\"id\":\"bfc96a60-0a80-11e8-bffe-ff7d4f68cf94\",\"col\":1,\"row\":4,\"columns\":[\"mongodb.log.timestamp\",\"mongodb.log.severity\",\"mongodb.log.component\",\"mongodb.log.context\",\"mongodb.log.message\"],\"sort\":[\"mongodb.log.timestamp\",\"asc\"]}]", + "timeRestore": false, + "title": "Overview [Filebeat MongoDB]", + "uiStateJSON": "{}", + "version": 1 + }, + "id": "abcf35b0-0a82-11e8-bffe-ff7d4f68cf94", + "type": "dashboard", + "version": 2 + } + ], + "version": "6.0.0" +} diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/_meta/fields.yml new file mode 100644 index 00000000..a45d2ca9 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/_meta/fields.yml @@ -0,0 +1,24 @@ +- name: log + type: group + description: > + Contains fields from MongoDB logs. + fields: + - name: severity + description: > + Severity level of message + example: I + type: keyword + - name: component + description: > + Functional categorization of message + example: COMMAND + type: keyword + - name: context + description: > + Context of message + example: initandlisten + type: keyword + - name: message + description: > + The message in the log line. 
+ type: text diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/config/config.yml b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/config/log.yml old mode 100644 new mode 100755 similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/config/config.yml rename to vendor/github.com/elastic/beats/filebeat/module/mongodb/log/config/log.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/ingest/pipeline.json new file mode 100755 index 00000000..b0a39f2a --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/ingest/pipeline.json @@ -0,0 +1,41 @@ +{ + "description": "Pipeline for parsing MongoDB logs", + "processors": [{ + "grok": { + "field": "message", + "patterns":[ + "%{TIMESTAMP_ISO8601:mongodb.log.timestamp} %{WORD:mongodb.log.severity} %{WORD:mongodb.log.component} *\\[%{WORD:mongodb.log.context}\\] %{GREEDYDATA:mongodb.log.message}" + ], + "ignore_missing": true + } + }, + { + "remove": { + "field": "message" + } + }, + { + "rename": { + "field": "@timestamp", + "target_field": "read_timestamp" + } + }, + { + "date": { + "field": "mongodb.log.timestamp", + "target_field": "@timestamp", + "formats": ["YYYY-MM-DD'T'HH:mm:ss.SSSZZ"] + } + }, + { + "remove": { + "field": "mongodb.log.timestamp" + } + }], + "on_failure" : [{ + "set" : { + "field" : "error.message", + "value" : "{{ _ingest.on_failure_message }}" + } + }] +} diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/manifest.yml new file mode 100644 index 00000000..f77eaa89 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/manifest.yml @@ -0,0 +1,11 @@ +module_version: 1.0 + +var: + - name: paths + default: + - /var/log/mongodb/mongodb.log + os.windows: + - c:\data\log\mongod.log + +ingest_pipeline: ingest/pipeline.json +input: config/log.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log-expected.json new file mode 100644 index 00000000..3229854f --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log-expected.json @@ -0,0 +1,1080 @@ +[ + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "2IHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 326, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": "git version: 009580ad490190ba33d1c6253ebd8d91808923e4", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "24HaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 573, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": 
"/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": "modules: none", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "4IHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 1482, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "STORAGE", + "context": "initandlisten", + "message": "wiredtiger_open config: create,cache_size=8G,session_max=20000,eviction=(threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),", + "timestamp": "2018-02-05T13:44:56.677+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "4YHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 1635, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "message": "2018-02-05T13:44:56.724+0100 I FTDC [initandlisten] Initializing full-time diagnostic data capture with directory '/var/lib/mongodb/diagnostic.data'", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "error": { + "message": "Provided Grok expressions do not match field value: [2018-02-05T13:44:56.724+0100 I FTDC [initandlisten] Initializing full-time diagnostic data capture with directory '/var/lib/mongodb/diagnostic.data']" + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "4oHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 1750, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "HostnameCanonicalizationWorker", + "message": "Starting hostname canonicalization worker", + "timestamp": "2018-02-05T13:44:56.724+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "44HaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 1844, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": 
"initandlisten", + "message": "waiting for connections on port 27017", + "timestamp": "2018-02-05T13:44:56.744+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "5YHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 2072, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "conn1", + "message": "end connection 127.0.0.1:55404 (0 connections now open)", + "timestamp": "2018-02-05T13:50:55.170+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "5oHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 2196, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "initandlisten", + "message": "connection accepted from 127.0.0.1:55406 #2 (1 connection now open)", + "timestamp": "2018-02-05T13:50:55.487+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "7oHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3077, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "signalProcessingThread", + "message": "now exiting", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "8YHaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3374, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "signalProcessingThread", + "message": "closing listening socket: 7", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "8oHaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3493, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { 
+ "severity": "I", + "component": "NETWORK", + "context": "signalProcessingThread", + "message": "removing socket file: /run/mongodb/mongodb-27017.sock", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "84HaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3594, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "signalProcessingThread", + "message": "shutdown: going to flush diaglog...", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "9IHaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3695, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "signalProcessingThread", + "message": "shutdown: going to close sockets...", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "9oHaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3888, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "STORAGE", + "context": "signalProcessingThread", + "message": "shutdown: removing fs lock...", + "timestamp": "2018-02-05T14:49:45.688+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "14HaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 216, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": "db version v3.2.11", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "3IHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 648, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": 
"mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": "build environment:", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "3YHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 725, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": " distarch: x86_64", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "34HaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 1142, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": """options: { config: "/etc/mongodb.conf", net: { bindIp: "127.0.0.1", unixDomainSocket: { pathPrefix: "/run/mongodb" } }, storage: { dbPath: "/var/lib/mongodb", journal: { enabled: true } }, systemLog: { destination: "file", logAppend: true, path: "/var/log/mongodb/mongodb.log" } }""", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "5IHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 1968, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "initandlisten", + "message": "connection accepted from 127.0.0.1:55404 #1 (1 connection now open)", + "timestamp": "2018-02-05T13:50:55.170+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "6YHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 2528, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "conn3", + "message": "end connection 127.0.0.1:55414 (0 connections now open)", + "timestamp": "2018-02-05T13:50:56.180+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "64HaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + 
"offset": 2756, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "conn4", + "message": "end connection 127.0.0.1:58336 (0 connections now open)", + "timestamp": "2018-02-05T14:15:42.095+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "74HaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3188, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "signalProcessingThread", + "message": "shutdown: going to close listening sockets...", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "9YHaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3793, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "STORAGE", + "context": "signalProcessingThread", + "message": "WiredTigerKVEngine shutting down", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "94HaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3968, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "signalProcessingThread", + "message": "dbexit: rc: 0", + "timestamp": "2018-02-05T14:49:45.688+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "1oHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 141, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": "MongoDB starting : pid=29803 port=27017 dbpath=/var/lib/mongodb 64-bit host=sleipnir", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": 
"2YHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 427, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": "OpenSSL version: OpenSSL 1.0.2l 25 May 2017", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "2oHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 503, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": "allocator: tcmalloc", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "3oHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 805, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "initandlisten", + "message": " target_arch: x86_64", + "timestamp": "2018-02-05T13:44:56.657+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "54HaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 2300, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "conn2", + "message": "end connection 127.0.0.1:55406 (0 connections now open)", + "timestamp": "2018-02-05T13:50:55.487+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "6IHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 2424, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "initandlisten", + "message": "connection accepted from 127.0.0.1:55414 #3 (1 connection now open)", + "timestamp": "2018-02-05T13:50:56.180+0100" + } + } + } + }, + { + "_index": 
"filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "6oHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 2652, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "initandlisten", + "message": "connection accepted from 127.0.0.1:58336 #4 (1 connection now open)", + "timestamp": "2018-02-05T14:11:41.401+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "7IHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 2887, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "CONTROL", + "context": "signalProcessingThread", + "message": "got signal 15 (Terminated), will terminate after current cmd ends", + "timestamp": "2018-02-05T14:49:45.605+0100" + } + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "7YHaZmEBQXBNR1UUaDmZ", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3000, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "message": "2018-02-05T14:49:45.605+0100 I FTDC [signalProcessingThread] Shutting down full-time diagnostic data capture", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "error": { + "message": "Provided Grok expressions do not match field value: [2018-02-05T14:49:45.605+0100 I FTDC [signalProcessingThread] Shutting down full-time diagnostic data capture]" + } + } + }, + { + "_index": "filebeat-7.0.0-alpha1-2018.02.05", + "_type": "doc", + "_id": "8IHaZmEBQXBNR1UUaDma", + "_score": 1, + "_source": { + "@timestamp": "2018-02-05T16:44:04.007Z", + "offset": 3281, + "beat": { + "hostname": "sleipnir", + "name": "sleipnir", + "version": "7.0.0-alpha1" + }, + "prospector": { + "type": "log" + }, + "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/mongodb/log/test/mongodb-debian-3.2.11.log", + "fileset": { + "module": "mongodb", + "name": "log" + }, + "mongodb": { + "log": { + "severity": "I", + "component": "NETWORK", + "context": "signalProcessingThread", + "message": "closing listening socket: 6", + "timestamp": "2018-02-05T14:49:45.606+0100" + } + } + } + } +] diff --git a/vendor/github.com/elastic/beats/filebeat/module/mongodb/module.yml b/vendor/github.com/elastic/beats/filebeat/module/mongodb/module.yml new file mode 100755 index 00000000..2fef4e2c --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mongodb/module.yml @@ -0,0 +1,3 @@ +dashboards: +- id: 158be870-87f4-11e7-ad9c-db80de0bf8d3 + file: Filebeat-Mongodb-overview.json diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/config.reference.yml 
b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/config.reference.yml index f3b47255..49f1db5e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/_meta/config.reference.yml @@ -7,9 +7,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Slow logs #slowlog: @@ -19,6 +19,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/manifest.yml index 33a29a96..0376206f 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/manifest.yml @@ -11,4 +11,4 @@ var: - "c:/programdata/MySQL/MySQL Server*/error.log*" ingest_pipeline: ingest/pipeline.json -prospector: config/error.yml +input: config/error.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/manifest.yml index f9988389..32edcfc7 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/slowlog/manifest.yml @@ -11,4 +11,4 @@ var: - "c:/programdata/MySQL/MySQL Server*/mysql-slow.log*" ingest_pipeline: ingest/pipeline.json -prospector: config/slowlog.yml +input: config/slowlog.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/config.reference.yml index 249d1bdb..57234121 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/_meta/config.reference.yml @@ -7,9 +7,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Error logs #error: @@ -19,6 +19,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. 
- #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/default.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/default.json index ecb3df51..04ae1197 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/default.json +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/ingest/default.json @@ -4,13 +4,26 @@ "grok": { "field": "message", "patterns":[ - "\"?%{IP_LIST:nginx.access.remote_ip_list} - %{DATA:nginx.access.user_name} \\[%{HTTPDATE:nginx.access.time}\\] \"%{WORD:nginx.access.method} %{DATA:nginx.access.url} HTTP/%{NUMBER:nginx.access.http_version}\" %{NUMBER:nginx.access.response_code} %{NUMBER:nginx.access.body_sent.bytes} \"%{DATA:nginx.access.referrer}\" \"%{DATA:nginx.access.agent}\"" + "\"?%{IP_LIST:nginx.access.remote_ip_list} - %{DATA:nginx.access.user_name} \\[%{HTTPDATE:nginx.access.time}\\] \"%{GREEDYDATA:nginx.access.info}\" %{NUMBER:nginx.access.response_code} %{NUMBER:nginx.access.body_sent.bytes} \"%{DATA:nginx.access.referrer}\" \"%{DATA:nginx.access.agent}\"" ], "pattern_definitions": { "IP_LIST": "%{IP}(\"?,?\\s*%{IP})*" }, "ignore_missing": true } + }, { + "grok": { + "field": "nginx.access.info", + "patterns": [ + "%{WORD:nginx.access.method} %{DATA:nginx.access.url} HTTP/%{NUMBER:nginx.access.http_version}", + "" + ], + "ignore_missing": true + } + }, { + "remove": { + "field": "nginx.access.info" + } }, { "split": { "field": "nginx.access.remote_ip_list", diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/manifest.yml index b7686b9d..a0fede4e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/manifest.yml @@ -10,7 +10,7 @@ var: - c:/programdata/nginx/logs/*access.log* ingest_pipeline: ingest/default.json -prospector: config/nginx-access.yml +input: config/nginx-access.yml machine_learning: - name: response_code diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log index e303a6d5..d107d245 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log @@ -4,3 +4,4 @@ 85.181.35.98 - - [07/Dec/2016:11:05:07 +0100] "GET /ocelot HTTP/1.1" 200 571 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0" "10.5.102.222, 199.96.1.1, 204.246.1.1" 10.2.1.185 - - [22/Jan/2016:13:18:29 +0000] "GET /assets/xxxx?q=100 HTTP/1.1" 200 25507 "-" "Amazon CloudFront" 2a03:0000:10ff:f00f:0000:0000:0:8000, 10.225.192.17 10.2.2.121 - - [30/Dec/2016:06:47:09 +0000] "GET /test.html HTTP/1.1" 404 8571 "-" "Mozilla/5.0 (compatible; Facebot 1.0; https://developers.facebook.com/docs/sharing/webmasters/crawler)" +127.0.0.1 - - [12/Apr/2018:09:48:40 +0200] "" 400 0 "-" "-" diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json index f7e93a36..36f59492 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json @@ -54,6 +54,9 @@ "prospector" : { "type" : "log" }, + 
"input" : { + "type" : "log" + }, "read_timestamp" : "2017-05-29T22:28:06.246Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log", "fileset" : { @@ -104,6 +107,9 @@ "prospector" : { "type" : "log" }, + "" : { + "type" : "log" + }, "read_timestamp" : "2017-05-29T22:28:06.246Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log", "fileset" : { @@ -165,6 +171,9 @@ "prospector" : { "type" : "log" }, + "input" : { + "type" : "log" + }, "read_timestamp" : "2017-05-29T22:28:06.246Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log", "fileset" : { @@ -218,6 +227,9 @@ "prospector" : { "type" : "log" }, + "input" : { + "type" : "log" + }, "read_timestamp" : "2017-05-29T22:28:06.245Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log", "fileset" : { @@ -278,6 +290,9 @@ "prospector" : { "type" : "log" }, + "input" : { + "type" : "log" + }, "read_timestamp" : "2017-05-29T22:28:06.246Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log", "fileset" : { @@ -337,6 +352,9 @@ "prospector" : { "type" : "log" }, + "input" : { + "type" : "log" + }, "read_timestamp" : "2017-05-29T22:28:06.246Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log", "fileset" : { @@ -344,5 +362,51 @@ "name" : "access" } } + }, + { + "_index" : "filebeat-6.0.0-alpha2-2017.05.30", + "_type" : "doc", + "_id" : "AVxWUuZ8OMOtqbaTipsE", + "_score" : 1.0, + "_source": { + "@timestamp": "2018-04-12T07:48:40.000Z", + "nginx": { + "access": { + "body_sent": { + "bytes": "0" + }, + "referrer": "-", + "remote_ip": "127.0.0.1", + "remote_ip_list": [ + "127.0.0.1" + ], + "response_code": "400", + "user_agent": { + "device": "Other", + "name": "Other", + "os": "Other", + "os_name": "Other" + }, + "user_name": "-" + } + }, + "beat" : { + "hostname" : "a-mac-with-esc-key-2.local", + "name" : "a-mac-with-esc-key-2.local", + "version" : "6.0.0-alpha2" + }, + "prospector" : { + "type" : "log" + }, + "input" : { + "type" : "log" + }, + "read_timestamp": "2018-04-13T11:13:43.103Z", + "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log", + "fileset" : { + "module" : "nginx", + "name" : "access" + } + } } ] diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/manifest.yml index 427763bd..641ec771 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/manifest.yml @@ -10,4 +10,4 @@ var: - c:/programdata/nginx/logs/error.log* ingest_pipeline: ingest/pipeline.json -prospector: config/nginx-error.yml +input: config/nginx-error.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/osquery/_meta/kibana/6/dashboard/osquery-compliance.json b/vendor/github.com/elastic/beats/filebeat/module/osquery/_meta/kibana/6/dashboard/osquery-compliance.json index 86d43945..8ecfc447 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/osquery/_meta/kibana/6/dashboard/osquery-compliance.json +++ b/vendor/github.com/elastic/beats/filebeat/module/osquery/_meta/kibana/6/dashboard/osquery-compliance.json @@ -169,7 +169,7 @@ "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" }, 
"optionsJSON": "{\"darkTheme\":false,\"hidePanelTitles\":false,\"useMargins\":true}", - "panelsJSON": "[{\"panelIndex\":\"1\",\"gridData\":{\"x\":6,\"y\":6,\"w\":6,\"h\":4,\"i\":\"1\"},\"id\":\"7a9482d0-eb00-11e7-8f04-51231daa5b05\",\"type\":\"search\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"2\",\"gridData\":{\"x\":5,\"y\":1,\"w\":7,\"h\":5,\"i\":\"2\"},\"id\":\"a9fd8bb0-eb01-11e7-8f04-51231daa5b05\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"3\",\"gridData\":{\"x\":0,\"y\":6,\"w\":6,\"h\":4,\"i\":\"3\"},\"id\":\"3824b080-eb02-11e7-8f04-51231daa5b05\",\"type\":\"search\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"4\",\"gridData\":{\"x\":0,\"y\":1,\"w\":5,\"h\":3,\"i\":\"4\"},\"id\":\"1da1ed30-eb03-11e7-8f04-51231daa5b05\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"5\",\"gridData\":{\"x\":0,\"y\":4,\"w\":5,\"h\":2,\"i\":\"5\"},\"embeddableConfig\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"},\"legendOpen\":false}},\"id\":\"240f3630-eb05-11e7-8f04-51231daa5b05\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"6\",\"gridData\":{\"x\":0,\"y\":0,\"w\":12,\"h\":1,\"i\":\"6\"},\"version\":\"6.1.0-SNAPSHOT\",\"type\":\"visualization\",\"id\":\"2d6e0760-f4ab-11e7-8647-534bb4c21040\"}]", + "panelsJSON": "[{\"panelIndex\":\"1\",\"gridData\":{\"x\":6,\"y\":6,\"w\":6,\"h\":4,\"i\":\"1\"},\"id\":\"7a9482d0-eb00-11e7-8f04-51231daa5b05\",\"type\":\"search\",\"version\":\"6.2.4\"},{\"panelIndex\":\"2\",\"gridData\":{\"x\":5,\"y\":1,\"w\":7,\"h\":5,\"i\":\"2\"},\"id\":\"a9fd8bb0-eb01-11e7-8f04-51231daa5b05\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"3\",\"gridData\":{\"x\":0,\"y\":6,\"w\":6,\"h\":4,\"i\":\"3\"},\"id\":\"3824b080-eb02-11e7-8f04-51231daa5b05\",\"type\":\"search\",\"version\":\"6.2.4\"},{\"panelIndex\":\"4\",\"gridData\":{\"x\":0,\"y\":1,\"w\":5,\"h\":3,\"i\":\"4\"},\"id\":\"1da1ed30-eb03-11e7-8f04-51231daa5b05\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"5\",\"gridData\":{\"x\":0,\"y\":4,\"w\":5,\"h\":2,\"i\":\"5\"},\"embeddableConfig\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"},\"legendOpen\":false}},\"id\":\"240f3630-eb05-11e7-8f04-51231daa5b05\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"6\",\"gridData\":{\"x\":0,\"y\":0,\"w\":12,\"h\":1,\"i\":\"6\"},\"version\":\"6.1.0-SNAPSHOT\",\"type\":\"visualization\",\"id\":\"2d6e0760-f4ab-11e7-8647-534bb4c21040\"}]", "timeRestore": false, "title": "[Osquery Result] Compliance pack", "uiStateJSON": "{\"P-5\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}}", @@ -182,4 +182,4 @@ } ], "version": "6.1.0-SNAPSHOT" -} \ No newline at end of file +} diff --git a/vendor/github.com/elastic/beats/filebeat/module/osquery/result/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/osquery/result/manifest.yml index adda5a99..626a4dba 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/osquery/result/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/osquery/result/manifest.yml @@ -14,4 +14,4 @@ var: ingest_pipeline: ingest/pipeline.json -prospector: config/result.yml +input: config/result.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/osquery/result/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/osquery/result/test/test.log-expected.json index a3bd3da0..15e016af 100644 --- 
a/vendor/github.com/elastic/beats/filebeat/module/osquery/result/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/osquery/result/test/test.log-expected.json @@ -43,6 +43,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/osquery/result/test/test.log", "fileset": { "module": "osquery", diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/postgresql/_meta/config.reference.yml index fa500fda..e1deee0e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/_meta/config.reference.yml @@ -7,6 +7,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json index 17e461d8..c9c33c0b 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json @@ -6,16 +6,12 @@ "field": "message", "ignore_missing": true, "patterns": [ - "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] %{USERNAME:postgresql.log.user}@%{HOSTNAME:postgresql.log.database} %{WORD:postgresql.log.level}: duration: %{NUMBER:postgresql.log.duration} ms statement: %{MULTILINEQUERY:postgresql.log.query}", - "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] \\[%{USERNAME:postgresql.log.user}\\]@\\[%{HOSTNAME:postgresql.log.database}\\] %{WORD:postgresql.log.level}: duration: %{NUMBER:postgresql.log.duration} ms statement: %{MULTILINEQUERY:postgresql.log.query}", - "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] %{USERNAME:postgresql.log.user}@%{HOSTNAME:postgresql.log.database} %{WORD:postgresql.log.level}: ?%{GREEDYDATA:postgresql.log.message}", - "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] \\[%{USERNAME:postgresql.log.user}\\]@\\[%{HOSTNAME:postgresql.log.database}\\] %{WORD:postgresql.log.level}: ?%{GREEDYDATA:postgresql.log.message}", - "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] %{WORD:postgresql.log.level}: ?%{GREEDYDATA:postgresql.log.message}" + "^%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] ((\\[%{USERNAME:postgresql.log.user}\\]@\\[%{POSTGRESQL_DB_NAME:postgresql.log.database}\\]|%{USERNAME:postgresql.log.user}@%{POSTGRESQL_DB_NAME:postgresql.log.database}) )?%{WORD:postgresql.log.level}: (duration: %{NUMBER:postgresql.log.duration} ms statement: %{GREEDYDATA:postgresql.log.query}|%{GREEDYDATA:postgresql.log.message})" ], "pattern_definitions": { "LOCALDATETIME": "[-0-9]+ %{TIME}", - "GREEDYDATA": ".*", - "MULTILINEQUERY" : "(.|\n|\t)*?;$" + "GREEDYDATA": "(.|\n|\t)*", + "POSTGRESQL_DB_NAME": 
"[a-zA-Z0-9_]+[a-zA-Z0-9_\\$]*" } } }, diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/manifest.yml index bd9631f9..e5ab4a9a 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/manifest.yml @@ -10,4 +10,4 @@ var: - "c:/Program Files/PostgreSQL/*/logs/*.log*" ingest_pipeline: ingest/pipeline.json -prospector: config/log.yml +input: config/log.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log index 1903a2a1..39a4d0ff 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log @@ -56,6 +56,6 @@ AND pg_catalog.pg_table_is_visible(c.oid) ORDER BY 1,2; 2017-07-31 13:43:22.645 CEST [5502] postgres@clients LOG: duration: 36.162 ms statement: create table cats(name varchar(50) primary key, toy varchar (50) not null, born timestamp not null); -2017-07-31 13:46:02.670 CEST [5502] postgres@clients LOG: duration: 10.540 ms statement: insert into cats(name, toy, born) values('kate', 'ball', now()); -2017-07-31 13:46:23.016 CEST [5502] postgres@clients LOG: duration: 5.156 ms statement: insert into cats(name, toy, born) values('frida', 'horse', now()); -2017-07-31 13:46:55.637 CEST [5502] postgres@clients LOG: duration: 25.871 ms statement: create table dogs(name varchar(50) primary key, owner varchar (50) not null, born timestamp not null); +2017-07-31 13:46:02.670 CEST [5502] postgres@c$lients LOG: duration: 10.540 ms statement: insert into cats(name, toy, born) values('kate', 'ball', now()); +2017-07-31 13:46:23.016 CEST [5502] postgres@_clients$db LOG: duration: 5.156 ms statement: insert into cats(name, toy, born) values('frida', 'horse', now()); +2017-07-31 13:46:55.637 CEST [5502] postgres@clients_db LOG: duration: 25.871 ms statement: create table dogs(name varchar(50) primary key, owner varchar (50) not null, born timestamp not null); diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json index 91f5e346..9c001e73 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json @@ -25,6 +25,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:36:42.585 CEST [4974] LOG: database system was shut down at 2017-06-17 16:58:04 CEST", "fileset": { @@ -41,7 +44,7 @@ 1501508202585 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -68,6 +71,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:36:42.605 CEST [4974] LOG: 
MultiXact member wraparound protections are now enabled", "fileset": { @@ -84,7 +90,7 @@ 1501508202605 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -111,6 +117,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:36:42.615 CEST [4978] LOG: autovacuum launcher started", "fileset": { @@ -127,7 +136,7 @@ 1501508202615 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -154,6 +163,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:36:42.616 CEST [4973] LOG: database system is ready to accept connections", "fileset": { @@ -170,7 +182,7 @@ 1501508202616 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -199,6 +211,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "fileset": { "module": "postgresql", @@ -215,7 +230,7 @@ 1501508202956 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -245,6 +260,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "fileset": { "module": "postgresql", @@ -261,7 +279,7 @@ 1501508203557 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -291,6 +309,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:36:44.104 CEST [4986] postgres@postgres LOG: duration: 2.895 ms statement: SELECT d.datname as \"Name\",\n\t pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\",\n\t pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\",\n\t d.datcollate as \"Collate\",\n\t d.datctype as \"Ctype\",\n\t pg_catalog.array_to_string(d.datacl, E'\\n') AS \"Access privileges\"\n\tFROM pg_catalog.pg_database d\n\tORDER BY 1;", "fileset": { @@ -307,7 +328,7 @@ 1501508204104 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -337,6 +358,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "fileset": { "module": "postgresql", @@ -353,7 +377,7 @@ 1501508204642 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -382,6 +406,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:39:16.249 CEST [5407] postgres@users FATAL: database \"users\" does not exist", "fileset": { @@ -398,7 +425,7 @@ 1501508356249 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -427,6 +454,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "fileset": { "module": "postgresql", @@ -443,7 +473,7 @@ 1501508357945 ] }, - + { "_index": 
"filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -473,6 +503,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:39:21.025 CEST [5404] postgres@postgres LOG: duration: 37.598 ms statement: SELECT n.nspname as \"Schema\",\n\t c.relname as \"Name\",\n\t CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as \"Type\",\n\t pg_catalog.pg_get_userbyid(c.relowner) as \"Owner\"\n\tFROM pg_catalog.pg_class c\n\t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n\tWHERE c.relkind IN ('r','')\n\t AND n.nspname <> 'pg_catalog'\n\t AND n.nspname <> 'information_schema'\n\t AND n.nspname !~ '^pg_toast'\n\t AND pg_catalog.pg_table_is_visible(c.oid)\n\tORDER BY 1,2;", "fileset": { @@ -489,7 +522,7 @@ 1501508361025 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -519,6 +552,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:39:31.619 CEST [5502] postgres@clients LOG: duration: 9.482 ms statement: select * from clients;", "fileset": { @@ -535,7 +571,7 @@ 1501508371619 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -565,6 +601,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "fileset": { "module": "postgresql", @@ -581,7 +620,7 @@ 1501508380147 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -611,6 +650,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:40:54.310 CEST [5502] postgres@clients LOG: duration: 26.082 ms statement: SELECT n.nspname as \"Schema\",\n\t c.relname as \"Name\",\n\t CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as \"Type\",\n\t pg_catalog.pg_get_userbyid(c.relowner) as \"Owner\"\n\tFROM pg_catalog.pg_class c\n\t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n\tWHERE c.relkind IN ('r','')\n\t AND n.nspname <> 'pg_catalog'\n\t AND n.nspname <> 'information_schema'\n\t AND n.nspname !~ '^pg_toast'\n\t AND pg_catalog.pg_table_is_visible(c.oid)\n\tORDER BY 1,2;", "fileset": { @@ -627,7 +669,7 @@ 1501508454310 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -657,6 +699,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:43:22.645 CEST [5502] postgres@clients LOG: duration: 36.162 ms statement: create table cats(name varchar(50) primary key, toy varchar (50) not null, born timestamp not null);", "fileset": { @@ -673,7 +718,7 @@ 1501508602645 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -687,7 +732,7 @@ "log": { "duration": "10.540", "thread_id": "5502", - 
"database": "clients", + "database": "c$lients", "level": "LOG", "timezone": "CEST", "query": "insert into cats(name, toy, born) values('kate', 'ball', now());", @@ -703,6 +748,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "fileset": { "module": "postgresql", @@ -719,7 +767,7 @@ 1501508762670 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -733,7 +781,7 @@ "log": { "duration": "5.156", "thread_id": "5502", - "database": "clients", + "database": "_clients$db", "level": "LOG", "timezone": "CEST", "query": "insert into cats(name, toy, born) values('frida', 'horse', now());", @@ -749,6 +797,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:46:23.016 CEST [5502] postgres@clients LOG: duration: 5.156 ms statement: insert into cats(name, toy, born) values('frida', 'horse', now());", "fileset": { @@ -765,7 +816,7 @@ 1501508783016 ] }, - + { "_index": "filebeat-7.0.0-alpha1-2017.08.01", "_type": "doc", @@ -779,7 +830,7 @@ "log": { "duration": "25.871", "thread_id": "5502", - "database": "clients", + "database": "clients_db", "level": "LOG", "timezone": "CEST", "query": "create table dogs(name varchar(50) primary key, owner varchar (50) not null, born timestamp not null);", @@ -795,6 +846,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/home/n/go/src/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian.log", "message": "2017-07-31 13:46:55.637 CEST [5502] postgres@clients LOG: duration: 25.871 ms statement: create table dogs(name varchar(50) primary key, owner varchar (50) not null, born timestamp not null);", "fileset": { diff --git a/vendor/github.com/elastic/beats/filebeat/module/redis/log/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/redis/log/manifest.yml index 98351f14..3c63a894 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/redis/log/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/redis/log/manifest.yml @@ -11,4 +11,4 @@ var: - "c:/program files/Redis/logs/redis.log*" ingest_pipeline: ingest/pipeline.json -prospector: config/log.yml +input: config/log.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/redis/log/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/redis/log/test/test.log-expected.json index 7428fd57..586dc2f0 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/redis/log/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/redis/log/test/test.log-expected.json @@ -15,6 +15,9 @@ "prospector" : { "type" : "log" }, + "event" : { + "type" : "log" + }, "read_timestamp" : "2017-06-01T22:43:37.024Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/redis/log/test/test.log", "fileset" : { @@ -45,6 +48,9 @@ "prospector" : { "type" : "log" }, + "event" : { + "type" : "log" + }, "read_timestamp" : "2017-06-01T22:43:37.024Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/redis/log/test/test.log", "fileset" : { @@ -77,6 +83,9 @@ "prospector" : { "type" : "log" }, + "event" : { + "type" : "log" + }, "read_timestamp" : "2017-06-01T22:43:37.024Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/redis/log/test/test.log", 
"fileset" : { @@ -107,6 +116,9 @@ "prospector" : { "type" : "log" }, + "event" : { + "type" : "log" + }, "read_timestamp" : "2017-06-01T22:43:37.024Z", "source" : "/Users/tsg/src/github.com/elastic/beats/filebeat/module/redis/log/test/test.log", "fileset" : { diff --git a/vendor/github.com/elastic/beats/filebeat/module/redis/slowlog/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/redis/slowlog/manifest.yml index 351329a7..8dca2e39 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/redis/slowlog/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/redis/slowlog/manifest.yml @@ -8,4 +8,4 @@ var: default: "" ingest_pipeline: ingest/pipeline.json -prospector: config/slowlog.yml +input: config/slowlog.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/system/_meta/config.reference.yml index 6f243887..b4121ca8 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/system/_meta/config.reference.yml @@ -10,9 +10,9 @@ # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. #var.convert_timezone: false - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: # Authorization logs #auth: @@ -25,6 +25,6 @@ # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. #var.convert_timezone: false - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/system/auth/_meta/fields.yml index bf43509a..1e94e3df 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/_meta/fields.yml @@ -17,6 +17,7 @@ description: > The PID of the process that sent the auth message. - name: message + type: text description: > The message in the log line. 
- name: user diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/system/auth/manifest.yml index c64de896..0e7cc747 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/manifest.yml @@ -19,4 +19,4 @@ var: value: false ingest_pipeline: ingest/pipeline.json -prospector: config/auth.yml +input: config/auth.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json index 61544f5d..db420118 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json @@ -29,6 +29,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -65,6 +68,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -100,6 +106,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -133,6 +142,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -170,6 +182,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -204,6 +219,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -241,6 +259,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -279,6 +300,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -326,6 +350,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", @@ -362,6 +389,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/auth/test/test.log", "fileset": { "module": "system", diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/_meta/fields.yml index 5dd3d0e4..667222cc 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/_meta/fields.yml @@ -16,5 +16,6 @@ description: > The PID of the process that sent the syslog message. 
- name: message + type: text description: > The message in the log line. diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/manifest.yml index 88cfe1bb..5d4bebb2 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/manifest.yml @@ -17,4 +17,4 @@ var: value: false ingest_pipeline: ingest/pipeline.json -prospector: config/syslog.yml +input: config/syslog.yml diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json index fdf1f34a..f9a53e85 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json @@ -24,6 +24,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "module/system/syslog/test/darwin-syslog-sample.log", "fields": { "source_type": "system-syslog" @@ -55,6 +58,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "module/system/syslog/test/darwin-syslog-sample.log", "fields": { "source_type": "system-syslog" @@ -83,6 +89,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "source": "/Users/tsg/src/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log", "fileset": { "module": "system", diff --git a/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/config.reference.yml b/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/config.reference.yml index 0379b2e6..e800f735 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/config.reference.yml @@ -7,6 +7,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Prospector configuration (advanced). Any prospector configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. - #prospector: + #input: diff --git a/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/docs.asciidoc index 1dd1def3..6264e00f 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/module/traefik/_meta/docs.asciidoc @@ -14,8 +14,6 @@ This module requires the {elasticsearch-plugins}/ingest-user-agent.html[ingest-user-agent] and {elasticsearch-plugins}/ingest-geoip.html[ingest-geoip] Elasticsearch plugins. -//REVIEWERS: Do we need to say anything else about compatibility here? 
- include::../include/running-modules.asciidoc[] [float] diff --git a/vendor/github.com/elastic/beats/filebeat/module/traefik/access/manifest.yml b/vendor/github.com/elastic/beats/filebeat/module/traefik/access/manifest.yml index 58871c90..c72c12d4 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/traefik/access/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/traefik/access/manifest.yml @@ -10,7 +10,7 @@ var: - c:/programdata/traefik/logs/*access.log* ingest_pipeline: ingest/pipeline.json -prospector: config/traefik-access.yml +input: config/traefik-access.yml requires.processors: - name: user_agent diff --git a/vendor/github.com/elastic/beats/filebeat/module/traefik/access/tests/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/traefik/access/tests/test.log-expected.json index 9bd7e6f4..6746bebf 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/traefik/access/tests/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/traefik/access/tests/test.log-expected.json @@ -47,6 +47,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "read_timestamp": "2017-10-09T18:56:25.032Z", "source": "/var/log/traefik/access.log", "fileset": { @@ -103,6 +106,9 @@ "prospector": { "type": "log" }, + "input": { + "type": "log" + }, "read_timestamp": "2017-10-09T18:56:25.031Z", "source": "/var/log/traefik/access.log", "fileset": { diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/iis.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/iis.yml.disabled new file mode 100644 index 00000000..0ed84f14 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/iis.yml.disabled @@ -0,0 +1,17 @@ +- module: iis + # Access logs + access: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Error logs + error: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/mongodb.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/mongodb.yml.disabled new file mode 100644 index 00000000..be6ea989 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/mongodb.yml.disabled @@ -0,0 +1,8 @@ +- module: mongodb + # All logs + log: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
+ #var.paths: diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go b/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go index f2b9a867..c6e4ca2c 100644 --- a/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go +++ b/vendor/github.com/elastic/beats/filebeat/prospector/prospector.go @@ -1,150 +1,37 @@ +// Package prospector allows to define new way of reading data in Filebeat +// Deprecated: See the input package package prospector -import ( - "fmt" - "sync" - "time" +import "github.com/elastic/beats/filebeat/input" - "github.com/mitchellh/hashstructure" +// Prospectorer defines how to read new data +// Deprecated: See input.input +type Prospectorer = input.Input - "github.com/elastic/beats/filebeat/channel" - "github.com/elastic/beats/filebeat/input/file" - "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/logp" -) +// Runner encapsulate the lifecycle of a prospectorer +// Deprecated: See input.Runner +type Runner = input.Runner -// Prospectorer is the interface common to all prospectors -type Prospectorer interface { - Run() - Stop() - Wait() -} +// Context wrapper for backward compatibility +// Deprecated: See input.Context +type Context = input.Context -// Prospector contains the prospector -type Prospector struct { - config prospectorConfig - prospectorer Prospectorer - done chan struct{} - wg *sync.WaitGroup - ID uint64 - Once bool - beatDone chan struct{} -} +// Factory wrapper for backward compatibility +// Deprecated: See input.Factory +type Factory = input.Factory -// NewProspector instantiates a new prospector -func New( - conf *common.Config, - outlet channel.Factory, - beatDone chan struct{}, - states []file.State, - dynFields *common.MapStrPointer, -) (*Prospector, error) { - prospector := &Prospector{ - config: defaultConfig, - wg: &sync.WaitGroup{}, - done: make(chan struct{}), - Once: false, - beatDone: beatDone, - } +// Register wrapper for backward compatibility +// Deprecated: See input.Register +var Register = input.Register - var err error - if err = conf.Unpack(&prospector.config); err != nil { - return nil, err - } +// GetFactory wrapper for backward compatibility +// Deprecated: See input.GetFactory +var GetFactory = input.GetFactory - var h map[string]interface{} - conf.Unpack(&h) - prospector.ID, err = hashstructure.Hash(h, nil) - if err != nil { - return nil, err - } +// New wrapper for backward compatibility +// Deprecated: see input.New +var New = input.New - var f Factory - f, err = GetFactory(prospector.config.Type) - if err != nil { - return prospector, err - } - - context := Context{ - States: states, - Done: prospector.done, - BeatDone: prospector.beatDone, - DynamicFields: dynFields, - } - var prospectorer Prospectorer - prospectorer, err = f(conf, outlet, context) - if err != nil { - return prospector, err - } - prospector.prospectorer = prospectorer - - return prospector, nil -} - -// Start starts the prospector -func (p *Prospector) Start() { - p.wg.Add(1) - logp.Debug("prospector", "Starting prospector of type: %v; ID: %d", p.config.Type, p.ID) - - onceWg := sync.WaitGroup{} - if p.Once { - // Make sure start is only completed when Run did a complete first scan - defer onceWg.Wait() - } - - onceWg.Add(1) - // Add waitgroup to make sure prospectors finished - go func() { - defer func() { - onceWg.Done() - p.stop() - p.wg.Done() - }() - - p.Run() - }() -} - -// Run starts scanning through all the file paths and fetch the related files. 
Start a harvester for each file -func (p *Prospector) Run() { - // Initial prospector run - p.prospectorer.Run() - - // Shuts down after the first complete run of all prospectors - if p.Once { - return - } - - for { - select { - case <-p.done: - logp.Info("Prospector ticker stopped") - return - case <-time.After(p.config.ScanFrequency): - logp.Debug("prospector", "Run prospector") - p.prospectorer.Run() - } - } -} - -// Stop stops the prospector and with it all harvesters -func (p *Prospector) Stop() { - // Stop scanning and wait for completion - close(p.done) - p.wg.Wait() -} - -func (p *Prospector) stop() { - logp.Info("Stopping Prospector: %d", p.ID) - - // In case of once, it will be waited until harvesters close itself - if p.Once { - p.prospectorer.Wait() - } else { - p.prospectorer.Stop() - } -} - -func (p *Prospector) String() string { - return fmt.Sprintf("prospector [type=%s, ID=%d]", p.config.Type, p.ID) -} +// NewRunnerFactory wrapper for backward compatibility +// Deprecated: see input.NewRunnerFactory +var NewRunnerFactory = input.NewRunnerFactory diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/udp/config.go b/vendor/github.com/elastic/beats/filebeat/prospector/udp/config.go deleted file mode 100644 index f3f43fee..00000000 --- a/vendor/github.com/elastic/beats/filebeat/prospector/udp/config.go +++ /dev/null @@ -1,20 +0,0 @@ -package udp - -import ( - "github.com/elastic/beats/filebeat/harvester" -) - -var defaultConfig = config{ - ForwarderConfig: harvester.ForwarderConfig{ - Type: "udp", - }, - MaxMessageSize: 10240, - // TODO: What should be default port? - Host: "localhost:8080", -} - -type config struct { - harvester.ForwarderConfig `config:",inline"` - Host string `config:"host"` - MaxMessageSize int `config:"max_message_size"` -} diff --git a/vendor/github.com/elastic/beats/filebeat/prospector/udp/harvester.go b/vendor/github.com/elastic/beats/filebeat/prospector/udp/harvester.go deleted file mode 100644 index 7a928b23..00000000 --- a/vendor/github.com/elastic/beats/filebeat/prospector/udp/harvester.go +++ /dev/null @@ -1,74 +0,0 @@ -package udp - -import ( - "net" - "time" - - "github.com/elastic/beats/libbeat/beat" - "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/logp" - - "github.com/elastic/beats/filebeat/harvester" - "github.com/elastic/beats/filebeat/util" -) - -type Harvester struct { - forwarder *harvester.Forwarder - done chan struct{} - cfg *common.Config - listener net.PacketConn -} - -func NewHarvester(forwarder *harvester.Forwarder, cfg *common.Config) *Harvester { - return &Harvester{ - done: make(chan struct{}), - cfg: cfg, - forwarder: forwarder, - } -} - -func (h *Harvester) Run() error { - config := defaultConfig - err := h.cfg.Unpack(&config) - if err != nil { - return err - } - - h.listener, err = net.ListenPacket("udp", config.Host) - if err != nil { - return err - } - defer h.listener.Close() - - logp.Info("Started listening for udp on: %s", config.Host) - - buffer := make([]byte, config.MaxMessageSize) - - for { - select { - case <-h.done: - return nil - default: - } - - length, _, err := h.listener.ReadFrom(buffer) - if err != nil { - logp.Err("Error reading from buffer: %v", err.Error()) - continue - } - data := util.NewData() - data.Event = beat.Event{ - Timestamp: time.Now(), - Fields: common.MapStr{ - "message": string(buffer[:length]), - }, - } - h.forwarder.Send(data) - } -} - -func (h *Harvester) Stop() { - logp.Info("Stopping udp harvester") - close(h.done) - h.listener.Close() -} diff 
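
The prospector package diff above replaces its implementation with aliases into the new input package, so existing imports keep compiling while carrying Deprecated notices. A condensed, self-contained sketch of that alias pattern; the interface body mirrors the removed Prospectorer interface, and the names are for illustration only:

package sketch

// Input stands in for the new input package's interface.
type Input interface {
	Run()
	Stop()
	Wait()
}

// Prospectorer is the legacy name, kept as a type alias rather than a new
// named type, so any value implementing Input also satisfies Prospectorer
// and factories can be shared between the old and new packages.
// Deprecated: use Input.
type Prospectorer = Input

In the real shim the aliases point at github.com/elastic/beats/filebeat/input, as the hunk above shows.
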
--git a/vendor/github.com/elastic/beats/filebeat/prospector/udp/prospector.go b/vendor/github.com/elastic/beats/filebeat/prospector/udp/prospector.go deleted file mode 100644 index acf2d1b1..00000000 --- a/vendor/github.com/elastic/beats/filebeat/prospector/udp/prospector.go +++ /dev/null @@ -1,62 +0,0 @@ -package udp - -import ( - "github.com/elastic/beats/filebeat/channel" - "github.com/elastic/beats/filebeat/harvester" - "github.com/elastic/beats/filebeat/prospector" - "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" - "github.com/elastic/beats/libbeat/logp" -) - -func init() { - err := prospector.Register("udp", NewProspector) - if err != nil { - panic(err) - } -} - -type Prospector struct { - harvester *Harvester - started bool - outlet channel.Outleter -} - -func NewProspector(cfg *common.Config, outlet channel.Factory, context prospector.Context) (prospector.Prospectorer, error) { - cfgwarn.Experimental("UDP prospector type is used") - - out, err := outlet(cfg, context.DynamicFields) - if err != nil { - return nil, err - } - - forwarder := harvester.NewForwarder(out) - return &Prospector{ - outlet: out, - harvester: NewHarvester(forwarder, cfg), - started: false, - }, nil -} - -func (p *Prospector) Run() { - if !p.started { - logp.Info("Starting udp prospector") - p.started = true - go func() { - defer p.outlet.Close() - err := p.harvester.Run() - if err != nil { - logp.Err("Error running harvester:: %v", err) - } - }() - } -} - -func (p *Prospector) Stop() { - logp.Info("Stopping udp prospector") - p.harvester.Stop() -} - -func (p *Prospector) Wait() { - p.Stop() -} diff --git a/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go b/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go index 0dc312dd..80c1829f 100644 --- a/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go +++ b/vendor/github.com/elastic/beats/filebeat/registrar/registrar.go @@ -3,6 +3,7 @@ package registrar import ( "encoding/json" "fmt" + "io" "os" "path/filepath" "sync" @@ -19,10 +20,13 @@ type Registrar struct { Channel chan []file.State out successLogger done chan struct{} - registryFile string // Path to the Registry File + registryFile string // Path to the Registry File + fileMode os.FileMode // Permissions to apply on the Registry File wg sync.WaitGroup states *file.States // Map with all file paths inside and the corresponding state + gcRequired bool // gcRequired is set if registry state needs to be gc'ed before the next write + gcEnabled bool // gcEnabled indictes the registry contains some state that can be gc'ed in the future flushTimeout time.Duration bufferedStateUpdates int } @@ -32,15 +36,20 @@ type successLogger interface { } var ( - statesUpdate = monitoring.NewInt(nil, "registrar.states.update") - statesCleanup = monitoring.NewInt(nil, "registrar.states.cleanup") - statesCurrent = monitoring.NewInt(nil, "registrar.states.current") - registryWrites = monitoring.NewInt(nil, "registrar.writes") + statesUpdate = monitoring.NewInt(nil, "registrar.states.update") + statesCleanup = monitoring.NewInt(nil, "registrar.states.cleanup") + statesCurrent = monitoring.NewInt(nil, "registrar.states.current") + registryWrites = monitoring.NewInt(nil, "registrar.writes.total") + registryFails = monitoring.NewInt(nil, "registrar.writes.fail") + registrySuccess = monitoring.NewInt(nil, "registrar.writes.success") ) -func New(registryFile string, flushTimeout time.Duration, out successLogger) (*Registrar, error) { +// New creates a new 
Registrar instance, updating the registry file on +// `file.State` updates. New fails if the file can not be opened or created. +func New(registryFile string, fileMode os.FileMode, flushTimeout time.Duration, out successLogger) (*Registrar, error) { r := &Registrar{ registryFile: registryFile, + fileMode: fileMode, done: make(chan struct{}), states: file.NewStates(), Channel: make(chan []file.State, 1), @@ -107,26 +116,117 @@ func (r *Registrar) loadStates() error { logp.Info("Loading registrar data from %s", r.registryFile) - decoder := json.NewDecoder(f) - states := []file.State{} - err = decoder.Decode(&states) + states, err := readStatesFrom(f) if err != nil { - return fmt.Errorf("Error decoding states: %s", err) + return err } - - states = resetStates(states) r.states.SetStates(states) logp.Info("States Loaded from registrar: %+v", len(states)) return nil } +func readStatesFrom(in io.Reader) ([]file.State, error) { + states := []file.State{} + decoder := json.NewDecoder(in) + if err := decoder.Decode(&states); err != nil { + return nil, fmt.Errorf("Error decoding states: %s", err) + } + + states = fixStates(states) + states = resetStates(states) + return states, nil +} + +// fixStates cleans up the regsitry states when updating from an older version +// of filebeat potentially writing invalid entries. +func fixStates(states []file.State) []file.State { + if len(states) == 0 { + return states + } + + // we use a map of states here, so to identify and merge duplicate entries. + idx := map[string]*file.State{} + for i := range states { + state := &states[i] + fixState(state) + + id := state.ID() + old, exists := idx[id] + if !exists { + idx[id] = state + } else { + mergeStates(old, state) // overwrite the entry in 'old' + } + } + + if len(idx) == len(states) { + return states + } + + i := 0 + newStates := make([]file.State, len(idx)) + for _, state := range idx { + newStates[i] = *state + i++ + } + return newStates +} + +// fixState updates a read state to fullfil required invariantes: +// - "Meta" must be nil if len(Meta) == 0 +func fixState(st *file.State) { + if len(st.Meta) == 0 { + st.Meta = nil + } +} + +// mergeStates merges 2 states by trying to determine the 'newer' state. +// The st state is overwritten with the updated fields. +func mergeStates(st, other *file.State) { + st.Finished = st.Finished || other.Finished + if st.Offset < other.Offset { // always select the higher offset + st.Offset = other.Offset + } + + // update file meta-data. As these are updated concurrently by the + // prospectors, select the newer state based on the update timestamp. 
+ var meta, metaOld, metaNew map[string]string + if st.Timestamp.Before(other.Timestamp) { + st.Source = other.Source + st.Timestamp = other.Timestamp + st.TTL = other.TTL + st.FileStateOS = other.FileStateOS + + metaOld, metaNew = st.Meta, other.Meta + } else { + metaOld, metaNew = other.Meta, st.Meta + } + + if len(metaOld) == 0 || len(metaNew) == 0 { + meta = metaNew + } else { + meta = map[string]string{} + for k, v := range metaOld { + meta[k] = v + } + for k, v := range metaNew { + meta[k] = v + } + } + + if len(meta) == 0 { + meta = nil + } + st.Meta = meta +} + // resetStates sets all states to finished and disable TTL on restart -// For all states covered by a prospector, TTL will be overwritten with the prospector value +// For all states covered by an input, TTL will be overwritten with the input value func resetStates(states []file.State) []file.State { for key, state := range states { state.Finished = true - // Set ttl to -2 to easily spot which states are not managed by a prospector + // Set ttl to -2 to easily spot which states are not managed by a input state.TTL = -2 states[key] = state } @@ -134,7 +234,7 @@ func resetStates(states []file.State) []file.State { } func (r *Registrar) Start() error { - // Load the previous log file locations now, for use in prospector + // Load the previous log file locations now, for use in input err := r.loadStates() if err != nil { return fmt.Errorf("Error loading state: %v", err) @@ -183,24 +283,53 @@ func (r *Registrar) Run() { // onEvents processes events received from the publisher pipeline func (r *Registrar) onEvents(states []file.State) { r.processEventStates(states) + r.bufferedStateUpdates += len(states) + + // check if we need to enable state cleanup + if !r.gcEnabled { + for i := range states { + if states[i].TTL >= 0 || states[i].Finished { + r.gcEnabled = true + break + } + } + } + + logp.Debug("registrar", "Registrar state updates processed. Count: %v", len(states)) + + // new set of events received -> mark state registry ready for next + // cleanup phase in case gc'able events are stored in the registry. + r.gcRequired = r.gcEnabled +} + +// gcStates runs a registry Cleanup. The method check if more event in the +// registry can be gc'ed in the future. If no potential removable state is found, +// the gcEnabled flag is set to false, indicating the current registrar state being +// stable. New registry update events can re-enable state gc'ing. +func (r *Registrar) gcStates() { + if !r.gcRequired { + return + } beforeCount := r.states.Count() - cleanedStates := r.states.Cleanup() + cleanedStates, pendingClean := r.states.Cleanup() statesCleanup.Add(int64(cleanedStates)) - r.bufferedStateUpdates += len(states) - logp.Debug("registrar", - "Registrar states cleaned up. Before: %d, After: %d", - beforeCount, beforeCount-cleanedStates) + "Registrar states cleaned up. Before: %d, After: %d, Pending: %d", + beforeCount, beforeCount-cleanedStates, pendingClean) + + r.gcRequired = false + r.gcEnabled = pendingClean > 0 } // processEventStates gets the states from the events and writes them to the registrar state func (r *Registrar) processEventStates(states []file.State) { logp.Debug("registrar", "Processing %d events", len(states)) + ts := time.Now() for i := range states { - r.states.Update(states[i]) + r.states.UpdateWithTs(states[i], ts) statesUpdate.Add(1) } } @@ -225,34 +354,55 @@ func (r *Registrar) flushRegistry() { // writeRegistry writes the new json registry file to disk. 
func (r *Registrar) writeRegistry() error { - logp.Debug("registrar", "Write registry file: %s", r.registryFile) + // First clean up states + r.gcStates() + states := r.states.GetStates() + statesCurrent.Set(int64(len(states))) + + registryWrites.Inc() - tempfile := r.registryFile + ".new" - f, err := os.OpenFile(tempfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0600) + tempfile, err := writeTmpFile(r.registryFile, r.fileMode, states) if err != nil { - logp.Err("Failed to create tempfile (%s) for writing: %s", tempfile, err) + registryFails.Inc() return err } - // First clean up states - states := r.states.GetStates() - - encoder := json.NewEncoder(f) - err = encoder.Encode(states) + err = helper.SafeFileRotate(r.registryFile, tempfile) if err != nil { - f.Close() - logp.Err("Error when encoding the states: %s", err) + registryFails.Inc() return err } - // Directly close file because of windows - f.Close() + logp.Debug("registrar", "Registry file updated. %d states written.", len(states)) + registrySuccess.Inc() - err = helper.SafeFileRotate(r.registryFile, tempfile) + return nil +} - logp.Debug("registrar", "Registry file updated. %d states written.", len(states)) - registryWrites.Add(1) - statesCurrent.Set(int64(len(states))) +func writeTmpFile(baseName string, perm os.FileMode, states []file.State) (string, error) { + logp.Debug("registrar", "Write registry file: %s", baseName) + + tempfile := baseName + ".new" + f, err := os.OpenFile(tempfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, perm) + if err != nil { + logp.Err("Failed to create tempfile (%s) for writing: %s", tempfile, err) + return "", err + } + + defer f.Close() + + encoder := json.NewEncoder(f) + + if err := encoder.Encode(states); err != nil { + logp.Err("Error when encoding the states: %s", err) + return "", err + } + + // Commit the changes to storage to avoid corrupt registry files + if err = f.Sync(); err != nil { + logp.Err("Error when syncing new registry file contents: %s", err) + return "", err + } - return err + return tempfile, nil } diff --git a/vendor/github.com/elastic/beats/filebeat/registrar/registrar_test.go b/vendor/github.com/elastic/beats/filebeat/registrar/registrar_test.go new file mode 100644 index 00000000..102b073d --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/registrar/registrar_test.go @@ -0,0 +1,189 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package registrar + +import ( + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/filebeat/input/file" +) + +func TestRegistrarRead(t *testing.T) { + type testCase struct { + input string + expected []file.State + } + + zone := time.FixedZone("+0000", 0) + + cases := map[string]testCase{ + "ok registry with one entry": testCase{ + input: `[ + { + "type": "log", + "source": "test.log", + "offset": 10, + "timestamp": "2018-07-16T10:45:01+00:00", + "ttl": -1, + "meta": null + } + ]`, + expected: []file.State{ + { + Type: "log", + Source: "test.log", + Timestamp: time.Date(2018, time.July, 16, 10, 45, 01, 0, zone), + Offset: 10, + TTL: -2, // loader always resets states + }, + }, + }, + + "load config without meta": testCase{ + input: `[ + { + "type": "log", + "source": "test.log", + "offset": 10, + "timestamp": "2018-07-16T10:45:01+00:00", + "ttl": -1 + } + ]`, + expected: []file.State{ + { + Type: "log", + Source: "test.log", + Timestamp: time.Date(2018, time.July, 16, 10, 45, 01, 0, zone), + Offset: 10, + TTL: -2, // loader always resets states + }, + }, + }, + + "load config with empty meta": testCase{ + input: `[ + { + "type": "log", + "source": "test.log", + "offset": 10, + "timestamp": "2018-07-16T10:45:01+00:00", + "ttl": -1, + "meta": {} + } + ]`, + expected: []file.State{ + { + Type: "log", + Source: "test.log", + Timestamp: time.Date(2018, time.July, 16, 10, 45, 01, 0, zone), + Offset: 10, + TTL: -2, // loader always resets states + }, + }, + }, + + "requires merge without meta-data": testCase{ + input: `[ + { + "type": "log", + "source": "test.log", + "offset": 100, + "timestamp": "2018-07-16T10:45:01+00:00", + "ttl": -1, + "meta": {} + }, + { + "type": "log", + "source": "test.log", + "offset": 10, + "timestamp": "2018-07-16T10:45:10+00:00", + "ttl": -1, + "meta": null + } + ]`, + expected: []file.State{ + { + Type: "log", + Source: "test.log", + Timestamp: time.Date(2018, time.July, 16, 10, 45, 10, 0, zone), + Offset: 100, + TTL: -2, // loader always resets states + Meta: nil, + }, + }, + }, + } + + matchState := func(t *testing.T, i int, expected, actual file.State) { + check := func(name string, a, b interface{}) { + if !reflect.DeepEqual(a, b) { + t.Errorf("State %v: %v mismatch (expected=%v, actual=%v)", i, name, a, b) + } + } + + check("id", expected.ID(), actual.ID()) + check("source", expected.Source, actual.Source) + check("offset", expected.Offset, actual.Offset) + check("ttl", expected.TTL, actual.TTL) + check("meta", expected.Meta, actual.Meta) + check("type", expected.Type, actual.Type) + + if t1, t2 := expected.Timestamp, actual.Timestamp; !t1.Equal(t2) { + t.Errorf("State %v: timestamp mismatch (expected=%v, actual=%v)", i, t1, t2) + } + } + + for name, test := range cases { + test := test + t.Run(name, func(t *testing.T) { + in := strings.NewReader(test.input) + + states, err := readStatesFrom(in) + if !assert.NoError(t, err) { + return + } + + actual := sortedStates(states) + expected := sortedStates(test.expected) + if len(actual) != len(expected) { + t.Errorf("expected %v state, but registrar did load %v states", + len(expected), len(actual)) + return + } + + for i := range expected { + matchState(t, i, expected[i], actual[i]) + } + }) + } +} + +func sortedStates(states []file.State) []file.State { + tmp := make([]file.State, len(states)) + copy(tmp, states) + sort.Slice(tmp, func(i, j int) bool { + return tmp[i].ID() < tmp[j].ID() + }) + return tmp +} diff --git 
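
writeRegistry above now delegates to writeTmpFile and helper.SafeFileRotate: encode the states into a temporary file, sync it to disk, then rotate it over the registry so a crash cannot leave a truncated file behind. A minimal standalone sketch of that write path using only the standard library; os.Rename stands in for SafeFileRotate and the function name is illustrative:

package sketch

import (
	"encoding/json"
	"os"
)

// writeAtomically encodes v into path+".new", forces the bytes to disk,
// then renames the temporary file over the target in one step.
func writeAtomically(path string, perm os.FileMode, v interface{}) error {
	tmp := path + ".new"

	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, perm)
	if err != nil {
		return err
	}

	if err := json.NewEncoder(f).Encode(v); err != nil {
		f.Close()
		return err
	}
	// Make sure the contents are on disk before the rename makes them visible.
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}

	// helper.SafeFileRotate plays this role in the patch itself.
	return os.Rename(tmp, path)
}

Called as writeAtomically(registryFile, 0600, states), this leaves either the previous registry or the complete new one on disk, never a partially written file.
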
a/vendor/github.com/elastic/beats/filebeat/scripts/fileset/config/config.yml b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/config/config.yml new file mode 100644 index 00000000..0afd1731 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/config/config.yml @@ -0,0 +1,6 @@ +type: log +paths: +{{ range $i, $path := .paths }} + - {{$path}} +{{ end }} +exclude_files: [".gz$"] diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/fields.yml b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/fields.yml similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/fields.yml rename to vendor/github.com/elastic/beats/filebeat/scripts/fileset/fields.yml diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/ingest/pipeline.json similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/ingest/pipeline.json rename to vendor/github.com/elastic/beats/filebeat/scripts/fileset/ingest/pipeline.json diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/manifest.yml b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/manifest.yml similarity index 87% rename from vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/manifest.yml rename to vendor/github.com/elastic/beats/filebeat/scripts/fileset/manifest.yml index 669f5e79..36e13958 100644 --- a/vendor/github.com/elastic/beats/filebeat/scripts/module/fileset/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/manifest.yml @@ -10,4 +10,4 @@ var: - c:/programdata/example/logs/test.log* ingest_pipeline: ingest/pipeline.json -prospector: config/{fileset}.yml +input: config/{fileset}.yml diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/fileset/module-fileset.yml b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/module-fileset.yml new file mode 100644 index 00000000..8ed91d95 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/fileset/module-fileset.yml @@ -0,0 +1,2 @@ +- id: Filebeat-{module}-{fileset}-Dashboard + file: Filebeat-{module}-{fileset}.json diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/generate_imports_helper.py b/vendor/github.com/elastic/beats/filebeat/scripts/generate_imports_helper.py index 71048128..5e8fcc0d 100644 --- a/vendor/github.com/elastic/beats/filebeat/scripts/generate_imports_helper.py +++ b/vendor/github.com/elastic/beats/filebeat/scripts/generate_imports_helper.py @@ -2,19 +2,21 @@ from os import listdir -comment = """Package include imports all prospector packages so that they register +comment = """Package include imports all input packages so that they register their factories with the global registry. 
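
The new scripts/fileset/config/config.yml above is written with Go template syntax ({{ range $i, $path := .paths }}). A small sketch of rendering such a template, assuming plain text/template semantics; the embedded template copy and the example path are illustrative:

package main

import (
	"os"
	"text/template"
)

// A copy of the fileset config template added in this patch.
const configTmpl = `type: log
paths:
{{ range $i, $path := .paths }}
 - {{$path}}
{{ end }}
exclude_files: [".gz$"]
`

func main() {
	t := template.Must(template.New("config").Parse(configTmpl))
	vars := map[string]interface{}{
		"paths": []string{"/var/log/example/*.log"}, // illustrative value
	}
	if err := t.Execute(os.Stdout, vars); err != nil {
		panic(err)
	}
}
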
This package can be imported in the -main package to automatically register all of the standard supported prospectors +main package to automatically register all of the standard supported inputs modules.""" def get_importable_lines(go_beat_path, import_line): - path = abspath("prospector") + path = abspath("input") imported_prospector_lines = [] - prospectors = [p for p in listdir(path) if isdir(join(path, p))] + + # Skip the file folder, its not an input but I will do the move with another PR + prospectors = [p for p in listdir(path) if isdir(join(path, p)) and p.find("file") is -1] for prospector in sorted(prospectors): - prospector_import = import_line.format(beat_path=go_beat_path, module="prospector", name=prospector) + prospector_import = import_line.format(beat_path=go_beat_path, module="input", name=prospector) imported_prospector_lines.append(prospector_import) return imported_prospector_lines diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/generator/fields/main.go b/vendor/github.com/elastic/beats/filebeat/scripts/generator/fields/main.go index bcbf9785..8c6e48a4 100644 --- a/vendor/github.com/elastic/beats/filebeat/scripts/generator/fields/main.go +++ b/vendor/github.com/elastic/beats/filebeat/scripts/generator/fields/main.go @@ -33,6 +33,7 @@ var ( "POSINT": "long", "SYSLOGHOST": "keyword", "SYSLOGTIMESTAMP": "text", + "LOCALDATETIME": "text", "TIMESTAMP": "text", "USERNAME": "keyword", "WORD": "keyword", @@ -77,6 +78,13 @@ func newFieldYml(name, typeName string, noDoc bool) *fieldYml { func newField(lp string) field { lp = lp[1 : len(lp)-1] ee := strings.Split(lp, ":") + if len(ee) != 2 { + return field{ + Type: ee[0], + Elements: nil, + } + } + e := strings.Split(ee[1], ".") return field{ Type: ee[0], @@ -120,6 +128,9 @@ func getElementsFromPatterns(patterns []string) ([]field, error) { pp := r.FindAllString(lp, -1) for _, p := range pp { f := newField(p) + if f.Elements == nil { + continue + } fs = addNewField(fs, f) } @@ -267,7 +278,11 @@ func generateField(out []*fieldYml, field field, index, count int, noDoc bool) [ func generateFields(f []field, noDoc bool) []*fieldYml { var out []*fieldYml for _, ff := range f { - out = generateField(out, ff, 1, len(ff.Elements), noDoc) + index := 1 + if len(ff.Elements) == 1 { + index = 0 + } + out = generateField(out, ff, index, len(ff.Elements), noDoc) } return out } diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/generator/fields/main_test.go b/vendor/github.com/elastic/beats/filebeat/scripts/generator/fields/main_test.go new file mode 100644 index 00000000..6fed0608 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/generator/fields/main_test.go @@ -0,0 +1,161 @@ +package main + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +type FieldsGeneratorTestCase struct { + patterns []string + fields []*fieldYml +} + +func TestFieldsGenerator(t *testing.T) { + tests := []FieldsGeneratorTestCase{ + FieldsGeneratorTestCase{ + patterns: []string{ + "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] %{USERNAME:postgresql.log.user}@%{HOSTNAME:postgresql.log.database} %{WORD:postgresql.log.level}: duration: %{NUMBER:postgresql.log.duration} ms statement: %{MULTILINEQUERY:postgresql.log.query}", + "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] \\[%{USERNAME:postgresql.log.user}\\]@\\[%{HOSTNAME:postgresql.log.database}\\] 
%{WORD:postgresql.log.level}: duration: %{NUMBER:postgresql.log.duration} ms statement: %{MULTILINEQUERY:postgresql.log.query}", + "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] %{USERNAME:postgresql.log.user}@%{HOSTNAME:postgresql.log.database} %{WORD:postgresql.log.level}: ?%{GREEDYDATA:postgresql.log.message}", + "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] \\[%{USERNAME:postgresql.log.user}\\]@\\[%{HOSTNAME:postgresql.log.database}\\] %{WORD:postgresql.log.level}: ?%{GREEDYDATA:postgresql.log.message}", + "%{LOCALDATETIME:postgresql.log.timestamp} %{WORD:postgresql.log.timezone} \\[%{NUMBER:postgresql.log.thread_id}\\] %{WORD:postgresql.log.level}: ?%{GREEDYDATA:postgresql.log.message}", + }, + fields: []*fieldYml{ + &fieldYml{Name: "log", Description: "Please add description", Example: "Please add example", Type: "group", Fields: []*fieldYml{ + &fieldYml{Name: "timestamp", Description: "Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "timezone", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "thread_id", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "user", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "database", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "level", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "duration", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "query", Description: "Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "message", Description: "Please add description", Example: "Please add example", Type: "text"}, + }, + }, + }, + }, + FieldsGeneratorTestCase{ + patterns: []string{ + "%{DATA:nginx.error.time} \\[%{DATA:nginx.error.level}\\] %{NUMBER:nginx.error.pid}#%{NUMBER:nginx.error.tid}: (\\*%{NUMBER:nginx.error.connection_id} )?%{GREEDYDATA:nginx.error.message}", + }, + fields: []*fieldYml{ + &fieldYml{Name: "error", Description: "Please add description", Example: "Please add example", Type: "group", Fields: []*fieldYml{ + &fieldYml{Name: "time", Description: "Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "level", Description: "Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "pid", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "tid", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "connection_id", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "message", Description: "Please add description", Example: "Please add example", Type: "text"}, + }, + }, + }, + }, + FieldsGeneratorTestCase{ + patterns: []string{ + "\\[%{TIMESTAMP:icinga.main.timestamp}\\] %{WORD:icinga.main.severity}/%{WORD:icinga.main.facility}: %{GREEDYMULTILINE:icinga.main.message}", + }, + fields: []*fieldYml{ + &fieldYml{Name: "main", Description: "Please add description", Example: "Please add example", Type: "group", Fields: []*fieldYml{ + &fieldYml{Name: "timestamp", Description: 
"Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "severity", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "facility", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "message", Description: "Please add description", Example: "Please add example", Type: "text"}, + }, + }, + }, + }, + FieldsGeneratorTestCase{ + patterns: []string{ + "(%{POSINT:redis.log.pid}:%{CHAR:redis.log.role} )?%{REDISTIMESTAMP:redis.log.timestamp} %{REDISLEVEL:redis.log.level} %{GREEDYDATA:redis.log.message}", + "%{POSINT:redis.log.pid}:signal-handler \\(%{POSINT:redis.log.timestamp}\\) %{GREEDYDATA:redis.log.message}", + }, + fields: []*fieldYml{ + &fieldYml{Name: "log", Description: "Please add description", Example: "Please add example", Type: "group", Fields: []*fieldYml{ + &fieldYml{Name: "pid", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "role", Description: "Please add description", Example: "Please add example"}, + &fieldYml{Name: "timestamp", Description: "Please add description", Example: "Please add example"}, + &fieldYml{Name: "level", Description: "Please add description", Example: "Please add example"}, + &fieldYml{Name: "message", Description: "Please add description", Example: "Please add example", Type: "text"}, + }, + }, + }, + }, + FieldsGeneratorTestCase{ + patterns: []string{ + "\\[%{TIMESTAMP:timestamp}\\] %{WORD:severity}/%{WORD:facility}: %{GREEDYMULTILINE:message}", + }, + fields: []*fieldYml{ + &fieldYml{Name: "timestamp", Description: "Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "severity", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "facility", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "message", Description: "Please add description", Example: "Please add example", Type: "text"}, + }, + }, + FieldsGeneratorTestCase{ + patterns: []string{ + "\\[%{TIMESTAMP:timestamp}\\] %{WORD:severity}/%{WORD}: %{GREEDYMULTILINE:message}", + }, + fields: []*fieldYml{ + &fieldYml{Name: "timestamp", Description: "Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "severity", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "message", Description: "Please add description", Example: "Please add example", Type: "text"}, + }, + }, + } + + for _, tc := range tests { + var proc processors + proc.patterns = tc.patterns + fs, err := proc.processFields() + if err != nil { + t.Error(err) + return + } + + f := generateFields(fs, false) + assert.True(t, reflect.DeepEqual(f, tc.fields)) + } +} + +// Known limitations +func TestFieldsGeneratorKnownLimitations(t *testing.T) { + tests := []FieldsGeneratorTestCase{ + // FIXME Field names including dots are not parsed properly + FieldsGeneratorTestCase{ + patterns: []string{ + "^# User@Host: %{USER:mysql.slowlog.user}(\\[[^\\]]+\\])? 
@ %{HOSTNAME:mysql.slowlog.host} \\[(%{IP:mysql.slowlog.ip})?\\](\\s*Id:\\s* %{NUMBER:mysql.slowlog.id})?\n# Query_time: %{NUMBER:mysql.slowlog.query_time.sec}\\s* Lock_time: %{NUMBER:mysql.slowlog.lock_time.sec}\\s* Rows_sent: %{NUMBER:mysql.slowlog.rows_sent}\\s* Rows_examined: %{NUMBER:mysql.slowlog.rows_examined}\n(SET timestamp=%{NUMBER:mysql.slowlog.timestamp};\n)?%{GREEDYMULTILINE:mysql.slowlog.query}", + }, + fields: []*fieldYml{ + &fieldYml{Name: "slowlog", Description: "Please add description", Example: "Please add example", Type: "group", Fields: []*fieldYml{ + &fieldYml{Name: "user", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "host", Description: "Please add description", Example: "Please add example", Type: "keyword"}, + &fieldYml{Name: "ip", Description: "Please add description", Example: "Please add example"}, + &fieldYml{Name: "id", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "query_time.ms", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "lock_time.ms", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "rows_sent", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "rows_examined", Description: "Please add description", Example: "Please add example", Type: "long"}, + &fieldYml{Name: "timestamp", Description: "Please add description", Example: "Please add example", Type: "text"}, + &fieldYml{Name: "query", Description: "Please add description", Example: "Please add example", Type: "text"}, + }, + }, + }, + }, + } + + for _, tc := range tests { + var proc processors + proc.patterns = tc.patterns + fs, err := proc.processFields() + if err != nil { + t.Error(err) + return + } + + f := generateFields(fs, false) + assert.False(t, reflect.DeepEqual(f, tc.fields)) + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/generator/fileset/main.go b/vendor/github.com/elastic/beats/filebeat/scripts/generator/fileset/main.go index 053e4512..0872e3b2 100644 --- a/vendor/github.com/elastic/beats/filebeat/scripts/generator/fileset/main.go +++ b/vendor/github.com/elastic/beats/filebeat/scripts/generator/fileset/main.go @@ -1,86 +1,51 @@ package main import ( - "bytes" "flag" "fmt" - "io/ioutil" "os" "path" + + "github.com/elastic/beats/filebeat/scripts/generator" ) -func copyTemplatesToDest(templatesPath, name, filesetPath, module, fileset string) error { - template := path.Join(templatesPath, name) - c, err := ioutil.ReadFile(template) - if err != nil { - return err +func generateFileset(module, fileset, modulesPath, beatsPath string) error { + filesetPath := path.Join(modulesPath, "module", module, fileset) + if generator.DirExists(filesetPath) { + return fmt.Errorf("fileset already exists: %s", fileset) } - c = bytes.Replace(c, []byte("{module}"), []byte(module), -1) - c = bytes.Replace(c, []byte("{fileset}"), []byte(fileset), -1) - - dest := path.Join(filesetPath, name) - err = ioutil.WriteFile(dest, c, os.ModePerm) + err := generator.CreateDirectories(filesetPath, []string{"", "_meta", "test", "config", "ingest"}) if err != nil { - return fmt.Errorf("cannot copy template: %v", err) - } - return nil -} - -func generateModule(module, fileset, modulePath, beatsPath string) error { - p := path.Join(modulePath, "module", module) - if _, err := os.Stat(p); os.IsExist(err) { - return 
fmt.Errorf("module already exists: %s at %s", module, p) + return err } - d := path.Join(p, "_meta", "kibana", "default") - err := os.MkdirAll(d, 0750) + replace := map[string]string{"module": module, "fileset": fileset} + templatesPath := path.Join(beatsPath, "scripts", "fileset") + filesToCopy := []string{path.Join("config", "config.yml"), path.Join("ingest", "pipeline.json"), "manifest.yml"} + err = generator.CopyTemplates(templatesPath, filesetPath, filesToCopy, replace) if err != nil { return err } - - templatesPath := path.Join(beatsPath, "scripts", "module") - filesToCopy := []string{path.Join("fields.yml"), path.Join("docs.asciidoc")} - for _, f := range filesToCopy { - err := copyTemplatesToDest(templatesPath, f, p, module, "") - if err != nil { - return err - } + err = generator.RenameConfigYml(modulesPath, module, fileset) + if err != nil { + return err } - return nil + return addFilesetDashboard(module, fileset, modulesPath, templatesPath) } -func generateFileset(module, fileset, modulePath, beatsPath string) error { - filesetPath := path.Join(modulePath, "module", module, fileset) - if _, err := os.Stat(filesetPath); os.IsExist(err) { - return fmt.Errorf("fileset already exists: %s", fileset) - } - - dirsToCreate := []string{"", "_meta", "test", "config", "ingest"} - for _, d := range dirsToCreate { - p := path.Join(filesetPath, d) - err := os.Mkdir(p, 0750) - if err != nil { - return err - } - } - - templatesPath := path.Join(beatsPath, "scripts", "module", "fileset") - filesToCopy := []string{path.Join("config", "config.yml"), path.Join("ingest", "pipeline.json"), "manifest.yml"} - for _, f := range filesToCopy { - err := copyTemplatesToDest(templatesPath, f, filesetPath, module, fileset) - if err != nil { - return err - } - } - return nil +func addFilesetDashboard(module, fileset, modulesPath, templatesPath string) error { + template := path.Join(templatesPath, "module-fileset.yml") + dest := path.Join(modulesPath, "module", module, "module.yml") + replacement := map[string]string{"module": module, "fileset": fileset} + return generator.AppendTemplate(template, dest, replacement) } func main() { module := flag.String("module", "", "Name of the module") fileset := flag.String("fileset", "", "Name of the fileset") - modulePath := flag.String("path", ".", "Path to the generated fileset") + modulesPath := flag.String("path", ".", "Path to the generated fileset") beatsPath := flag.String("beats_path", ".", "Path to elastic/beats") flag.Parse() @@ -94,17 +59,17 @@ func main() { os.Exit(1) } - err := generateModule(*module, *fileset, *modulePath, *beatsPath) - if err != nil { - fmt.Printf("Cannot generate module: %v\n", err) + modulePath := path.Join(*modulesPath, "module", *module) + if !generator.DirExists(modulePath) { + fmt.Print("Cannot generate fileset: module not exists, please create module first by create-module command\n") os.Exit(2) } - err = generateFileset(*module, *fileset, *modulePath, *beatsPath) + err := generateFileset(*module, *fileset, *modulesPath, *beatsPath) if err != nil { fmt.Printf("Cannot generate fileset: %v\n", err) os.Exit(3) } - fmt.Println("New module was generated. After setting up Grok pattern in pipeline.json, please generate fields.yml") + fmt.Println("New fileset was generated, please check that module.yml file have proper fileset dashboard settings. 
After setting up Grok pattern in pipeline.json, please generate fields.yml") } diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/generator/generator.go b/vendor/github.com/elastic/beats/filebeat/scripts/generator/generator.go new file mode 100644 index 00000000..f9be861c --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/generator/generator.go @@ -0,0 +1,96 @@ +package generator + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path" +) + +// DirExists check that directory exists +func DirExists(dir string) bool { + if _, err := os.Stat(dir); !os.IsNotExist(err) { + return true + } + + return false +} + +// CreateDirectories create directories in baseDir +func CreateDirectories(baseDir string, directories []string) error { + for _, d := range directories { + p := path.Join(baseDir, d) + err := os.MkdirAll(p, 0750) + if err != nil { + return err + } + } + + return nil +} + +// CopyTemplates copy templates from source, make replacement in template content and save it to dest +func CopyTemplates(src, dest string, templates []string, replace map[string]string) error { + for _, template := range templates { + err := copyTemplate(path.Join(src, template), path.Join(dest, template), replace) + if err != nil { + return err + } + } + + return nil +} + +// AppendTemplate read template, make replacement in content and append it to dest +func AppendTemplate(template, dest string, replace map[string]string) error { + c, err := readTemplate(template, replace) + if err != nil { + return err + } + + f, err := os.OpenFile(dest, os.O_WRONLY|os.O_APPEND, 0644) + if err == nil { + _, err = f.Write(c) + } + if err != nil { + return fmt.Errorf("cannot append template: %v", err) + } + + return nil +} + +func copyTemplate(template, dest string, replace map[string]string) error { + c, err := readTemplate(template, replace) + if err != nil { + return err + } + + err = ioutil.WriteFile(dest, c, 0644) + if err != nil { + return fmt.Errorf("cannot copy template: %v", err) + } + + return nil +} + +func readTemplate(template string, replace map[string]string) ([]byte, error) { + c, err := ioutil.ReadFile(template) + if err != nil { + return []byte{}, fmt.Errorf("cannot read template: %v", err) + } + + for oldV, newV := range replace { + c = bytes.Replace(c, []byte("{"+oldV+"}"), []byte(newV), -1) + } + + return c, nil +} + +// RenameConfigYml renames config.yml to the name of the fileset, otherwise Filebeat refuses to start +func RenameConfigYml(modulesPath, module, fileset string) error { + old := path.Join(modulesPath, "module", module, fileset, "config", "config.yml") + new := path.Join(modulesPath, "module", module, fileset, "config", fileset+".yml") + + return os.Rename(old, new) +} diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/generator/module/main.go b/vendor/github.com/elastic/beats/filebeat/scripts/generator/module/main.go new file mode 100644 index 00000000..49ad9ba9 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/generator/module/main.go @@ -0,0 +1,52 @@ +package main + +import ( + "flag" + "fmt" + "os" + "path" + + "github.com/elastic/beats/filebeat/scripts/generator" +) + +func generateModule(module, modulesPath, beatsPath string) error { + modulePath := path.Join(modulesPath, "module", module) + if generator.DirExists(modulePath) { + return fmt.Errorf("module already exists: %s", module) + } + + err := generator.CreateDirectories(modulePath, []string{path.Join("_meta", "kibana", "6")}) + if err != nil { + return err + } + + replace := 
map[string]string{"module": module} + templatesPath := path.Join(beatsPath, "scripts", "module") + filesToCopy := []string{path.Join("_meta", "fields.yml"), path.Join("_meta", "docs.asciidoc"), path.Join("_meta", "config.yml"), path.Join("module.yml")} + err = generator.CopyTemplates(templatesPath, modulePath, filesToCopy, replace) + if err != nil { + return err + } + + return nil +} + +func main() { + module := flag.String("module", "", "Name of the module") + modulePath := flag.String("path", ".", "Path to the generated fileset") + beatsPath := flag.String("beats_path", ".", "Path to elastic/beats") + flag.Parse() + + if *module == "" { + fmt.Println("Missing parameter: module") + os.Exit(1) + } + + err := generateModule(*module, *modulePath, *beatsPath) + if err != nil { + fmt.Printf("Cannot generate module: %v\n", err) + os.Exit(2) + } + + fmt.Println("New module was generated, now you can start creating filesets by create-fileset command.") +} diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/_meta/config.yml b/vendor/github.com/elastic/beats/filebeat/scripts/module/_meta/config.yml new file mode 100644 index 00000000..ce53c30b --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/module/_meta/config.yml @@ -0,0 +1,8 @@ +- module: {{ module }} + # All logs + {{ fileset }}: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/docs.asciidoc b/vendor/github.com/elastic/beats/filebeat/scripts/module/_meta/docs.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/scripts/module/docs.asciidoc rename to vendor/github.com/elastic/beats/filebeat/scripts/module/_meta/docs.asciidoc diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/fields.yml b/vendor/github.com/elastic/beats/filebeat/scripts/module/_meta/fields.yml similarity index 100% rename from vendor/github.com/elastic/beats/filebeat/scripts/module/fields.yml rename to vendor/github.com/elastic/beats/filebeat/scripts/module/_meta/fields.yml diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/module/module.yml b/vendor/github.com/elastic/beats/filebeat/scripts/module/module.yml new file mode 100644 index 00000000..ad3cb984 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/module/module.yml @@ -0,0 +1 @@ +dashboards: diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/tester/main.go b/vendor/github.com/elastic/beats/filebeat/scripts/tester/main.go new file mode 100644 index 00000000..8cc051fb --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/tester/main.go @@ -0,0 +1,352 @@ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/elastic/beats/filebeat/harvester/encoding" + "github.com/elastic/beats/filebeat/harvester/reader" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/match" +) + +type logReaderConfig struct { + multiPattern string + multiNegate bool + maxBytes int + matchMode string + encoding string +} + +func main() { + esURL := flag.String("elasticsearch", "http://localhost:9200", "Elasticsearch URL") + path := flag.String("pipeline", "", "Path to pipeline") + modulesPath := flag.String("modules", "./modules", "Path to modules") + + log := flag.String("log", "", "Single log line to test") + logfile := 
flag.String("logfile", "", "Path to log file") + + multiPattern := flag.String("multiline.pattern", "", "Multiline pattern") + multiNegate := flag.Bool("multiline.negate", false, "Multiline negate") + multiMode := flag.String("multiline.mode", "before", "Multiline mode") + maxBytes := flag.Int("maxbytes", 10485760, "Number of max bytes to be read") + fileEncoding := flag.String("encoding", "utf8", "Encoding of logfile") + + verbose := flag.Bool("verbose", false, "Call Simulate API with verbose option") + simulateVerbose := flag.Bool("simulate.verbose", false, "Print full output of Simulate API with verbose option") + flag.Parse() + + if *path == "" { + os.Stderr.WriteString("Error: -pipeline is required\n") + os.Exit(1) + } + + if *log == "" && *logfile == "" { + os.Stderr.WriteString("Error: -log or -logs has to be specified\n") + os.Exit(1) + } + + if *multiPattern != "" && *logfile == "" { + os.Stderr.WriteString("Error: -multiline-pattern is set but -logfile is not\n") + os.Exit(1) + } + + var logs []string + var err error + if *logfile != "" { + c := logReaderConfig{ + multiPattern: *multiPattern, + multiNegate: *multiNegate, + matchMode: *multiMode, + maxBytes: *maxBytes, + encoding: *fileEncoding, + } + logs, err = getLogsFromFile(*logfile, &c) + if err != nil { + os.Stderr.WriteString(fmt.Sprintf("Error while reading logs from file: %v\n", err)) + os.Exit(2) + } + } else { + logs = []string{*log} + } + + paths, err := getPipelinePath(*path, *modulesPath) + if err != nil { + os.Stderr.WriteString(err.Error()) + os.Exit(3) + } + if len(paths) == 0 { + os.Stderr.WriteString("No pipeline file was found\n") + os.Exit(3) + } + + for _, path := range paths { + err = testPipeline(*esURL, path, logs, *verbose, *simulateVerbose) + if err != nil { + os.Stderr.WriteString(err.Error()) + os.Exit(4) + } + } +} +func getLogsFromFile(logfile string, conf *logReaderConfig) ([]string, error) { + f, err := os.Open(logfile) + if err != nil { + return nil, err + } + defer f.Close() + + encFactory, ok := encoding.FindEncoding(conf.encoding) + if !ok { + return nil, fmt.Errorf("unable to find encoding: %s", conf.encoding) + } + + enc, err := encFactory(f) + if err != nil { + return nil, fmt.Errorf("failed to initialize encoding: %v", err) + } + + var r reader.Reader + r, err = reader.NewEncode(f, enc, 4096) + if err != nil { + return nil, err + } + + r = reader.NewStripNewline(r) + + if conf.multiPattern != "" { + p, err := match.Compile(conf.multiPattern) + if err != nil { + return nil, err + } + + c := reader.MultilineConfig{ + Negate: conf.multiNegate, + Match: conf.matchMode, + Pattern: &p, + } + r, err = reader.NewMultiline(r, "\n", 1<<20, &c) + if err != nil { + return nil, err + } + } + r = reader.NewLimit(r, conf.maxBytes) + + var logs []string + for { + msg, err := r.Next() + if err != nil { + break + } + logs = append(logs, string(msg.Content)) + } + + return logs, nil +} + +func getPipelinePath(path, modulesPath string) ([]string, error) { + var paths []string + stat, err := os.Stat(path) + if err != nil { + parts := strings.Split(path, "/") + if len(parts) != 2 { + return nil, fmt.Errorf("Cannot find pipeline in %s\n", path) + } + module := parts[0] + fileset := parts[1] + + pathToPipeline := filepath.Join(modulesPath, module, fileset, "ingest", "pipeline.json") + _, err := os.Stat(pathToPipeline) + if err != nil { + return nil, fmt.Errorf("Cannot find pipeline in %s: %v %v\n", path, err, pathToPipeline) + } + return []string{pathToPipeline}, nil + } + + if stat.IsDir() { + files, err := 
ioutil.ReadDir(path) + if err != nil { + return nil, err + } + for _, f := range files { + isPipelineFile := strings.HasSuffix(f.Name(), ".json") + if isPipelineFile { + fullPath := filepath.Join(path, f.Name()) + paths = append(paths, fullPath) + } + } + if len(paths) == 0 { + return paths, fmt.Errorf("Cannot find pipeline in %s", path) + } + return paths, nil + } + + isPipelineFile := strings.HasSuffix(path, ".json") + if isPipelineFile { + return []string{path}, nil + } + + return paths, nil + +} + +func testPipeline(esURL, path string, logs []string, verbose, simulateVerbose bool) error { + pipeline, err := readPipeline(path) + if err != nil { + return fmt.Errorf("Error while reading pipeline: %v\n", err) + } + + resp, err := runSimulate(esURL, pipeline, logs, simulateVerbose) + if err != nil { + return fmt.Errorf("Error while sending request to Elasticsearch: %v\n", err) + } + + err = showResp(resp, verbose, simulateVerbose) + if err != nil { + return fmt.Errorf("Error while reading response from Elasticsearch: %v\n", err) + } + return nil +} + +func readPipeline(path string) (map[string]interface{}, error) { + d, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + var p map[string]interface{} + err = json.Unmarshal(d, &p) + if err != nil { + return nil, err + } + + return p, nil +} + +func runSimulate(url string, pipeline map[string]interface{}, logs []string, verbose bool) (*http.Response, error) { + var sources []common.MapStr + now := time.Now().UTC() + for _, l := range logs { + s := common.MapStr{ + "@timestamp": common.Time(now), + "message": l, + } + sources = append(sources, s) + } + + var docs []common.MapStr + for _, s := range sources { + d := common.MapStr{ + "_index": "index", + "_type": "doc", + "_id": "id", + "_source": s, + } + docs = append(docs, d) + } + + p := common.MapStr{ + "pipeline": pipeline, + "docs": docs, + } + + payload := p.String() + client := http.Client{} + + simulateURL := url + "/_ingest/pipeline/_simulate" + if verbose { + simulateURL += "?verbose" + } + + return client.Post(simulateURL, "application/json", strings.NewReader(payload)) +} + +func showResp(resp *http.Response, verbose, simulateVerbose bool) error { + if resp.StatusCode != 200 { + return fmt.Errorf("response code is %d not 200", resp.StatusCode) + } + + b := new(bytes.Buffer) + b.ReadFrom(resp.Body) + var r common.MapStr + err := json.Unmarshal(b.Bytes(), &r) + if err != nil { + return err + } + + if verbose { + fmt.Println(r.StringToPrint()) + } else { + docErrors, err := getDocErrors(r, simulateVerbose) + if err != nil { + return err + } + + for _, d := range docErrors { + fmt.Println(d.StringToPrint()) + } + } + return nil +} + +func getDocErrors(r common.MapStr, simulateVerbose bool) ([]common.MapStr, error) { + d, err := r.GetValue("docs") + if err != nil { + return nil, err + } + + docs := d.([]interface{}) + if simulateVerbose { + return getErrorsSimulateVerbose(docs) + } + + return getRegularErrors(docs) +} + +func getRegularErrors(docs []interface{}) ([]common.MapStr, error) { + var errors []common.MapStr + for _, d := range docs { + dd := d.(map[string]interface{}) + doc := common.MapStr(dd) + hasError, err := doc.HasKey("doc._source.error") + if err != nil { + return nil, err + } + + if hasError { + errors = append(errors, doc) + } + } + return errors, nil +} + +func getErrorsSimulateVerbose(docs []interface{}) ([]common.MapStr, error) { + var errors []common.MapStr + for _, d := range docs { + pr := d.(map[string]interface{}) + p := common.MapStr(pr) + 
+ rr, err := p.GetValue("processor_results") + if err != nil { + return nil, err + } + res := rr.([]interface{}) + hasError := false + for _, r := range res { + rres := r.(map[string]interface{}) + result := common.MapStr(rres) + hasError, _ = result.HasKey("error") + if hasError { + errors = append(errors, p) + } + } + } + return errors, nil +} diff --git a/vendor/github.com/elastic/beats/filebeat/scripts/tester/main_test.go b/vendor/github.com/elastic/beats/filebeat/scripts/tester/main_test.go new file mode 100644 index 00000000..1267fa45 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/scripts/tester/main_test.go @@ -0,0 +1,48 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetPipelinePath(t *testing.T) { + testCases := []struct { + pipelinePath string + count int + }{ + { + pipelinePath: "../../module/postgresql/log/ingest/pipeline.json", + count: 1, + }, + { + pipelinePath: "../../module/postgresql/log/ingest", + count: 1, + }, + { + pipelinePath: "postgresql/log", + count: 1, + }, + } + + for _, tc := range testCases { + paths, err := getPipelinePath(tc.pipelinePath, "../../module") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, tc.count, len(paths)) + } + + testCasesError := []string{ + "non-such-pipeline.json", + "no/such/path/to/pipeline", + "not/module", + } + for _, p := range testCasesError { + paths, err := getPipelinePath(p, "./module") + if err == nil { + t.Fatal(paths) + } + assert.Equal(t, 0, len(paths)) + } +} diff --git a/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.0 b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.0 new file mode 100644 index 00000000..5f7414b9 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.0 @@ -0,0 +1 @@ +[{"source":"test.log","offset":10,"timestamp":"2018-07-18T21:51:43.529893808+02:00","ttl":-1,"type":"log","FileStateOS":{"inode":8604592318,"device":16777220}}] diff --git a/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.1 b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.1 new file mode 100644 index 00000000..a4c2ccf1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.1 @@ -0,0 +1 @@ +[{"source":"test.log","offset":10,"timestamp":"2018-07-18T21:51:43.529893808+02:00","ttl":-1,"type":"log","meta":{},"FileStateOS":{"inode":8604592318,"device":16777220}}] diff --git a/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.1-faulty b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.1-faulty new file mode 100644 index 00000000..2606e69b --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-6.3.1-faulty @@ -0,0 +1,4 @@ +[ + {"source":"test.log","offset":10,"timestamp":"2018-07-18T21:51:43.529893808+02:00","ttl":-1,"type":"log","meta":{},"FileStateOS":{"inode":8604592318,"device":16777220}}, + {"source":"test.log","offset":0,"timestamp":"2018-07-18T21:51:43.529893808+02:00","ttl":-1,"type":"log","meta":null,"FileStateOS":{"inode":8604592318,"device":16777220}} +] diff --git a/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-latest b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-latest new file mode 100644 index 00000000..110dc161 
--- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/files/registry/test-2lines-registry-latest @@ -0,0 +1 @@ +[{"source":"test.log","offset":10,"timestamp":"2018-07-18T21:51:43.529893808+02:00","ttl":-1,"type":"log","meta":null,"FileStateOS":{"inode":8604592318,"device":16777220}}] diff --git a/vendor/github.com/elastic/beats/filebeat/tests/load/filebeat.yml b/vendor/github.com/elastic/beats/filebeat/tests/load/filebeat.yml index ae0e8615..93217983 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/load/filebeat.yml +++ b/vendor/github.com/elastic/beats/filebeat/tests/load/filebeat.yml @@ -15,7 +15,7 @@ filebeat: spool_size: 4096 idle_timeout: 5s registry_file: registry - + registry_file_permissions: 0600 ############################# Output ########################################## diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 b/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 index 4c1e9e7b..52e099d8 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 @@ -1,10 +1,10 @@ ###################### Filebeat Config Template ############################### -filebeat.prospectors: -{% if prospectors is not defined %} -{% set prospectors = true %} +filebeat.{{input_config | default("inputs")}}: +{% if inputs is not defined %} +{% set inputs = true %} {% endif %} -{% if prospectors %} +{% if inputs %} - type: {{type | default("log") }} input_type: {{input_type_deprecated }} # Paths that should be crawled and fetched @@ -31,9 +31,9 @@ filebeat.prospectors: harvester_limit: {{harvester_limit | default(0) }} symlinks: {{symlinks}} pipeline: {{pipeline}} - {%- if prospector_processors %} + {%- if input_processors %} processors: - {%- for processor in prospector_processors %} + {%- for processor in input_processors %} {%- for name, settings in processor.items() %} - {{name}}: {%- if settings %} @@ -74,6 +74,7 @@ filebeat.prospectors: {% if json.keys_under_root %}keys_under_root: true{% endif %} {% if json.overwrite_keys %}overwrite_keys: true{% endif %} {% if json.add_error_key %}add_error_key: true{% endif %} + {% if json.ignore_decoding_error %}ignore_decoding_error: true{% endif %} {% endif %} {% if multiline %} @@ -85,17 +86,18 @@ filebeat.prospectors: max_lines: {{ max_lines|default(500) }} {% endif %} {% endif %} -{% if prospector_raw %} -{{prospector_raw}} +{% if input_raw %} +{{input_raw}} {% endif %} filebeat.shutdown_timeout: {{ shutdown_timeout|default(0) }} {% if not skip_registry_config %} filebeat.registry_file: {{ beat.working_dir + '/' }}{{ registryFile|default("registry")}} +filebeat.registry_file_permissions: {{ registryFilePermissions|default("0600") }} {%endif%} {% if reload or reload_path -%} -filebeat.config.{{ reload_type|default("prospectors") }}: +filebeat.config.{{ reload_type|default("inputs") }}: path: {{ reload_path }} {% if reload -%} reload.period: 1s @@ -119,101 +121,4 @@ filebeat.autodiscover: {%- endfor %} {% endif %} -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. -name: {{shipper_name}} - -# The tags of the shipper are included in their own field with each -# transaction published. 
Tags make it easy to group servers by different -# logical properties. -tags: [ - {%- if agent_tags -%} - {%- for tag in agent_tags -%} - "{{ tag }}" - {%- if not loop.last %}, {% endif -%} - {%- endfor -%} - {%- endif -%} -] - -{% if geoip_paths is not none %} -geoip: - paths: [ - {%- for path in geoip_paths -%} - "{{ beat.working_dir + '/' + path }}" - {%- if not loop.last %}, {% endif -%} - {%- endfor -%} - ] -{%- endif %} - -{% if setup_template_name %} -setup.template.name: setup_template_name -setup.template.pattern: setup_template_pattern -{%- endif %} - -{%- if processors %} - -#================================ Filters ===================================== - -processors: -{%- for processor in processors %} -{%- for name, settings in processor.items() %} -- {{name}}: - {%- if settings %} - {%- for k, v in settings.items() %} - {{k}}: - {{v | default([])}} - {%- endfor %} - {%- endif %} -{%- endfor %} -{%- endfor %} - -{%- endif %} - -#================================ Queue ===================================== - -queue.mem: - events: 4096 - flush.min_events: 8 - flush.timeout: 0.1s - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -{%- if elasticsearch %} -#------------------------------- Elasticsearch output ---------------------------- -output.elasticsearch: - hosts: ["{{ elasticsearch.host }}"] -{% if elasticsearch.pipeline %} - pipeline: {{elasticsearch.pipeline}} -{% endif %} -{% if elasticsearch.index %} - index: {{elasticsearch.index}} -{% endif %} -{%- elif logstash %} -#------------------------------- Logstash output --------------------------------- -output.logstash: - hosts: ["{{ logstash.host }}"] -{%- else %} -#------------------------------- File output ---------------------------------- -output.file: - path: {{ output_file_path|default(beat.working_dir + "/output") }} - filename: "{{ output_file_filename|default("filebeat") }}" - rotate_every_kb: {{ rotate_every_kb | default(1000) }} - #number_of_files: 7 -{%- endif %} - -{% if path_data %} -#================================ Paths ===================================== -path: - data: {{path_data}} -{% endif %} - -{% if keystore_path %} -#================================ keystore ===================================== -keystore.path: {{keystore_path}} -{% endif %} +{% include './tests/system/config/libbeat.yml.j2' %} diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat_prospectors.yml.j2 b/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat_inputs.yml.j2 similarity index 69% rename from vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat_prospectors.yml.j2 rename to vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat_inputs.yml.j2 index 43536e9c..04ef3c2a 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat_prospectors.yml.j2 +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat_inputs.yml.j2 @@ -1,9 +1,9 @@ -filebeat.prospectors: -{% for prospector in prospectors %} +filebeat.inputs: +{% for input in inputs %} - paths: - - {{prospector.path}} + - {{input.path}} scan_frequency: 0.5s - encoding: {{prospector.encoding | default("plain") }} + encoding: {{input.encoding | default("plain") }} {% endfor %} filebeat.registry_file: {{ beat.working_dir + '/' }}{{ registryFile|default("registry")}} diff --git 
a/vendor/github.com/elastic/beats/filebeat/tests/system/module/test/test/manifest.yml b/vendor/github.com/elastic/beats/filebeat/tests/system/module/test/test/manifest.yml index d55e76cd..2946824a 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/module/test/test/manifest.yml +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/module/test/test/manifest.yml @@ -6,4 +6,4 @@ var: - test.log ingest_pipeline: ingest/default.json -prospector: config/test.yml +input: config/test.yml diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_autodiscover.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_autodiscover.py index df8fc479..b2fcd481 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_autodiscover.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_autodiscover.py @@ -14,13 +14,13 @@ class TestAutodiscover(filebeat.BaseTest): "integration test not available on 2.x") def test_docker(self): """ - Test docker autodiscover starts prospector + Test docker autodiscover starts input """ import docker docker_client = docker.from_env() self.render_config_template( - prospectors=False, + inputs=False, autodiscover={ 'docker': { 'templates': ''' @@ -42,8 +42,8 @@ def test_docker(self): docker_client.images.pull('busybox') docker_client.containers.run('busybox', 'sleep 1') - self.wait_until(lambda: self.log_contains('Autodiscover starting runner: prospector')) - self.wait_until(lambda: self.log_contains('Autodiscover stopping runner: prospector')) + self.wait_until(lambda: self.log_contains('Autodiscover starting runner: input')) + self.wait_until(lambda: self.log_contains('Autodiscover stopping runner: input')) output = self.read_output_json() proc.check_kill_and_wait() diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_base.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_base.py index 94b954e0..105be678 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_base.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_base.py @@ -25,12 +25,13 @@ def test_base(self): output = self.read_output()[0] assert "@timestamp" in output assert "prospector.type" in output + assert "input.type" in output def test_invalid_config_with_removed_settings(self): """ Checks if filebeat fails to load if removed settings have been used: """ - self.render_config_template(console={"pretty": "false"}) + self.render_config_template() exit_code = self.run_beat(extra_args=[ "-E", "filebeat.spool_size=2048", diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py index 13008bf9..19714387 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py @@ -573,15 +573,15 @@ def test_encodings(self): f.write(text + "\n") # create the config file - prospectors = [] + inputs = [] for enc_go, enc_py, _ in encodings: - prospectors.append({ + inputs.append({ "path": self.working_dir + "/log/test-{}".format(enc_py), "encoding": enc_go }) self.render_config_template( - template_name="filebeat_prospectors", - prospectors=prospectors + template_name="filebeat_inputs", + inputs=inputs ) # run filebeat diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_deprecated.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_deprecated.py index 0dd25fba..be764d8c 100644 --- 
a/vendor/github.com/elastic/beats/filebeat/tests/system/test_deprecated.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_deprecated.py @@ -2,8 +2,6 @@ from filebeat import BaseTest import os -import codecs -import time """ Test Harvesters @@ -37,4 +35,84 @@ def test_input_type_deprecated(self): filebeat.check_kill_and_wait() - assert self.log_contains("DEPRECATED: input_type prospector config is deprecated") + assert self.log_contains("DEPRECATED: input_type input config is deprecated") + + def test_prospectors_deprecated(self): + """ + Checks that harvesting works with deprecated prospectors but a deprecation warning is printed. + """ + + self.render_config_template( + input_config="prospectors", + path=os.path.abspath(self.working_dir) + "/log/test.log", + scan_frequency="0.1s" + ) + os.mkdir(self.working_dir + "/log/") + + logfile = self.working_dir + "/log/test.log" + + with open(logfile, 'w') as f: + f.write("Hello world\n") + + filebeat = self.start_beat() + + # Let it read the file + self.wait_until( + lambda: self.output_has(lines=1), max_timeout=10) + + filebeat.check_kill_and_wait() + + assert self.log_contains("DEPRECATED: prospectors are deprecated, Use `inputs` instead.") + + def test_reload_config_prospector_deprecated(self): + """ + Checks that harvesting works with `config.prospectors` + """ + + inputConfigTemplate = """ + - type: log + paths: + - {} + scan_frequency: 1s + """ + + self.render_config_template( + reload_type="prospectors", + reload=True, + reload_path=self.working_dir + "/configs/*.yml", + inputs=False, + ) + + os.mkdir(self.working_dir + "/logs/") + logfile1 = self.working_dir + "/logs/test1.log" + logfile2 = self.working_dir + "/logs/test2.log" + os.mkdir(self.working_dir + "/configs/") + + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/test1.log")) + + proc = self.start_beat() + + with open(logfile1, 'w') as f: + f.write("Hello world1\n") + + self.wait_until(lambda: self.output_lines() > 0) + + with open(self.working_dir + "/configs/input2.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/test2.log")) + + self.wait_until( + lambda: self.log_contains_count("New runner started") == 2, + max_timeout=15) + + # Add new log line and see if it is picked up = new input is running + with open(logfile1, 'a') as f: + f.write("Hello world2\n") + + # Add new log line and see if it is picked up = new input is running + with open(logfile2, 'a') as f: + f.write("Hello world3\n") + + self.wait_until(lambda: self.output_lines() == 3) + + assert self.log_contains("DEPRECATED: config.prospectors are deprecated, Use `config.inputs` instead.") diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_harvester.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_harvester.py index 6edaaedf..4bc5cf44 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_harvester.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_harvester.py @@ -4,6 +4,7 @@ import os import codecs import time +import io """ Test Harvesters @@ -276,7 +277,7 @@ def test_exceed_buffer(self): # Wait until state is written self.wait_until( lambda: self.log_contains( - "Registrar states cleaned up"), + "Registrar state updates processed"), max_timeout=15) filebeat.check_kill_and_wait() @@ -438,7 +439,6 @@ def test_bom_utf8(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/bom8.log"], - source_dir="../files", 
target_dir="log") filebeat = self.start_beat() @@ -785,18 +785,22 @@ def test_decode_error(self): """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - encoding="GBK", # Set invalid encoding for entry below which is actually uft-8 + encoding="utf-16be", ) os.mkdir(self.working_dir + "/log/") logfile = self.working_dir + "/log/test.log" - with open(logfile, 'w') as file: - file.write("hello world1" + "\n") - - file.write('' + '\n') - file.write("hello world2" + "\n") + with io.open(logfile, 'w', encoding="utf-16") as file: + file.write(u'hello world1') + file.write(u"\n") + with io.open(logfile, 'a', encoding="utf-16") as file: + file.write(u"\U00012345=Ra") + with io.open(logfile, 'a', encoding="utf-16") as file: + file.write(u"\n") + file.write(u"hello world2") + file.write(u"\n") filebeat = self.start_beat() @@ -807,7 +811,7 @@ def test_decode_error(self): # Wait until error shows up self.wait_until( - lambda: self.log_contains("Error decoding line: simplifiedchinese: invalid GBK encoding"), + lambda: self.log_contains("Error decoding line: transform: short source buffer"), max_timeout=5) filebeat.check_kill_and_wait() diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_prospector.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_input.py similarity index 86% rename from vendor/github.com/elastic/beats/filebeat/tests/system/test_prospector.py rename to vendor/github.com/elastic/beats/filebeat/tests/system/test_input.py index ab057aff..4a9dd9e5 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_prospector.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_input.py @@ -3,12 +3,11 @@ from filebeat import BaseTest import os import time -import unittest from beat.beat import Proc """ -Tests for the prospector functionality. +Tests for the input functionality. """ @@ -77,77 +76,6 @@ def test_not_ignore_old_files(self): objs = self.read_output() assert len(objs) == 5 - def test_stdin(self): - """ - Test stdin input. Checks if reading is continued after the first read. - """ - self.render_config_template( - type="stdin" - ) - - proc = self.start_beat() - - self.wait_until( - lambda: self.log_contains( - "Harvester started for file: -"), - max_timeout=10) - - iterations1 = 5 - for n in range(0, iterations1): - os.write(proc.stdin_write, "Hello World\n") - - self.wait_until( - lambda: self.output_has(lines=iterations1), - max_timeout=15) - - iterations2 = 10 - for n in range(0, iterations2): - os.write(proc.stdin_write, "Hello World\n") - - self.wait_until( - lambda: self.output_has(lines=iterations1 + iterations2), - max_timeout=15) - - proc.check_kill_and_wait() - - objs = self.read_output() - assert len(objs) == iterations1 + iterations2 - - def test_stdin_eof(self): - """ - Test that Filebeat works when stdin is closed. 
- """ - self.render_config_template( - type="stdin", - close_eof="true", - ) - - args = [self.test_binary, - "-systemTest", - "-test.coverprofile", - os.path.join(self.working_dir, "coverage.cov"), - "-c", os.path.join(self.working_dir, "filebeat.yml"), - "-e", "-v", "-d", "*", - ] - proc = Proc(args, os.path.join(self.working_dir, "filebeat.log")) - os.write(proc.stdin_write, "Hello World\n") - - proc.start() - self.wait_until(lambda: self.output_has(lines=1)) - - # Continue writing after end was reached - os.write(proc.stdin_write, "Hello World2\n") - os.close(proc.stdin_write) - - self.wait_until(lambda: self.output_has(lines=2)) - - proc.proc.terminate() - proc.proc.wait() - - objs = self.read_output() - assert objs[0]["message"] == "Hello World" - assert objs[1]["message"] == "Hello World2" - def test_rotating_close_inactive_larger_write_rate(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", @@ -269,26 +197,26 @@ def test_rotating_close_inactive_low_write_rate(self): filebeat.check_kill_and_wait() - def test_shutdown_no_prospectors(self): + def test_shutdown_no_inputs(self): """ - In case no prospectors are defined, filebeat must shut down and report an error + In case no inputs are defined, filebeat must shut down and report an error """ self.render_config_template( - prospectors=False, + inputs=False, ) filebeat = self.start_beat() self.wait_until( lambda: self.log_contains( - "No modules or prospectors enabled"), + "no modules or inputs enabled"), max_timeout=10) filebeat.check_wait(exit_code=1) def test_no_paths_defined(self): """ - In case a prospector is defined but doesn't contain any paths, prospector must return error which + In case a input is defined but doesn't contain any paths, input must return error which leads to shutdown of filebeat because of configuration error """ self.render_config_template( @@ -299,7 +227,7 @@ def test_no_paths_defined(self): # wait for first "Start next scan" log message self.wait_until( lambda: self.log_contains( - "No paths were defined for prospector"), + "No paths were defined for "), max_timeout=10) self.wait_until( @@ -311,7 +239,7 @@ def test_no_paths_defined(self): def test_files_added_late(self): """ - Tests that prospectors stay running even though no harvesters are started yet + Tests that inputs stay running even though no harvesters are started yet """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", @@ -627,13 +555,13 @@ def test_harvester_limit(self): filebeat.check_kill_and_wait() - def test_prospector_filter_dropfields(self): + def test_input_filter_dropfields(self): """ - Check drop_fields filtering action at a prospector level + Check drop_fields filtering action at a input level """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/test.log", - prospector_processors=[{ + input_processors=[{ "drop_fields": { "fields": ["offset"], }, @@ -652,13 +580,13 @@ def test_prospector_filter_dropfields(self): assert "offset" not in output assert "message" in output - def test_prospector_filter_includefields(self): + def test_input_filter_includefields(self): """ - Check include_fields filtering action at a prospector level + Check include_fields filtering action at a input level """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/test.log", - prospector_processors=[{ + input_processors=[{ "include_fields": { "fields": ["offset"], }, diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py 
b/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py index 0e2f58b3..d98b6025 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py @@ -20,7 +20,6 @@ def test_docker_logs(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/docker.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -48,7 +47,6 @@ def test_docker_logs_filtering(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/docker.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -81,7 +79,6 @@ def test_docker_logs_multiline(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/docker_multiline.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -116,7 +113,6 @@ def test_simple_json_overwrite(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/json_override.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -140,7 +136,6 @@ def test_json_add_tags(self): ) os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/json_tag.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -208,7 +203,6 @@ def test_timestamp_in_message(self): ) os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/json_timestamp.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -250,7 +244,6 @@ def test_type_in_message(self): ) os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/json_type.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -295,7 +288,6 @@ def test_with_generic_filtering(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/json_null.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -316,6 +308,72 @@ def test_with_generic_filtering(self): # We drop null values during the generic event conversion. assert "res" not in o + def test_json_decoding_error_true(self): + """ + Test if json_decoding_error is set to true, that no errors are logged. + """ + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + json=dict( + message_key="message", + ignore_decoding_error=True + ), + ) + + os.mkdir(self.working_dir + "/log/") + + testfile1 = self.working_dir + "/log/test.log" + + message = "invalidjson" + with open(testfile1, 'a') as f: + f.write(message + "\n") + + proc = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=1), + max_timeout=10) + proc.check_kill_and_wait() + + output = self.read_output( + required_fields=["@timestamp"], + ) + assert len(output) == 1 + assert output[0]["message"] == message + assert False == self.log_contains_count("Error decoding JSON") + + def test_json_decoding_error_false(self): + """ + Test if json_decoding_error is set to false, that an errors is logged. 
+ """ + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + json=dict( + message_key="message", + ignore_decoding_error=False + ), + ) + + os.mkdir(self.working_dir + "/log/") + + testfile1 = self.working_dir + "/log/test.log" + + message = "invalidjson" + with open(testfile1, 'a') as f: + f.write(message + "\n") + + proc = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=1), + max_timeout=10) + proc.check_kill_and_wait() + + output = self.read_output( + required_fields=["@timestamp"], + ) + assert len(output) == 1 + assert output[0]["message"] == message + assert True == self.log_contains_count("Error decoding JSON") + def test_with_generic_filtering_remove_headers(self): """ It should work fine to combine JSON decoding with @@ -339,7 +397,6 @@ def test_with_generic_filtering_remove_headers(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/json_null.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -378,7 +435,6 @@ def test_integer_condition(self): ) os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/json_int.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_load.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_load.py index 56dd8186..bc33dbda 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_load.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_load.py @@ -56,7 +56,7 @@ def test_no_missing_events(self): # wait until filebeat is fully running self.wait_until( lambda: self.log_contains( - "Loading and starting Prospectors completed."), + "Loading and starting Inputs completed."), max_timeout=15) # Start logging and rotating diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py index a4d9a35a..ce1161a9 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py @@ -91,7 +91,7 @@ def _test_expected_events(self, module, test_file, res, objects): break assert found, "The following expected object was not found:\n {}\nSearched in: \n{}".format( - ev["_source"][module], objects) + pretty_json(ev["_source"][module]), pretty_json(objects)) def run_on_file(self, module, fileset, test_file, cfgfile): print("Testing {}/{} on {}".format(module, fileset, test_file)) @@ -108,10 +108,11 @@ def run_on_file(self, module, fileset, test_file, cfgfile): "-c", cfgfile, "-modules={}".format(module), "-M", "{module}.*.enabled=false".format(module=module), - "-M", "{module}.{fileset}.enabled=true".format(module=module, fileset=fileset), + "-M", "{module}.{fileset}.enabled=true".format( + module=module, fileset=fileset), "-M", "{module}.{fileset}.var.paths=[{test_file}]".format( module=module, fileset=fileset, test_file=test_file), - "-M", "*.*.prospector.close_eof=true", + "-M", "*.*.input.close_eof=true", ] output_path = os.path.join(self.working_dir, module, fileset, os.path.basename(test_file)) @@ -138,7 +139,8 @@ def run_on_file(self, module, fileset, test_file, cfgfile): assert obj["fileset"]["module"] == module, "expected fileset.module={} but got {}".format( module, obj["fileset"]["module"]) - assert "error" not in obj, "not error expected but got: {}".format(obj) + assert "error" not in obj, "not error expected but got: {}".format( + obj) if (module == 
"auditd" and fileset == "log") \ or (module == "osquery" and fileset == "result"): @@ -153,13 +155,13 @@ def run_on_file(self, module, fileset, test_file, cfgfile): @unittest.skipIf(not INTEGRATION_TESTS or os.getenv("TESTING_ENVIRONMENT") == "2x", "integration test not available on 2.x") - def test_prospector_pipeline_config(self): + def test_input_pipeline_config(self): """ - Tests that the pipeline configured in the prospector overwrites + Tests that the pipeline configured in the input overwrites the one from the output. """ self.init() - index_name = "filebeat-test-prospector" + index_name = "filebeat-test-input" try: self.es.indices.delete(index=index_name) except: @@ -229,13 +231,16 @@ def _run_ml_test(self, setup_flag, modules_flag): # Clean any previous state for df in self.es.transport.perform_request("GET", "/_xpack/ml/datafeeds/")["datafeeds"]: if df["datafeed_id"] == 'filebeat-nginx-access-response_code': - self.es.transport.perform_request("DELETE", "/_xpack/ml/datafeeds/" + df["datafeed_id"]) + self.es.transport.perform_request( + "DELETE", "/_xpack/ml/datafeeds/" + df["datafeed_id"]) for df in self.es.transport.perform_request("GET", "/_xpack/ml/anomaly_detectors/")["jobs"]: if df["job_id"] == 'datafeed-filebeat-nginx-access-response_code': - self.es.transport.perform_request("DELETE", "/_xpack/ml/anomaly_detectors/" + df["job_id"]) + self.es.transport.perform_request( + "DELETE", "/_xpack/ml/anomaly_detectors/" + df["job_id"]) - shutil.rmtree(os.path.join(self.working_dir, "modules.d"), ignore_errors=True) + shutil.rmtree(os.path.join(self.working_dir, + "modules.d"), ignore_errors=True) # generate a minimal configuration cfgfile = os.path.join(self.working_dir, "filebeat.yml") @@ -267,7 +272,8 @@ def _run_ml_test(self, setup_flag, modules_flag): if modules_flag: cmd += ["--modules=nginx"] - output = open(os.path.join(self.working_dir, "output.log"), "ab") + output_path = os.path.join(self.working_dir, "output.log") + output = open(output_path, "ab") output.write(" ".join(cmd) + "\n") beat = subprocess.Popen(cmd, stdin=None, @@ -284,3 +290,24 @@ def _run_ml_test(self, setup_flag, modules_flag): (df["datafeed_id"] for df in self.es.transport.perform_request("GET", "/_xpack/ml/datafeeds/")["datafeeds"])) beat.kill() + + # check if fails during trying to setting it up again + output = open(output_path, "ab") + output.write(" ".join(cmd) + "\n") + beat = subprocess.Popen(cmd, + stdin=None, + stdout=output, + stderr=output, + bufsize=0) + + output = open(output_path, "r") + for obj in ["Datafeed", "Job", "Dashboard", "Search", "Visualization"]: + self.wait_log_contains("{obj} already exists".format(obj=obj), + logfile=output_path, + max_timeout=30) + + beat.kill() + + +def pretty_json(obj): + return json.dumps(obj, indent=2, separators=(',', ': ')) diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_multiline.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_multiline.py index 2bf67391..9abc9f8a 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_multiline.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_multiline.py @@ -24,7 +24,6 @@ def test_java_elasticsearch_log(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/elasticsearch-multiline-log.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -55,7 +54,6 @@ def test_c_style_log(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/multiline-c-log.log"], - source_dir="../files", target_dir="log") proc 
= self.start_beat() @@ -132,7 +130,6 @@ def test_max_lines(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/elasticsearch-multiline-log.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() @@ -215,7 +212,6 @@ def test_max_bytes(self): os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/elasticsearch-multiline-log.log"], - source_dir="../files", target_dir="log") proc = self.start_beat() diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_redis.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_redis.py index ab7910a3..f51e734e 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_redis.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_redis.py @@ -19,21 +19,21 @@ def init(self): return r @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - def test_prospector(self): + def test_input(self): r = self.init() r.set("hello", "world") - prospector_raw = """ + input_raw = """ - type: redis hosts: ["{}:{}"] enabled: true scan_frequency: 1s """ - prospector_raw = prospector_raw.format(self.get_host(), self.get_port()) + input_raw = input_raw.format(self.get_host(), self.get_port()) self.render_config_template( - prospector_raw=prospector_raw, - prospectors=False, + input_raw=input_raw, + inputs=False, ) filebeat = self.start_beat() @@ -45,8 +45,8 @@ def test_prospector(self): output = self.read_output()[0] - print output assert output["prospector.type"] == "redis" + assert output["input.type"] == "redis" assert "redis.slowlog.cmd" in output def get_host(self): diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py index eac4888c..8c95c939 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py @@ -5,6 +5,7 @@ import platform import time import shutil +import stat from filebeat import BaseTest from nose.plugins.skip import SkipTest @@ -69,6 +70,7 @@ def test_registrar_file_content(self): "offset": iterations * line_len, }, record) self.assertTrue("FileStateOS" in record) + self.assertIsNone(record["meta"]) file_state_os = record["FileStateOS"] if os.name == "nt": @@ -161,6 +163,135 @@ def test_custom_registry_file_location(self): assert os.path.isfile(os.path.join(self.working_dir, "a/b/c/registry")) + def test_registry_file_default_permissions(self): + """ + Test that filebeat default registry permission is set + """ + + if os.name == "nt": + # This test is currently skipped on windows because file permission + # configuration isn't implemented on Windows yet + raise SkipTest + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + registryFile="a/b/c/registry", + ) + os.mkdir(self.working_dir + "/log/") + testfile_path = self.working_dir + "/log/test.log" + with open(testfile_path, 'w') as testfile: + testfile.write("hello world\n") + filebeat = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=1), + max_timeout=15) + # wait until the registry file exist. Needed to avoid a race between + # the logging and actual writing the file. Seems to happen on Windows. 
+ self.wait_until( + lambda: os.path.isfile(os.path.join(self.working_dir, + "a/b/c/registry")), + max_timeout=1) + filebeat.check_kill_and_wait() + + registry_file_perm_mask = oct(stat.S_IMODE(os.lstat(os.path.join(self.working_dir, + "a/b/c/registry")).st_mode)) + self.assertEqual(registry_file_perm_mask, "0600") + + def test_registry_file_custom_permissions(self): + """ + Test that filebeat registry permission is set as per configuration + """ + + if os.name == "nt": + # This test is currently skipped on windows because file permission + # configuration isn't implemented on Windows yet + raise SkipTest + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + registryFile="a/b/c/registry", + registryFilePermissions=0644 + ) + os.mkdir(self.working_dir + "/log/") + testfile_path = self.working_dir + "/log/test.log" + with open(testfile_path, 'w') as testfile: + testfile.write("hello world\n") + filebeat = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=1), + max_timeout=15) + # wait until the registry file exist. Needed to avoid a race between + # the logging and actual writing the file. Seems to happen on Windows. + self.wait_until( + lambda: os.path.isfile(os.path.join(self.working_dir, + "a/b/c/registry")), + max_timeout=1) + filebeat.check_kill_and_wait() + + registry_file_perm_mask = oct(stat.S_IMODE(os.lstat(os.path.join(self.working_dir, + "a/b/c/registry")).st_mode)) + self.assertEqual(registry_file_perm_mask, "0644") + + def test_registry_file_update_permissions(self): + """ + Test that filebeat registry permission is updated along with configuration + """ + + if os.name == "nt": + # This test is currently skipped on windows because file permission + # configuration isn't implemented on Windows yet + raise SkipTest + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + registryFile="a/b/c/registry_x", + ) + os.mkdir(self.working_dir + "/log/") + testfile_path = self.working_dir + "/log/test.log" + with open(testfile_path, 'w') as testfile: + testfile.write("hello world\n") + filebeat = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=1), + max_timeout=15) + # wait until the registry file exist. Needed to avoid a race between + # the logging and actual writing the file. Seems to happen on Windows. + self.wait_until( + lambda: os.path.isfile(os.path.join(self.working_dir, + "a/b/c/registry_x")), + max_timeout=1) + filebeat.check_kill_and_wait() + + registry_file_perm_mask = oct(stat.S_IMODE(os.lstat(os.path.join(self.working_dir, + "a/b/c/registry_x")).st_mode)) + self.assertEqual(registry_file_perm_mask, "0600") + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + registryFile="a/b/c/registry_x", + registryFilePermissions=0644 + ) + + filebeat = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=1), + max_timeout=15) + # wait until the registry file exist. Needed to avoid a race between + # the logging and actual writing the file. Seems to happen on Windows. 
+ self.wait_until( + lambda: os.path.isfile(os.path.join(self.working_dir, + "a/b/c/registry_x")), + max_timeout=1) + + # Wait a momemt to make sure registry is completely written + time.sleep(1) + + filebeat.check_kill_and_wait() + + registry_file_perm_mask = oct(stat.S_IMODE(os.lstat(os.path.join(self.working_dir, + "a/b/c/registry_x")).st_mode)) + self.assertEqual(registry_file_perm_mask, "0644") + def test_rotating_file(self): """ Checks that the registry is properly updated after a file is rotated @@ -685,7 +816,7 @@ def test_clean_inactive(self): data = self.get_registry() assert len(data) == 2 - # Wait until states are removed from prospectors + # Wait until states are removed from inputs self.wait_until( lambda: self.log_contains_count( "State removed for") == 2, @@ -699,7 +830,7 @@ def test_clean_inactive(self): lambda: self.output_has(lines=3), max_timeout=30) - # Wait until states are removed from prospectors + # Wait until states are removed from inputs self.wait_until( lambda: self.log_contains_count( "State removed for") >= 3, @@ -753,7 +884,7 @@ def test_clean_removed(self): os.remove(testfile_path1) - # Wait until states are removed from prospectors + # Wait until states are removed from inputs self.wait_until(lambda: self.log_contains("Remove state for file as file removed")) # Add one more line to make sure registry is written @@ -807,7 +938,7 @@ def test_clean_removed_with_clean_inactive(self): # Wait until registry file is created self.wait_until( - lambda: self.log_contains_count("Registry file updated") > 1, + lambda: self.log_contains_count("Registry file updated. 2 states written.") > 0, max_timeout=15) data = self.get_registry() @@ -815,7 +946,7 @@ def test_clean_removed_with_clean_inactive(self): os.remove(testfile_path1) - # Wait until states are removed from prospectors + # Wait until states are removed from inputs self.wait_until( lambda: self.log_contains( "Remove state for file as file removed"), @@ -980,20 +1111,20 @@ def test_restart_state(self): # Make sure all 4 states are persisted self.wait_until( lambda: self.log_contains( - "Prospector states cleaned up. Before: 4, After: 4", logfile="filebeat2.log"), + "input states cleaned up. Before: 4, After: 4", logfile="filebeat2.log"), max_timeout=10) # Wait until registry file is cleaned self.wait_until( lambda: self.log_contains( - "Prospector states cleaned up. Before: 0, After: 0", logfile="filebeat2.log"), + "input states cleaned up. 
Before: 0, After: 0", logfile="filebeat2.log"), max_timeout=10) filebeat.check_kill_and_wait() def test_restart_state_reset(self): """ - Test that ttl is set to -1 after restart and no prospector covering it + Test that ttl is set to -1 after restart and no inputs covering it """ self.render_config_template( @@ -1031,10 +1162,10 @@ def test_restart_state_reset(self): filebeat = self.start_beat(output="filebeat2.log") - # Wait until prospectors are started + # Wait until inputs are started self.wait_until( lambda: self.log_contains_count( - "Starting prospector of type: log", logfile="filebeat2.log") >= 1, + "Starting input of type: log", logfile="filebeat2.log") >= 1, max_timeout=10) filebeat.check_kill_and_wait() @@ -1197,7 +1328,7 @@ def test_restart_state_reset_ttl_no_clean_inactive(self): filebeat = self.start_beat(output="filebeat2.log") - # Wait until prospectors are started + # Wait until inputs are started self.wait_until( lambda: self.log_contains("Registry file updated", logfile="filebeat2.log"), max_timeout=10) @@ -1301,14 +1432,14 @@ def test_ignore_older_state_clean_inactive(self): data = self.get_registry() assert len(data) == 0 - def test_registrar_files_with_prospector_level_processors(self): + def test_registrar_files_with_input_level_processors(self): """ Check that multiple files are put into registrar file with drop event processor """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - prospector_processors=[{ + input_processors=[{ "drop_event": {}, }] ) @@ -1376,3 +1507,58 @@ def test_registrar_files_with_prospector_level_processors(self): "inode": stat.st_ino, "device": stat.st_dev, }, file_state_os) + + def test_registrar_meta(self): + """ + Check that multiple entries for the same file are on the registry when they have + different meta + """ + + self.render_config_template( + type='docker', + input_raw=''' + containers: + path: {path} + stream: stdout + ids: + - container_id +- type: docker + containers: + path: {path} + stream: stderr + ids: + - container_id + '''.format(path=os.path.abspath(self.working_dir) + "/log/") + ) + os.mkdir(self.working_dir + "/log/") + os.mkdir(self.working_dir + "/log/container_id") + testfile_path1 = self.working_dir + "/log/container_id/test.log" + + with open(testfile_path1, 'w') as f: + for i in range(0, 10): + f.write('{"log":"hello\\n","stream":"stdout","time":"2018-04-13T13:39:57.924216596Z"}\n') + f.write('{"log":"hello\\n","stream":"stderr","time":"2018-04-13T13:39:57.924216596Z"}\n') + + filebeat = self.start_beat() + + self.wait_until( + lambda: self.output_has(lines=20), + max_timeout=15) + + # wait until the registry file exist. Needed to avoid a race between + # the logging and actual writing the file. Seems to happen on Windows. 
+ + self.wait_until( + lambda: os.path.isfile(os.path.join(self.working_dir, + "registry")), + max_timeout=1) + + filebeat.check_kill_and_wait() + + # Check registry contains 2 entries with meta + data = self.get_registry() + assert len(data) == 2 + assert data[0]["source"] == data[1]["source"] + assert data[0]["meta"]["stream"] in ("stdout", "stderr") + assert data[1]["meta"]["stream"] in ("stdout", "stderr") + assert data[0]["meta"]["stream"] != data[1]["meta"]["stream"] diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar_upgrade.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar_upgrade.py new file mode 100644 index 00000000..21569e9c --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar_upgrade.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python +"""Test the registrar with old registry file formats""" + +import os +import json + +from nose.plugins.skip import Skip, SkipTest + +from filebeat import BaseTest + + +class Test(BaseTest): + def test_upgrade_from_6_3_0(self): + template = "test-2lines-registry-6.3.0" + self.run_with_single_registry_format(template) + + def test_upgrade_from_6_3_1(self): + template = "test-2lines-registry-6.3.1" + self.run_with_single_registry_format(template) + + def test_upgrade_from_faulty_6_3_1(self): + template = "test-2lines-registry-6.3.1-faulty" + self.run_with_single_registry_format(template) + + def test_upgrade_from_latest(self): + template = "test-2lines-registry-latest" + self.run_with_single_registry_format(template) + + def run_with_single_registry_format(self, template): + # prepare log file + testfile, file_state = self.prepare_log() + + # prepare registry file + self.apply_registry_template(template, testfile, file_state) + + self.run_and_validate() + + def apply_registry_template(self, template, testfile, file_state): + source = self.beat_path + "/tests/files/registry/" + template + with open(source) as f: + registry = json.loads(f.read()) + + for state in registry: + state["source"] = testfile + state["FileStateOS"] = file_state + with open(self.working_dir + "/registry", 'w') as f: + f.write(json.dumps(registry)) + + def prepare_log(self): + # test is currently skipped on windows, because FileStateOS must match the + # current OS format. 
+ if os.name == "nt": + raise SkipTest + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*" + ) + + os.mkdir(self.working_dir + "/log/") + + testfile_path = self.working_dir + "/log/test.log" + with open(testfile_path, 'w') as f: + f.write("123456789\n") + f.write("abcdefghi\n") + + st = os.stat(testfile_path) + file_state = {"inode": st.st_ino, "device": st.st_dev} + return testfile_path, file_state + + def run_and_validate(self): + filebeat = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=1), + max_timeout=15) + + # stop filebeat and enforce one last registry update + filebeat.check_kill_and_wait() + + data = self.get_registry() + assert len(data) == 1 + assert data[0]["offset"] == 20 + + # check only second line has been written + output = self.read_output() + assert len(output) == 1 + assert output[0]["message"] == "abcdefghi" diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_prospectors.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_inputs.py similarity index 71% rename from vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_prospectors.py rename to vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_inputs.py index d9db94c8..cee9c874 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_prospectors.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_inputs.py @@ -1,12 +1,9 @@ -import re -import sys -import unittest import os import time from filebeat import BaseTest -prospectorConfigTemplate = """ +inputConfigTemplate = """ - type: log paths: - {} @@ -18,12 +15,12 @@ class Test(BaseTest): def test_reload(self): """ - Test basic prospectors reload + Test basic input reload """ self.render_config_template( reload=True, reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) proc = self.start_beat() @@ -32,8 +29,8 @@ def test_reload(self): logfile = self.working_dir + "/logs/test.log" os.mkdir(self.working_dir + "/configs/") - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/*")) + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/*")) with open(logfile, 'w') as f: f.write("Hello world\n") @@ -43,12 +40,12 @@ def test_reload(self): def test_start_stop(self): """ - Test basic prospectors start and stop + Test basic input start and stop """ self.render_config_template( reload=True, reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) proc = self.start_beat() @@ -57,19 +54,19 @@ def test_start_stop(self): logfile = self.working_dir + "/logs/test.log" os.mkdir(self.working_dir + "/configs/") - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/*")) + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/*")) with open(logfile, 'w') as f: f.write("Hello world\n") self.wait_until(lambda: self.output_lines() == 1) - # Remove prospector - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: + # Remove input + with open(self.working_dir + "/configs/input.yml", 'w') as f: f.write("") - # Wait until prospector is stopped + # Wait until input is stopped self.wait_until( lambda: self.log_contains("Runner stopped:"), 
max_timeout=15) @@ -86,12 +83,12 @@ def test_start_stop(self): def test_start_stop_replace(self): """ - Test basic start and replace with another prospector + Test basic start and replace with another input """ self.render_config_template( reload=True, reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) proc = self.start_beat() @@ -103,25 +100,25 @@ def test_start_stop_replace(self): first_line = "First log file" second_line = "Second log file" - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/test1.log")) + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/test1.log")) with open(logfile1, 'w') as f: f.write(first_line + "\n") self.wait_until(lambda: self.output_lines() == 1) - # Remove prospector - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: + # Remove input + with open(self.working_dir + "/configs/input.yml", 'w') as f: f.write("") - # Wait until prospector is stopped + # Wait until input is stopped self.wait_until( lambda: self.log_contains("Runner stopped:"), max_timeout=15) - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/test2.log")) + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/test2.log")) # Update both log files, only 1 change should be picke dup with open(logfile1, 'a') as f: @@ -140,14 +137,14 @@ def test_start_stop_replace(self): assert output[1]["message"] == second_line assert self.output_lines() == 2 - def test_reload_same_prospector(self): + def test_reload_same_input(self): """ - Test reloading same prospector + Test reloading same input """ self.render_config_template( reload=True, reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) proc = self.start_beat() @@ -158,11 +155,11 @@ def test_reload_same_prospector(self): first_line = "First log file" second_line = "Second log file" - config = prospectorConfigTemplate.format(self.working_dir + "/logs/test.log") + config = inputConfigTemplate.format(self.working_dir + "/logs/test.log") config = config + """ close_eof: true """ - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: + with open(self.working_dir + "/configs/input.yml", 'w') as f: f.write(config) with open(logfile, 'w') as f: @@ -170,15 +167,15 @@ def test_reload_same_prospector(self): self.wait_until(lambda: self.output_lines() == 1) - # Overwrite prospector with same path but new fields - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: + # Overwrite input with same path but new fields + with open(self.working_dir + "/configs/input.yml", 'w') as f: config = config + """ fields: hello: world """ f.write(config) - # Wait until prospector is stopped + # Wait until input is stopped self.wait_until( lambda: self.log_contains("Runner stopped:"), max_timeout=15) @@ -204,11 +201,11 @@ def test_reload_same_prospector(self): def test_load_configs(self): """ - Test loading separate prospectors configs + Test loading separate inputs configs """ self.render_config_template( reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) os.mkdir(self.working_dir + "/logs/") @@ -218,11 +215,11 @@ def test_load_configs(self): first_line = "First log file" second_line = "Second log file" - config = 
prospectorConfigTemplate.format(self.working_dir + "/logs/test.log") + config = inputConfigTemplate.format(self.working_dir + "/logs/test.log") config = config + """ close_eof: true """ - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: + with open(self.working_dir + "/configs/input.yml", 'w') as f: f.write(config) with open(logfile, 'w') as f: @@ -259,15 +256,15 @@ def test_reload_same_config(self): self.render_config_template( reload=True, reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) os.mkdir(self.working_dir + "/logs/") logfile = self.working_dir + "/logs/test.log" os.mkdir(self.working_dir + "/configs/") - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/*")) + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/*")) proc = self.start_beat() @@ -277,13 +274,13 @@ def test_reload_same_config(self): self.wait_until(lambda: self.output_lines() > 0) # New config with same config file but a bit different to make it reload - # Add it intentionally when other prospector is still running to cause an error - with open(self.working_dir + "/configs/prospector.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/test.log")) + # Add it intentionally when other input is still running to cause an error + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/test.log")) # Make sure error shows up in log file self.wait_until( - lambda: self.log_contains("Can only start a prospector when all related states are finished"), + lambda: self.log_contains("Can only start an input when all related states are finished"), max_timeout=15) # Wait until old runner is stopped @@ -291,7 +288,7 @@ def test_reload_same_config(self): lambda: self.log_contains("Runner stopped:"), max_timeout=15) - # Add new log line and see if it is picked up = new prospector is running + # Add new log line and see if it is picked up = new input is running with open(logfile, 'a') as f: f.write("Hello world2\n") @@ -301,12 +298,12 @@ def test_reload_same_config(self): def test_reload_add(self): """ - Test adding a prospector and makes sure both are still running + Test adding a input and makes sure both are still running """ self.render_config_template( reload=True, reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) os.mkdir(self.working_dir + "/logs/") @@ -314,8 +311,8 @@ def test_reload_add(self): logfile2 = self.working_dir + "/logs/test2.log" os.mkdir(self.working_dir + "/configs/") - with open(self.working_dir + "/configs/prospector1.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/test1.log")) + with open(self.working_dir + "/configs/input.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/test1.log")) proc = self.start_beat() @@ -324,18 +321,18 @@ def test_reload_add(self): self.wait_until(lambda: self.output_lines() > 0) - with open(self.working_dir + "/configs/prospector2.yml", 'w') as f: - f.write(prospectorConfigTemplate.format(self.working_dir + "/logs/test2.log")) + with open(self.working_dir + "/configs/input2.yml", 'w') as f: + f.write(inputConfigTemplate.format(self.working_dir + "/logs/test2.log")) self.wait_until( lambda: self.log_contains_count("New runner started") == 2, max_timeout=15) - # Add new log 
line and see if it is picked up = new prospector is running + # Add new log line and see if it is picked up = new input is running with open(logfile1, 'a') as f: f.write("Hello world2\n") - # Add new log line and see if it is picked up = new prospector is running + # Add new log line and see if it is picked up = new input is running with open(logfile2, 'a') as f: f.write("Hello world3\n") diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_modules.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_modules.py index d0341503..2ecbc7a7 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_modules.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_reload_modules.py @@ -1,5 +1,4 @@ import re -import sys import unittest import os import shutil @@ -16,7 +15,7 @@ enabled: true var.paths: - {} - prospector: + input: scan_frequency: 1s auth: enabled: false @@ -31,7 +30,7 @@ def setUp(self): self.es = Elasticsearch([self.get_elasticsearch_url()]) # Copy system module - shutil.copytree(os.path.join("module", "test"), + shutil.copytree(os.path.join(self.beat_path, "tests", "system", "module", "test"), os.path.join(self.working_dir, "module", "test")) def test_reload(self): @@ -42,7 +41,7 @@ def test_reload(self): reload=True, reload_path=self.working_dir + "/configs/*.yml", reload_type="modules", - prospectors=False, + inputs=False, ) proc = self.start_beat() @@ -72,7 +71,7 @@ def test_reload_writes_pipeline(self): reload=True, reload_path=self.working_dir + "/configs/*.yml", reload_type="modules", - prospectors=False, + inputs=False, elasticsearch={"host": self.get_elasticsearch_url()} ) @@ -103,7 +102,7 @@ def test_no_es_connection(self): reload=True, reload_path=self.working_dir + "/configs/*.yml", reload_type="modules", - prospectors=False, + inputs=False, elasticsearch={"host": 'errorhost:9201'} ) @@ -126,7 +125,7 @@ def test_start_stop(self): reload=True, reload_path=self.working_dir + "/configs/*.yml", reload_type="modules", - prospectors=False, + inputs=False, ) proc = self.start_beat() @@ -146,11 +145,11 @@ def test_start_stop(self): self.wait_until(lambda: self.output_lines() == 1, max_timeout=10) print(self.output_lines()) - # Remove prospector + # Remove input with open(self.working_dir + "/configs/system.yml", 'w') as f: f.write("") - # Wait until prospector is stopped + # Wait until input is stopped self.wait_until( lambda: self.log_contains("Runner stopped:"), max_timeout=15) @@ -171,7 +170,7 @@ def test_load_configs(self): self.render_config_template( reload_path=self.working_dir + "/configs/*.yml", reload_type="modules", - prospectors=False, + inputs=False, ) os.mkdir(self.working_dir + "/logs/") @@ -219,7 +218,7 @@ def test_wrong_module_no_reload(self): self.render_config_template( reload=False, reload_path=self.working_dir + "/configs/*.yml", - prospectors=False, + inputs=False, ) os.mkdir(self.working_dir + "/configs/") @@ -229,7 +228,7 @@ def test_wrong_module_no_reload(self): test: enabled: true wrong_field: error - prospector: + input: scan_frequency: 1s """ with open(config_path, 'w') as f: @@ -239,7 +238,7 @@ def test_wrong_module_no_reload(self): # Wait until offset for new line is updated self.wait_until( - lambda: self.log_contains("No paths were defined for prospector accessing"), + lambda: self.log_contains("No paths were defined for input accessing"), max_timeout=10) assert exit_code == 1 diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_shutdown.py 
b/vendor/github.com/elastic/beats/filebeat/tests/system/test_shutdown.py index 5d6ea3c6..4577b61b 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_shutdown.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_shutdown.py @@ -64,8 +64,13 @@ def test_shutdown_wait_ok(self): # we allow for a potential race in the harvester shutdown here. # In some cases the registry offset might match the penultimate offset. - assert (offset == outputs[-1]["offset"] or - offset == outputs[-2]["offset"]) + + eol_offset = 1 + if os.name == "nt": + eol_offset += 1 + + assert (offset == (outputs[-1]["offset"] + eol_offset + len(outputs[-1]["message"])) or + offset == (outputs[-2]["offset"] + eol_offset + len(outputs[-2]["message"]))) def test_shutdown_wait_timeout(self): """ @@ -143,36 +148,35 @@ def test_once(self): def nasa_logs(self): # Uncompress the nasa log file. - nasa_log = '../files/logs/nasa-50k.log' + nasa_log = os.path.join(self.beat_path, "tests", "files", "logs", "nasa-50k.log") if not os.path.isfile(nasa_log): - with gzip.open('../files/logs/nasa-50k.log.gz', 'rb') as infile: + with gzip.open(nasa_log + ".gz", 'rb') as infile: with open(nasa_log, 'w') as outfile: for line in infile: outfile.write(line) os.mkdir(self.working_dir + "/log/") self.copy_files(["logs/nasa-50k.log"], - source_dir="../files", target_dir="log") def test_stopping_empty_path(self): """ - Test filebeat stops properly when 1 prospector has an invalid config. + Test filebeat stops properly when 1 input has an invalid config. """ - prospector_raw = """ + input_raw = """ - type: log paths: [] """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - prospector_raw=prospector_raw, + input_raw=input_raw, ) filebeat = self.start_beat() time.sleep(2) # Wait until first flush - msg = "No paths were defined for prospector" + msg = "No paths were defined for input" self.wait_until( lambda: self.log_contains_count(msg) >= 1, max_timeout=5) diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_stdin.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_stdin.py new file mode 100644 index 00000000..6a79c050 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_stdin.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python + +from filebeat import BaseTest +import os + +from beat.beat import Proc + +""" +Tests for the stdin functionality. +""" + + +class Test(BaseTest): + + def test_stdin(self): + """ + Test stdin input. Checks if reading is continued after the first read. + """ + self.render_config_template( + type="stdin" + ) + + proc = self.start_beat() + + self.wait_until( + lambda: self.log_contains( + "Harvester started for file: -"), + max_timeout=10) + + iterations1 = 5 + for n in range(0, iterations1): + os.write(proc.stdin_write, "Hello World\n") + + self.wait_until( + lambda: self.output_has(lines=iterations1), + max_timeout=15) + + iterations2 = 10 + for n in range(0, iterations2): + os.write(proc.stdin_write, "Hello World\n") + + self.wait_until( + lambda: self.output_has(lines=iterations1 + iterations2), + max_timeout=15) + + proc.check_kill_and_wait() + + objs = self.read_output() + assert len(objs) == iterations1 + iterations2 + + def test_stdin_eof(self): + """ + Test that Filebeat works when stdin is closed. 
+ """ + self.render_config_template( + type="stdin", + close_eof="true", + ) + + args = [self.test_binary, + "-systemTest", + "-test.coverprofile", + os.path.join(self.working_dir, "coverage.cov"), + "-c", os.path.join(self.working_dir, "filebeat.yml"), + "-e", "-v", "-d", "*", + ] + proc = Proc(args, os.path.join(self.working_dir, "filebeat.log")) + os.write(proc.stdin_write, "Hello World\n") + + proc.start() + self.wait_until(lambda: self.output_has(lines=1)) + + # Continue writing after end was reached + os.write(proc.stdin_write, "Hello World2\n") + os.close(proc.stdin_write) + + self.wait_until(lambda: self.output_has(lines=2)) + + proc.proc.terminate() + proc.proc.wait() + + objs = self.read_output() + assert objs[0]["message"] == "Hello World" + assert objs[1]["message"] == "Hello World2" + + def test_stdin_is_exclusive(self): + """ + Test that Filebeat run Stdin in exclusive mode. + """ + + input_raw = """ +- type: stdin + enabled: true +- type: udp + host: 127.0.0.0:10000 + enabled: true +""" + + self.render_config_template( + input_raw=input_raw, + inputs=False, + ) + + filebeat = self.start_beat() + filebeat.check_wait(exit_code=1) + assert self.log_contains("Exiting: stdin requires to be run in exclusive mode, configured inputs: stdin, udp") diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_syslog.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_syslog.py new file mode 100644 index 00000000..d1a3b371 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_syslog.py @@ -0,0 +1,104 @@ +from filebeat import BaseTest +import socket + + +class Test(BaseTest): + """ + Test filebeat with the syslog input + """ + + def test_syslog_with_tcp(self): + """ + Test syslog input with events from TCP. + """ + host = "127.0.0.1" + port = 8080 + input_raw = """ +- type: syslog + protocol: + tcp: + host: "{}:{}" +""" + + input_raw = input_raw.format(host, port) + self.render_config_template( + input_raw=input_raw, + inputs=False, + ) + + filebeat = self.start_beat() + + self.wait_until(lambda: self.log_contains("Started listening for TCP connection")) + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP + sock.connect((host, port)) + + for n in range(0, 2): + m = "<13>Oct 11 22:14:15 wopr.mymachine.co postfix/smtpd[2000]:" \ + " 'su root' failed for lonvick on /dev/pts/8 {}\n" + m = m.format(n) + sock.send(m) + + self.wait_until(lambda: self.output_count(lambda x: x >= 2)) + + filebeat.check_kill_and_wait() + + output = self.read_output() + + assert len(output) == 2 + self.assert_syslog(output[0]) + sock.close() + + def test_syslog_with_udp(self): + """ + Test syslog input with events from TCP. 
+ """ + host = "127.0.0.1" + port = 8080 + input_raw = """ +- type: syslog + protocol: + udp: + host: "{}:{}" +""" + + input_raw = input_raw.format(host, port) + self.render_config_template( + input_raw=input_raw, + inputs=False, + ) + + filebeat = self.start_beat() + + self.wait_until(lambda: self.log_contains("Started listening for UDP connection")) + + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # TCP + + for n in range(0, 2): + m = "<13>Oct 11 22:14:15 wopr.mymachine.co postfix/smtpd[2000]:" \ + " 'su root' failed for lonvick on /dev/pts/8 {}\n" + m = m.format(n) + sock.sendto(m, (host, port)) + + self.wait_until(lambda: self.output_count(lambda x: x >= 2)) + + filebeat.check_kill_and_wait() + + output = self.read_output() + + assert len(output) == 2 + self.assert_syslog(output[0]) + + def assert_syslog(self, syslog): + assert syslog["prospector.type"] == "syslog" + assert syslog["event.severity"] == 5 + assert syslog["hostname"] == "wopr.mymachine.co" + assert syslog["input.type"] == "syslog" + assert syslog["message"] == "'su root' failed for lonvick on /dev/pts/8 0" + assert syslog["process.pid"] == 2000 + assert syslog["process.program"] == "postfix/smtpd" + assert syslog["syslog.facility"] == 1 + assert syslog["syslog.priority"] == 13 + assert syslog["syslog.severity_label"] == "Notice" + assert syslog["syslog.facility_label"] == "user-level" + assert len(syslog["source"]) > 0 diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_tcp.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_tcp.py new file mode 100644 index 00000000..d6788d16 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_tcp.py @@ -0,0 +1,68 @@ +from filebeat import BaseTest +import socket + + +class Test(BaseTest): + """ + Test filebeat TCP input + """ + + def test_tcp_with_newline_delimiter(self): + """ + Test TCP input with a new line delimiter + """ + self.send_events_with_delimiter("\n") + + def test_tcp_with_custom_char_delimiter(self): + """ + Test TCP input with a custom single char delimiter + """ + self.send_events_with_delimiter(";") + + def test_tcp_with_custom_word_delimiter(self): + """ + Test TCP input with a custom single char delimiter + """ + self.send_events_with_delimiter("") + + def send_events_with_delimiter(self, delimiter): + host = "127.0.0.1" + port = 8080 + input_raw = """ +- type: tcp + host: "{}:{}" + enabled: true +""" + + # Use default of \n and stripping \r + if delimiter is not "": + input_raw += "\n line_delimiter: {}".format(delimiter) + + input_raw = input_raw.format(host, port) + + self.render_config_template( + input_raw=input_raw, + inputs=False, + ) + + filebeat = self.start_beat() + + self.wait_until(lambda: self.log_contains("Started listening for TCP connection")) + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP + sock.connect((host, port)) + + for n in range(0, 2): + sock.send("Hello World: " + str(n) + delimiter) + + self.wait_until(lambda: self.output_count(lambda x: x >= 2)) + + filebeat.check_kill_and_wait() + + output = self.read_output() + + assert len(output) == 2 + assert output[0]["prospector.type"] == "tcp" + assert output[0]["input.type"] == "tcp" + + sock.close() diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_udp.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_udp.py index 2efddfab..de6c92e3 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_udp.py +++ 
b/vendor/github.com/elastic/beats/filebeat/tests/system/test_udp.py @@ -1,6 +1,5 @@ from filebeat import BaseTest import socket -import time class Test(BaseTest): @@ -9,22 +8,22 @@ def test_udp(self): host = "127.0.0.1" port = 8080 - prospector_raw = """ + input_raw = """ - type: udp host: "{}:{}" enabled: true """ - prospector_raw = prospector_raw.format(host, port) + input_raw = input_raw.format(host, port) self.render_config_template( - prospector_raw=prospector_raw, - prospectors=False, + input_raw=input_raw, + inputs=False, ) filebeat = self.start_beat() - self.wait_until(lambda: self.log_contains("Started listening for udp")) + self.wait_until(lambda: self.log_contains("Started listening for UDP connection")) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP @@ -39,3 +38,4 @@ def test_udp(self): assert len(output) == 2 assert output[0]["prospector.type"] == "udp" + assert output[0]["input.type"] == "udp" diff --git a/vendor/github.com/elastic/beats/filebeat/util/data.go b/vendor/github.com/elastic/beats/filebeat/util/data.go index a47b4cb2..849185f6 100644 --- a/vendor/github.com/elastic/beats/filebeat/util/data.go +++ b/vendor/github.com/elastic/beats/filebeat/util/data.go @@ -28,7 +28,7 @@ func (d *Data) GetState() file.State { // HasState returns true if the data object contains state data func (d *Data) HasState() bool { - return d.state != file.State{} + return !d.state.IsEmpty() } // GetEvent returns the event in the data object diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/.travis.yml b/vendor/github.com/elastic/beats/generator/beat/{beat}/.travis.yml index 30a20005..1fa58d5d 100644 --- a/vendor/github.com/elastic/beats/generator/beat/{beat}/.travis.yml +++ b/vendor/github.com/elastic/beats/generator/beat/{beat}/.travis.yml @@ -6,7 +6,7 @@ services: language: go go: - - 1.9.2 + - 1.9.4 os: - linux diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile b/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile index e983ed2b..a4560753 100644 --- a/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile +++ b/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile @@ -8,6 +8,7 @@ ES_BEATS?=./vendor/github.com/elastic/beats GOPACKAGES=$(shell govendor list -no-status +local) PREFIX?=. NOTICE_FILE=NOTICE +GOBUILD_FLAGS=-i -ldflags "-X $(BEAT_PATH)/vendor/github.com/elastic/beats/libbeat/version.buildTime=$(NOW) -X $(BEAT_PATH)/vendor/github.com/elastic/beats/libbeat/version.commit=$(COMMIT_ID)" # Path to the libbeat Makefile -include $(ES_BEATS)/libbeat/scripts/Makefile @@ -22,7 +23,7 @@ setup: copy-vendor copy-vendor: mkdir -p vendor/github.com/elastic/ cp -R ${BEAT_GOPATH}/src/github.com/elastic/beats vendor/github.com/elastic/ - rm -rf vendor/github.com/elastic/beats/.git + rm -rf vendor/github.com/elastic/beats/.git vendor/github.com/elastic/beats/x-pack .PHONY: git-init git-init: diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/README.md b/vendor/github.com/elastic/beats/generator/beat/{beat}/README.md index bce2a9c9..97764e73 100644 --- a/vendor/github.com/elastic/beats/generator/beat/{beat}/README.md +++ b/vendor/github.com/elastic/beats/generator/beat/{beat}/README.md @@ -114,4 +114,4 @@ The beat frameworks provides tools to crosscompile and package your beat for dif make package ``` -This will fetch and create all images required for the build process. The hole process to finish can take several minutes. +This will fetch and create all images required for the build process. 
The whole process to finish can take several minutes. diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go.tmpl b/vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go.tmpl index 564ba409..e2d67f94 100644 --- a/vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go.tmpl +++ b/vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go.tmpl @@ -19,14 +19,14 @@ type {Beat} struct { // Creates beater func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) { - config := config.DefaultConfig - if err := cfg.Unpack(&config); err != nil { + c := config.DefaultConfig + if err := cfg.Unpack(&c); err != nil { return nil, fmt.Errorf("Error reading config file: %v", err) } bt := &{Beat}{ done: make(chan struct{}), - config: config, + config: c, } return bt, nil } diff --git a/vendor/github.com/elastic/beats/generator/common/Makefile b/vendor/github.com/elastic/beats/generator/common/Makefile index c446c15b..269fde2c 100644 --- a/vendor/github.com/elastic/beats/generator/common/Makefile +++ b/vendor/github.com/elastic/beats/generator/common/Makefile @@ -44,6 +44,8 @@ test-build: test python-env: @test -d ${PYTHON_ENV} || virtualenv ${PYTHON_ENV} @${PYTHON_ENV}/bin/pip install --upgrade pip PyYAML + @# Work around pip bug. See: https://github.com/pypa/pip/issues/4464 + @find $(PYTHON_ENV) -type d -name dist-packages -exec sh -c "echo dist-packages > {}.pth" ';' # Cleans up environment .PHONY: clean diff --git a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile index eab3101c..c50dc3c3 100644 --- a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile +++ b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile @@ -22,7 +22,8 @@ setup: copy-vendor copy-vendor: mkdir -p vendor/github.com/elastic/ cp -R ${GOPATH}/src/github.com/elastic/beats vendor/github.com/elastic/ - rm -rf vendor/github.com/elastic/beats/.git + ln -s ${PWD}/vendor/github.com/elastic/beats/metricbeat/scripts/generate_imports_helper.py ${PWD}/vendor/github.com/elastic/beats/script/generate_imports_helper.py + rm -rf vendor/github.com/elastic/beats/.git vendor/github.com/elastic/beats/x-pack # This is called by the beats packer before building starts .PHONY: before-build diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc index 0a80f514..0cf72900 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc @@ -73,4 +73,3 @@ include::../../libbeat/docs/yaml.asciidoc[] include::../../libbeat/docs/regexp.asciidoc[] include::../../libbeat/docs/reference-yml.asciidoc[] - diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/fields.asciidoc index 57005aa5..679dc605 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/fields.asciidoc @@ -16,6 +16,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -32,33 +33,41 @@ Contains common beat fields available in all event types. -[float] -=== `beat.name` - +*`beat.name`*:: ++ +-- The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. 
If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. -[float] -=== `beat.hostname` +-- +*`beat.hostname`*:: ++ +-- The hostname as returned by the operating system on which the Beat is running. -[float] -=== `beat.timezone` +-- +*`beat.timezone`*:: ++ +-- The timezone as returned by the operating system on which the Beat is running. -[float] -=== `beat.version` +-- +*`beat.version`*:: ++ +-- The version of the beat that generated this event. -[float] -=== `@timestamp` +-- +*`@timestamp`*:: ++ +-- type: date example: August 26th 2016, 12:35:53.332 @@ -70,20 +79,26 @@ required: True The timestamp when the event log record was generated. -[float] -=== `tags` +-- +*`tags`*:: ++ +-- Arbitrary tags that can be set per Beat and per transaction type. -[float] -=== `fields` +-- +*`fields`*:: ++ +-- type: object Contains user configurable fields. +-- + [float] == error fields @@ -91,30 +106,36 @@ Error fields containing additional info in case of errors. -[float] -=== `error.message` - +*`error.message`*:: ++ +-- type: text Error message. -[float] -=== `error.code` +-- +*`error.code`*:: ++ +-- type: long Error code. -[float] -=== `error.type` +-- +*`error.type`*:: ++ +-- type: keyword Error type. +-- + [[exported-fields-cloud]] == Cloud provider metadata fields @@ -122,56 +143,70 @@ Metadata from cloud providers added by the add_cloud_metadata processor. -[float] -=== `meta.cloud.provider` - +*`meta.cloud.provider`*:: ++ +-- example: ec2 Name of the cloud provider. Possible values are ec2, gce, or digitalocean. -[float] -=== `meta.cloud.instance_id` +-- +*`meta.cloud.instance_id`*:: ++ +-- Instance ID of the host machine. -[float] -=== `meta.cloud.instance_name` +-- +*`meta.cloud.instance_name`*:: ++ +-- Instance name of the host machine. -[float] -=== `meta.cloud.machine_type` +-- +*`meta.cloud.machine_type`*:: ++ +-- example: t2.medium Machine type of the host machine. -[float] -=== `meta.cloud.availability_zone` +-- +*`meta.cloud.availability_zone`*:: ++ +-- example: us-east-1c Availability zone in which this host is running. -[float] -=== `meta.cloud.project_id` +-- +*`meta.cloud.project_id`*:: ++ +-- example: project-x Name of the project in Google Cloud. -[float] -=== `meta.cloud.region` +-- +*`meta.cloud.region`*:: ++ +-- Region in which this host is running. +-- + [[exported-fields-common]] == Common heartbeat monitor fields @@ -185,70 +220,84 @@ Common monitor fields. -[float] -=== `monitor.type` - +*`monitor.type`*:: ++ +-- type: keyword The monitor type. -[float] -=== `monitor.name` +-- +*`monitor.name`*:: ++ +-- type: keyword The monitors configured name -[float] -=== `monitor.id` +-- +*`monitor.id`*:: ++ +-- type: keyword The monitors full job ID as used by heartbeat. +-- + [float] == duration fields Total monitoring test duration -[float] -=== `monitor.duration.us` - +*`monitor.duration.us`*:: ++ +-- type: long Duration in microseconds -[float] -=== `monitor.scheme` +-- +*`monitor.scheme`*:: ++ +-- type: keyword Address url scheme. For example `tcp`, `tls`, `http`, and `https`. -[float] -=== `monitor.host` +-- +*`monitor.host`*:: ++ +-- type: keyword Hostname of service being monitored. Can be missing, if service is monitored by IP. -[float] -=== `monitor.ip` +-- +*`monitor.ip`*:: ++ +-- type: ip IP of service being monitored. If service is monitored by hostname, the `ip` field contains the resolved ip address for the current host. 
-[float] -=== `monitor.status` +-- +*`monitor.status`*:: ++ +-- type: keyword required: True @@ -256,6 +305,8 @@ required: True Indicator if monitor could validate the service to be available. +-- + [[exported-fields-docker-processor]] == Docker fields @@ -264,38 +315,114 @@ Docker stats collected from Docker. -[float] -=== `docker.container.id` - +*`docker.container.id`*:: ++ +-- type: keyword Unique container id. -[float] -=== `docker.container.image` +-- +*`docker.container.image`*:: ++ +-- type: keyword Name of the image the container was built on. -[float] -=== `docker.container.name` +-- +*`docker.container.name`*:: ++ +-- type: keyword Container name. -[float] -=== `docker.container.labels` +-- +*`docker.container.labels`*:: ++ +-- type: object Image labels. +-- + +[[exported-fields-host-processor]] +== Host fields + +Info collected for the host machine. + + + + +*`host.name`*:: ++ +-- +type: keyword + +Hostname. + + +-- + +*`host.id`*:: ++ +-- +type: keyword + +Unique host id. + + +-- + +*`host.architecture`*:: ++ +-- +type: keyword + +Host architecture (e.g. x86_64, arm, ppc, mips). + + +-- + +*`host.os.platform`*:: ++ +-- +type: keyword + +OS platform (e.g. centos, ubuntu, windows). + + +-- + +*`host.os.version`*:: ++ +-- +type: keyword + +OS version. + + +-- + +*`host.os.family`*:: ++ +-- +type: keyword + +OS family (e.g. redhat, debian, freebsd, windows). + + +-- + [[exported-fields-http]] == HTTP monitor fields @@ -309,14 +436,26 @@ HTTP related fields. -[float] -=== `http.url` - +*`http.url`*:: ++ +-- type: text Service url used by monitor. +*`http.url.raw`*:: ++ +-- +type: keyword + +The service url used by monitor. This is a non-analyzed field that is useful for aggregations. + + +-- + +-- + [float] == response fields @@ -324,14 +463,16 @@ Service response parameters. -[float] -=== `http.response.status` - +*`http.response.status`*:: ++ +-- type: integer Response status code. +-- + [float] == rtt fields @@ -352,13 +493,15 @@ Note: if validator is not reading body or only a prefix, this -[float] -=== `http.rtt.validate.us` - +*`http.rtt.validate.us`*:: ++ +-- type: long Duration in microseconds +-- + [float] == validate_body fields @@ -371,39 +514,45 @@ Note: if validator is not reading body or only a prefix, this -[float] -=== `http.rtt.validate_body.us` - +*`http.rtt.validate_body.us`*:: ++ +-- type: long Duration in microseconds +-- + [float] == write_request fields Duration of sending the complete HTTP request. Duration based on already available network connection. -[float] -=== `http.rtt.write_request.us` - +*`http.rtt.write_request.us`*:: ++ +-- type: long Duration in microseconds +-- + [float] == response_header fields Time required between sending the start of sending the HTTP request and first byte from HTTP response being read. Duration based on already available network connection. -[float] -=== `http.rtt.response_header.us` - +*`http.rtt.response_header.us`*:: ++ +-- type: long Duration in microseconds +-- + [float] == total fields @@ -416,13 +565,15 @@ Note: if validator is not reading body or only a prefix, this -[float] -=== `http.rtt.total.us` - +*`http.rtt.total.us`*:: ++ +-- type: long Duration in microseconds +-- + [[exported-fields-icmp]] == ICMP fields @@ -436,27 +587,31 @@ IP ping fields. -[float] -=== `icmp.requests` - +*`icmp.requests`*:: ++ +-- type: integer Number if ICMP EchoRequests send. 
+-- + [float] == rtt fields ICMP Echo Request and Reply round trip time -[float] -=== `icmp.rtt.us` - +*`icmp.rtt.us`*:: ++ +-- type: long Duration in microseconds +-- + [[exported-fields-kubernetes-processor]] == Kubernetes fields @@ -465,62 +620,76 @@ Kubernetes metadata added by the kubernetes processor -[float] -=== `kubernetes.pod.name` - +*`kubernetes.pod.name`*:: ++ +-- type: keyword Kubernetes pod name -[float] -=== `kubernetes.namespace` +-- +*`kubernetes.namespace`*:: ++ +-- type: keyword Kubernetes namespace -[float] -=== `kubernetes.node.name` +-- +*`kubernetes.node.name`*:: ++ +-- type: keyword Kubernetes node name -[float] -=== `kubernetes.labels` +-- +*`kubernetes.labels`*:: ++ +-- type: object Kubernetes labels map -[float] -=== `kubernetes.annotations` +-- +*`kubernetes.annotations`*:: ++ +-- type: object Kubernetes annotations map -[float] -=== `kubernetes.container.name` +-- +*`kubernetes.container.name`*:: ++ +-- type: keyword Kubernetes container name -[float] -=== `kubernetes.container.image` +-- +*`kubernetes.container.image`*:: ++ +-- type: keyword Kubernetes container image +-- + [[exported-fields-resolve]] == Host lookup fields @@ -534,35 +703,41 @@ Host lookup fields. -[float] -=== `resolve.host` - +*`resolve.host`*:: ++ +-- type: keyword Hostname of service being monitored. -[float] -=== `resolve.ip` +-- +*`resolve.ip`*:: ++ +-- type: ip IP address found for the given host. +-- + [float] == rtt fields Duration required to resolve an IP from hostname. -[float] -=== `resolve.rtt.us` - +*`resolve.rtt.us`*:: ++ +-- type: long Duration in microseconds +-- + [[exported-fields-socks5]] == SOCKS5 proxy fields @@ -590,13 +765,15 @@ Time required to establish a connection via SOCKS5 to endpoint based on availabl -[float] -=== `socks5.rtt.connect.us` - +*`socks5.rtt.connect.us`*:: ++ +-- type: long Duration in microseconds +-- + [[exported-fields-tcp]] == TCP layer fields @@ -610,14 +787,16 @@ TCP network layer related fields. -[float] -=== `tcp.port` - +*`tcp.port`*:: ++ +-- type: integer Service port number. +-- + [float] == rtt fields @@ -632,13 +811,15 @@ Duration required to establish a TCP connection based on already available IP ad -[float] -=== `tcp.rtt.connect.us` - +*`tcp.rtt.connect.us`*:: ++ +-- type: long Duration in microseconds +-- + [float] == validate fields @@ -646,13 +827,15 @@ Duration of validation step based on existing TCP connection. -[float] -=== `tcp.rtt.validate.us` - +*`tcp.rtt.validate.us`*:: ++ +-- type: long Duration in microseconds +-- + [[exported-fields-tls]] == TLS encryption layer fields @@ -680,10 +863,12 @@ Time required to finish TLS handshake based on already available network connect -[float] -=== `tls.rtt.handshake.us` - +*`tls.rtt.handshake.us`*:: ++ +-- type: long Duration in microseconds +-- + diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc index d01f3789..95319181 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc @@ -1,18 +1,7 @@ [[heartbeat-getting-started]] == Getting Started With Heartbeat -To get started with your own Heartbeat setup, install and configure these -related products: - - * Elasticsearch for storage and indexing the data. - * Kibana for the UI. - * Logstash (optional) for inserting data into Elasticsearch. 
- -See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack] -for more information. - -After installing the Elastic Stack, read the following topics to learn how to -install, configure, and run Heartbeat: +include::../../libbeat/docs/shared-getting-started-intro.asciidoc[] * <> * <> @@ -130,8 +119,7 @@ https://www.elastic.co/downloads/beats/heartbeat[downloads page]. . Rename the +heartbeat--windows+ directory to +Heartbeat+. . Open a PowerShell prompt as an Administrator (right-click the PowerShell icon -and select *Run As Administrator*). If you are running Windows XP, you may need -to download and install PowerShell. +and select *Run As Administrator*). . From the PowerShell prompt, run the following commands to install Heartbeat as a Windows service: diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc index a7eedf82..4e09c2db 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc @@ -252,8 +252,8 @@ A list of hosts to ping. The entries in the list can be: * A plain host name, such as `localhost`, or an IP address. If you specify this option, you must also specify a value for <>. If the -monitor is configured to use SSL, Heartbeat establishes an SSL/TLS-based -connection. Otherwise, it establishes a plain TCP connection. +monitor is <>, Heartbeat establishes an +SSL/TLS-based connection. Otherwise, it establishes a plain TCP connection. * A hostname and port, such as `localhost:12345`. Heartbeat connects to the port on the specified host. If the monitor is <>, Heartbeat establishes an @@ -360,6 +360,8 @@ Example configuration: ------------------------------------------------------------------------------- +Also see <> for a full description of the `ssl` options. + [float] [[monitor-http-options]] === HTTP options @@ -429,6 +431,8 @@ Example configuration: ------------------------------------------------------------------------------- +Also see <> for a full description of the `ssl` options. + [float] [[monitor-http-check]] ==== `check` @@ -459,7 +463,7 @@ Under `check.response`, specify these options: *`status`*:: The expected status code. If this setting is not configured or it's set to 0, any status code other than 404 is accepted. *`headers`*:: The required response headers. -*`body`*:: The required response body content. +*`body`*:: A list of regular expressions to match the body output. Only a single expression needs to match. 
The following configuration shows how to check the response when the body contains JSON: @@ -469,13 +473,49 @@ contains JSON: - type: http schedule: '@every 5s' urls: ["https://myhost:80"] -check.request: - method: GET - headers: - 'X-API-Key': '12345-mykey-67890' -check.response: - status: 200 - body: '{"status": "ok"}' + check.request: + method: GET + headers: + 'X-API-Key': '12345-mykey-67890' + check.response: + status: 200 + body: '{"status": "ok"}' +------------------------------------------------------------------------------- + +The following configuration shows how to check the response for multiple regex +patterns: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + schedule: '@every 5s' + urls: ["https://myhost:80"] + check.request: + method: GET + headers: + 'X-API-Key': '12345-mykey-67890' + check.response: + status: 200 + body: + - hello + - world +------------------------------------------------------------------------------- + +The following configuration shows how to check the response with a multiline +regex: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + schedule: '@every 5s' + urls: ["https://myhost:80"] + check.request: + method: GET + headers: + 'X-API-Key': '12345-mykey-67890' + check.response: + status: 200 + body: '(?s)first.*second.*third' ------------------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc index b15e0ead..4022fe0a 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc @@ -2,31 +2,27 @@ include::../../libbeat/docs/version.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes62.asciidoc[] +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :version: {stack-version} :beatname_lc: heartbeat :beatname_uc: Heartbeat :beatname_pkg: heartbeat-elastic +:github_repo_name: beats +:discuss_forum: beats/{beatname_lc} +:beat_default_index_prefix: {beatname_lc} +:has_ml_jobs: yes include::../../libbeat/docs/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] -include::../../libbeat/docs/contributing-to-beats.asciidoc[] - include::./getting-started.asciidoc[] include::../../libbeat/docs/repositories.asciidoc[] include::./setting-up-running.asciidoc[] -// -//include::./upgrading.asciidoc[] - -// -//include::./how-heartbeat-works.asciidoc[] - include::./configuring-howto.asciidoc[] include::./fields.asciidoc[] @@ -41,5 +37,5 @@ include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] -// -//include::./heartbeat-devguide.asciidoc[] +include::../../libbeat/docs/contributing-to-beats.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/securing-heartbeat.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/securing-heartbeat.asciidoc index 56d5a5cd..d78029ba 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/securing-heartbeat.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/securing-heartbeat.asciidoc @@ -9,6 +9,7 @@ and other products in the Elastic stack: * <> * <> +* <> //sets block macro for https.asciidoc included in next section diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc index 4e179b40..8f215865 
100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc @@ -4,7 +4,7 @@ // that is unique to each beat. ///// -[[seting-up-and-running]] +[[setting-up-and-running]] == Setting up and running {beatname_uc} Before reading this section, see the @@ -29,3 +29,5 @@ include::../../libbeat/docs/keystore.asciidoc[] include::../../libbeat/docs/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] + +include::../../libbeat/docs/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml b/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml index 20660d14..564f3b66 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml @@ -248,7 +248,8 @@ heartbeat.scheduler: # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. - # A value of 0 (the default) ensures events are immediately available + # The default value is set to 2048. + # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 @@ -256,6 +257,66 @@ heartbeat.scheduler: # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. 
+ #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -290,6 +351,14 @@ heartbeat.scheduler: # equals: # http.code: 200 # +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. @@ -314,6 +383,7 @@ heartbeat.scheduler: # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 +# match_short_id: false # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: @@ -327,6 +397,7 @@ heartbeat.scheduler: # #processors: #- add_docker_metadata: ~ +#- add_host_metadata: ~ #============================= Elastic Cloud ================================== @@ -399,7 +470,18 @@ output.elasticsearch: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -463,7 +545,7 @@ output.elasticsearch: # Optional load balance the events between the Logstash hosts. Default is false. #loadbalance: false - # Number of batches to be sent asynchronously to logstash while processing + # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 @@ -472,6 +554,17 @@ output.elasticsearch: # if no error is encountered. #slow_start: false + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + # Optional index name. The default index name is set to heartbeat # in all lowercase. #index: 'heartbeat' @@ -816,6 +909,10 @@ output.elasticsearch: # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. 
+#keystore.path: "${path.config}/beats.keystore" + #============================== Dashboards ===================================== # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the @@ -850,6 +947,17 @@ output.elasticsearch: # how to install the dashboards by first querying Elasticsearch. #setup.dashboards.always_kibana: false +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + + #============================== Template ===================================== # A template is used to set the mapping in Elasticsearch diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/_meta/fields.yml b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/_meta/fields.yml index b90e9949..248db552 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/_meta/fields.yml @@ -11,6 +11,12 @@ type: text description: > Service url used by monitor. + multi_fields: + - name: raw + type: keyword + description: > + The service url used by monitor. This is a non-analyzed field that is useful + for aggregations. - name: response type: group diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go index d0fcaa38..69dba158 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go @@ -1,12 +1,12 @@ package http import ( - "bytes" "errors" "fmt" - "io" "io/ioutil" "net/http" + + "github.com/elastic/beats/libbeat/common/match" ) type RespCheck func(*http.Response) error @@ -29,7 +29,7 @@ func makeValidateResponse(config *responseParameters) RespCheck { } if len(config.RecvBody) > 0 { - checks = append(checks, checkBody([]byte(config.RecvBody))) + checks = append(checks, checkBody(config.RecvBody)) } return checkAll(checks...) 
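With the change above, the configured body checks reach `checkBody` as a list of matchers rather than a single string, so a response is accepted as soon as any pattern matches. A hedged sketch of the resulting monitor configuration, assuming the usual `check.response` layout of the HTTP monitor:

```yaml
# Sketch: HTTP monitor that accepts a response when either pattern matches the body.
heartbeat.monitors:
  - type: http
    urls: ["http://localhost:9200"]
    schedule: '@every 10s'
    check.response.status: 200
    check.response.body:
      - '"status"\s*:\s*"(green|yellow)"'
      - 'ok'
```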
@@ -84,18 +84,17 @@ func checkHeaders(headers map[string]string) RespCheck { } } -func checkBody(body []byte) RespCheck { +func checkBody(body []match.Matcher) RespCheck { return func(r *http.Response) error { - // read up to len(body)+1 bytes for comparing content to be equal - in := io.LimitReader(r.Body, int64(len(body))+1) - content, err := ioutil.ReadAll(in) + content, err := ioutil.ReadAll(r.Body) if err != nil { return err } - - if !bytes.Equal(body, content) { - return errBodyMismatch + for _, m := range body { + if m.Match(content) { + return nil + } } - return nil + return errBodyMismatch } } diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check_test.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check_test.go new file mode 100644 index 00000000..64a288f3 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check_test.go @@ -0,0 +1,110 @@ +package http + +import ( + "fmt" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/elastic/beats/libbeat/common/match" +) + +func TestCheckBody(t *testing.T) { + + var matchTests = []struct { + description string + body string + patterns []string + result bool + }{ + { + "Single regex that matches", + "ok", + []string{"ok"}, + true, + }, + { + "Regex matching json example", + `{"status": "ok"}`, + []string{`{"status": "ok"}`}, + true, + }, + { + "Regex matching first line of multiline body string", + `first line + second line`, + []string{"first"}, + true, + }, + { + "Regex matching lastline of multiline body string", + `first line + second line`, + []string{"second"}, + true, + }, + { + "Regex matching multiple lines of multiline body string", + `first line + second line + third line`, + []string{"(?s)first.*second.*third"}, + true, + }, + { + "Regex not matching multiple lines of multiline body string", + `first line + second line + third line`, + []string{"(?s)first.*fourth.*third"}, + false, + }, + { + "Single regex that doesn't match", + "ok", + []string{"notok"}, + false, + }, + { + "Multiple regex match where at least one must match", + "ok", + []string{"ok", "yay"}, + true, + }, + { + "Multiple regex match where none of the patterns match", + "ok", + []string{"notok", "yay"}, + false, + }, + } + + for _, test := range matchTests { + t.Run(test.description, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, test.body) + })) + defer ts.Close() + + res, err := http.Get(ts.URL) + if err != nil { + log.Fatal(err) + } + + patterns := []match.Matcher{} + for _, pattern := range test.patterns { + patterns = append(patterns, match.MustCompile(pattern)) + } + check := checkBody(patterns)(res) + + if result := (check == nil); result != test.result { + if test.result { + t.Fatalf("Expected at least one of patterns: %s to match body: %s", test.patterns, test.body) + } else { + t.Fatalf("Did not expect patterns: %s to match body: %s", test.patterns, test.body) + } + } + }) + } +} diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go index fb525392..38e5e4f3 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go @@ -5,6 +5,7 @@ import ( "strings" "time" + "github.com/elastic/beats/libbeat/common/match" "github.com/elastic/beats/libbeat/outputs" 
"github.com/elastic/beats/heartbeat/monitors" @@ -52,7 +53,7 @@ type responseParameters struct { // expected HTTP response configuration Status uint16 `config:"status" verify:"min=0, max=699"` RecvHeaders map[string]string `config:"headers"` - RecvBody string `config:"body"` + RecvBody []match.Matcher `config:"body"` } type compressionConfig struct { @@ -74,7 +75,7 @@ var defaultConfig = Config{ Response: responseParameters{ Status: 0, RecvHeaders: nil, - RecvBody: "", + RecvBody: []match.Matcher{}, }, }, } diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/util.go b/vendor/github.com/elastic/beats/heartbeat/monitors/util.go index 88b58984..efd6c755 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/util.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/util.go @@ -122,6 +122,8 @@ func annotated( } if fields != nil { + fields = fields.Clone() + status := look.Status(err) fields.DeepUpdate(common.MapStr{ "monitor": common.MapStr{ @@ -130,7 +132,7 @@ func annotated( }, }) if user := settings.Fields; user != nil { - fields.DeepUpdate(user) + fields.DeepUpdate(user.Clone()) } event.Timestamp = start @@ -370,10 +372,13 @@ func resolveErr(host string, err error) (common.MapStr, []TaskRunner, error) { func WithFields(fields common.MapStr, r TaskRunner) TaskRunner { return MakeCont(func() (common.MapStr, []TaskRunner, error) { event, cont, err := r.Run() - if event == nil { + if event != nil { + event = event.Clone() + event.DeepUpdate(fields) + } else if err != nil { event = common.MapStr{} + event.DeepUpdate(fields) } - event.DeepUpdate(fields) for i := range cont { cont[i] = WithFields(fields, cont[i]) diff --git a/vendor/github.com/elastic/beats/libbeat/.gitignore b/vendor/github.com/elastic/beats/libbeat/.gitignore index 65796e8b..49eb469d 100644 --- a/vendor/github.com/elastic/beats/libbeat/.gitignore +++ b/vendor/github.com/elastic/beats/libbeat/.gitignore @@ -28,3 +28,4 @@ _testmain.go /libbeat.yml /libbeat.reference.yml /docs/fields.asciidoc +_meta/kibana diff --git a/vendor/github.com/elastic/beats/libbeat/Dockerfile b/vendor/github.com/elastic/beats/libbeat/Dockerfile index 9b45ebbf..994fe630 100644 --- a/vendor/github.com/elastic/beats/libbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/libbeat/Dockerfile @@ -1,5 +1,5 @@ # Beats dockerfile used for testing -FROM golang:1.9.2 +FROM golang:1.9.4 MAINTAINER Nicolas Ruflin RUN set -x && \ diff --git a/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml b/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml index 24314cbc..bc13837d 100644 --- a/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml @@ -34,7 +34,8 @@ # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. - # A value of 0 (the default) ensures events are immediately available + # The default value is set to 2048. + # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 @@ -42,6 +43,66 @@ # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. 
+ # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -76,6 +137,14 @@ # equals: # http.code: 200 # +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. @@ -100,6 +169,7 @@ # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 +# match_short_id: false # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: @@ -113,6 +183,7 @@ # #processors: #- add_docker_metadata: ~ +#- add_host_metadata: ~ #============================= Elastic Cloud ================================== @@ -185,7 +256,18 @@ output.elasticsearch: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. 
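The backoff behaviour described above maps onto the two commented settings that follow; a minimal sketch with illustrative values:

```yaml
# Sketch: retry quickly after a transient network error, but cap the wait at 30s.
output.elasticsearch:
  hosts: ["localhost:9200"]
  backoff.init: 2s
  backoff.max: 30s
```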
+ #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -249,7 +331,7 @@ output.elasticsearch: # Optional load balance the events between the Logstash hosts. Default is false. #loadbalance: false - # Number of batches to be sent asynchronously to logstash while processing + # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 @@ -258,6 +340,17 @@ output.elasticsearch: # if no error is encountered. #slow_start: false + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + # Optional index name. The default index name is set to beat-index-prefix # in all lowercase. #index: 'beat-index-prefix' @@ -602,6 +695,10 @@ output.elasticsearch: # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + #============================== Dashboards ===================================== # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the @@ -636,6 +733,17 @@ output.elasticsearch: # how to install the dashboards by first querying Elasticsearch. #setup.dashboards.always_kibana: false +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. 
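Together with the `retry.maximum` line that follows, the dashboard retry settings documented above could be enabled like this (values are illustrative):

```yaml
# Sketch: keep retrying the Kibana connection every 5s, give up after 30 attempts.
setup.dashboards.enabled: true
setup.dashboards.retry.enabled: true
setup.dashboards.retry.interval: 5s
setup.dashboards.retry.maximum: 30
```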
+#setup.dashboards.retry.maximum: 0 + + #============================== Template ===================================== # A template is used to set the mapping in Elasticsearch diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/appender.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/appender.go new file mode 100644 index 00000000..9c1a3fd8 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/appender.go @@ -0,0 +1,91 @@ +package autodiscover + +import ( + "fmt" + "strings" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/logp" +) + +// Appender provides an interface by which extra configuration can be added into configs +type Appender interface { + // Append takes a processed event and add extra configuration + Append(event bus.Event) +} + +// Appenders is a list of Appender objects +type Appenders []Appender + +// AppenderBuilder is a func used to generate a Appender object +type AppenderBuilder func(*common.Config) (Appender, error) + +// AddBuilder registers a new AppenderBuilder +func (r *registry) AddAppender(name string, appender AppenderBuilder) error { + r.lock.Lock() + defer r.lock.Unlock() + + if name == "" { + return fmt.Errorf("appender name is required") + } + + _, exists := r.appenders[name] + if exists { + return fmt.Errorf("appender '%s' is already registered", name) + } + + if appender == nil { + return fmt.Errorf("appender '%s' cannot be registered with a nil factory", name) + } + + r.appenders[name] = appender + logp.Debug(debugK, "Appender registered: %s", name) + return nil +} + +// GetAppender returns the appender with the giving name, nil if it doesn't exist +func (r *registry) GetAppender(name string) AppenderBuilder { + r.lock.RLock() + defer r.lock.RUnlock() + + name = strings.ToLower(name) + return r.appenders[name] +} + +// BuildAppender reads provider configuration and instantiate one +func (r *registry) BuildAppender(c *common.Config) (Appender, error) { + var config AppenderConfig + err := c.Unpack(&config) + if err != nil { + return nil, err + } + + appender := r.GetAppender(config.Type) + if appender == nil { + return nil, fmt.Errorf("unknown autodiscover appender %s", config.Type) + } + + return appender(c) +} + +// Append uses all initialized appenders to modify generated bus.Events. +func (a Appenders) Append(event bus.Event) { + for _, appender := range a { + appender.Append(event) + } +} + +// NewAppenders instances and returns the given list of appenders. 
+func NewAppenders(aConfigs []*common.Config) (Appenders, error) { + var appenders Appenders + for _, acfg := range aConfigs { + appender, err := Registry.BuildAppender(acfg) + if err != nil { + return nil, err + } + appenders = append(appenders, appender) + } + + return appenders, nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/appender_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/appender_test.go new file mode 100644 index 00000000..2531c6b3 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/appender_test.go @@ -0,0 +1,77 @@ +package autodiscover + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" +) + +type fakeAppender struct{} + +func (f *fakeAppender) Append(event bus.Event) { + event["foo"] = "bar" +} + +func newFakeAppender(_ *common.Config) (Appender, error) { + return &fakeAppender{}, nil +} + +func TestAppenderRegistry(t *testing.T) { + // Add a new builder + reg := NewRegistry() + reg.AddAppender("fake", newFakeAppender) + + // Check if that appender is available in registry + b := reg.GetAppender("fake") + assert.NotNil(t, b) + + // Generate a config with type fake + config := AppenderConfig{ + Type: "fake", + } + + cfg, err := common.NewConfigFrom(&config) + + // Make sure that config building doesn't fail + assert.Nil(t, err) + appender, err := reg.BuildAppender(cfg) + assert.Nil(t, err) + assert.NotNil(t, appender) + + // Attempt to build using an array of configs + Registry.AddAppender("fake", newFakeAppender) + cfgs := []*common.Config{cfg} + appenders, err := NewAppenders(cfgs) + assert.Nil(t, err) + assert.Equal(t, len(appenders), 1) + + // Attempt to build using an incorrect config + incorrectConfig := AppenderConfig{ + Type: "wrong", + } + icfg, err := common.NewConfigFrom(&incorrectConfig) + assert.Nil(t, err) + cfgs = append(cfgs, icfg) + appenders, err = NewAppenders(cfgs) + assert.NotNil(t, err) + assert.Nil(t, appenders) + + // Try to append onto an event using fakeAppender and the result should have one item + event := bus.Event{} + appender.Append(event) + assert.Equal(t, len(event), 1) + assert.Equal(t, event["foo"], "bar") + + appenders = Appenders{} + appenders = append(appenders, appender) + + // Try using appenders object for the same as above and expect + // the same result + event = bus.Event{} + appenders.Append(event) + assert.Equal(t, len(event), 1) + assert.Equal(t, event["foo"], "bar") +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/config/config.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/config/config.go new file mode 100644 index 00000000..1ce15685 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/config/config.go @@ -0,0 +1,106 @@ +package config + +import ( + "fmt" + + "github.com/elastic/beats/libbeat/autodiscover" + "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/processors" +) + +func init() { + autodiscover.Registry.AddAppender("config", NewConfigAppender) +} + +type config struct { + ConditionConfig *processors.ConditionConfig `config:"condition"` + Config *common.Config `config:"config"` +} + +type configs []config + +type configMap struct { + condition 
*processors.Condition + config common.MapStr +} + +type configAppender struct { + configMaps []configMap +} + +// NewConfigAppender creates a configAppender that can append templatized configs into built configs +func NewConfigAppender(cfg *common.Config) (autodiscover.Appender, error) { + cfgwarn.Beta("The config appender is beta") + + confs := configs{} + err := cfg.Unpack(&confs) + if err != nil { + return nil, fmt.Errorf("unable to unpack config appender due to error: %v", err) + } + + var configMaps []configMap + for _, conf := range confs { + // Unpack the condition + cond, err := processors.NewCondition(conf.ConditionConfig) + + if err != nil { + logp.Warn("config", "unable to create condition due to error: %v", err) + continue + } + cm := configMap{condition: cond} + + // Unpack the config + cf := common.MapStr{} + err = conf.Config.Unpack(&cf) + if err != nil { + logp.Warn("config", "unable to unpack config due to error: %v", err) + continue + } + cm.config = cf + configMaps = append(configMaps, cm) + } + return &configAppender{configMaps: configMaps}, nil +} + +// Append adds configuration into configs built by builds/templates. It applies conditions to filter out +// configs to apply, applies them and tries to apply templates if any are present. +func (c *configAppender) Append(event bus.Event) { + cfgsRaw, ok := event["config"] + // There are no configs + if !ok { + return + } + + cfgs, ok := cfgsRaw.([]*common.Config) + // Config key doesnt have an array of config objects + if !ok { + return + } + for _, configMap := range c.configMaps { + if configMap.condition == nil || configMap.condition.Check(common.MapStr(event)) == true { + // Merge the template with all the configs + for _, cfg := range cfgs { + cf := common.MapStr{} + err := cfg.Unpack(&cf) + if err != nil { + logp.Debug("config", "unable to unpack config due to error: %v", err) + continue + } + err = cfg.Merge(&configMap.config) + if err != nil { + logp.Debug("config", "unable to merge configs due to error: %v", err) + } + } + + // Apply the template + template.ApplyConfigTemplate(event, cfgs) + } + } + + // Replace old config with newly appended configs + event["config"] = cfgs +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/config/config_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/config/config_test.go new file mode 100644 index 00000000..539299c6 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/config/config_test.go @@ -0,0 +1,106 @@ +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" +) + +func TestGenerateAppender(t *testing.T) { + tests := []struct { + eventConfig common.MapStr + event bus.Event + result common.MapStr + config string + }{ + // Appender without a condition should apply the config regardless + { + event: bus.Event{}, + result: common.MapStr{ + "test": "bar", + "test1": "foo", + "test2": "foo", + }, + eventConfig: common.MapStr{ + "test": "bar", + }, + config: ` +- config: + "test1": foo +- config: + "test2": foo +`, + }, + // Appender with a condition check that fails. 
Only appender with no condition should pass + { + event: bus.Event{ + "foo": "bar", + }, + result: common.MapStr{ + "test": "bar", + "test1": "foo", + }, + eventConfig: common.MapStr{ + "test": "bar", + }, + config: ` +- config: + "test1": foo +- config: + "test2": foo + condition.equals: + "foo": "bar1" +`, + }, + // Appender with a condition check that passes. It should get appended + { + event: bus.Event{ + "foo": "bar", + }, + result: common.MapStr{ + "test": "bar", + "test1": "foo", + "test2": "foo", + }, + eventConfig: common.MapStr{ + "test": "bar", + }, + config: ` +- config: + "test1": foo +- config: + "test2": foo + condition.equals: + "foo": "bar" +`, + }, + } + for _, test := range tests { + config, err := common.NewConfigWithYAML([]byte(test.config), "") + if err != nil { + t.Fatal(err) + } + + appender, err := NewConfigAppender(config) + assert.Nil(t, err) + assert.NotNil(t, appender) + + eveConfig, err := common.NewConfigFrom(&test.eventConfig) + assert.Nil(t, err) + + test.event["config"] = []*common.Config{eveConfig} + appender.Append(test.event) + + cfgs, _ := test.event["config"].([]*common.Config) + assert.Equal(t, len(cfgs), 1) + + out := common.MapStr{} + cfgs[0].Unpack(&out) + + assert.Equal(t, out, test.result) + + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/registry.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/registry.go new file mode 100644 index 00000000..89c82c40 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/appenders/registry.go @@ -0,0 +1,31 @@ +package appenders + +import ( + "errors" + + "github.com/elastic/beats/libbeat/autodiscover" + p "github.com/elastic/beats/libbeat/plugin" +) + +type appenderPlugin struct { + name string + appender autodiscover.AppenderBuilder +} + +var pluginKey = "libbeat.autodiscover.appender" + +// Plugin accepts a AppenderBuilder to be registered as a plugin +func Plugin(name string, appender autodiscover.AppenderBuilder) map[string][]interface{} { + return p.MakePlugin(pluginKey, appenderPlugin{name, appender}) +} + +func init() { + p.MustRegisterLoader(pluginKey, func(ifc interface{}) error { + app, ok := ifc.(appenderPlugin) + if !ok { + return errors.New("plugin does not match appender plugin type") + } + + return autodiscover.Registry.AddAppender(app.name, app.appender) + }) +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go index 37945734..945e5241 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go @@ -16,7 +16,6 @@ const debugK = "autodiscover" // Adapter must be implemented by the beat in order to provide Autodiscover type Adapter interface { - // TODO Hints // CreateConfig generates a valid list of configs from the given event, the received event will have all keys defined by `StartFilter` CreateConfig(bus.Event) ([]*common.Config, error) @@ -51,7 +50,7 @@ func NewAutodiscover(name string, adapter Adapter, config *Config) (*Autodiscove // Init providers var providers []Provider for _, providerCfg := range config.Providers { - provider, err := ProviderRegistry.BuildProvider(bus, providerCfg) + provider, err := Registry.BuildProvider(bus, providerCfg) if err != nil { return nil, err } @@ -175,8 +174,12 @@ func (a *Autodiscover) handleStop(event bus.Event) { } if runner := a.runners.Get(hash); runner != nil { + // Stop can block, 
we run it asyncrhonously to avoid blocking + // the whole events loop. The runner hash is removed in any case + // so an equivalent configuration can be added again while this + // one stops, and a duplicated event don't try to stop it twice. logp.Info("Autodiscover stopping runner: %s", runner) - runner.Stop() + go runner.Stop() a.runners.Remove(hash) } else { logp.Debug(debugK, "Runner not found for stopping: %s", hash) diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover_test.go index a8a5fa11..449fedfe 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover_test.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover_test.go @@ -102,8 +102,8 @@ func TestNilAutodiscover(t *testing.T) { func TestAutodiscover(t *testing.T) { // Register mock autodiscover provider busChan := make(chan bus.Bus, 1) - ProviderRegistry = NewRegistry() - ProviderRegistry.AddProvider("mock", func(b bus.Bus, c *common.Config) (Provider, error) { + Registry = NewRegistry() + Registry.AddProvider("mock", func(b bus.Bus, c *common.Config) (Provider, error) { // intercept bus to mock events busChan <- b @@ -205,8 +205,8 @@ func TestAutodiscoverHash(t *testing.T) { // Register mock autodiscover provider busChan := make(chan bus.Bus, 1) - ProviderRegistry = NewRegistry() - ProviderRegistry.AddProvider("mock", func(b bus.Bus, c *common.Config) (Provider, error) { + Registry = NewRegistry() + Registry.AddProvider("mock", func(b bus.Bus, c *common.Config) (Provider, error) { // intercept bus to mock events busChan <- b diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder.go new file mode 100644 index 00000000..f485746e --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder.go @@ -0,0 +1,112 @@ +package autodiscover + +import ( + "errors" + "fmt" + "strings" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/logp" +) + +// Builder provides an interface by which configs can be built from provider metadata +type Builder interface { + // CreateConfig creates a config from hints passed from providers + CreateConfig(event bus.Event) []*common.Config +} + +// Builders is a list of Builder objects +type Builders []Builder + +// BuilderConstructor is a func used to generate a Builder object +type BuilderConstructor func(*common.Config) (Builder, error) + +// AddBuilder registers a new BuilderConstructor +func (r *registry) AddBuilder(name string, builder BuilderConstructor) error { + r.lock.Lock() + defer r.lock.Unlock() + + if name == "" { + return fmt.Errorf("builder name is required") + } + + _, exists := r.builders[name] + if exists { + return fmt.Errorf("builder '%s' is already registered", name) + } + + if builder == nil { + return fmt.Errorf("builder '%s' cannot be registered with a nil factory", name) + } + + r.builders[name] = builder + logp.Debug(debugK, "Builder registered: %s", name) + return nil +} + +// GetBuilder returns the provider with the giving name, nil if it doesn't exist +func (r *registry) GetBuilder(name string) BuilderConstructor { + r.lock.RLock() + defer r.lock.RUnlock() + + name = strings.ToLower(name) + return r.builders[name] +} + +// BuildBuilder reads provider configuration and instatiate one +func (r *registry) BuildBuilder(c *common.Config) (Builder, error) { + var 
config BuilderConfig + err := c.Unpack(&config) + if err != nil { + return nil, err + } + + builder := r.GetBuilder(config.Type) + if builder == nil { + return nil, fmt.Errorf("unknown autodiscover builder %s", config.Type) + } + + return builder(c) +} + +// GetConfig creates configs for all builders initalized. +func (b Builders) GetConfig(event bus.Event) []*common.Config { + var configs []*common.Config + + for _, builder := range b { + if config := builder.CreateConfig(event); config != nil { + configs = append(configs, config...) + } + } + + return configs +} + +// NewBuilders instances the given list of builders. If hintsEnabled is true it will +// just enable the hints builder +func NewBuilders(bConfigs []*common.Config, hintsEnabled bool) (Builders, error) { + var builders Builders + if hintsEnabled { + if len(bConfigs) > 0 { + return nil, errors.New("hints.enabled is incompatible with manually defining builders") + } + + hints, err := common.NewConfigFrom(map[string]string{"type": "hints"}) + if err != nil { + return nil, err + } + + bConfigs = append(bConfigs, hints) + } + + for _, bcfg := range bConfigs { + builder, err := Registry.BuildBuilder(bcfg) + if err != nil { + return nil, err + } + builders = append(builders, builder) + } + + return builders, nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper.go new file mode 100644 index 00000000..7a9be7e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper.go @@ -0,0 +1,119 @@ +package builder + +import ( + "fmt" + "strconv" + "strings" + + "github.com/elastic/beats/libbeat/common" +) + +// GetContainerID returns the id of a container +func GetContainerID(container common.MapStr) string { + id, _ := container["id"].(string) + return id +} + +// GetContainerName returns the name of a container +func GetContainerName(container common.MapStr) string { + name, _ := container["name"].(string) + return name +} + +// GetHintString takes a hint and returns its value as a string +func GetHintString(hints common.MapStr, key, config string) string { + if iface, err := hints.GetValue(fmt.Sprintf("%s.%s", key, config)); err == nil { + if str, ok := iface.(string); ok { + return str + } + } + + return "" +} + +// GetHintMapStr takes a hint and returns a MapStr +func GetHintMapStr(hints common.MapStr, key, config string) common.MapStr { + if iface, err := hints.GetValue(fmt.Sprintf("%s.%s", key, config)); err == nil { + if mapstr, ok := iface.(common.MapStr); ok { + return mapstr + } + } + + return nil +} + +// GetHintAsList takes a hint and returns the value as lists. +func GetHintAsList(hints common.MapStr, key, config string) []string { + if str := GetHintString(hints, key, config); str != "" { + return getStringAsList(str) + } + + return nil +} + +func getStringAsList(input string) []string { + if input == "" { + return []string{} + } + list := strings.Split(input, ",") + + for i := 0; i < len(list); i++ { + list[i] = strings.TrimSpace(list[i]) + } + + return list +} + +// IsNoOp is a big red button to prevent spinning up Runners in case of issues. +func IsNoOp(hints common.MapStr, key string) bool { + if value, err := hints.GetValue(fmt.Sprintf("%s.disable", key)); err == nil { + noop, _ := strconv.ParseBool(value.(string)) + return noop + } + + return false +} + +// GenerateHints parses annotations based on a prefix and sets up hints that can be picked up by individual Beats. 
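Before the implementation that follows, a sketch of the input it handles: with the default `co.elastic` prefix, annotations (or container labels) such as these become nested `hints.logs.*` and `hints.metrics.*` values, and the `co.elastic.<hint>.<container>/<key>` form overrides the pod-level value for that one container (the container name `sidecar` is hypothetical):

```yaml
# Sketch: Kubernetes pod annotations consumed by the hints builder.
metadata:
  annotations:
    co.elastic.logs/multiline.pattern: '^\d{4}-'
    co.elastic.metrics/module: prometheus
    co.elastic.metrics/period: 10s
    # Container-level override; applies only to the container named "sidecar".
    co.elastic.metrics.sidecar/period: 15s
```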
+func GenerateHints(annotations common.MapStr, container, prefix string) common.MapStr { + hints := common.MapStr{} + if rawEntries, err := annotations.GetValue(prefix); err == nil { + if entries, ok := rawEntries.(common.MapStr); ok { + for key, rawValue := range entries { + // If there are top level hints like co.elastic.logs/ then just add the values after the / + // Only consider namespaced annotations + parts := strings.Split(key, "/") + if len(parts) == 2 { + hintKey := fmt.Sprintf("%s.%s", parts[0], parts[1]) + // Insert only if there is no entry already. container level annotations take + // higher priority. + if _, err := hints.GetValue(hintKey); err != nil { + hints.Put(hintKey, rawValue) + } + } else if container != "" { + // Only consider annotations that are of type common.MapStr as we are looking for + // container level nesting + builderHints, ok := rawValue.(common.MapStr) + if !ok { + continue + } + + // Check for / prefix + for hintKey, rawVal := range builderHints { + if strings.HasPrefix(hintKey, container) { + // Split the key to get part[1] to be the hint + parts := strings.Split(hintKey, "/") + if len(parts) == 2 { + // key will be the hint type + hintKey := fmt.Sprintf("%s.%s", key, parts[1]) + hints.Put(hintKey, rawVal) + } + } + } + } + } + } + } + + return hints +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper_test.go new file mode 100644 index 00000000..740f2ecb --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper_test.go @@ -0,0 +1,57 @@ +package builder + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" +) + +func TestGenerateHints(t *testing.T) { + tests := []struct { + annotations map[string]string + result common.MapStr + }{ + // Empty annotations should return empty hints + { + annotations: map[string]string{}, + result: common.MapStr{}, + }, + + // Scenarios being tested: + // logs/multiline.pattern must be a nested common.MapStr under hints.logs + // metrics/module must be found in hints.metrics + // not.to.include must not be part of hints + // period is annotated at both container and pod level. 
Container level value must be in hints + { + annotations: map[string]string{ + "co.elastic.logs/multiline.pattern": "^test", + "co.elastic.metrics/module": "prometheus", + "co.elastic.metrics/period": "10s", + "co.elastic.metrics.foobar/period": "15s", + "co.elastic.metrics.foobar1/period": "15s", + "not.to.include": "true", + }, + result: common.MapStr{ + "logs": common.MapStr{ + "multiline": common.MapStr{ + "pattern": "^test", + }, + }, + "metrics": common.MapStr{ + "module": "prometheus", + "period": "15s", + }, + }, + }, + } + + for _, test := range tests { + annMap := common.MapStr{} + for k, v := range test.annotations { + annMap.Put(k, v) + } + assert.Equal(t, GenerateHints(annMap, "foobar", "co.elastic"), test.result) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/plugin.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/plugin.go new file mode 100644 index 00000000..165b7a62 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/plugin.go @@ -0,0 +1,31 @@ +package builder + +import ( + "errors" + + "github.com/elastic/beats/libbeat/autodiscover" + p "github.com/elastic/beats/libbeat/plugin" +) + +type builderPlugin struct { + name string + builder autodiscover.BuilderConstructor +} + +var pluginKey = "libbeat.autodiscover.builder" + +// Plugin accepts a BuilderConstructor to be registered as a plugin +func Plugin(name string, b autodiscover.BuilderConstructor) map[string][]interface{} { + return p.MakePlugin(pluginKey, builderPlugin{name, b}) +} + +func init() { + p.MustRegisterLoader(pluginKey, func(ifc interface{}) error { + b, ok := ifc.(builderPlugin) + if !ok { + return errors.New("plugin does not match builder plugin type") + } + + return autodiscover.Registry.AddBuilder(b.name, b.builder) + }) +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder_test.go new file mode 100644 index 00000000..23da96a0 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder_test.go @@ -0,0 +1,57 @@ +package autodiscover + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" +) + +type fakeBuilder struct{} + +func (f *fakeBuilder) CreateConfig(event bus.Event) []*common.Config { + return []*common.Config{common.NewConfig()} +} + +func newFakeBuilder(_ *common.Config) (Builder, error) { + return &fakeBuilder{}, nil +} + +func TestBuilderRegistry(t *testing.T) { + // Add a new builder + reg := NewRegistry() + reg.AddBuilder("fake", newFakeBuilder) + + // Check if that builder is available in registry + b := reg.GetBuilder("fake") + assert.NotNil(t, b) + + // Generate a config with type fake + config := BuilderConfig{ + Type: "fake", + } + + cfg, err := common.NewConfigFrom(&config) + + // Make sure that config building doesn't fail + assert.Nil(t, err) + + builder, err := reg.BuildBuilder(cfg) + assert.Nil(t, err) + assert.NotNil(t, builder) + + // Try to create a config with fake builder and assert length + // of configs returned is one + res := builder.CreateConfig(nil) + assert.Equal(t, len(res), 1) + + builders := Builders{} + builders = append(builders, builder) + + // Try using builders object for the same as above and expect + // the same result + res = builders.GetConfig(nil) + assert.Equal(t, len(res), 1) +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/config.go 
b/vendor/github.com/elastic/beats/libbeat/autodiscover/config.go index f3bdc1f4..89bbc8b5 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/config.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/config.go @@ -1,6 +1,9 @@ package autodiscover -import "github.com/elastic/beats/libbeat/common" +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/processors" +) // Config settings for Autodiscover type Config struct { @@ -11,3 +14,14 @@ type Config struct { type ProviderConfig struct { Type string `config:"type"` } + +// BuilderConfig settings +type BuilderConfig struct { + Type string `config:"type"` +} + +// AppenderConfig settings +type AppenderConfig struct { + Type string `config:"type"` + ConditionConfig *processors.ConditionConfig `config:"condition"` +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/provider.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/provider.go index ac1619e1..417bee45 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/provider.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/provider.go @@ -3,7 +3,6 @@ package autodiscover import ( "fmt" "strings" - "sync" "github.com/elastic/beats/libbeat/cfgfile" "github.com/elastic/beats/libbeat/common" @@ -16,27 +15,9 @@ type Provider interface { cfgfile.Runner } -// ProviderRegistry holds all known autodiscover providers, they must be added to it to enable them for use -var ProviderRegistry = NewRegistry() - // ProviderBuilder creates a new provider based on the given config and returns it type ProviderBuilder func(bus.Bus, *common.Config) (Provider, error) -// Register of autodiscover providers -type registry struct { - // Lock to control concurrent read/writes - lock sync.RWMutex - // A map of provider name to ProviderBuilder. - providers map[string]ProviderBuilder -} - -// NewRegistry creates and returns a new Registry -func NewRegistry() *registry { - return ®istry{ - providers: make(map[string]ProviderBuilder, 0), - } -} - // AddProvider registers a new ProviderBuilder func (r *registry) AddProvider(name string, provider ProviderBuilder) error { r.lock.Lock() diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go index 9de1bee7..6c8d000c 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go @@ -2,18 +2,32 @@ package docker import ( "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/docker" ) // Config for docker autodiscover provider type Config struct { - Host string `config:"host"` - TLS *docker.TLSConfig `config:"ssl"` - Templates template.MapperSettings `config:"templates"` + Host string `config:"host"` + TLS *docker.TLSConfig `config:"ssl"` + Prefix string `config:"prefix"` + HintsEnabled bool `config:"hints.enabled"` + Builders []*common.Config `config:"builders"` + Appenders []*common.Config `config:"appenders"` + Templates template.MapperSettings `config:"templates"` } func defaultConfig() *Config { return &Config{ - Host: "unix:///var/run/docker.sock", + Host: "unix:///var/run/docker.sock", + Prefix: "co.elastic", + } +} + +// Validate ensures correctness of config +func (c *Config) Validate() { + // Make sure that prefix doesn't ends with a '.' 
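The new provider options above (`prefix`, `hints.enabled`, `builders`, `appenders`) are what a hints-driven docker autodiscover section would set; a minimal sketch, noting that whether a hints builder is registered is up to the individual Beat:

```yaml
# Sketch: docker autodiscover relying on hints read from container labels.
filebeat.autodiscover:
  providers:
    - type: docker
      host: "unix:///var/run/docker.sock"   # default
      hints.enabled: true
      # prefix: co.example                  # hypothetical override of the default co.elastic
```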
+ if c.Prefix[len(c.Prefix)-1] == '.' && c.Prefix != "." { + c.Prefix = c.Prefix[:len(c.Prefix)-2] } } diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go index a79110e1..af83f3a9 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go @@ -2,21 +2,26 @@ package docker import ( "github.com/elastic/beats/libbeat/autodiscover" + "github.com/elastic/beats/libbeat/autodiscover/builder" "github.com/elastic/beats/libbeat/autodiscover/template" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/common/docker" + "github.com/elastic/beats/libbeat/common/safemapstr" "github.com/elastic/beats/libbeat/logp" ) func init() { - autodiscover.ProviderRegistry.AddProvider("docker", AutodiscoverBuilder) + autodiscover.Registry.AddProvider("docker", AutodiscoverBuilder) } // Provider implements autodiscover provider for docker containers type Provider struct { config *Config bus bus.Bus + builders autodiscover.Builders + appenders autodiscover.Appenders watcher docker.Watcher templates *template.Mapper stop chan interface{} @@ -26,18 +31,29 @@ type Provider struct { // AutodiscoverBuilder builds and returns an autodiscover provider func AutodiscoverBuilder(bus bus.Bus, c *common.Config) (autodiscover.Provider, error) { + cfgwarn.Beta("The docker autodiscover is beta") config := defaultConfig() err := c.Unpack(&config) if err != nil { return nil, err } + watcher, err := docker.NewWatcher(config.Host, config.TLS, false) + if err != nil { + return nil, err + } + mapper, err := template.NewConfigMapper(config.Templates) if err != nil { return nil, err } - watcher, err := docker.NewWatcher(config.Host, config.TLS) + builders, err := autodiscover.NewBuilders(config.Builders, config.HintsEnabled) + if err != nil { + return nil, err + } + + appenders, err := autodiscover.NewAppenders(config.Appenders) if err != nil { return nil, err } @@ -52,6 +68,8 @@ func AutodiscoverBuilder(bus bus.Bus, c *common.Config) (autodiscover.Provider, return &Provider{ config: config, bus: bus, + builders: builders, + appenders: appenders, templates: mapper, watcher: watcher, stop: make(chan interface{}), @@ -94,7 +112,7 @@ func (d *Provider) emitContainer(event bus.Event, flag string) { labelMap := common.MapStr{} for k, v := range container.Labels { - labelMap[k] = v + safemapstr.Put(labelMap, k, v) } meta := common.MapStr{ @@ -105,18 +123,21 @@ func (d *Provider) emitContainer(event bus.Event, flag string) { "labels": labelMap, }, } - - // Emit container info - d.publish(bus.Event{ - flag: true, - "host": host, - "docker": meta, - "meta": common.MapStr{ + // Without this check there would be overlapping configurations with and without ports. 
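On the container side, the hints read by this provider come from labels under the configured prefix; a sketch of such labels in docker-compose syntax (service, image, and hint values are hypothetical):

```yaml
# Sketch: container labels the hints builder would pick up.
services:
  web:
    image: nginx
    labels:
      co.elastic.logs/disable: "false"
      co.elastic.logs/multiline.pattern: '^\['
      co.elastic.metrics/module: nginx
```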
+ if len(container.Ports) == 0 { + event := bus.Event{ + flag: true, + "host": host, "docker": meta, - }, - }) + "meta": common.MapStr{ + "docker": meta, + }, + } + + d.publish(event) + } - // Emit container private ports + // Emit container container and port information for _, port := range container.Ports { event := bus.Event{ flag: true, @@ -136,10 +157,43 @@ func (d *Provider) publish(event bus.Event) { // Try to match a config if config := d.templates.GetConfig(event); config != nil { event["config"] = config + } else { + // If no template matches, try builders: + if config := d.builders.GetConfig(d.generateHints(event)); config != nil { + event["config"] = config + } } + + // Call all appenders to append any extra configuration + d.appenders.Append(event) + d.bus.Publish(event) } +func (d *Provider) generateHints(event bus.Event) bus.Event { + // Try to build a config with enabled builders. Send a provider agnostic payload. + // Builders are Beat specific. + e := bus.Event{} + var dockerMeta common.MapStr + + if rawDocker, err := common.MapStr(event).GetValue("docker.container"); err == nil { + dockerMeta = rawDocker.(common.MapStr) + e["container"] = dockerMeta + } + + if host, ok := event["host"]; ok { + e["host"] = host + } + if port, ok := event["port"]; ok { + e["port"] = port + } + if labels, err := dockerMeta.GetValue("labels"); err == nil { + hints := builder.GenerateHints(labels.(common.MapStr), "", d.config.Prefix) + e["hints"] = hints + } + return e +} + // Stop the autodiscover process func (d *Provider) Stop() { close(d.stop) diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go index e23384ed..1facd50e 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -35,7 +35,7 @@ func TestDockerStart(t *testing.T) { // Start cmd := []string{"echo", "Hi!"} - labels := map[string]string{"label": "value"} + labels := map[string]string{"label": "foo", "label.child": "bar"} ID, err := d.ContainerStart("busybox", cmd, labels) if err != nil { t.Fatal(err) @@ -71,7 +71,12 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Nil(t, getValue(e, "start")) } assert.Equal(t, getValue(e, "docker.container.image"), "busybox") - assert.Equal(t, getValue(e, "docker.container.labels"), common.MapStr{"label": "value"}) + assert.Equal(t, getValue(e, "docker.container.labels"), common.MapStr{ + "label": common.MapStr{ + "value": "foo", + "child": "bar", + }, + }) assert.NotNil(t, getValue(e, "docker.container.id")) assert.NotNil(t, getValue(e, "docker.container.name")) assert.NotNil(t, getValue(e, "host")) diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_test.go new file mode 100644 index 00000000..91c8cc04 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_test.go @@ -0,0 +1,90 @@ +package docker + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" +) + +func TestGenerateHints(t *testing.T) { + tests := []struct { + event bus.Event + result bus.Event + }{ + // Empty events should return 
empty hints + { + event: bus.Event{}, + result: bus.Event{}, + }, + // Docker meta must be present in the hints + { + event: bus.Event{ + "docker": common.MapStr{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + }, + }, + }, + result: bus.Event{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + }, + }, + }, + // Docker labels are testing with the following scenarios + // do.not.include must not be part of the hints + // logs/disable should be present in hints.logs.disable=true + { + event: bus.Event{ + "docker": common.MapStr{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + "labels": getNestedAnnotations(common.MapStr{ + "do.not.include": "true", + "co.elastic.logs/disable": "true", + }), + }, + }, + }, + result: bus.Event{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + "labels": getNestedAnnotations(common.MapStr{ + "do.not.include": "true", + "co.elastic.logs/disable": "true", + }), + }, + "hints": common.MapStr{ + "logs": common.MapStr{ + "disable": "true", + }, + }, + }, + }, + } + + cfg := defaultConfig() + + p := Provider{ + config: cfg, + } + for _, test := range tests { + assert.Equal(t, p.generateHints(test.event), test.result) + } +} + +func getNestedAnnotations(in common.MapStr) common.MapStr { + out := common.MapStr{} + + for k, v := range in { + out.Put(k, v) + } + return out +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go index 7d004b82..c6c4aa17 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go @@ -4,6 +4,7 @@ import ( "time" "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/elastic/beats/libbeat/common" ) // Config for kubernetes autodiscover provider @@ -19,7 +20,11 @@ type Config struct { ExcludeLabels []string `config:"exclude_labels"` IncludeAnnotations []string `config:"include_annotations"` - Templates template.MapperSettings `config:"templates"` + Prefix string `config:"prefix"` + HintsEnabled bool `config:"hints.enabled"` + Builders []*common.Config `config:"builders"` + Appenders []*common.Config `config:"appenders"` + Templates template.MapperSettings `config:"templates"` } func defaultConfig() *Config { @@ -27,5 +32,14 @@ func defaultConfig() *Config { InCluster: true, SyncPeriod: 1 * time.Second, CleanupTimeout: 60 * time.Second, + Prefix: "co.elastic", + } +} + +// Validate ensures correctness of config +func (c *Config) Validate() { + // Make sure that prefix doesn't ends with a '.' + if c.Prefix[len(c.Prefix)-1] == '.' && c.Prefix != "." 
{ + c.Prefix = c.Prefix[:len(c.Prefix)-2] } } diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go index de2c5370..ac337e71 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go @@ -1,155 +1,172 @@ package kubernetes import ( + "time" + "github.com/elastic/beats/libbeat/autodiscover" + "github.com/elastic/beats/libbeat/autodiscover/builder" "github.com/elastic/beats/libbeat/autodiscover/template" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/common/kubernetes" + "github.com/elastic/beats/libbeat/common/safemapstr" "github.com/elastic/beats/libbeat/logp" ) func init() { - autodiscover.ProviderRegistry.AddProvider("kubernetes", AutodiscoverBuilder) + autodiscover.Registry.AddProvider("kubernetes", AutodiscoverBuilder) } // Provider implements autodiscover provider for docker containers type Provider struct { - config *Config - bus bus.Bus - watcher kubernetes.Watcher - metagen kubernetes.MetaGenerator - templates *template.Mapper - stop chan interface{} - startListener bus.Listener - stopListener bus.Listener - updateListener bus.Listener + config *Config + bus bus.Bus + watcher kubernetes.Watcher + metagen kubernetes.MetaGenerator + templates *template.Mapper + builders autodiscover.Builders + appenders autodiscover.Appenders } // AutodiscoverBuilder builds and returns an autodiscover provider func AutodiscoverBuilder(bus bus.Bus, c *common.Config) (autodiscover.Provider, error) { + cfgwarn.Beta("The kubernetes autodiscover is beta") config := defaultConfig() err := c.Unpack(&config) if err != nil { return nil, err } - mapper, err := template.NewConfigMapper(config.Templates) + client, err := kubernetes.GetKubernetesClient(config.InCluster, config.KubeConfig) if err != nil { return nil, err } - client, err := kubernetes.GetKubernetesClient(config.InCluster, config.KubeConfig) + metagen := kubernetes.NewMetaGenerator(config.IncludeAnnotations, config.IncludeLabels, config.ExcludeLabels) + + config.Host = kubernetes.DiscoverKubernetesNode(config.Host, config.InCluster, client) + + watcher, err := kubernetes.NewWatcher(client, &kubernetes.Pod{}, kubernetes.WatchOptions{ + SyncTimeout: config.SyncPeriod, + Node: config.Host, + Namespace: config.Namespace, + }) if err != nil { + logp.Err("kubernetes: Couldn't create watcher for %t", &kubernetes.Pod{}) return nil, err } - metagen := kubernetes.NewMetaGenerator(config.IncludeAnnotations, config.IncludeLabels, config.ExcludeLabels) - - config.Host = kubernetes.DiscoverKubernetesNode(config.Host, client) - watcher := kubernetes.NewWatcher(client.CoreV1(), config.SyncPeriod, config.CleanupTimeout, config.Host) + mapper, err := template.NewConfigMapper(config.Templates) + if err != nil { + return nil, err + } - start := watcher.ListenStart() - stop := watcher.ListenStop() - update := watcher.ListenUpdate() + builders, err := autodiscover.NewBuilders(config.Builders, config.HintsEnabled) + if err != nil { + return nil, err + } - if err := watcher.Start(); err != nil { + appenders, err := autodiscover.NewAppenders(config.Appenders) + if err != nil { return nil, err } - return &Provider{ - config: config, - bus: bus, - templates: mapper, - metagen: 
metagen, - watcher: watcher, - stop: make(chan interface{}), - startListener: start, - stopListener: stop, - updateListener: update, - }, nil + p := &Provider{ + config: config, + bus: bus, + templates: mapper, + builders: builders, + appenders: appenders, + metagen: metagen, + watcher: watcher, + } + + watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ + AddFunc: func(obj kubernetes.Resource) { + logp.Debug("kubernetes", "Watcher Pod add: %+v", obj) + p.emit(obj.(*kubernetes.Pod), "start") + }, + UpdateFunc: func(obj kubernetes.Resource) { + logp.Debug("kubernetes", "Watcher Pod update: %+v", obj) + p.emit(obj.(*kubernetes.Pod), "stop") + p.emit(obj.(*kubernetes.Pod), "start") + }, + DeleteFunc: func(obj kubernetes.Resource) { + logp.Debug("kubernetes", "Watcher Pod delete: %+v", obj) + time.AfterFunc(config.CleanupTimeout, func() { p.emit(obj.(*kubernetes.Pod), "stop") }) + }, + }) + + return p, nil } -// Start the autodiscover provider. Start and stop listeners work the -// conventional way. Update listener triggers a stop and then a start -// to simulate an update. +// Start for Runner interface. func (p *Provider) Start() { - go func() { - for { - select { - case <-p.stop: - p.startListener.Stop() - p.stopListener.Stop() - return - - case event := <-p.startListener.Events(): - p.emit(event, "start") - - case event := <-p.stopListener.Events(): - p.emit(event, "stop") - - case event := <-p.updateListener.Events(): - //On updates, first send a stop signal followed by a start signal to simulate a restart - p.emit(event, "stop") - p.emit(event, "start") - } - } - }() + if err := p.watcher.Start(); err != nil { + logp.Err("Error starting kubernetes autodiscover provider: %s", err) + } +} + +func (p *Provider) emit(pod *kubernetes.Pod, flag string) { + // Emit events for all containers + p.emitEvents(pod, flag, pod.Spec.Containers, pod.Status.ContainerStatuses) + + // Emit events for all initContainers + p.emitEvents(pod, flag, pod.Spec.InitContainers, pod.Status.InitContainerStatuses) } -func (p *Provider) emit(event bus.Event, flag string) { - pod, ok := event["pod"].(*kubernetes.Pod) - if !ok { - logp.Err("Couldn't get a pod from watcher event") +func (p *Provider) emitEvents(pod *kubernetes.Pod, flag string, containers []kubernetes.Container, + containerstatuses []kubernetes.PodContainerStatus) { + host := pod.Status.PodIP + + // Do not emit events without host (container is still being configured) + if host == "" { return } - host := pod.Status.PodIP + // Collect all container IDs and runtimes from status information. containerIDs := map[string]string{} - - // Emit pod container IDs - for _, c := range append(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses...) 
{ - cid := c.GetContainerID() + runtimes := map[string]string{} + for _, c := range containerstatuses { + cid, runtime := c.GetContainerIDWithRuntime() containerIDs[c.Name] = cid + runtimes[c.Name] = runtime + } + // Emit container and port information + for _, c := range containers { cmeta := common.MapStr{ - "id": cid, - "name": c.Name, - "image": c.Image, + "id": containerIDs[c.Name], + "name": c.Name, + "image": c.Image, + "runtime": runtimes[c.Name], } - - // Metadata appended to each event meta := p.metagen.ContainerMetadata(pod, c.Name) // Information that can be used in discovering a workload kubemeta := meta.Clone() kubemeta["container"] = cmeta - // Emit container info - p.publish(bus.Event{ - flag: true, - "host": host, - "kubernetes": kubemeta, - "meta": common.MapStr{ - "kubernetes": meta, - }, - }) - } - - // Emit pod ports - for _, c := range pod.Spec.Containers { - cmeta := common.MapStr{ - "id": containerIDs[c.Name], - "name": c.Name, - "image": c.Image, + // Pass annotations to all events so that it can be used in templating and by annotation builders. + annotations := common.MapStr{} + for k, v := range pod.GetMetadata().Annotations { + safemapstr.Put(annotations, k, v) } + kubemeta["annotations"] = annotations - // Metadata appended to each event - meta := p.metagen.ContainerMetadata(pod, c.Name) - - // Information that can be used in discovering a workload - kubemeta := meta.Clone() - kubemeta["container"] = cmeta + // Without this check there would be overlapping configurations with and without ports. + if len(c.Ports) == 0 { + event := bus.Event{ + flag: true, + "host": host, + "kubernetes": kubemeta, + "meta": common.MapStr{ + "kubernetes": meta, + }, + } + p.publish(event) + } for _, port := range c.Ports { event := bus.Event{ @@ -170,13 +187,62 @@ func (p *Provider) publish(event bus.Event) { // Try to match a config if config := p.templates.GetConfig(event); config != nil { event["config"] = config + } else { + // If there isn't a default template then attempt to use builders + if config := p.builders.GetConfig(p.generateHints(event)); config != nil { + event["config"] = config + } } + + // Call all appenders to append any extra configuration + p.appenders.Append(event) p.bus.Publish(event) } +func (p *Provider) generateHints(event bus.Event) bus.Event { + // Try to build a config with enabled builders. Send a provider agnostic payload. + // Builders are Beat specific. + e := bus.Event{} + var annotations common.MapStr + var kubeMeta, container common.MapStr + rawMeta, ok := event["kubernetes"] + if ok { + kubeMeta = rawMeta.(common.MapStr) + // The builder base config can configure any of the field values of kubernetes if need be. + e["kubernetes"] = kubeMeta + if rawAnn, ok := kubeMeta["annotations"]; ok { + annotations = rawAnn.(common.MapStr) + } + } + if host, ok := event["host"]; ok { + e["host"] = host + } + if port, ok := event["port"]; ok { + e["port"] = port + } + + if rawCont, ok := kubeMeta["container"]; ok { + container = rawCont.(common.MapStr) + // This would end up adding a runtime entry into the event. This would make sure + // that there is not an attempt to spin up a docker input for a rkt container and when a + // rkt input exists it would be natively supported. 
+ e["container"] = container + } + + cname := builder.GetContainerName(container) + hints := builder.GenerateHints(annotations, cname, p.config.Prefix) + if len(hints) != 0 { + e["hints"] = hints + } + + logp.Debug("kubernetes", "Generated builder event %v", event) + + return e +} + // Stop signals the stop channel to force the watch loop routine to stop. func (p *Provider) Stop() { - close(p.stop) + p.watcher.Stop() } // String returns a description of kubernetes autodiscover provider. diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go new file mode 100644 index 00000000..b2dcfaeb --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go @@ -0,0 +1,272 @@ +package kubernetes + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/common/kubernetes" +) + +func TestGenerateHints(t *testing.T) { + tests := []struct { + event bus.Event + result bus.Event + }{ + // Empty events should return empty hints + { + event: bus.Event{}, + result: bus.Event{}, + }, + // Only kubernetes payload must return only kubernetes as part of the hint + { + event: bus.Event{ + "kubernetes": common.MapStr{ + "pod": common.MapStr{ + "name": "foobar", + }, + }, + }, + result: bus.Event{ + "kubernetes": common.MapStr{ + "pod": common.MapStr{ + "name": "foobar", + }, + }, + }, + }, + // Kubernetes payload with container info must be bubbled to top level + { + event: bus.Event{ + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + "runtime": "rkt", + }, + }, + }, + result: bus.Event{ + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + "runtime": "rkt", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + "runtime": "rkt", + }, + }, + }, + // Scenarios being tested: + // logs/multiline.pattern must be a nested common.MapStr under hints.logs + // metrics/module must be found in hints.metrics + // not.to.include must not be part of hints + // period is annotated at both container and pod level. 
Container level value must be in hints + { + event: bus.Event{ + "kubernetes": common.MapStr{ + "annotations": getNestedAnnotations(common.MapStr{ + "co.elastic.logs/multiline.pattern": "^test", + "co.elastic.metrics/module": "prometheus", + "co.elastic.metrics/period": "10s", + "co.elastic.metrics.foobar/period": "15s", + "not.to.include": "true", + }), + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + "runtime": "docker", + }, + }, + }, + result: bus.Event{ + "kubernetes": common.MapStr{ + "annotations": getNestedAnnotations(common.MapStr{ + "co.elastic.logs/multiline.pattern": "^test", + "co.elastic.metrics/module": "prometheus", + "not.to.include": "true", + "co.elastic.metrics/period": "10s", + "co.elastic.metrics.foobar/period": "15s", + }), + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + "runtime": "docker", + }, + }, + "hints": common.MapStr{ + "logs": common.MapStr{ + "multiline": common.MapStr{ + "pattern": "^test", + }, + }, + "metrics": common.MapStr{ + "module": "prometheus", + "period": "15s", + }, + }, + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + "runtime": "docker", + }, + }, + }, + } + + cfg := defaultConfig() + + p := Provider{ + config: cfg, + } + for _, test := range tests { + assert.Equal(t, p.generateHints(test.event), test.result) + } +} + +func TestEmitEvent(t *testing.T) { + tests := []struct { + Message string + Flag string + Pod *kubernetes.Pod + Expected bus.Event + }{ + { + Message: "Test common pod start", + Flag: "start", + Pod: &kubernetes.Pod{ + Metadata: kubernetes.ObjectMeta{ + Name: "filebeat", + Namespace: "default", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Status: kubernetes.PodStatus{ + PodIP: "127.0.0.1", + ContainerStatuses: []kubernetes.PodContainerStatus{ + { + Name: "filebeat", + ContainerID: "docker://foobar", + }, + }, + }, + Spec: kubernetes.PodSpec{ + NodeName: "node", + Containers: []kubernetes.Container{ + { + Image: "elastic/filebeat:6.3.0", + Name: "filebeat", + }, + }, + }, + }, + Expected: bus.Event{ + "start": true, + "host": "127.0.0.1", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "id": "foobar", + "name": "filebeat", + "image": "elastic/filebeat:6.3.0", + "runtime": "docker", + }, + "pod": common.MapStr{ + "name": "filebeat", + }, + "node": common.MapStr{ + "name": "node", + }, + "namespace": "default", + "annotations": common.MapStr{}, + }, + "meta": common.MapStr{ + "kubernetes": common.MapStr{ + "namespace": "default", + "container": common.MapStr{ + "name": "filebeat", + }, "pod": common.MapStr{ + "name": "filebeat", + }, "node": common.MapStr{ + "name": "node", + }, + }, + }, + }, + }, + { + Message: "Test pod without host", + Flag: "start", + Pod: &kubernetes.Pod{ + Metadata: kubernetes.ObjectMeta{ + Name: "filebeat", + Namespace: "default", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Status: kubernetes.PodStatus{ + ContainerStatuses: []kubernetes.PodContainerStatus{ + { + Name: "filebeat", + ContainerID: "docker://foobar", + }, + }, + }, + Spec: kubernetes.PodSpec{ + NodeName: "node", + Containers: []kubernetes.Container{ + { + Image: "elastic/filebeat:6.3.0", + Name: "filebeat", + }, + }, + }, + }, + Expected: nil, + }, + } + + for _, test := range tests { + mapper, err := template.NewConfigMapper(nil) + if err != nil { + t.Fatal(err) + } + + metaGen := kubernetes.NewMetaGenerator(nil, nil, nil) + p := &Provider{ + config: defaultConfig(), + bus: bus.New("test"), + metagen: metaGen, + 
templates: mapper, + } + + listener := p.bus.Subscribe() + + p.emit(test.Pod, test.Flag) + + select { + case event := <-listener.Events(): + assert.Equal(t, test.Expected, event) + case <-time.After(2 * time.Second): + if test.Expected != nil { + t.Fatal("Timeout while waiting for event") + } + } + } +} + +func getNestedAnnotations(in common.MapStr) common.MapStr { + out := common.MapStr{} + + for k, v := range in { + out.Put(k, v) + } + return out +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/plugin.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/plugin.go new file mode 100644 index 00000000..cfdc88a7 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/plugin.go @@ -0,0 +1,31 @@ +package providers + +import ( + "errors" + + "github.com/elastic/beats/libbeat/autodiscover" + p "github.com/elastic/beats/libbeat/plugin" +) + +type providerPlugin struct { + name string + provider autodiscover.ProviderBuilder +} + +var pluginKey = "libbeat.autodiscover.provider" + +// Plugin accepts a ProviderBuilder to be registered as a plugin +func Plugin(name string, provider autodiscover.ProviderBuilder) map[string][]interface{} { + return p.MakePlugin(pluginKey, providerPlugin{name, provider}) +} + +func init() { + p.MustRegisterLoader(pluginKey, func(ifc interface{}) error { + prov, ok := ifc.(providerPlugin) + if !ok { + return errors.New("plugin does not match processor plugin type") + } + + return autodiscover.Registry.AddProvider(prov.name, prov.provider) + }) +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/registry.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/registry.go new file mode 100644 index 00000000..65bc6ca7 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/registry.go @@ -0,0 +1,27 @@ +package autodiscover + +import "sync" + +// Register of autodiscover providers +type registry struct { + // Lock to control concurrent read/writes + lock sync.RWMutex + // A map of provider name to ProviderBuilder. + providers map[string]ProviderBuilder + // A map of builder name to BuilderConstructor. + builders map[string]BuilderConstructor + // A map of appender name to AppenderBuilder. + appenders map[string]AppenderBuilder +} + +// Registry holds all known autodiscover providers, they must be added to it to enable them for use +var Registry = NewRegistry() + +// NewRegistry creates and returns a new Registry +func NewRegistry() *registry { + return ®istry{ + providers: make(map[string]ProviderBuilder, 0), + builders: make(map[string]BuilderConstructor, 0), + appenders: make(map[string]AppenderBuilder, 0), + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/template/config.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/template/config.go index 13646794..eee88659 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/template/config.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/template/config.go @@ -63,40 +63,50 @@ func (c *Mapper) GetConfig(event bus.Event) []*common.Config { continue } - // unpack input - vars, err := ucfg.NewFrom(map[string]interface{}{ - "data": event, - }) + configs := ApplyConfigTemplate(event, mapping.Configs) + if configs != nil { + result = append(result, configs...) 
+ } + } + return result +} + +// ApplyConfigTemplate takes a set of templated configs and applys information in an event map +func ApplyConfigTemplate(event bus.Event, configs []*common.Config) []*common.Config { + var result []*common.Config + // unpack input + vars, err := ucfg.NewFrom(map[string]interface{}{ + "data": event, + }) + if err != nil { + logp.Err("Error building config: %v", err) + } + opts := []ucfg.Option{ + ucfg.PathSep("."), + ucfg.Env(vars), + ucfg.ResolveEnv, + ucfg.VarExp, + } + for _, config := range configs { + c, err := ucfg.NewFrom(config, opts...) if err != nil { - logp.Err("Error building config: %v", err) + logp.Err("Error parsing config: %v", err) + continue } - opts := []ucfg.Option{ - ucfg.PathSep("."), - ucfg.Env(vars), - ucfg.ResolveEnv, - ucfg.VarExp, + // Unpack config to process any vars in the template: + var unpacked map[string]interface{} + c.Unpack(&unpacked, opts...) + if err != nil { + logp.Err("Error unpacking config: %v", err) + continue } - for _, config := range mapping.Configs { - c, err := ucfg.NewFrom(config, opts...) - if err != nil { - logp.Err("Error parsing config: %v", err) - continue - } - // Unpack config to process any vars in the template: - var unpacked map[string]interface{} - c.Unpack(&unpacked, opts...) - if err != nil { - logp.Err("Error unpacking config: %v", err) - continue - } - // Repack again: - res, err := common.NewConfigFrom(unpacked) - if err != nil { - logp.Err("Error creating config from unpack: %v", err) - continue - } - result = append(result, res) + // Repack again: + res, err := common.NewConfigFrom(unpacked) + if err != nil { + logp.Err("Error creating config from unpack: %v", err) + continue } + result = append(result, res) } return result } diff --git a/vendor/github.com/elastic/beats/libbeat/beat/beat.go b/vendor/github.com/elastic/beats/libbeat/beat/beat.go index 86088dac..f30f4748 100644 --- a/vendor/github.com/elastic/beats/libbeat/beat/beat.go +++ b/vendor/github.com/elastic/beats/libbeat/beat/beat.go @@ -38,6 +38,7 @@ type Beat struct { SetupMLCallback SetupMLCallback // setup callback for ML job configs InSetupCmd bool // this is set to true when the `setup` command is called + OverwritePipelinesCallback OverwritePipelinesCallback // ingest pipeline loader callback // XXX: remove Config from public interface. // It's currently used by filebeat modules to setup the Ingest Node // pipeline and ML jobs. @@ -54,4 +55,8 @@ type BeatConfig struct { // SetupMLCallback can be used by the Beat to register MachineLearning configurations // for the enabled modules. -type SetupMLCallback func(*Beat) error +type SetupMLCallback func(*Beat, *common.Config) error + +// OverwritePipelinesCallback can be used by the Beat to register Ingest pipeline loader +// for the enabled modules. +type OverwritePipelinesCallback func(*common.Config) error diff --git a/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go b/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go index 9d9185f6..6780df92 100644 --- a/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go +++ b/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go @@ -50,6 +50,10 @@ type ClientConfig struct { // Events configures callbacks for common client callbacks Events ClientEventer + // By default events are normalized within processor pipeline, + // if the normalization step should be skipped set this to true. + SkipNormalization bool + // ACK handler strategies. // Note: ack handlers are run in another go-routine owned by the publisher pipeline. 
// They should not block for to long, to not block the internal buffers for diff --git a/vendor/github.com/elastic/beats/libbeat/cfgfile/reload.go b/vendor/github.com/elastic/beats/libbeat/cfgfile/reload.go index fa18a6b1..116f5b25 100644 --- a/vendor/github.com/elastic/beats/libbeat/cfgfile/reload.go +++ b/vendor/github.com/elastic/beats/libbeat/cfgfile/reload.go @@ -10,7 +10,6 @@ import ( "github.com/pkg/errors" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/monitoring" "github.com/elastic/beats/libbeat/paths" @@ -73,10 +72,6 @@ func NewReloader(cfg *common.Config) *Reloader { path = paths.Resolve(paths.Config, path) } - if config.Reload.Enabled { - cfgwarn.Beta("Dynamic config reload is enabled.") - } - return &Reloader{ registry: NewRegistry(), config: config, diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go index 4621776f..e908054a 100644 --- a/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go +++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go @@ -1,6 +1,7 @@ package instance import ( + "context" cryptRand "crypto/rand" "encoding/json" "flag" @@ -15,6 +16,7 @@ import ( "time" "github.com/satori/go.uuid" + "go.uber.org/zap" "github.com/elastic/beats/libbeat/api" "github.com/elastic/beats/libbeat/beat" @@ -37,6 +39,8 @@ import ( svc "github.com/elastic/beats/libbeat/service" "github.com/elastic/beats/libbeat/template" "github.com/elastic/beats/libbeat/version" + "github.com/elastic/go-sysinfo" + "github.com/elastic/go-sysinfo/types" // Register publisher pipeline modules _ "github.com/elastic/beats/libbeat/publisher/includes" @@ -45,6 +49,7 @@ import ( _ "github.com/elastic/beats/libbeat/processors/actions" _ "github.com/elastic/beats/libbeat/processors/add_cloud_metadata" _ "github.com/elastic/beats/libbeat/processors/add_docker_metadata" + _ "github.com/elastic/beats/libbeat/processors/add_host_metadata" _ "github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata" _ "github.com/elastic/beats/libbeat/processors/add_locale" @@ -102,7 +107,7 @@ func init() { initRand() flag.BoolVar(&printVersion, "version", false, "Print the version and exit") - flag.BoolVar(&setup, "setup", false, "Load the sample Kibana dashboards") + flag.BoolVar(&setup, "setup", false, "Load sample Kibana dashboards and setup Machine Learning") } // initRand initializes the runtime random number generator seed using @@ -128,6 +133,12 @@ func initRand() { // XXX Move this as a *Beat method? 
func Run(name, idxPrefix, version string, bt beat.Creator) error { return handleError(func() error { + defer func() { + if r := recover(); r != nil { + logp.NewLogger(name).Fatalw("Failed due to panic.", + "panic", r, zap.Stack("stack")) + } + }() b, err := NewBeat(name, idxPrefix, version) if err != nil { return err @@ -210,6 +221,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { return nil, err } + logSystemInfo(b.Info) logp.Info("Setup Beat: %s; Version: %s", b.Info.Beat, b.Info.Version) err = b.registerTemplateLoading() @@ -248,6 +260,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { func (b *Beat) launch(bt beat.Creator) error { defer logp.Sync() + defer logp.Info("%s stopped.", b.Info.Beat) err := b.Init() if err != nil { @@ -285,21 +298,21 @@ func (b *Beat) launch(bt beat.Creator) error { return beat.GracefulExit } - svc.HandleSignals(beater.Stop) + ctx, cancel := context.WithCancel(context.Background()) + svc.HandleSignals(beater.Stop, cancel) - err = b.loadDashboards(false) + err = b.loadDashboards(ctx, false) if err != nil { return err } if setup && b.SetupMLCallback != nil { - err = b.SetupMLCallback(&b.Beat) + err = b.SetupMLCallback(&b.Beat, b.Config.Kibana) if err != nil { return err } } logp.Info("%s start running.", b.Info.Beat) - defer logp.Info("%s stopped.", b.Info.Beat) if b.Config.HTTP.Enabled() { api.Start(b.Config.HTTP, b.Info) @@ -328,7 +341,7 @@ func (b *Beat) TestConfig(bt beat.Creator) error { } // Setup registers ES index template and kibana dashboards -func (b *Beat) Setup(bt beat.Creator, template, dashboards, machineLearning bool) error { +func (b *Beat) Setup(bt beat.Creator, template, dashboards, machineLearning, pipelines bool) error { return handleError(func() error { err := b.Init() if err != nil { @@ -374,7 +387,8 @@ func (b *Beat) Setup(bt beat.Creator, template, dashboards, machineLearning bool } if dashboards { - err = b.loadDashboards(true) + fmt.Println("Loading dashboards (Kibana must be running and reachable)") + err = b.loadDashboards(context.Background(), true) if err != nil { return err } @@ -383,13 +397,23 @@ func (b *Beat) Setup(bt beat.Creator, template, dashboards, machineLearning bool } if machineLearning && b.SetupMLCallback != nil { - err = b.SetupMLCallback(&b.Beat) + err = b.SetupMLCallback(&b.Beat, b.Config.Kibana) if err != nil { return err } fmt.Println("Loaded machine learning job configurations") } + if pipelines && b.OverwritePipelinesCallback != nil { + esConfig := b.Config.Output.Config() + err = b.OverwritePipelinesCallback(esConfig) + if err != nil { + return err + } + + fmt.Println("Loaded Ingest pipelines") + } + return nil }()) } @@ -557,7 +581,7 @@ func openRegular(filename string) (*os.File, error) { return f, nil } -func (b *Beat) loadDashboards(force bool) error { +func (b *Beat) loadDashboards(ctx context.Context, force bool) error { if setup || force { // -setup implies dashboards.enabled=true if b.Config.Dashboards == nil { @@ -575,7 +599,7 @@ func (b *Beat) loadDashboards(force bool) error { if b.Config.Output.Name() == "elasticsearch" { esConfig = b.Config.Output.Config() } - err := dashboards.ImportDashboards(b.Info.Beat, b.Info.Hostname, paths.Resolve(paths.Home, ""), + err := dashboards.ImportDashboards(ctx, b.Info.Beat, b.Info.Hostname, paths.Resolve(paths.Home, ""), b.Config.Kibana, esConfig, b.Config.Dashboards, nil) if err != nil { return fmt.Errorf("Error importing Kibana dashboards: %v", err) @@ -668,3 +692,73 @@ func handleError(err error) error { 
fmt.Fprintf(os.Stderr, "Exiting: %v\n", err) return err } + +// logSystemInfo logs information about this system for situational awareness +// in debugging. This information includes data about the beat, build, go +// runtime, host, and process. If any of the data is not available it will be +// omitted. +func logSystemInfo(info beat.Info) { + defer logp.Recover("An unexpected error occurred while collecting " + + "information about the system.") + log := logp.NewLogger("beat").With(logp.Namespace("system_info")) + + // Beat + beat := common.MapStr{ + "type": info.Beat, + "uuid": info.UUID, + "path": common.MapStr{ + "config": paths.Resolve(paths.Config, ""), + "data": paths.Resolve(paths.Data, ""), + "home": paths.Resolve(paths.Home, ""), + "logs": paths.Resolve(paths.Logs, ""), + }, + } + log.Infow("Beat info", "beat", beat) + + // Build + build := common.MapStr{ + "commit": version.Commit(), + "time": version.BuildTime(), + "version": info.Version, + "libbeat": version.GetDefaultVersion(), + } + log.Infow("Build info", "build", build) + + // Go Runtime + log.Infow("Go runtime info", "go", sysinfo.Go()) + + // Host + if host, err := sysinfo.Host(); err == nil { + log.Infow("Host info", "host", host.Info()) + } + + // Process + if self, err := sysinfo.Self(); err == nil { + process := common.MapStr{} + + if info, err := self.Info(); err == nil { + process["name"] = info.Name + process["pid"] = info.PID + process["ppid"] = info.PPID + process["cwd"] = info.CWD + process["exe"] = info.Exe + process["start_time"] = info.StartTime + } + + if proc, ok := self.(types.Seccomp); ok { + if seccomp, err := proc.Seccomp(); err == nil { + process["seccomp"] = seccomp + } + } + + if proc, ok := self.(types.Capabilities); ok { + if caps, err := proc.Capabilities(); err == nil { + process["capabilities"] = caps + } + } + + if len(process) > 0 { + log.Infow("Process info", "process", process) + } + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/metrics.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/metrics.go index a8098461..d2d0f53d 100644 --- a/vendor/github.com/elastic/beats/libbeat/cmd/instance/metrics.go +++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/metrics.go @@ -4,7 +4,6 @@ package instance import ( "fmt" - "os" "runtime" "github.com/elastic/beats/libbeat/logp" @@ -65,8 +64,12 @@ func reportMemStats(m monitoring.Mode, V monitoring.Visitor) { } func getRSSSize() (uint64, error) { - beatPID := os.Getpid() - state, err := beatProcessStats.GetOne(beatPID) + pid, err := process.GetSelfPid() + if err != nil { + return 0, fmt.Errorf("error getting PID for self process: %v", err) + } + + state, err := beatProcessStats.GetOne(pid) if err != nil { return 0, fmt.Errorf("error retrieving process stats: %v", err) } @@ -101,22 +104,32 @@ func reportBeatCPU(_ monitoring.Mode, V monitoring.Visitor) { monitoring.ReportNamespace(V, "user", func() { monitoring.ReportInt(V, "ticks", int64(cpuTicks.User)) - monitoring.ReportInt(V, "time", userTime) + monitoring.ReportNamespace(V, "time", func() { + monitoring.ReportInt(V, "ms", userTime) + }) }) monitoring.ReportNamespace(V, "system", func() { monitoring.ReportInt(V, "ticks", int64(cpuTicks.System)) - monitoring.ReportInt(V, "time", systemTime) + monitoring.ReportNamespace(V, "time", func() { + monitoring.ReportInt(V, "ms", systemTime) + }) }) monitoring.ReportNamespace(V, "total", func() { monitoring.ReportFloat(V, "value", totalCPUUsage) monitoring.ReportInt(V, "ticks", int64(cpuTicks.Total)) - monitoring.ReportInt(V, 
"time", userTime+systemTime) + monitoring.ReportNamespace(V, "time", func() { + monitoring.ReportInt(V, "ms", userTime+systemTime) + }) }) } func getCPUUsage() (float64, *process.Ticks, error) { - beatPID := os.Getpid() - state, err := beatProcessStats.GetOne(beatPID) + pid, err := process.GetSelfPid() + if err != nil { + return 0.0, nil, fmt.Errorf("error getting PID for self process: %v", err) + } + + state, err := beatProcessStats.GetOne(pid) if err != nil { return 0.0, nil, fmt.Errorf("error retrieving process stats: %v", err) } diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/setup.go b/vendor/github.com/elastic/beats/libbeat/cmd/setup.go index b1b03069..fa04e97b 100644 --- a/vendor/github.com/elastic/beats/libbeat/cmd/setup.go +++ b/vendor/github.com/elastic/beats/libbeat/cmd/setup.go @@ -19,6 +19,7 @@ func genSetupCmd(name, idxPrefix, version string, beatCreator beat.Creator) *cob * Index mapping template in Elasticsearch to ensure fields are mapped. * Kibana dashboards (where available). * ML jobs (where available). + * Ingest pipelines (where available). `, Run: func(cmd *cobra.Command, args []string) { beat, err := instance.NewBeat(name, idxPrefix, version) @@ -30,15 +31,16 @@ func genSetupCmd(name, idxPrefix, version string, beatCreator beat.Creator) *cob template, _ := cmd.Flags().GetBool("template") dashboards, _ := cmd.Flags().GetBool("dashboards") machineLearning, _ := cmd.Flags().GetBool("machine-learning") + pipelines, _ := cmd.Flags().GetBool("pipelines") // No flags: setup all - if !template && !dashboards && !machineLearning { + if !template && !dashboards && !machineLearning && !pipelines { template = true dashboards = true machineLearning = true } - if err = beat.Setup(beatCreator, template, dashboards, machineLearning); err != nil { + if err = beat.Setup(beatCreator, template, dashboards, machineLearning, pipelines); err != nil { os.Exit(1) } }, @@ -47,6 +49,7 @@ func genSetupCmd(name, idxPrefix, version string, beatCreator beat.Creator) *cob setup.Flags().Bool("template", false, "Setup index template only") setup.Flags().Bool("dashboards", false, "Setup dashboards only") setup.Flags().Bool("machine-learning", false, "Setup machine learning job configurations only") + setup.Flags().Bool("pipelines", false, "Setup Ingest pipelines only") return &setup } diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/version.go b/vendor/github.com/elastic/beats/libbeat/cmd/version.go index 3d9e9438..cdecb527 100644 --- a/vendor/github.com/elastic/beats/libbeat/cmd/version.go +++ b/vendor/github.com/elastic/beats/libbeat/cmd/version.go @@ -22,8 +22,13 @@ func genVersionCmd(name, beatVersion string) *cobra.Command { return fmt.Errorf("error initializing beat: %s", err) } - fmt.Printf("%s version %s (%s), libbeat %s\n", - beat.Info.Beat, beat.Info.Version, runtime.GOARCH, version.GetDefaultVersion()) + buildTime := "unknown" + if bt := version.BuildTime(); !bt.IsZero() { + buildTime = bt.String() + } + fmt.Printf("%s version %s (%s), libbeat %s [%s built %s]\n", + beat.Info.Beat, beat.Info.Version, runtime.GOARCH, version.GetDefaultVersion(), + version.Commit(), buildTime) return nil }), } diff --git a/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic32.go b/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic32.go new file mode 100644 index 00000000..bd3f640a --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic32.go @@ -0,0 +1,33 @@ +// +build 386 arm mips mipsle + +package atomic + +// atomic Uint/Int for 32bit systems + 
+// Uint provides an architecture specific atomic uint.
+type Uint struct{ a Uint32 }
+
+// Int provides an architecture specific atomic int.
+type Int struct{ a Int32 }
+
+func MakeUint(v uint) Uint { return Uint{MakeUint32(uint32(v))} }
+func NewUint(v uint) *Uint { return &Uint{MakeUint32(uint32(v))} }
+func (u *Uint) Load() uint { return uint(u.a.Load()) }
+func (u *Uint) Store(v uint) { u.a.Store(uint32(v)) }
+func (u *Uint) Swap(new uint) uint { return uint(u.a.Swap(uint32(new))) }
+func (u *Uint) Add(delta uint) uint { return uint(u.a.Add(uint32(delta))) }
+func (u *Uint) Sub(delta uint) uint { return uint(u.a.Add(uint32(-delta))) }
+func (u *Uint) Inc() uint { return uint(u.a.Inc()) }
+func (u *Uint) Dec() uint { return uint(u.a.Dec()) }
+func (u *Uint) CAS(old, new uint) bool { return u.a.CAS(uint32(old), uint32(new)) }
+
+func MakeInt(v int) Int { return Int{MakeInt32(int32(v))} }
+func NewInt(v int) *Int { return &Int{MakeInt32(int32(v))} }
+func (i *Int) Load() int { return int(i.a.Load()) }
+func (i *Int) Store(v int) { i.a.Store(int32(v)) }
+func (i *Int) Swap(new int) int { return int(i.a.Swap(int32(new))) }
+func (i *Int) Add(delta int) int { return int(i.a.Add(int32(delta))) }
+func (i *Int) Sub(delta int) int { return int(i.a.Add(int32(-delta))) }
+func (i *Int) Inc() int { return int(i.a.Inc()) }
+func (i *Int) Dec() int { return int(i.a.Dec()) }
+func (i *Int) CAS(old, new int) bool { return i.a.CAS(int32(old), int32(new)) }
diff --git a/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic64.go b/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic64.go
new file mode 100644
index 00000000..fc34a8d3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic64.go
@@ -0,0 +1,33 @@
+// +build amd64 arm64 ppc64 ppc64le mips64 mips64le s390x
+
+package atomic
+
+// atomic Uint/Int for 64bit systems
+
+// Uint provides an architecture specific atomic uint.
+type Uint struct{ a Uint64 }
+
+// Int provides an architecture specific atomic int.
+type Int struct{ a Int64 } + +func MakeUint(v uint) Uint { return Uint{MakeUint64(uint64(v))} } +func NewUint(v uint) *Uint { return &Uint{MakeUint64(uint64(v))} } +func (u *Uint) Load() uint { return uint(u.a.Load()) } +func (u *Uint) Store(v uint) { u.a.Store(uint64(v)) } +func (u *Uint) Swap(new uint) uint { return uint(u.a.Swap(uint64(new))) } +func (u *Uint) Add(delta uint) uint { return uint(u.a.Add(uint64(delta))) } +func (u *Uint) Sub(delta uint) uint { return uint(u.a.Add(uint64(-delta))) } +func (u *Uint) Inc() uint { return uint(u.a.Inc()) } +func (u *Uint) Dec() uint { return uint(u.a.Dec()) } +func (u *Uint) CAS(old, new uint) bool { return u.a.CAS(uint64(old), uint64(new)) } + +func MakeInt(v int) Int { return Int{MakeInt64(int64(v))} } +func NewInt(v int) *Int { return &Int{MakeInt64(int64(v))} } +func (i *Int) Load() int { return int(i.a.Load()) } +func (i *Int) Store(v int) { i.a.Store(int64(v)) } +func (i *Int) Swap(new int) int { return int(i.a.Swap(int64(new))) } +func (i *Int) Add(delta int) int { return int(i.a.Add(int64(delta))) } +func (i *Int) Sub(delta int) int { return int(i.a.Add(int64(-delta))) } +func (i *Int) Inc() int { return int(i.a.Inc()) } +func (i *Int) Dec() int { return int(i.a.Dec()) } +func (i *Int) CAS(old, new int) bool { return i.a.CAS(int64(old), int64(new)) } diff --git a/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic_test.go b/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic_test.go index 5b7b4dfd..015291d8 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/atomic/atomic_test.go @@ -210,3 +210,47 @@ func TestAtomicUint64(t *testing.T) { assert.True(ok, "check CAS succeeds") check(23, v.Load(), "check CAS did store new value") } + +func TestAtomicUint(t *testing.T) { + assert := assert.New(t) + check := func(expected, actual uint, msg string) { + assert.Equal(expected, actual, msg) + } + + var v Uint + check(0, v.Load(), "check zero value") + + v = MakeUint(23) + check(23, v.Load(), "check value initializer") + + v.Store(42) + check(42, v.Load(), "check store new value") + + new := v.Inc() + check(43, new, "check increment returns new value") + check(43, v.Load(), "check increment did store new value") + + new = v.Dec() + check(42, new, "check decrement returns new value") + check(42, v.Load(), "check decrement did store new value") + + new = v.Add(8) + check(50, new, "check add returns new value") + check(50, v.Load(), "check add did store new value") + + new = v.Sub(8) + check(42, new, "check sub returns new value") + check(42, v.Load(), "check sub did store new value") + + old := v.Swap(101) + check(42, old, "check swap returns old value") + check(101, v.Load(), "check swap stores new value") + + ok := v.CAS(0, 23) + assert.False(ok, "check CAS with wrong old value fails") + check(101, v.Load(), "check failed CAS did not change value") + + ok = v.CAS(101, 23) + assert.True(ok, "check CAS succeeds") + check(23, v.Load(), "check CAS did store new value") +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/cfgtype/byte_size.go b/vendor/github.com/elastic/beats/libbeat/common/cfgtype/byte_size.go new file mode 100644 index 00000000..b6bc106c --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/cfgtype/byte_size.go @@ -0,0 +1,36 @@ +package cfgtype + +import ( + "unicode" + + "github.com/dustin/go-humanize" + + "github.com/elastic/beats/libbeat/common/cfgwarn" +) + +// ByteSize defines a new configuration option 
that will parse `go-humanize` compatible values into a +// int64 when the suffix is valid or will fallback to bytes. +type ByteSize int64 + +// Unpack converts a size defined from a human readable format into bytes. +func (s *ByteSize) Unpack(v string) error { + sz, err := humanize.ParseBytes(v) + if isRawBytes(v) { + cfgwarn.Deprecate("7.0", "size now requires a unit (KiB, MiB, etc...), current value: %s.", v) + } + if err != nil { + return err + } + + *s = ByteSize(sz) + return nil +} + +func isRawBytes(v string) bool { + for _, c := range v { + if !unicode.IsDigit(c) { + return false + } + } + return true +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/cfgtype/byte_size_test.go b/vendor/github.com/elastic/beats/libbeat/common/cfgtype/byte_size_test.go new file mode 100644 index 00000000..1596a2a1 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/cfgtype/byte_size_test.go @@ -0,0 +1,37 @@ +package cfgtype + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnpack(t *testing.T) { + tests := []struct { + name string + s string + expected ByteSize + }{ + { + name: "friendly human value", + s: "1KiB", + expected: ByteSize(1024), + }, + { + name: "raw bytes", + s: "2024", + expected: ByteSize(2024), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + s := ByteSize(0) + err := s.Unpack(test.s) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, test.expected, s) + }) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/coerce.go b/vendor/github.com/elastic/beats/libbeat/common/coerce.go new file mode 100644 index 00000000..e7d68cec --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/coerce.go @@ -0,0 +1,40 @@ +package common + +import "strconv" + +// TryToInt tries to coerce the given interface to an int. On success it returns +// the int value and true. 
+func TryToInt(number interface{}) (int, bool) { + var rtn int + switch v := number.(type) { + case int: + rtn = int(v) + case int8: + rtn = int(v) + case int16: + rtn = int(v) + case int32: + rtn = int(v) + case int64: + rtn = int(v) + case uint: + rtn = int(v) + case uint8: + rtn = int(v) + case uint16: + rtn = int(v) + case uint32: + rtn = int(v) + case uint64: + rtn = int(v) + case string: + var err error + rtn, err = strconv.Atoi(v) + if err != nil { + return 0, false + } + default: + return 0, false + } + return rtn, true +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/coerce_test.go b/vendor/github.com/elastic/beats/libbeat/common/coerce_test.go new file mode 100644 index 00000000..c61661d3 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/coerce_test.go @@ -0,0 +1,57 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTryToInt(t *testing.T) { + tests := []struct { + input interface{} + result int + resultB bool + }{ + { + int(4), + int(4), + true, + }, + { + int64(3), + int(3), + true, + }, + { + "5", + int(5), + true, + }, + { + uint32(12), + int(12), + true, + }, + { + "abc", + 0, + false, + }, + { + []string{"123"}, + 0, + false, + }, + { + uint64(55), + int(55), + true, + }, + } + + for _, test := range tests { + a, b := TryToInt(test.input) + assert.Equal(t, a, test.result) + assert.Equal(t, b, test.resultB) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/client.go b/vendor/github.com/elastic/beats/libbeat/common/docker/client.go new file mode 100644 index 00000000..7368a062 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/client.go @@ -0,0 +1,49 @@ +package docker + +import ( + "net/http" + "os" + + "github.com/elastic/beats/libbeat/logp" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +// NewClient builds and returns a new Docker client +// It uses version 1.30 by default, and negotiates it with the server so it is downgraded if 1.30 is too high +func NewClient(host string, httpClient *http.Client, httpHeaders map[string]string) (*client.Client, error) { + version := os.Getenv("DOCKER_API_VERSION") + if version == "" { + version = api.DefaultVersion + } + + c, err := client.NewClient(host, version, httpClient, nil) + if err != nil { + return c, err + } + + if os.Getenv("DOCKER_API_VERSION") == "" { + logp.Debug("docker", "Negotiating client version") + ping, err := c.Ping(context.Background()) + if err != nil { + logp.Debug("docker", "Failed to perform ping: %s", err) + } + + // try a really old version, before versioning headers existed + if ping.APIVersion == "" { + ping.APIVersion = "1.22" + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(ping.APIVersion, version) { + c.UpdateClientVersion(ping.APIVersion) + } + } + + logp.Debug("docker", "Client version set to %s", c.ClientVersion()) + + return c, nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/client_test.go b/vendor/github.com/elastic/beats/libbeat/common/docker/client_test.go new file mode 100644 index 00000000..a9a93a0f --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/client_test.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration + +package docker + +import ( + "os" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestNewClient(t *testing.T) { + host := "unix:///var/run/docker.sock" + + client, err := NewClient(host, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, client) + + _, err = client.ContainerList(context.Background(), types.ContainerListOptions{}) + assert.NoError(t, err) + + // This test only works on newer Docker versions (any supported one really) + switch client.ClientVersion() { + case "1.22": + t.Skip("Docker version is too old for this test") + case api.DefaultVersion: + t.Logf("Using default API version: %s", api.DefaultVersion) + default: + t.Logf("Negotiated version: %s", client.ClientVersion()) + } + + // Test we can hardcode version + os.Setenv("DOCKER_API_VERSION", "1.22") + + client, err = NewClient(host, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, "1.22", client.ClientVersion()) + + _, err = client.ContainerList(context.Background(), types.ContainerListOptions{}) + assert.NoError(t, err) +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go index 79963273..5c2e03fe 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go @@ -9,7 +9,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" "github.com/docker/go-connections/tlsconfig" "golang.org/x/net/context" @@ -18,7 +17,9 @@ import ( ) // Select Docker API version -const dockerAPIVersion = "1.22" +const ( + shortIDLen = 12 +) // Watcher reads docker events and keeps a list of known containers type Watcher interface { @@ -59,6 +60,7 @@ type watcher struct { lastValidTimestamp int64 stopped sync.WaitGroup bus bus.Bus + shortID bool // whether to store short ID in "containers" too } // Container info retrieved by the watcher @@ -77,10 +79,11 @@ type Client interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) } -type WatcherConstructor func(host string, tls *TLSConfig) (Watcher, error) +// WatcherConstructor represent a function that creates a new Watcher from giving parameters +type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) // NewWatcher returns a watcher running for the given settings -func NewWatcher(host string, tls *TLSConfig) (Watcher, error) { +func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) { var httpClient *http.Client if 
tls != nil { options := tlsconfig.Options{ @@ -101,15 +104,16 @@ func NewWatcher(host string, tls *TLSConfig) (Watcher, error) { } } - client, err := client.NewClient(host, dockerAPIVersion, httpClient, nil) + client, err := NewClient(host, httpClient, nil) if err != nil { return nil, err } - return NewWatcherWithClient(client, 60*time.Second) + return NewWatcherWithClient(client, 60*time.Second, storeShortID) } -func NewWatcherWithClient(client Client, cleanupTimeout time.Duration) (*watcher, error) { +// NewWatcherWithClient creates a new Watcher from a given Docker client +func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) { ctx, cancel := context.WithCancel(context.Background()) return &watcher{ client: client, @@ -119,6 +123,7 @@ func NewWatcherWithClient(client Client, cleanupTimeout time.Duration) (*watcher deleted: make(map[string]time.Time), cleanupTimeout: cleanupTimeout, bus: bus.New("docker"), + shortID: storeShortID, }, nil } @@ -126,13 +131,17 @@ func NewWatcherWithClient(client Client, cleanupTimeout time.Duration) (*watcher func (w *watcher) Container(ID string) *Container { w.RLock() container := w.containers[ID] - _, ok := w.deleted[ID] + if container == nil { + w.RUnlock() + return nil + } + _, ok := w.deleted[container.ID] w.RUnlock() // Update last access time if it's deleted if ok { w.Lock() - w.deleted[ID] = time.Now() + w.deleted[container.ID] = time.Now() w.Unlock() } @@ -145,7 +154,9 @@ func (w *watcher) Containers() map[string]*Container { defer w.RUnlock() res := make(map[string]*Container) for k, v := range w.containers { - res[k] = v + if !w.shortID || len(k) != shortIDLen { + res[k] = v + } } return res } @@ -165,6 +176,9 @@ func (w *watcher) Start() error { for _, c := range containers { w.containers[c.ID] = c + if w.shortID { + w.containers[c.ID[:shortIDLen]] = c + } } // Emit all start events (avoid blocking if the bus get's blocked) @@ -223,6 +237,9 @@ func (w *watcher) watch() { w.Lock() w.containers[event.Actor.ID] = container + if w.shortID { + w.containers[event.Actor.ID[:shortIDLen]] = container + } // un-delete if it's flagged (in case of update or recreation) delete(w.deleted, event.Actor.ID) w.Unlock() @@ -326,6 +343,9 @@ func (w *watcher) cleanupWorker() { for _, key := range toDelete { delete(w.deleted, key) delete(w.containers, key) + if w.shortID { + delete(w.containers, key[:shortIDLen]) + } } w.Unlock() } diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go index 3ab16ffd..a0a541ab 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go @@ -84,6 +84,51 @@ func TestWatcherInitialization(t *testing.T) { }, watcher.Containers()) } +func TestWatcherInitializationShortID(t *testing.T) { + watcher := runWatcherShortID(t, true, + [][]types.Container{ + []types.Container{ + types.Container{ + ID: "1234567890123", + Names: []string{"/containername", "othername"}, + Image: "busybox", + Labels: map[string]string{"foo": "bar"}, + NetworkSettings: &types.SummaryNetworkSettings{}, + }, + types.Container{ + ID: "2345678901234", + Names: []string{"/other"}, + Image: "nginx", + Labels: map[string]string{}, + NetworkSettings: &types.SummaryNetworkSettings{}, + }, + }, + }, + nil, true) + + assert.Equal(t, map[string]*Container{ + "1234567890123": &Container{ + ID: "1234567890123", + Name: 
"containername", + Image: "busybox", + Labels: map[string]string{"foo": "bar"}, + }, + "2345678901234": &Container{ + ID: "2345678901234", + Name: "other", + Image: "nginx", + Labels: map[string]string{}, + }, + }, watcher.Containers()) + + assert.Equal(t, &Container{ + ID: "1234567890123", + Name: "containername", + Image: "busybox", + Labels: map[string]string{"foo": "bar"}, + }, watcher.Container("123456789012")) +} + func TestWatcherAddEvents(t *testing.T) { watcher := runWatcher(t, true, [][]types.Container{ @@ -137,6 +182,60 @@ func TestWatcherAddEvents(t *testing.T) { }, watcher.Containers()) } +func TestWatcherAddEventsShortID(t *testing.T) { + watcher := runWatcherShortID(t, true, + [][]types.Container{ + []types.Container{ + types.Container{ + ID: "1234567890123", + Names: []string{"/containername", "othername"}, + Image: "busybox", + Labels: map[string]string{"foo": "bar"}, + NetworkSettings: &types.SummaryNetworkSettings{}, + }, + }, + []types.Container{ + types.Container{ + ID: "2345678901234", + Names: []string{"/other"}, + Image: "nginx", + Labels: map[string]string{"label": "value"}, + NetworkSettings: &types.SummaryNetworkSettings{}, + }, + }, + }, + []interface{}{ + events.Message{ + Action: "start", + Actor: events.Actor{ + ID: "2345678901234", + Attributes: map[string]string{ + "name": "other", + "image": "nginx", + "label": "value", + }, + }, + }, + }, + true, + ) + + assert.Equal(t, map[string]*Container{ + "1234567890123": &Container{ + ID: "1234567890123", + Name: "containername", + Image: "busybox", + Labels: map[string]string{"foo": "bar"}, + }, + "2345678901234": &Container{ + ID: "2345678901234", + Name: "other", + Image: "nginx", + Labels: map[string]string{"label": "value"}, + }, + }, watcher.Containers()) +} + func TestWatcherUpdateEvent(t *testing.T) { watcher := runWatcher(t, true, [][]types.Container{ @@ -185,6 +284,55 @@ func TestWatcherUpdateEvent(t *testing.T) { assert.Equal(t, 0, len(watcher.deleted)) } +func TestWatcherUpdateEventShortID(t *testing.T) { + watcher := runWatcherShortID(t, true, + [][]types.Container{ + []types.Container{ + types.Container{ + ID: "1234567890123", + Names: []string{"/containername", "othername"}, + Image: "busybox", + Labels: map[string]string{"label": "foo"}, + NetworkSettings: &types.SummaryNetworkSettings{}, + }, + }, + []types.Container{ + types.Container{ + ID: "1234567890123", + Names: []string{"/containername", "othername"}, + Image: "busybox", + Labels: map[string]string{"label": "bar"}, + NetworkSettings: &types.SummaryNetworkSettings{}, + }, + }, + }, + []interface{}{ + events.Message{ + Action: "update", + Actor: events.Actor{ + ID: "1234567890123", + Attributes: map[string]string{ + "name": "containername", + "image": "busybox", + "label": "bar", + }, + }, + }, + }, + true, + ) + + assert.Equal(t, map[string]*Container{ + "1234567890123": &Container{ + ID: "1234567890123", + Name: "containername", + Image: "busybox", + Labels: map[string]string{"label": "bar"}, + }, + }, watcher.Containers()) + assert.Equal(t, 0, len(watcher.deleted)) +} + func TestWatcherDie(t *testing.T) { watcher := runWatcher(t, false, [][]types.Container{ @@ -229,7 +377,56 @@ func TestWatcherDie(t *testing.T) { assert.Equal(t, 0, len(watcher.Containers())) } +func TestWatcherDieShortID(t *testing.T) { + watcher := runWatcherShortID(t, false, + [][]types.Container{ + []types.Container{ + types.Container{ + ID: "0332dbd79e20aaa", + Names: []string{"/containername", "othername"}, + Image: "busybox", + Labels: map[string]string{"label": 
"foo"}, + NetworkSettings: &types.SummaryNetworkSettings{}, + }, + }, + }, + []interface{}{ + events.Message{ + Action: "die", + Actor: events.Actor{ + ID: "0332dbd79e20aaa", + }, + }, + }, + true, + ) + defer watcher.Stop() + + // Check it doesn't get removed while we request meta for the container + for i := 0; i < 18; i++ { + watcher.Container("0332dbd79e20") + assert.Equal(t, 1, len(watcher.Containers())) + time.Sleep(50 * time.Millisecond) + } + + // Checks a max of 10s for the watcher containers to be updated + for i := 0; i < 100; i++ { + // Now it should get removed + time.Sleep(100 * time.Millisecond) + + if len(watcher.Containers()) == 0 { + break + } + } + + assert.Equal(t, 0, len(watcher.Containers())) +} + func runWatcher(t *testing.T, kill bool, containers [][]types.Container, events []interface{}) *watcher { + return runWatcherShortID(t, kill, containers, events, false) +} + +func runWatcherShortID(t *testing.T, kill bool, containers [][]types.Container, events []interface{}, enable bool) *watcher { logp.TestingSetup() client := &MockClient{ @@ -238,10 +435,14 @@ func runWatcher(t *testing.T, kill bool, containers [][]types.Container, events done: make(chan interface{}), } - watcher, err := NewWatcherWithClient(client, 200*time.Millisecond) + w, err := NewWatcherWithClient(client, 200*time.Millisecond, enable) if err != nil { t.Fatal(err) } + watcher, ok := w.(*watcher) + if !ok { + t.Fatal("'watcher' was supposed to be pointer to the watcher structure") + } err = watcher.Start() if err != nil { diff --git a/vendor/github.com/elastic/beats/libbeat/common/event.go b/vendor/github.com/elastic/beats/libbeat/common/event.go index c91abcd5..65e7a4ce 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/event.go +++ b/vendor/github.com/elastic/beats/libbeat/common/event.go @@ -152,8 +152,10 @@ func normalizeValue(value interface{}, keys ...string) (interface{}, []error) { case []int, []int8, []int16, []int32, []int64: case uint, uint8, uint16, uint32, uint64: case []uint, []uint8, []uint16, []uint32, []uint64: - case float32, float64: + case float64: return Float(value.(float64)), nil + case float32: + return Float(value.(float32)), nil case []float32, []float64: case complex64, complex128: case []complex64, []complex128: @@ -260,16 +262,22 @@ func DeDot(s string) string { // DeDotJSON replaces in keys all . with _ // This helps when sending data to Elasticsearch to prevent object and key collisions. 
func DeDotJSON(json interface{}) interface{} { - switch json.(type) { + switch json := json.(type) { case map[string]interface{}: result := map[string]interface{}{} - for key, value := range json.(map[string]interface{}) { + for key, value := range json { + result[DeDot(key)] = DeDotJSON(value) + } + return result + case MapStr: + result := MapStr{} + for key, value := range json { result[DeDot(key)] = DeDotJSON(value) } return result case []interface{}: - result := make([]interface{}, len(json.([]interface{}))) - for i, value := range json.([]interface{}) { + result := make([]interface{}, len(json)) + for i, value := range json { result[i] = DeDotJSON(value) } return result diff --git a/vendor/github.com/elastic/beats/libbeat/common/event_test.go b/vendor/github.com/elastic/beats/libbeat/common/event_test.go index 0ddceb04..6f80dbef 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/event_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/event_test.go @@ -7,9 +7,6 @@ import ( "github.com/stretchr/testify/assert" - "io/ioutil" - "path/filepath" - "github.com/elastic/beats/libbeat/logp" ) @@ -237,6 +234,23 @@ func TestNormalizeValue(t *testing.T) { assert.Equal(t, test.out, out, "Test case %v", i) } + + var floatTests = []struct { + in interface{} + out interface{} + }{ + {float32(1), float64(1)}, + {float64(1), float64(1)}, + } + + for i, test := range floatTests { + out, err := normalizeValue(test.in) + if err != nil { + t.Error(err) + continue + } + assert.InDelta(t, test.out, float64(out.(Float)), 0.000001, "(approximate) Test case %v", i) + } } func TestNormalizeMapError(t *testing.T) { @@ -382,49 +396,60 @@ func BenchmarkConvertToGenericEventStringPointer(b *testing.B) { ConvertToGenericEvent(MapStr{"key": &val}) } } - -func TestDeDotJsonMap(t *testing.T) { - var actualJSONBody map[string]interface{} - var expectedJSONBody map[string]interface{} - - absPath, err := filepath.Abs("./testdata") - assert.NotNil(t, absPath) - assert.Nil(t, err) - - actualJSONResponse, err := ioutil.ReadFile(absPath + "/json_map_with_dots.json") - assert.Nil(t, err) - err = json.Unmarshal(actualJSONResponse, &actualJSONBody) - assert.Nil(t, err) - - dedottedJSONResponse, err := ioutil.ReadFile(absPath + "/json_map_dedot.json") - assert.Nil(t, err) - err = json.Unmarshal(dedottedJSONResponse, &expectedJSONBody) - assert.Nil(t, err) - - actualJSONBody = DeDotJSON(actualJSONBody).(map[string]interface{}) - - assert.Equal(t, expectedJSONBody, actualJSONBody) -} - -func TestDeDotJsonArray(t *testing.T) { - var actualJSONBody []interface{} - var expectedJSONBody []interface{} - - absPath, err := filepath.Abs("./testdata") - assert.NotNil(t, absPath) - assert.Nil(t, err) - - actualJSONResponse, err := ioutil.ReadFile(absPath + "/json_array_with_dots.json") - assert.Nil(t, err) - err = json.Unmarshal(actualJSONResponse, &actualJSONBody) - assert.Nil(t, err) - - dedottedJSONResponse, err := ioutil.ReadFile(absPath + "/json_array_dedot.json") - assert.Nil(t, err) - err = json.Unmarshal(dedottedJSONResponse, &expectedJSONBody) - assert.Nil(t, err) - - actualJSONBody = DeDotJSON(actualJSONBody).([]interface{}) - - assert.Equal(t, expectedJSONBody, actualJSONBody) +func TestDeDotJSON(t *testing.T) { + var tests = []struct { + input []byte + output []byte + valuer func() interface{} + }{ + { + input: []byte(`[ + {"key_with_dot.1":"value1_1"}, + {"key_without_dot_2":"value1_2"}, + {"key_with_multiple.dots.3": {"key_with_dot.2":"value2_1"}} + ] + `), + output: []byte(`[ + {"key_with_dot_1":"value1_1"}, 
+ {"key_without_dot_2":"value1_2"}, + {"key_with_multiple_dots_3": {"key_with_dot_2":"value2_1"}} + ] + `), + valuer: func() interface{} { return []interface{}{} }, + }, + { + input: []byte(`{ + "key_without_dot_l1": { + "key_with_dot.l2": 1, + "key.with.multiple.dots_l2": 2, + "key_without_dot_l2": { + "key_with_dot.l3": 3, + "key.with.multiple.dots_l3": 4 + } + } + } + `), + output: []byte(`{ + "key_without_dot_l1": { + "key_with_dot_l2": 1, + "key_with_multiple_dots_l2": 2, + "key_without_dot_l2": { + "key_with_dot_l3": 3, + "key_with_multiple_dots_l3": 4 + } + } + } + `), + valuer: func() interface{} { return map[string]interface{}{} }, + }, + } + for _, test := range tests { + input, output := test.valuer(), test.valuer() + assert.Nil(t, json.Unmarshal(test.input, &input)) + assert.Nil(t, json.Unmarshal(test.output, &output)) + assert.Equal(t, output, DeDotJSON(input)) + if _, ok := test.valuer().(map[string]interface{}); ok { + assert.Equal(t, MapStr(output.(map[string]interface{})), DeDotJSON(MapStr(input.(map[string]interface{})))) + } + } } diff --git a/vendor/github.com/elastic/beats/libbeat/common/field.go b/vendor/github.com/elastic/beats/libbeat/common/field.go index d8b4e242..3eadc275 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/field.go +++ b/vendor/github.com/elastic/beats/libbeat/common/field.go @@ -17,22 +17,23 @@ import ( type Fields []Field type Field struct { - Name string `config:"name"` - Type string `config:"type"` - Description string `config:"description"` - Format string `config:"format"` - ScalingFactor int `config:"scaling_factor"` - Fields Fields `config:"fields"` - MultiFields Fields `config:"multi_fields"` - ObjectType string `config:"object_type"` - Enabled *bool `config:"enabled"` - Analyzer string `config:"analyzer"` - SearchAnalyzer string `config:"search_analyzer"` - Norms bool `config:"norms"` - Dynamic DynamicType `config:"dynamic"` - Index *bool `config:"index"` - DocValues *bool `config:"doc_values"` - CopyTo string `config:"copy_to"` + Name string `config:"name"` + Type string `config:"type"` + Description string `config:"description"` + Format string `config:"format"` + ScalingFactor int `config:"scaling_factor"` + Fields Fields `config:"fields"` + MultiFields Fields `config:"multi_fields"` + ObjectType string `config:"object_type"` + ObjectTypeMappingType string `config:"object_type_mapping_type"` + Enabled *bool `config:"enabled"` + Analyzer string `config:"analyzer"` + SearchAnalyzer string `config:"search_analyzer"` + Norms bool `config:"norms"` + Dynamic DynamicType `config:"dynamic"` + Index *bool `config:"index"` + DocValues *bool `config:"doc_values"` + CopyTo string `config:"copy_to"` // Kibana specific Analyzed *bool `config:"analyzed"` @@ -99,6 +100,44 @@ func (f Fields) HasKey(key string) bool { return f.hasKey(keys) } +// HasNode checks if inside fields the given node exists +// In contrast to HasKey it not only compares the leaf nodes but +// every single key it traverses. 
+func (f Fields) HasNode(key string) bool { + keys := strings.Split(key, ".") + return f.hasNode(keys) +} + +func (f Fields) hasNode(keys []string) bool { + + // Nothing to compare, so does not contain it + if len(keys) == 0 { + return false + } + + key := keys[0] + keys = keys[1:] + + for _, field := range f { + + if field.Name == key { + + //// It's the last key to compare + if len(keys) == 0 { + return true + } + + // It's the last field to compare + if len(field.Fields) == 0 { + return true + } + + return field.Fields.hasNode(keys) + } + } + return false +} + // Recursively generates the correct key based on the dots // The mapping requires "properties" between each layer. This is added here. func GenerateKey(key string) string { @@ -134,3 +173,27 @@ func (f Fields) hasKey(keys []string) bool { } return false } + +// GetKeys returns a flat list of keys this Fields contains +func (f Fields) GetKeys() []string { + return f.getKeys("") +} + +func (f Fields) getKeys(namespace string) []string { + + var keys []string + + for _, field := range f { + fieldName := namespace + "." + field.Name + if namespace == "" { + fieldName = field.Name + } + if len(field.Fields) == 0 { + keys = append(keys, fieldName) + } else { + keys = append(keys, field.Fields.getKeys(fieldName)...) + } + } + + return keys +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/field_test.go b/vendor/github.com/elastic/beats/libbeat/common/field_test.go index 2bab75a6..6c128b9b 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/field_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/field_test.go @@ -68,7 +68,7 @@ func TestDynamicYaml(t *testing.T) { }{ { input: []byte(` -name: test +name: test dynamic: true`), output: Field{ Name: "test", @@ -115,3 +115,60 @@ dynamic: "strict"`), } } } + +func TestGetKeys(t *testing.T) { + tests := []struct { + fields Fields + keys []string + }{ + { + fields: Fields{ + Field{ + Name: "test", Fields: Fields{ + Field{ + Name: "find", + }, + }, + }, + }, + keys: []string{"test.find"}, + }, + { + fields: Fields{ + Field{ + Name: "a", Fields: Fields{ + Field{ + Name: "b", + }, + }, + }, + Field{ + Name: "a", Fields: Fields{ + Field{ + Name: "c", + }, + }, + }, + }, + keys: []string{"a.b", "a.c"}, + }, + { + fields: Fields{ + Field{ + Name: "a", + }, + Field{ + Name: "b", + }, + Field{ + Name: "c", + }, + }, + keys: []string{"a", "b", "c"}, + }, + } + + for _, test := range tests { + assert.Equal(t, test.keys, test.fields.GetKeys()) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/file/file_other.go b/vendor/github.com/elastic/beats/libbeat/common/file/file_other.go index 3be5bc34..9e1fb967 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/file/file_other.go +++ b/vendor/github.com/elastic/beats/libbeat/common/file/file_other.go @@ -3,8 +3,8 @@ package file import ( - "fmt" "os" + "strconv" "syscall" ) @@ -32,7 +32,11 @@ func (fs StateOS) IsSame(state StateOS) bool { } func (fs StateOS) String() string { - return fmt.Sprintf("%d-%d", fs.Inode, fs.Device) + var buf [64]byte + current := strconv.AppendUint(buf[:0], fs.Inode, 10) + current = append(current, '-') + current = strconv.AppendUint(current, fs.Device, 10) + return string(current) } // ReadOpen opens a file for reading only diff --git a/vendor/github.com/elastic/beats/libbeat/common/file/file_other_test.go b/vendor/github.com/elastic/beats/libbeat/common/file/file_other_test.go index df60e881..29c3cf68 100644 --- 
a/vendor/github.com/elastic/beats/libbeat/common/file/file_other_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/file/file_other_test.go @@ -4,6 +4,7 @@ package file import ( "io/ioutil" + "math" "os" "runtime" "testing" @@ -48,3 +49,21 @@ func TestGetOSFileStateStat(t *testing.T) { assert.True(t, state.Device > 0, "Device %d", state.Device) } } + +func BenchmarkStateString(b *testing.B) { + var samples [50]uint64 + for i, v := 0, uint64(0); i < len(samples); i, v = i+1, v+math.MaxUint64/uint64(len(samples)) { + samples[i] = v + } + + for i := 0; i < b.N; i++ { + for _, inode := range samples { + for _, device := range samples { + st := StateOS{Inode: inode, Device: device} + if st.String() == "" { + b.Fatal("empty state string") + } + } + } + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/file/file_windows.go b/vendor/github.com/elastic/beats/libbeat/common/file/file_windows.go index 2cd845f6..dd31ccac 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/file/file_windows.go +++ b/vendor/github.com/elastic/beats/libbeat/common/file/file_windows.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "reflect" + "strconv" "syscall" ) @@ -43,7 +44,13 @@ func (fs StateOS) IsSame(state StateOS) bool { } func (fs StateOS) String() string { - return fmt.Sprintf("%d-%d-%d", fs.IdxHi, fs.IdxLo, fs.Vol) + var buf [92]byte + current := strconv.AppendUint(buf[:0], fs.IdxHi, 10) + current = append(current, '-') + current = strconv.AppendUint(current, fs.IdxLo, 10) + current = append(current, '-') + current = strconv.AppendUint(current, fs.Vol, 10) + return string(current) } // ReadOpen opens a file for reading only diff --git a/vendor/github.com/elastic/beats/libbeat/common/file/helper_other.go b/vendor/github.com/elastic/beats/libbeat/common/file/helper_other.go index 5150bdbb..43282eb5 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/file/helper_other.go +++ b/vendor/github.com/elastic/beats/libbeat/common/file/helper_other.go @@ -4,12 +4,26 @@ package file import ( "os" + "path/filepath" ) // SafeFileRotate safely rotates an existing file under path and replaces it with the tempfile func SafeFileRotate(path, tempfile string) error { + parent := filepath.Dir(path) + if e := os.Rename(tempfile, path); e != nil { return e } + + // best-effort fsync on parent directory. The fsync is required by some + // filesystems, so to update the parents directory metadata to actually + // contain the new file being rotated in. + f, err := os.Open(parent) + if err != nil { + return nil // ignore error + } + defer f.Close() + f.Sync() + return nil } diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/eventhandler.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/eventhandler.go new file mode 100644 index 00000000..b651e465 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/eventhandler.go @@ -0,0 +1,84 @@ +package kubernetes + +// ResourceEventHandler can handle notifications for events that happen to a +// resource. The events are informational only, so you can't return an +// error. +// * OnAdd is called when an object is added. +// * OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. 
+// * OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. +type ResourceEventHandler interface { + OnAdd(obj Resource) + OnUpdate(obj Resource) + OnDelete(obj Resource) +} + +// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. +type ResourceEventHandlerFuncs struct { + AddFunc func(obj Resource) + UpdateFunc func(obj Resource) + DeleteFunc func(obj Resource) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnAdd(obj Resource) { + if r.AddFunc != nil { + r.AddFunc(obj) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnUpdate(obj Resource) { + if r.UpdateFunc != nil { + r.UpdateFunc(obj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnDelete(obj Resource) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + +// FilteringResourceEventHandler applies the provided filter to all events coming +// in, ensuring the appropriate nested handler method is invoked. An object +// that starts passing the filter after an update is considered an add, and an +// object that stops passing the filter after an update is considered a delete. +type FilteringResourceEventHandler struct { + FilterFunc func(obj Resource) bool + Handler ResourceEventHandler +} + +// OnAdd calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnAdd(obj Resource) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnAdd(obj) +} + +// OnUpdate ensures the proper handler is called depending on whether the filter matches +func (r FilteringResourceEventHandler) OnUpdate(obj Resource) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnUpdate(obj) +} + +// OnDelete calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnDelete(obj Resource) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnDelete(obj) +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go index 9ae5b32c..5a2ce055 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go @@ -1,6 +1,9 @@ package kubernetes -import "github.com/elastic/beats/libbeat/common" +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/safemapstr" +) // MetaGenerator builds metadata objects for pods and containers type MetaGenerator interface { @@ -31,7 +34,7 @@ func (g *metaGenerator) PodMetadata(pod *Pod) common.MapStr { labelMap := common.MapStr{} if len(g.labels) == 0 { for k, v := range pod.Metadata.Labels { - labelMap[k] = v + safemapstr.Put(labelMap, k, v) } } else { labelMap = generateMapSubset(pod.Metadata.Labels, g.labels) @@ -85,7 +88,7 @@ func generateMapSubset(input map[string]string, keys []string) common.MapStr { for _, key := range keys { value, ok := input[key] if ok { - output[key] = value + safemapstr.Put(output, key, value) } } diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata_test.go 
b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata_test.go new file mode 100644 index 00000000..88001690 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata_test.go @@ -0,0 +1,29 @@ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" +) + +func TestPodMetadataDeDot(t *testing.T) { + tests := []struct { + pod *Pod + meta common.MapStr + }{ + { + pod: &Pod{ + Metadata: ObjectMeta{ + Labels: map[string]string{"a.key": "foo", "a": "bar"}, + }, + }, + meta: common.MapStr{"labels": common.MapStr{"a": common.MapStr{"value": "bar", "key": "foo"}}}, + }, + } + + for _, test := range tests { + assert.Equal(t, NewMetaGenerator(nil, nil, nil).PodMetadata(test.pod)["labels"], test.meta["labels"]) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/types.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/types.go index 134c13d4..efd09728 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/types.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/types.go @@ -3,12 +3,29 @@ package kubernetes import ( "encoding/json" "strings" + "time" - "github.com/elastic/beats/libbeat/logp" - - corev1 "github.com/ericchiang/k8s/api/v1" + "github.com/ericchiang/k8s" + "github.com/ericchiang/k8s/apis/core/v1" ) +func init() { + k8s.Register("", "v1", "events", true, &v1.Event{}) + k8s.RegisterList("", "v1", "events", true, &v1.EventList{}) +} + +// Resource is kind of kubernetes resource like pod, event, etc... +// It has a GetMetadata method for getting ObjectMeta which containing useful info like labels +type Resource interface { + GetMetadata() *ObjectMeta +} + +func resourceConverter(k8sObj k8s.Resource, r Resource) Resource { + bytes, _ := json.Marshal(k8sObj) + json.Unmarshal(bytes, r) + return r +} + type ObjectMeta struct { Annotations map[string]string `json:"annotations"` CreationTimestamp string `json:"creationTimestamp"` @@ -51,6 +68,7 @@ type ContainerPort struct { type PodSpec struct { Containers []Container `json:"containers"` + InitContainers []Container `json:"initContainers"` DNSPolicy string `json:"dnsPolicy"` NodeName string `json:"nodeName"` RestartPolicy string `json:"restartPolicy"` @@ -108,32 +126,54 @@ type Pod struct { Status PodStatus `json:"status"` } +// GetMetadata implements Resource +func (p *Pod) GetMetadata() *ObjectMeta { + return &p.Metadata +} + // GetContainerID parses the container ID to get the actual ID string func (s *PodContainerStatus) GetContainerID() string { + cID, _ := s.GetContainerIDWithRuntime() + return cID +} + +// GetContainerIDWithRuntime parses the container ID to get the actual ID string +func (s *PodContainerStatus) GetContainerIDWithRuntime() (string, string) { cID := s.ContainerID if cID != "" { - parts := strings.Split(cID, "//") + parts := strings.Split(cID, "://") if len(parts) == 2 { - return parts[1] + return parts[1], parts[0] } } - return "" + return "", "" } -// GetPod converts Pod to our own type -func GetPod(pod *corev1.Pod) *Pod { - bytes, err := json.Marshal(pod) - if err != nil { - logp.Warn("Unable to marshal %v", pod.String()) - return nil - } - - po := &Pod{} - err = json.Unmarshal(bytes, po) - if err != nil { - logp.Warn("Unable to marshal %v", pod.String()) - return nil - } +// Event is kubernetes event +type Event struct { + APIVersion string `json:"apiVersion"` + Count int64 `json:"count"` + FirstTimestamp *time.Time `json:"firstTimestamp"` + 
InvolvedObject struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Name string `json:"name"` + ResourceVersion string `json:"resourceVersion"` + UID string `json:"uid"` + } `json:"involvedObject"` + Kind string `json:"kind"` + LastTimestamp *time.Time `json:"lastTimestamp"` + Message string `json:"message"` + Metadata ObjectMeta `json:"metadata"` + Reason string `json:"reason"` + Source struct { + Component string `json:"component"` + Host string `json:"host"` + } `json:"source"` + Type string `json:"type"` +} - return po +// GetMetadata implements Resource +func (e *Event) GetMetadata() *ObjectMeta { + return &e.Metadata } diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/types_test.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/types_test.go new file mode 100644 index 00000000..c947a7e4 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/types_test.go @@ -0,0 +1,68 @@ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPodContainerStatus_GetContainerID(t *testing.T) { + tests := []struct { + status *PodContainerStatus + result string + }{ + // Check to see if x://y is parsed to return y as the container id + { + status: &PodContainerStatus{ + Name: "foobar", + ContainerID: "docker://abc", + Image: "foobar:latest", + }, + result: "abc", + }, + // Check to see if x://y is not the format then "" is returned + { + status: &PodContainerStatus{ + Name: "foobar", + ContainerID: "abc", + Image: "foobar:latest", + }, + result: "", + }, + } + + for _, test := range tests { + assert.Equal(t, test.status.GetContainerID(), test.result) + } +} + +func TestPodContainerStatus_GetContainerIDWithRuntime(t *testing.T) { + tests := []struct { + status *PodContainerStatus + result string + }{ + // Check to see if x://y is parsed to return x as the runtime + { + status: &PodContainerStatus{ + Name: "foobar", + ContainerID: "docker://abc", + Image: "foobar:latest", + }, + result: "docker", + }, + // Check to see if x://y is not the format then "" is returned + { + status: &PodContainerStatus{ + Name: "foobar", + ContainerID: "abc", + Image: "foobar:latest", + }, + result: "", + }, + } + + for _, test := range tests { + _, runtime := test.status.GetContainerIDWithRuntime() + assert.Equal(t, runtime, test.result) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go index 88c1c370..a86c8b45 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go @@ -5,13 +5,17 @@ import ( "fmt" "io/ioutil" "os" + "strings" "github.com/ericchiang/k8s" + "github.com/ericchiang/k8s/apis/core/v1" "github.com/ghodss/yaml" "github.com/elastic/beats/libbeat/logp" ) +const defaultNode = "localhost" + // GetKubernetesClient returns a kubernetes client. If inCluster is true, it returns an // in cluster configuration based on the secrets mounted in the Pod. If kubeConfig is passed, // it parses the config file to get the config required to build a client. @@ -41,28 +45,82 @@ func GetKubernetesClient(inCluster bool, kubeConfig string) (client *k8s.Client, return client, nil } -// DiscoverKubernetesNode figures out the Kubernetes host to use. If host is provided in the config -// use it directly. Else use hostname of the pod which is the Pod ID to query the Pod and get the Node -// name from the specification. 
Else, return localhost as a default. -func DiscoverKubernetesNode(host string, client *k8s.Client) string { - ctx := context.Background() - if host == "" { - podName := os.Getenv("HOSTNAME") - logp.Info("Using pod name %s and namespace %s", podName, client.Namespace) - if podName == "localhost" { - host = "localhost" - } else { - pod, err := client.CoreV1().GetPod(ctx, podName, client.Namespace) - if err != nil { - logp.Err("Querying for pod failed with error: ", err.Error()) - logp.Info("Unable to find pod, setting host to localhost") - host = "localhost" - } else { - host = pod.Spec.GetNodeName() - } +// DiscoverKubernetesNode figures out the Kubernetes node to use. +// If host is provided in the config use it directly. +// If beat is deployed in k8s cluster, use hostname of pod which is pod name to query pod meta for node name. +// If beat is deployed outside k8s cluster, use machine-id to match against k8s nodes for node name. +func DiscoverKubernetesNode(host string, inCluster bool, client *k8s.Client) (node string) { + if host != "" { + logp.Info("kubernetes: Using node %s provided in the config", host) + return host + } + + if inCluster { + ns, err := inClusterNamespace() + if err != nil { + logp.Err("kubernetes: Couldn't get namespace when beat is in cluster with error: ", err.Error()) + return defaultNode + } + podName, err := os.Hostname() + if err != nil { + logp.Err("kubernetes: Couldn't get hostname as beat pod name in cluster with error: ", err.Error()) + return defaultNode + } + logp.Info("kubernetes: Using pod name %s and namespace %s to discover kubernetes node", podName, ns) + pod := v1.Pod{} + err = client.Get(context.TODO(), ns, podName, &pod) + if err != nil { + logp.Err("kubernetes: Querying for pod failed with error: ", err.Error()) + return defaultNode + } + logp.Info("kubernetes: Using node %s discovered by in cluster pod node query", pod.Spec.GetNodeName()) + return pod.Spec.GetNodeName() + } + + mid := machineID() + if mid == "" { + logp.Err("kubernetes: Couldn't collect info from any of the files in /etc/machine-id /var/lib/dbus/machine-id") + return defaultNode + } + + nodes := v1.NodeList{} + err := client.List(context.TODO(), k8s.AllNamespaces, &nodes) + if err != nil { + logp.Err("kubernetes: Querying for nodes failed with error: ", err.Error()) + return defaultNode + } + for _, n := range nodes.Items { + if n.GetStatus().GetNodeInfo().GetMachineID() == mid { + logp.Info("kubernetes: Using node %s discovered by machine-id matching", n.GetMetadata().GetName()) + return n.GetMetadata().GetName() + } + } + + logp.Warn("kubernetes: Couldn't discover node, using localhost as default") + return defaultNode +} +// machineID borrowed from cadvisor. +func machineID() string { + for _, file := range []string{ + "/etc/machine-id", + "/var/lib/dbus/machine-id", + } { + id, err := ioutil.ReadFile(file) + if err == nil { + return strings.TrimSpace(string(id)) } } + return "" +} - return host +// inClusterNamespace gets namespace from serviceaccount when beat is in cluster. +// code borrowed from client-go with some changes. 
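A rough end-to-end sketch (not part of the upstream patch) combining the node-discovery and watcher APIs introduced in this change set; the in-cluster flag, sync timeout, and handler bodies are illustrative, and GetKubernetesClient is assumed to keep the (inCluster, kubeConfig) signature documented above:

package main

import (
	"fmt"
	"time"

	"github.com/elastic/beats/libbeat/common/kubernetes"
)

func main() {
	// Assumes the process runs in-cluster with a mounted service account.
	client, err := kubernetes.GetKubernetesClient(true, "")
	if err != nil {
		fmt.Println("client error:", err)
		return
	}

	// Empty host: fall back to the in-cluster pod lookup or machine-id matching.
	node := kubernetes.DiscoverKubernetesNode("", true, client)
	fmt.Println("using node:", node)

	// Watch pods scheduled on the discovered node; &kubernetes.Event{} would watch events instead.
	watcher, err := kubernetes.NewWatcher(client, &kubernetes.Pod{}, kubernetes.WatchOptions{
		SyncTimeout: 10 * time.Second, // illustrative timeout
		Node:        node,
	})
	if err != nil {
		fmt.Println("watcher error:", err)
		return
	}

	watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{
		AddFunc:    func(obj kubernetes.Resource) { fmt.Printf("added: %+v\n", obj.GetMetadata()) },
		DeleteFunc: func(obj kubernetes.Resource) { fmt.Printf("deleted: %+v\n", obj.GetMetadata()) },
	})

	if err := watcher.Start(); err != nil {
		fmt.Println("start error:", err)
		return
	}

	// watcher.Stop() would cancel the watch on shutdown; block here for the sketch.
	select {}
}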
+func inClusterNamespace() (string, error) { + // get namespace associated with the service account token, if available + data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "", err + } + return strings.TrimSpace(string(data)), nil } diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go index c464b738..29b64c25 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go @@ -2,145 +2,185 @@ package kubernetes import ( "context" - "errors" + "fmt" "io" - "sync" "time" - "github.com/elastic/beats/libbeat/common/bus" - "github.com/elastic/beats/libbeat/logp" - "github.com/ericchiang/k8s" - corev1 "github.com/ericchiang/k8s/api/v1" + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/elastic/beats/libbeat/logp" ) // Max back off time for retries const maxBackoff = 30 * time.Second -// Watcher reads Kubernetes events and keeps a list of known pods +func filterByNode(node string) k8s.Option { + return k8s.QueryParam("fieldSelector", "spec.nodeName="+node) +} + +// Watcher watches Kubernetes resources events type Watcher interface { - // Start watching Kubernetes API for new containers + // Start watching Kubernetes API for new events after resources were listed Start() error - // Stop watching Kubernetes API for new containers + // Stop watching Kubernetes API for new events Stop() - // ListenStart returns a bus listener to receive pod started events, with a `pod` key holding it - ListenStart() bus.Listener - - // ListenUpdate returns a bus listener to receive pod updated events, with a `pod` key holding it - ListenUpdate() bus.Listener + // AddEventHandler add event handlers for corresponding event type watched + AddEventHandler(ResourceEventHandler) +} - // ListenStop returns a bus listener to receive pod stopped events, with a `pod` key holding it - ListenStop() bus.Listener +// WatchOptions controls watch behaviors +type WatchOptions struct { + // SyncTimeout is a timeout for listing historical resources + SyncTimeout time.Duration + // Node is used for filtering watched resource to given node, use "" for all nodes + Node string + // Namespace is used for filtering watched resource to given namespace, use "" for all namespaces + Namespace string } -type podWatcher struct { - sync.RWMutex - client Client - syncPeriod time.Duration - cleanupTimeout time.Duration - nodeFilter k8s.Option +type watcher struct { + client *k8s.Client + options WatchOptions lastResourceVersion string ctx context.Context stop context.CancelFunc - bus bus.Bus - pods map[string]*Pod // pod id -> Pod - deleted map[string]time.Time // deleted annotations key -> last access time -} - -// Client for Kubernetes interface -type Client interface { - ListPods(ctx context.Context, namespace string, options ...k8s.Option) (*corev1.PodList, error) - WatchPods(ctx context.Context, namespace string, options ...k8s.Option) (*k8s.CoreV1PodWatcher, error) + resourceList k8s.ResourceList + k8sResourceFactory func() k8s.Resource + resourceFactory func() Resource + items func() []k8s.Resource + handler ResourceEventHandler } -// NewWatcher initializes the watcher client to provide a local state of -// pods from the cluster (filtered to the given host) -func NewWatcher(client Client, syncPeriod, cleanupTimeout time.Duration, host string) Watcher { +// NewWatcher initializes the 
watcher client to provide a events handler for +// resource from the cluster (filtered to the given node) +func NewWatcher(client *k8s.Client, resource Resource, options WatchOptions) (Watcher, error) { ctx, cancel := context.WithCancel(context.Background()) - return &podWatcher{ + w := &watcher{ client: client, - cleanupTimeout: cleanupTimeout, - syncPeriod: syncPeriod, - nodeFilter: k8s.QueryParam("fieldSelector", "spec.nodeName="+host), + options: options, lastResourceVersion: "0", ctx: ctx, stop: cancel, - pods: make(map[string]*Pod), - deleted: make(map[string]time.Time), - bus: bus.New("kubernetes"), } + switch resource.(type) { + // add resource type which you want to support watching here + // note that you might need add Register like event in types.go init func + // if types were not registered by k8s library + // k8s.Register("", "v1", "events", true, &v1.Event{}) + // k8s.RegisterList("", "v1", "events", true, &v1.EventList{}) + case *Pod: + list := &v1.PodList{} + w.resourceList = list + w.k8sResourceFactory = func() k8s.Resource { return &v1.Pod{} } + w.resourceFactory = func() Resource { return &Pod{} } + w.items = func() []k8s.Resource { + rs := make([]k8s.Resource, 0, len(list.Items)) + for _, item := range list.Items { + rs = append(rs, item) + } + return rs + } + case *Event: + list := &v1.EventList{} + w.resourceList = list + w.k8sResourceFactory = func() k8s.Resource { return &v1.Event{} } + w.resourceFactory = func() Resource { return &Event{} } + w.items = func() []k8s.Resource { + rs := make([]k8s.Resource, 0, len(list.Items)) + for _, item := range list.Items { + rs = append(rs, item) + } + return rs + } + default: + return nil, fmt.Errorf("unsupported resource type for watching %T", resource) + } + return w, nil +} + +func (w *watcher) AddEventHandler(h ResourceEventHandler) { + w.handler = h +} + +func (w *watcher) buildOpts() []k8s.Option { + options := []k8s.Option{k8s.ResourceVersion(w.lastResourceVersion)} + if w.options.Node != "" { + options = append(options, filterByNode(w.options.Node)) + } + return options } -func (p *podWatcher) syncPods() error { - logp.Info("kubernetes: %s", "Performing a pod sync") - pods, err := p.client.ListPods( - p.ctx, - "", - p.nodeFilter, - k8s.ResourceVersion(p.lastResourceVersion)) +func (w *watcher) sync() error { + ctx, cancel := context.WithTimeout(w.ctx, w.options.SyncTimeout) + defer cancel() + logp.Info("kubernetes: Performing a resource sync for %T", w.resourceList) + err := w.client.List(ctx, w.options.Namespace, w.resourceList, w.buildOpts()...) 
if err != nil { + logp.Err("kubernetes: Performing a resource sync err %s for %T", err.Error(), w.resourceList) return err } - p.Lock() - for _, apiPod := range pods.Items { - pod := GetPod(apiPod) - p.pods[pod.Metadata.UID] = pod + for _, item := range w.items() { + w.onAdd(item) } - p.Unlock() - - // Emit all start events (avoid blocking if the bus get's blocked) - go func() { - for _, pod := range p.pods { - p.bus.Publish(bus.Event{ - "start": true, - "pod": pod, - }) - } - }() // Store last version - p.lastResourceVersion = pods.Metadata.GetResourceVersion() + w.lastResourceVersion = w.resourceList.GetMetadata().GetResourceVersion() - logp.Info("kubernetes: %s", "Pod sync done") + logp.Info("kubernetes: %s", "Resource sync done") return nil } +func (w *watcher) onAdd(obj k8s.Resource) { + w.handler.OnAdd(resourceConverter(obj, w.resourceFactory())) +} + +func (w *watcher) onUpdate(obj k8s.Resource) { + w.handler.OnUpdate(resourceConverter(obj, w.resourceFactory())) +} + +func (w *watcher) onDelete(obj k8s.Resource) { + w.handler.OnDelete(resourceConverter(obj, w.resourceFactory())) +} + // Start watching pods -func (p *podWatcher) Start() error { +func (w *watcher) Start() error { // Make sure that events don't flow into the annotator before informer is fully set up // Sync initial state: - synced := make(chan struct{}) - go func() { - p.syncPods() - close(synced) - }() - - select { - case <-time.After(p.syncPeriod): - p.Stop() - return errors.New("Timeout while doing initial Kubernetes pods sync") - case <-synced: - // Watch for new changes - go p.watch() - go p.cleanupWorker() - return nil + err := w.sync() + if err != nil { + w.Stop() + return err } + + // Watch for new changes + go w.watch() + + return nil } -func (p *podWatcher) watch() { +func (w *watcher) watch() { // Failures counter, do exponential backoff on retries var failures uint for { - logp.Info("kubernetes: %s", "Watching API for pod events") - watcher, err := p.client.WatchPods(p.ctx, "", p.nodeFilter, k8s.ResourceVersion(p.lastResourceVersion)) + select { + case <-w.ctx.Done(): + logp.Info("kubernetes: %s", "Watching API for resource events stopped") + return + default: + } + + logp.Info("kubernetes: %s", "Watching API for resource events") + + watcher, err := w.client.Watch(w.ctx, w.options.Namespace, w.k8sResourceFactory(), w.buildOpts()...) if err != nil { - //watch pod failures should be logged and gracefully failed over as metadata retrieval + //watch failures should be logged and gracefully failed over as metadata retrieval //should never stop. 
logp.Err("kubernetes: Watching API error %v", err) backoff(failures) @@ -149,62 +189,36 @@ func (p *podWatcher) watch() { } for { - _, apiPod, err := watcher.Next() + r := w.k8sResourceFactory() + eventType, err := watcher.Next(r) if err != nil { logp.Err("kubernetes: Watching API error %v", err) watcher.Close() - if !(err == io.EOF || err == io.ErrUnexpectedEOF) { // This is an error event which can be recovered by moving to the latest resource verison logp.Info("kubernetes: Ignoring event, moving to most recent resource version") - p.lastResourceVersion = "" + w.lastResourceVersion = "" } break } - - // Update last resource version and reset failure counter - p.lastResourceVersion = apiPod.Metadata.GetResourceVersion() failures = 0 - - pod := GetPod(apiPod) - if pod.Metadata.DeletionTimestamp != "" { - // Pod deleted - p.Lock() - p.deleted[pod.Metadata.UID] = time.Now() - p.Unlock() - - } else { - if p.Pod(pod.Metadata.UID) != nil { - // Pod updated - p.Lock() - p.pods[pod.Metadata.UID] = pod - // un-delete if it's flagged (in case of update or recreation) - delete(p.deleted, pod.Metadata.UID) - p.Unlock() - - p.bus.Publish(bus.Event{ - "update": true, - "pod": pod, - }) - - } else { - // Pod added - p.Lock() - p.pods[pod.Metadata.UID] = pod - // un-delete if it's flagged (in case of update or recreation) - delete(p.deleted, pod.Metadata.UID) - p.Unlock() - - p.bus.Publish(bus.Event{ - "start": true, - "pod": pod, - }) - } + switch eventType { + case k8s.EventAdded: + w.onAdd(r) + case k8s.EventModified: + w.onUpdate(r) + case k8s.EventDeleted: + w.onDelete(r) + default: + logp.Err("kubernetes: Watching API error with event type %s", eventType) } } } } +func (w *watcher) Stop() { + w.stop() +} func backoff(failures uint) { wait := 1 << failures * time.Second if wait > maxBackoff { @@ -212,79 +226,3 @@ func backoff(failures uint) { } time.Sleep(wait) } - -// Check annotations flagged as deleted for their last access time, fully delete -// the ones older than p.cleanupTimeout -func (p *podWatcher) cleanupWorker() { - for { - // Wait a full period - time.Sleep(p.cleanupTimeout) - - select { - case <-p.ctx.Done(): - return - default: - // Check entries for timeout - var toDelete []string - timeout := time.Now().Add(-p.cleanupTimeout) - p.RLock() - for key, lastSeen := range p.deleted { - if lastSeen.Before(timeout) { - logp.Debug("kubernetes", "Removing container %s after cool down timeout", key) - toDelete = append(toDelete, key) - } - } - p.RUnlock() - - // Delete timed out entries: - for _, key := range toDelete { - p.bus.Publish(bus.Event{ - "stop": true, - "pod": p.Pod(key), - }) - } - - p.Lock() - for _, key := range toDelete { - delete(p.deleted, key) - delete(p.pods, key) - } - p.Unlock() - } - } -} - -func (p *podWatcher) Pod(uid string) *Pod { - p.RLock() - pod := p.pods[uid] - _, deleted := p.deleted[uid] - p.RUnlock() - - // Update deleted last access - if deleted { - p.Lock() - p.deleted[uid] = time.Now() - p.Unlock() - } - - return pod -} - -// ListenStart returns a bus listener to receive pod started events, with a `pod` key holding it -func (p *podWatcher) ListenStart() bus.Listener { - return p.bus.Subscribe("start") -} - -// ListenStop returns a bus listener to receive pod stopped events, with a `pod` key holding it -func (p *podWatcher) ListenStop() bus.Listener { - return p.bus.Subscribe("stop") -} - -// ListenUpdate returns a bus listener to receive updated pod events, with a `pod` key holding it -func (p *podWatcher) ListenUpdate() bus.Listener { - return 
p.bus.Subscribe("update") -} - -func (p *podWatcher) Stop() { - p.stop() -} diff --git a/vendor/github.com/elastic/beats/libbeat/common/mapstr.go b/vendor/github.com/elastic/beats/libbeat/common/mapstr.go index af49e9cb..508bd46c 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/mapstr.go +++ b/vendor/github.com/elastic/beats/libbeat/common/mapstr.go @@ -80,20 +80,28 @@ func deepUpdateValue(old interface{}, val MapStr) interface{} { // Delete deletes the given key from the map. func (m MapStr) Delete(key string) error { - _, err := walkMap(key, m, opDelete) - return err + k, d, _, found, err := mapFind(key, m, false) + if err != nil { + return err + } + if !found { + return ErrKeyNotFound + } + + delete(d, k) + return nil } // CopyFieldsTo copies the field specified by key to the given map. It will // overwrite the key if it exists. An error is returned if the key does not // exist in the source map. func (m MapStr) CopyFieldsTo(to MapStr, key string) error { - v, err := walkMap(key, m, opGet) + v, err := m.GetValue(key) if err != nil { return err } - _, err = walkMap(key, to, mapStrOperation{putOperation{v}, true}) + _, err = to.Put(key, v) return err } @@ -115,18 +123,21 @@ func (m MapStr) Clone() MapStr { // HasKey returns true if the key exist. If an error occurs then false is // returned with a non-nil error. func (m MapStr) HasKey(key string) (bool, error) { - hasKey, err := walkMap(key, m, opHasKey) - if err != nil { - return false, err - } - - return hasKey.(bool), nil + _, _, _, hasKey, err := mapFind(key, m, false) + return hasKey, err } // GetValue gets a value from the map. If the key does not exist then an error // is returned. func (m MapStr) GetValue(key string) (interface{}, error) { - return walkMap(key, m, opGet) + _, _, v, found, err := mapFind(key, m, false) + if err != nil { + return nil, err + } + if !found { + return nil, ErrKeyNotFound + } + return v, nil } // Put associates the specified value with the specified key. If the map @@ -137,7 +148,14 @@ func (m MapStr) GetValue(key string) (interface{}, error) { // If you need insert keys containing dots then you must use bracket notation // to insert values (e.g. m[key] = value). func (m MapStr) Put(key string, value interface{}) (interface{}, error) { - return walkMap(key, m, mapStrOperation{putOperation{value}, true}) + // XXX `safemapstr.Put` mimics this implementation, both should be updated to have similar behavior + k, d, old, _, err := mapFind(key, m, true) + if err != nil { + return nil, err + } + + d[k] = value + return old, nil } // StringToPrint returns the MapStr as pretty JSON. @@ -315,97 +333,50 @@ func tryToMapStr(v interface{}) (MapStr, bool) { } } -// walkMap walks the data MapStr to arrive at the value specified by the key. -// The key is expressed in dot-notation (eg. x.y.z). When the key is found then -// the given mapStrOperation is invoked. -func walkMap(key string, data MapStr, op mapStrOperation) (interface{}, error) { - var err error - keyParts := strings.Split(key, ".") - - // Walk maps until reaching a leaf object. - m := data - for i, k := range keyParts[0 : len(keyParts)-1] { - v, exists := m[k] +// mapFind iterates a MapStr based on a the given dotted key, finding the final +// subMap and subKey to operate on. +// An error is returned if some intermediate is no map or the key doesn't exist. +// If createMissing is set to true, intermediate maps are created. +// The final map and un-dotted key to run further operations on are returned in +// subKey and subMap. 
The subMap already contains a value for subKey, the +// present flag is set to true and the oldValue return will hold +// the original value. +func mapFind( + key string, + data MapStr, + createMissing bool, +) (subKey string, subMap MapStr, oldValue interface{}, present bool, err error) { + // XXX `safemapstr.mapFind` mimics this implementation, both should be updated to have similar behavior + + for { + // Fast path, key is present as is. + if v, exists := data[key]; exists { + return key, data, v, true, nil + } + + idx := strings.IndexRune(key, '.') + if idx < 0 { + return key, data, nil, false, nil + } + + k := key[:idx] + d, exists := data[k] if !exists { - if op.CreateMissingKeys { - newMap := MapStr{} - m[k] = newMap - m = newMap - continue + if createMissing { + d = MapStr{} + data[k] = d + } else { + return "", nil, nil, false, ErrKeyNotFound } - return nil, errors.Wrapf(ErrKeyNotFound, "key=%v", strings.Join(keyParts[0:i+1], ".")) } - m, err = toMapStr(v) + v, err := toMapStr(d) if err != nil { - return nil, errors.Wrapf(err, "key=%v", strings.Join(keyParts[0:i+1], ".")) + return "", nil, nil, false, err } - } - // Execute the mapStrOperator on the leaf object. - v, err := op.Do(keyParts[len(keyParts)-1], m) - if err != nil { - return nil, errors.Wrapf(err, "key=%v", key) + // advance to sub-map + key = key[idx+1:] + data = v } - - return v, nil -} - -// mapStrOperation types - -// These are static mapStrOperation types that store no state and are reusable. -var ( - opDelete = mapStrOperation{deleteOperation{}, false} - opGet = mapStrOperation{getOperation{}, false} - opHasKey = mapStrOperation{hasKeyOperation{}, false} -) - -// mapStrOperation represents an operation that can be applied to map. -type mapStrOperation struct { - mapStrOperator - CreateMissingKeys bool -} - -// mapStrOperator is an interface with a single function that performs an -// operation on a MapStr. 
-type mapStrOperator interface { - Do(key string, data MapStr) (value interface{}, err error) -} - -type deleteOperation struct{} - -func (op deleteOperation) Do(key string, data MapStr) (interface{}, error) { - value, found := data[key] - if !found { - return nil, ErrKeyNotFound - } - delete(data, key) - return value, nil -} - -type getOperation struct{} - -func (op getOperation) Do(key string, data MapStr) (interface{}, error) { - value, found := data[key] - if !found { - return nil, ErrKeyNotFound - } - return value, nil -} - -type hasKeyOperation struct{} - -func (op hasKeyOperation) Do(key string, data MapStr) (interface{}, error) { - _, found := data[key] - return found, nil -} - -type putOperation struct { - Value interface{} -} - -func (op putOperation) Do(key string, data MapStr) (interface{}, error) { - existingValue, _ := data[key] - data[key] = op.Value - return existingValue, nil } diff --git a/vendor/github.com/elastic/beats/libbeat/common/mapstr_test.go b/vendor/github.com/elastic/beats/libbeat/common/mapstr_test.go index ec45b1a5..ab321d13 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/mapstr_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/mapstr_test.go @@ -60,6 +60,16 @@ func TestMapStrDeepUpdate(t *testing.T) { MapStr{"a": 1}, MapStr{"a": 1}, }, + { + MapStr{"a.b": 1}, + MapStr{"a": 1}, + MapStr{"a": 1, "a.b": 1}, + }, + { + MapStr{"a": 1}, + MapStr{"a.b": 1}, + MapStr{"a": 1, "a.b": 1}, + }, } for i, test := range tests { @@ -173,7 +183,9 @@ func TestHasKey(t *testing.T) { "c31": 1, "c32": 2, }, + "c4.f": 19, }, + "d.f": 1, } hasKey, err := m.HasKey("c.c2") @@ -191,6 +203,14 @@ func TestHasKey(t *testing.T) { hasKey, err = m.HasKey("dd") assert.Equal(nil, err) assert.Equal(false, hasKey) + + hasKey, err = m.HasKey("d.f") + assert.Equal(nil, err) + assert.Equal(true, hasKey) + + hasKey, err = m.HasKey("c.c4.f") + assert.Equal(nil, err) + assert.Equal(true, hasKey) } func TestMapStrPut(t *testing.T) { @@ -226,6 +246,76 @@ func TestMapStrPut(t *testing.T) { assert.Equal(t, MapStr{"subMap": MapStr{"newMap": MapStr{"a": 1}}}, m) } +func TestMapStrGetValue(t *testing.T) { + + tests := []struct { + input MapStr + key string + output interface{} + error bool + }{ + { + MapStr{"a": 1}, + "a", + 1, + false, + }, + { + MapStr{"a": MapStr{"b": 1}}, + "a", + MapStr{"b": 1}, + false, + }, + { + MapStr{"a": MapStr{"b": 1}}, + "a.b", + 1, + false, + }, + { + MapStr{"a": MapStr{"b.c": 1}}, + "a", + MapStr{"b.c": 1}, + false, + }, + { + MapStr{"a": MapStr{"b.c": 1}}, + "a.b", + nil, + true, + }, + { + MapStr{"a.b": MapStr{"c": 1}}, + "a.b", + MapStr{"c": 1}, + false, + }, + { + MapStr{"a.b": MapStr{"c": 1}}, + "a.b.c", + nil, + true, + }, + { + MapStr{"a": MapStr{"b.c": 1}}, + "a.b.c", + 1, + false, + }, + } + + for _, test := range tests { + v, err := test.input.GetValue(test.key) + if test.error { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.output, v) + + } +} + func TestClone(t *testing.T) { assert := assert.New(t) @@ -569,3 +659,78 @@ func BenchmarkMapStrLogging(b *testing.B) { logger.Infow("test", "mapstr", m) } } + +func BenchmarkWalkMap(b *testing.B) { + + globalM := MapStr{ + "hello": MapStr{ + "world": MapStr{ + "ok": "test", + }, + }, + } + + b.Run("Get", func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + globalM.GetValue("test.world.ok") + } + }) + + b.Run("Put", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + m := MapStr{ + "hello": MapStr{ + "world": MapStr{ + "ok": 
"test", + }, + }, + } + + m.Put("hello.world.new", 17) + } + }) + + b.Run("PutMissing", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + m := MapStr{} + + m.Put("a.b.c", 17) + } + }) + + b.Run("HasKey", func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + globalM.HasKey("hello.world.ok") + globalM.HasKey("hello.world.no_ok") + } + }) + + b.Run("HasKeyFirst", func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + globalM.HasKey("hello") + } + }) + + b.Run("Delete", func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m := MapStr{ + "hello": MapStr{ + "world": MapStr{ + "ok": "test", + }, + }, + } + m.Put("hello.world.test", 17) + } + }) +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/safemapstr/safemapstr.go b/vendor/github.com/elastic/beats/libbeat/common/safemapstr/safemapstr.go new file mode 100644 index 00000000..80b4268b --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/safemapstr/safemapstr.go @@ -0,0 +1,96 @@ +package safemapstr + +import ( + "strings" + + "github.com/elastic/beats/libbeat/common" +) + +const alternativeKey = "value" + +// Put This method implements a way to put dotted keys into a MapStr while +// ensuring they don't override each other. For example: +// +// a := MapStr{} +// safemapstr.Put(a, "com.docker.swarm.task", "x") +// safemapstr.Put(a, "com.docker.swarm.task.id", 1) +// safemapstr.Put(a, "com.docker.swarm.task.name", "foobar") +// +// Will result in `{"com":{"docker":{"swarm":{"task":{"id":1,"name":"foobar","value":"x"}}}}}` +// +// Put detects this scenario and renames the common base key, by appending +// `.value` +func Put(data common.MapStr, key string, value interface{}) error { + // XXX This implementation mimics `common.MapStr.Put`, both should be updated to have similar behavior + + d, k := mapFind(data, key, alternativeKey) + d[k] = value + return nil +} + +// mapFind walk the map based on the given dotted key and returns the final map +// and key to operate on. This function adds intermediate maps, if the key is +// missing from the original map. + +// mapFind iterates a MapStr based on the given dotted key, finding the final +// subMap and subKey to operate on. +// If a key is already used, but the used value is no map, an intermediate map will be inserted and +// the old value will be stored using the 'alternativeKey' in a new map. +// If the old value found under key is already an dictionary, subMap will be +// the old value and subKey will be set to alternativeKey. +func mapFind(data common.MapStr, key, alternativeKey string) (subMap common.MapStr, subKey string) { + // XXX This implementation mimics `common.mapFind`, both should be updated to have similar behavior + + for { + if oldValue, exists := data[key]; exists { + if oldMap, ok := tryToMapStr(oldValue); ok { + return oldMap, alternativeKey + } + return data, key + } + + idx := strings.IndexRune(key, '.') + if idx < 0 { + // if old value exists and is a dictionary, return the old dictionary and + // make sure we store the new value using the 'alternativeKey' + if oldValue, exists := data[key]; exists { + if oldMap, ok := tryToMapStr(oldValue); ok { + return oldMap, alternativeKey + } + } + + return data, key + } + + // Check if first sub-key exists. Create an intermediate map if not. + k := key[:idx] + d, exists := data[k] + if !exists { + d = common.MapStr{} + data[k] = d + } + + // store old value under 'alternativeKey' if the old value is no map. + // Do not overwrite old value. 
+ v, ok := tryToMapStr(d) + if !ok { + v = common.MapStr{alternativeKey: d} + data[k] = v + } + + // advance into sub-map + key = key[idx+1:] + data = v + } +} + +func tryToMapStr(v interface{}) (common.MapStr, bool) { + switch m := v.(type) { + case common.MapStr: + return m, true + case map[string]interface{}: + return common.MapStr(m), true + default: + return nil, false + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/safemapstr/safemapstr_test.go b/vendor/github.com/elastic/beats/libbeat/common/safemapstr/safemapstr_test.go new file mode 100644 index 00000000..e7279cd9 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/safemapstr/safemapstr_test.go @@ -0,0 +1,65 @@ +package safemapstr + +import ( + "testing" + + "github.com/elastic/beats/libbeat/common" + + "github.com/stretchr/testify/assert" +) + +func TestPut(t *testing.T) { + m := common.MapStr{ + "subMap": common.MapStr{ + "a": 1, + }, + } + + // Add new value to the top-level. + err := Put(m, "a", "ok") + assert.NoError(t, err) + assert.Equal(t, common.MapStr{"a": "ok", "subMap": common.MapStr{"a": 1}}, m) + + // Add new value to subMap. + err = Put(m, "subMap.b", 2) + assert.NoError(t, err) + assert.Equal(t, common.MapStr{"a": "ok", "subMap": common.MapStr{"a": 1, "b": 2}}, m) + + // Overwrite a value in subMap. + err = Put(m, "subMap.a", 2) + assert.NoError(t, err) + assert.Equal(t, common.MapStr{"a": "ok", "subMap": common.MapStr{"a": 2, "b": 2}}, m) + + // Add value to map that does not exist. + m = common.MapStr{} + err = Put(m, "subMap.newMap.a", 1) + assert.NoError(t, err) + assert.Equal(t, common.MapStr{"subMap": common.MapStr{"newMap": common.MapStr{"a": 1}}}, m) +} + +func TestPutRenames(t *testing.T) { + assert := assert.New(t) + + a := common.MapStr{} + Put(a, "com.docker.swarm.task", "x") + Put(a, "com.docker.swarm.task.id", 1) + Put(a, "com.docker.swarm.task.name", "foobar") + assert.Equal(common.MapStr{"com": common.MapStr{"docker": common.MapStr{"swarm": common.MapStr{ + "task": common.MapStr{ + "id": 1, + "name": "foobar", + "value": "x", + }}}}}, a) + + // order is not important: + b := common.MapStr{} + Put(b, "com.docker.swarm.task.id", 1) + Put(b, "com.docker.swarm.task.name", "foobar") + Put(b, "com.docker.swarm.task", "x") + assert.Equal(common.MapStr{"com": common.MapStr{"docker": common.MapStr{"swarm": common.MapStr{ + "task": common.MapStr{ + "id": 1, + "name": "foobar", + "value": "x", + }}}}}, b) +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface.go b/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface.go index d3c51eea..a903f5c0 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface.go +++ b/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface.go @@ -193,9 +193,43 @@ func toInteger(key string, data map[string]interface{}) (interface{}, error) { if err == nil { return int64(f64), nil } - return 0, fmt.Errorf("Expected integer, found json.Number (%v) that cannot be converted", num) + return 0, fmt.Errorf("expected integer, found json.Number (%v) that cannot be converted", num) default: - return 0, fmt.Errorf("Expected integer, found %T", emptyIface) + return 0, fmt.Errorf("expected integer, found %T", emptyIface) + } +} + +// Float creates a Conv object for converting floats. Acceptable input +// types are int64, int, and float64. 
+func Float(key string, opts ...schema.SchemaOption) schema.Conv { + return schema.SetOptions(schema.Conv{Key: key, Func: toFloat}, opts) +} + +func toFloat(key string, data map[string]interface{}) (interface{}, error) { + emptyIface, exists := data[key] + if !exists { + return 0, fmt.Errorf("key %s not found", key) + } + switch emptyIface.(type) { + case float64: + return emptyIface.(float64), nil + case int: + return float64(emptyIface.(int)), nil + case int64: + return float64(emptyIface.(int64)), nil + case json.Number: + num := emptyIface.(json.Number) + i64, err := num.Float64() + if err == nil { + return i64, nil + } + f64, err := num.Float64() + if err == nil { + return f64, nil + } + return 0, fmt.Errorf("expected float, found json.Number (%v) that cannot be converted", num) + default: + return 0, fmt.Errorf("expected float, found %T", emptyIface) } } diff --git a/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface_test.go b/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface_test.go index 3671e1bf..d2bc20e4 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/schema/mapstriface/mapstriface_test.go @@ -17,13 +17,16 @@ func TestConversions(t *testing.T) { cTs := common.Time{} input := map[string]interface{}{ - "testString": "hello", - "testInt": 42, - "testIntFromFloat": 42.0, - "testIntFromInt32": int32(32), - "testIntFromInt64": int64(42), - "testJsonNumber": json.Number("3910564293633576924"), - "testBool": true, + "testString": "hello", + "testInt": 42, + "testIntFromFloat": 42.2, + "testFloat": 42.7, + "testFloatFromInt": 43, + "testIntFromInt32": int32(32), + "testIntFromInt64": int64(42), + "testJsonNumber": json.Number("3910564293633576924"), + "testJsonNumberFloat": json.Number("43.7"), + "testBool": true, "testObj": map[string]interface{}{ "testObjString": "hello, object", }, @@ -49,7 +52,10 @@ func TestConversions(t *testing.T) { "test_int": Int("testInt"), "test_int_from_float": Int("testIntFromFloat"), "test_int_from_int64": Int("testIntFromInt64"), + "test_float": Float("testFloat"), + "test_float_from_int": Float("testFloatFromInt"), "test_int_from_json": Int("testJsonNumber"), + "test_float_from_json": Float("testJsonNumberFloat"), "test_string_from_num": StrFromNum("testIntFromInt32"), "test_string_from_json_num": StrFromNum("testJsonNumber"), "test_bool": Bool("testBool"), @@ -74,7 +80,10 @@ func TestConversions(t *testing.T) { "test_int": int64(42), "test_int_from_float": int64(42), "test_int_from_int64": int64(42), + "test_float": float64(42.7), + "test_float_from_int": float64(43), "test_int_from_json": int64(3910564293633576924), + "test_float_from_json": float64(43.7), "test_string_from_num": "32", "test_string_from_json_num": "3910564293633576924", "test_bool": true, diff --git a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_array_dedot.json b/vendor/github.com/elastic/beats/libbeat/common/testdata/json_array_dedot.json deleted file mode 100644 index 08b6e863..00000000 --- a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_array_dedot.json +++ /dev/null @@ -1,5 +0,0 @@ -[ - {"key_with_dot_1":"value1_1"}, - {"key_without_dot_2":"value1_2"}, - {"key_with_multiple_dots_3": {"key_with_dot_2":"value2_1"}} -] diff --git a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_array_with_dots.json b/vendor/github.com/elastic/beats/libbeat/common/testdata/json_array_with_dots.json 
deleted file mode 100644 index d0e4db99..00000000 --- a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_array_with_dots.json +++ /dev/null @@ -1,5 +0,0 @@ -[ - {"key_with_dot.1":"value1_1"}, - {"key_without_dot_2":"value1_2"}, - {"key_with_multiple.dots.3": {"key_with_dot.2":"value2_1"}} -] diff --git a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_map_dedot.json b/vendor/github.com/elastic/beats/libbeat/common/testdata/json_map_dedot.json deleted file mode 100644 index 37d30143..00000000 --- a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_map_dedot.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "key_without_dot_l1": { - "key_with_dot_l2": 1, - "key_with_multiple_dots_l2": 2, - "key_without_dot_l2": { - "key_with_dot_l3": 3, - "key_with_multiple_dots_l3": 4 - } - } -} diff --git a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_map_with_dots.json b/vendor/github.com/elastic/beats/libbeat/common/testdata/json_map_with_dots.json deleted file mode 100644 index e7fbde11..00000000 --- a/vendor/github.com/elastic/beats/libbeat/common/testdata/json_map_with_dots.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "key_without_dot_l1": { - "key_with_dot.l2": 1, - "key.with.multiple.dots_l2": 2, - "key_without_dot_l2": { - "key_with_dot.l3": 3, - "key.with.multiple.dots_l3": 4 - } - } -} diff --git a/vendor/github.com/elastic/beats/libbeat/dashboards/config.go b/vendor/github.com/elastic/beats/libbeat/dashboards/config.go index 5c6db62c..ce7d09b1 100644 --- a/vendor/github.com/elastic/beats/libbeat/dashboards/config.go +++ b/vendor/github.com/elastic/beats/libbeat/dashboards/config.go @@ -1,5 +1,7 @@ package dashboards +import "time" + type Config struct { Enabled bool `config:"enabled"` KibanaIndex string `config:"kibana_index"` @@ -11,11 +13,23 @@ type Config struct { OnlyDashboards bool `config:"only_dashboards"` OnlyIndex bool `config:"only_index"` AlwaysKibana bool `config:"always_kibana"` + Retry *Retry `config:"retry"` +} + +type Retry struct { + Enabled bool `config:"enabled"` + Interval time.Duration `config:"interval"` + Maximum uint `config:"maximum"` } var defaultConfig = Config{ KibanaIndex: ".kibana", AlwaysKibana: false, + Retry: &Retry{ + Enabled: false, + Interval: time.Second, + Maximum: 0, + }, } var ( defaultDirectory = "kibana" diff --git a/vendor/github.com/elastic/beats/libbeat/dashboards/dashboards.go b/vendor/github.com/elastic/beats/libbeat/dashboards/dashboards.go index d5556c50..b22761dd 100644 --- a/vendor/github.com/elastic/beats/libbeat/dashboards/dashboards.go +++ b/vendor/github.com/elastic/beats/libbeat/dashboards/dashboards.go @@ -1,6 +1,7 @@ package dashboards import ( + "context" "errors" "fmt" "path/filepath" @@ -24,6 +25,7 @@ const ( // via the kibana dashboard loader plugin. For older versions of the Elastic Stack // we write the dashboards directly into the .kibana index. 
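A hedged sketch (not part of the upstream patch) of calling ImportDashboards with the new context argument and retry settings whose signature is updated just below; the beat name, paths, Kibana host, and the Kibana-only setup are illustrative assumptions:

package main

import (
	"context"
	"fmt"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/dashboards"
)

func mustConfig(v map[string]interface{}) *common.Config {
	c, err := common.NewConfigFrom(v)
	if err != nil {
		panic(err)
	}
	return c
}

func main() {
	// Hypothetical Kibana endpoint; the key mirrors the usual setup.kibana settings.
	kibanaCfg := mustConfig(map[string]interface{}{"host": "localhost:5601"})

	// Retry keys mirror the Config/Retry structs added in this patch.
	dashCfg := mustConfig(map[string]interface{}{
		"enabled": true,
		"retry": map[string]interface{}{
			"enabled":  true,
			"interval": "5s",
			"maximum":  3,
		},
	})

	// Beat name and home path are illustrative; esConfig is left nil on the
	// assumption that a Kibana-only import is acceptable here (a real Beat also
	// passes its Elasticsearch output config), and the nil msgOutputter is
	// assumed to be optional so progress goes to the logger instead.
	err := dashboards.ImportDashboards(context.Background(), "mybeat", "localhost",
		"/usr/share/mybeat", kibanaCfg, nil, dashCfg, nil)
	if err != nil {
		fmt.Println("dashboard import failed:", err)
	}
}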
func ImportDashboards( + ctx context.Context, beatName, hostname, homePath string, kibanaConfig, esConfig, dashboardsConfig *common.Config, msgOutputter MessageOutputter, @@ -102,16 +104,16 @@ func ImportDashboards( case importViaES: return ImportDashboardsViaElasticsearch(esLoader) case importViaKibana: - return setupAndImportDashboardsViaKibana(hostname, kibanaConfig, &dashConfig, msgOutputter) + return setupAndImportDashboardsViaKibana(ctx, hostname, kibanaConfig, &dashConfig, msgOutputter) default: return errors.New("Elasticsearch or Kibana configuration missing for loading dashboards.") } } -func setupAndImportDashboardsViaKibana(hostname string, kibanaConfig *common.Config, +func setupAndImportDashboardsViaKibana(ctx context.Context, hostname string, kibanaConfig *common.Config, dashboardsConfig *Config, msgOutputter MessageOutputter) error { - kibanaLoader, err := NewKibanaLoader(kibanaConfig, dashboardsConfig, hostname, msgOutputter) + kibanaLoader, err := NewKibanaLoader(ctx, kibanaConfig, dashboardsConfig, hostname, msgOutputter) if err != nil { return fmt.Errorf("fail to create the Kibana loader: %v", err) } diff --git a/vendor/github.com/elastic/beats/libbeat/dashboards/importer.go b/vendor/github.com/elastic/beats/libbeat/dashboards/importer.go index 2f85aae9..7256b3d2 100644 --- a/vendor/github.com/elastic/beats/libbeat/dashboards/importer.go +++ b/vendor/github.com/elastic/beats/libbeat/dashboards/importer.go @@ -71,7 +71,7 @@ func (imp Importer) ImportDashboard(file string) error { } func (imp Importer) ImportFile(fileType string, file string) error { - imp.loader.statusMsg("Import %s from %s\n", fileType, file) + imp.loader.statusMsg("Import %s from %s", fileType, file) if fileType == "dashboard" { return imp.loader.ImportDashboard(file) @@ -119,6 +119,16 @@ func (imp Importer) unzip(archive, target string) error { unzipFile := func(file *zip.File) error { filePath := filepath.Join(target, file.Name) + // check that the resulting file path is indeed under target + // Note that Rel calls Clean. + relPath, err := filepath.Rel(target, filePath) + if err != nil { + return err + } + if strings.HasPrefix(filepath.ToSlash(relPath), "../") { + return fmt.Errorf("Zip file contains files outside of the target directory: %s", relPath) + } + if file.FileInfo().IsDir() { return os.MkdirAll(filePath, file.Mode()) } @@ -268,8 +278,10 @@ func (imp Importer) ImportKibanaDir(dir string) error { if !imp.cfg.OnlyDashboards { check = append(check, "index-pattern") } + wantDashboards := false if !imp.cfg.OnlyIndex { check = append(check, "dashboard") + wantDashboards = true } types := []string{} @@ -296,7 +308,7 @@ func (imp Importer) ImportKibanaDir(dir string) error { } } - if !importDashboards { + if wantDashboards && !importDashboards { return fmt.Errorf("No dashboards to import. 
Please make sure the %s directory contains a dashboard directory.", dir) } diff --git a/vendor/github.com/elastic/beats/libbeat/dashboards/kibana_loader.go b/vendor/github.com/elastic/beats/libbeat/dashboards/kibana_loader.go index 065a0a8b..d4922b94 100644 --- a/vendor/github.com/elastic/beats/libbeat/dashboards/kibana_loader.go +++ b/vendor/github.com/elastic/beats/libbeat/dashboards/kibana_loader.go @@ -1,10 +1,12 @@ package dashboards import ( + "context" "encoding/json" "fmt" "io/ioutil" "net/url" + "time" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" @@ -21,13 +23,13 @@ type KibanaLoader struct { msgOutputter MessageOutputter } -func NewKibanaLoader(cfg *common.Config, dashboardsConfig *Config, hostname string, msgOutputter MessageOutputter) (*KibanaLoader, error) { +func NewKibanaLoader(ctx context.Context, cfg *common.Config, dashboardsConfig *Config, hostname string, msgOutputter MessageOutputter) (*KibanaLoader, error) { if cfg == nil || !cfg.Enabled() { return nil, fmt.Errorf("Kibana is not configured or enabled") } - client, err := kibana.NewKibanaClient(cfg) + client, err := getKibanaClient(ctx, cfg, dashboardsConfig.Retry, 0) if err != nil { return nil, fmt.Errorf("Error creating Kibana client: %v", err) } @@ -45,6 +47,22 @@ func NewKibanaLoader(cfg *common.Config, dashboardsConfig *Config, hostname stri return &loader, nil } +func getKibanaClient(ctx context.Context, cfg *common.Config, retryCfg *Retry, retryAttempt uint) (*kibana.Client, error) { + client, err := kibana.NewKibanaClient(cfg) + if err != nil { + if retryCfg.Enabled && (retryCfg.Maximum == 0 || retryCfg.Maximum > retryAttempt) { + select { + case <-ctx.Done(): + return nil, err + case <-time.After(retryCfg.Interval): + return getKibanaClient(ctx, cfg, retryCfg, retryAttempt+1) + } + } + return nil, fmt.Errorf("Error creating Kibana client: %v", err) + } + return client, nil +} + func (loader KibanaLoader) ImportIndex(file string) error { params := url.Values{} params.Set("force", "true") //overwrite the existing dashboards diff --git a/vendor/github.com/elastic/beats/libbeat/docs/breaking.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/breaking.asciidoc index 3a7560f0..7c72ee67 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/breaking.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/breaking.asciidoc @@ -1,3 +1,5 @@ +:see-relnotes: See the <<release-notes>> for a complete list of breaking changes, including changes to beta or experimental functionality. + [[breaking-changes]] == Breaking changes @@ -9,16 +11,191 @@ changes, but there are breaking changes between major versions (e.g. 5.x to See the following topics for a description of breaking changes: +* <<breaking-changes-6.3>> +* <<breaking-changes-6.2>> +* <<breaking-changes-6.1>> * <<breaking-changes-6.0>> * {auditbeat}/auditbeat-breaking-changes.html[Breaking changes in Auditbeat 6.2] +[[breaking-changes-6.3]] +=== Breaking changes in 6.3 + +This section discusses the main changes that you should be aware of if you +upgrade the Beats to version 6.3. {see-relnotes} + +[[breaking-changes-monitoring]] +==== Beats monitoring schema changes + +Starting with version 6.3, the monitoring field `beat.cpu.*.time.metrics` is +renamed to `beat.cpu.*.time.ms`. As a result of this change, Beats shippers +released prior to version 6.3 are unable to send monitoring data to clusters +running on Elasticsearch 6.3 and later.
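The `unzip` change in `importer.go` above guards against archive entries that would escape the extraction directory (a zip-slip style path traversal) by checking the `filepath.Rel` result. A small standalone sketch of the same check follows; `underTarget` and the sample entry names are illustrative, and the extra guard for a bare `..` entry is a slightly stricter variant than the patch itself.

----
package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// underTarget reports whether joining name onto target stays inside target.
// It mirrors the filepath.Rel check added to Importer.unzip, plus an extra
// guard for an entry that is exactly "..".
func underTarget(target, name string) (bool, error) {
    joined := filepath.Join(target, name) // Join also cleans the path
    rel, err := filepath.Rel(target, joined)
    if err != nil {
        return false, err
    }
    rel = filepath.ToSlash(rel)
    if rel == ".." || strings.HasPrefix(rel, "../") {
        return false, nil
    }
    return true, nil
}

func main() {
    for _, name := range []string{"dashboard/sample.json", "../../etc/passwd"} {
        ok, err := underTarget("/tmp/kibana", name)
        fmt.Println(name, ok, err)
    }
}
----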
{see-relnotes} + +[[breaking-changes-mapping-conflict]] +==== New `host` namespace may cause mapping conflicts for Logstash + +This breaking change applies only to users who send Beats events to Logstash. + +Starting with version 6.3, Beats provides an `add_host_metadata` processor for +adding fields, such as `host.name` and `host.id`, to Beats events. These fields +are grouped under a `host` prefix and conform to the +https://github.com/elastic/ecs[Elastic Common Schema (ECS)]. The `host` object +is defined in the Elasticsearch index template even if the processor is not +used. + +We've also added a `host.name` field to all events sent by Beats. This field +prevents the Beats input plugin in Logstash from adding a default `host` field. +(By default, the plugin adds a `host` field if the event doesn't already have +one. We don't want the plugin to add this field because it causes a mapping +conflict with the `host` object defined in the index template.) + +*What does this mean to you?* + +See the info for your particular use case: + +* <<beats-template-versioned-indices>> +* <<custom-template-non-versioned-indices>> +* <<beats-template-non-versioned-indices>> + +[[beats-template-versioned-indices]] +===== Use case: You use the Beats index template and versioned indices + +In this use case, you load the versioned template manually and use the Beats +versioned index pattern, `%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}`, +as recommended in the {logstash-ref}/plugins-inputs-beats.html[Beats input +plugin] documentation. This results in a `host` field in 6.2 indices, and a +`host.name` field in 6.3 indices. There are no mapping conflicts, but +any visualizations or searches that use `host` will no longer show results for +6.3 indices. + +*What do you need to change?* + +If you searched for the `host` field previously, modify your searches to use the +`beat.hostname` field instead. The `beat.hostname` field existed prior to 6.3 +and contains the same information as `host.name`. Using this field ensures that +your queries and aggregations will work as expected in earlier releases and 6.3. + +If you have multiple visualizations in Kibana that reference the `host` field, +export the objects, modify them by changing `host` to `beat.hostname`, and then +re-import them into Kibana. You can use the Kibana UI or API. + +To use the Kibana UI: + +. In Kibana, go to *Management > Kibana > Saved Objects* and export the objects. +. In the exported JSON file, change `host` to `beat.hostname`. +. In Kibana, go to *Saved Objects* and import the modified objects. + +For more information, see: + +* {kibana-ref}/saved-objects-api.html[Kibana Saved Objects API] +* {kibana-ref}/managing-saved-objects.html[Managing Saved Searches, Visualizations, and Dashboards] + + +[[custom-template-non-versioned-indices]] +===== Use case: You use a custom template and your indices are not versioned + +Mapping conflicts are likely in this use case because two different Beats +versions (6.2 and 6.3) are sending data to the same index. For 6.2, Logstash +adds the default `host` field, and for 6.3, Beats adds the `host.name` field, +which results in a mapping conflict. + +*What do you need to change?* + +To resolve the mapping issue, do *one* of the following: + +* Use versioned indices to prevent the mapping conflict.
In the Logstash +pipeline configuration, set `manage_template => false` and use an index naming +pattern that includes `[version]` metadata: ++ +[source,yaml] +---- +manage_template => false +index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" +---- ++ +For more information, see the documentation for the +{logstash-ref}/plugins-inputs-beats.html[Beats input plugin]. + +* Or, in the Beats config file, configure Beats to drop all `host.*` fields: ++ +[source,yaml] +---- + processors: + - drop_fields: + fields: ["host"] +---- ++ +-- +IMPORTANT: If you drop the `host` fields, you cannot use the `add_host_metadata` +processor. + +-- ++ +With this configuration, Beats drops the `host` fields before sending the +event to Logstash, and Logstash adds a default `host` field, as it did with +previous Beats versions. This approach resolves the mapping conflict, but you +should plan to migrate your Logstash configurations to use `host.name` in +future releases. + +[[beats-template-non-versioned-indices]] +===== Use case: You use the Beats index template and your indices are not versioned + +In this use case, you load the Beats index template manually into Elasticsearch, +and send your data through Logstash, but you don’t use the versioned index +pattern to create versioned indices. + +You cannot resolve the problem by dropping the `host.*` fields, because Logstash +will add a default `host` field, resulting in a mapping conflict with the +`host` field defined as an object in the index template. + +*What do you need to change?* + +To resolve the mapping issue, do *one* of the following: + +* Drop the `host.*` fields in a Logstash filter. For example: ++ +[source,yaml] +---- +filter { + mutate { + remove_field => [ "[host]" ] + } +} +---- ++ +With this configuration, there will be no `host` field in the final event at +ingestion time, and the mapping conflict is avoided. + +* Or: +** Modify the Beats index template by removing the `host.*` fields, and +** Configure Beats to drop all `host.*` fields, as described in +<>. ++ +This solution prevents a mapping conflict because the fields are no longer +defined in the Elasticsearch template. Elasticsearch can use the `host` +mapping created when Logstash added a default `host` field. + +The difference between these two approaches is that the first approach, using +a Logstash filter, drops the `host` fields completely. There will be no `host` +field in the final event. The second approach drops the `host` fields from the +Beats event, but because Logstash adds a default `host` field, there will be a +`host` field in the final event. + +[[breaking-changes-6.2]] +=== Breaking changes in 6.2 + +{see-relnotes} + +[[breaking-changes-6.1]] +=== Breaking changes in 6.1 + +{see-relnotes} [[breaking-changes-6.0]] === Breaking changes in 6.0 This section discusses the main changes that you should be aware of if you -upgrade the Beats from version 5.x to 6.x. Please also review the relevant -Breaking Changes sections of the <>. +upgrade the Beats from version 5.x to 6.x. {see-relnotes} // TODO: better link to the consolidated release notes for 6.0.0. @@ -108,7 +285,7 @@ Prior to 6.0, the recommended setting was: ---- -The index templates that ship with 6.0 are applied to new indexes that match the +The index templates that ship with 6.0 are applied to new indices that match the pattern `[beat]-[version]-*`. You must update your Logstash config, or the templates will not be applied. 
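To make the versioned index pattern discussed above concrete: `%{[@metadata][beat]}` resolves to the Beat name, `%{[@metadata][version]}` to its version, and `%{+YYYY.MM.dd}` to the event date, so a 6.3.0 Metricbeat event from 29 March 2018 would land in an index such as `metricbeat-6.3.0-2018.03.29`. A rough sketch of that substitution is shown below; the helper name and sample values are hypothetical, and in practice Logstash performs this expansion itself from event metadata.

----
package main

import (
    "fmt"
    "time"
)

// versionedIndex expands the Logstash pattern
// "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" for one event.
// The Joda-style YYYY.MM.dd corresponds to Go's reference layout "2006.01.02".
func versionedIndex(beat, version string, timestamp time.Time) string {
    return fmt.Sprintf("%s-%s-%s", beat, version, timestamp.UTC().Format("2006.01.02"))
}

func main() {
    ts := time.Date(2018, time.March, 29, 12, 0, 0, 0, time.UTC)
    fmt.Println(versionedIndex("metricbeat", "6.3.0", ts)) // metricbeat-6.3.0-2018.03.29
}
----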
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc index f4a4796a..4c62b830 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc @@ -19,7 +19,15 @@ :keystore-command-short-desc: Manages the <> :modules-command-short-desc: Manages configured modules :run-command-short-desc: Runs {beatname_uc}. This command is used by default if you start {beatname_uc} without specifying a command + +ifeval::["{has_ml_jobs}"=="yes"] :setup-command-short-desc: Sets up the initial environment, including the index template, Kibana dashboards (when available), and machine learning jobs (when available) +endif::[] + +ifeval::["{has_ml_jobs}"!="yes"] +:setup-command-short-desc: Sets up the initial environment, including the index template, Kibana dashboards (when available) +endif::[] + :test-command-short-desc: Tests the configuration :version-command-short-desc: Shows information about the current version @@ -31,7 +39,7 @@ Command reference ++++ -{beatname_uc} provides a command-line interface for running the Beat and +{beatname_uc} provides a command-line interface for starting {beatname_uc} and performing common tasks, like testing configuration files and loading dashboards. The command-line also supports <> for controlling global behaviors. @@ -49,34 +57,20 @@ Use `sudo` to run the following commands if: endif::[] -[horizontal] -<>:: -{export-command-short-desc}. - -<>:: -{help-command-short-desc}. - -<>:: -{keystore-command-short-desc}. - +[options="header"] +|======================= +|Commands | +|<> |{export-command-short-desc}. +|<> |{help-command-short-desc}. +|<> |{keystore-command-short-desc}. ifeval::[("{beatname_lc}"=="filebeat") or ("{beatname_lc}"=="metricbeat")] - -<>:: -{modules-command-short-desc}. - +|<> |{modules-command-short-desc}. endif::[] - -<>:: -{run-command-short-desc}. - -<>:: -{setup-command-short-desc}. - -<>:: -{test-command-short-desc}. - -<>:: -{version-command-short-desc}. +|<> |{run-command-short-desc}. +|<> |{setup-command-short-desc}. +|<> |{test-command-short-desc}. +|<> |{version-command-short-desc}. +|======================= Also see <>. @@ -389,10 +383,10 @@ ifeval::["{beatname_lc}"=="filebeat"] *`--once`*:: When the `--once` flag is used, {beatname_uc} starts all configured harvesters -and prospectors, and runs each prospector until the harvesters are closed. If -you set the `--once` flag, you should also set `close_eof` so the harvester is -closed when the end of the file is reached. By default harvesters are closed -after `close_inactive` is reached. +and inputs, and runs each input until the harvesters are closed. If you set the +`--once` flag, you should also set `close_eof` so the harvester is closed when +the end of the file is reached. By default harvesters are closed after +`close_inactive` is reached. endif::[] @@ -459,7 +453,10 @@ environment without actually running {beatname_uc} and ingesting data. *FLAGS* *`--dashboards`*:: -Sets up the Kibana dashboards only. +Sets up the Kibana dashboards only. This option loads the dashboards from the +{beatname_uc} package. For more options, such as loading customized dashboards, +see {beatsdevguide}/import-dashboards.html[Importing Existing Beat Dashboards] +in the _Beats Developer Guide_. *`-h, --help`*:: Shows help for the `setup` command. 
@@ -588,7 +585,7 @@ For example: + ["source","sh",subs="attributes"] ---------------------------------------------------------------------- -{beatname_lc} -E "name=mybeat" -E "output.elasticsearch.hosts=["http://myhost:9200"]" +{beatname_lc} -E "name=mybeat" -E "output.elasticsearch.hosts=['http://myhost:9200']" ---------------------------------------------------------------------- + This setting is applied to the currently running {beatname_uc} process. @@ -601,7 +598,7 @@ ifeval::["{beatname_lc}"=="filebeat"] + ["source","sh",subs="attributes"] ---------------------------------------------------------------------- -{beatname_lc} -modules=nginx -M "nginx.access.var.paths=[/var/log/nginx/access.log*]" -M "nginx.access.var.pipeline=no_plugins" +{beatname_lc} -modules=nginx -M "nginx.access.var.paths=['/var/log/nginx/access.log*']" -M "nginx.access.var.pipeline=no_plugins" ---------------------------------------------------------------------- endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc index 33dd39a8..e64e6c0d 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc @@ -4,7 +4,7 @@ ////////////////////////////////////////////////////////////////////////// [[community-beats]] -== Community beats +== Community Beats The open source community has been hard at work developing new Beats. You can check out some of them here. @@ -12,10 +12,14 @@ out some of them here. Have a question about a community Beat? You can post questions and discuss issues in the https://discuss.elastic.co/c/beats/community-beats[Community Beats] category of the Beats discussion forum. -Have you created a Beat that's not listed? If so, add the name and description of your Beat to the source document for +Have you created a Beat that's not listed? Add the name and description of your Beat to the source document for https://github.com/elastic/beats/blob/master/libbeat/docs/communitybeats.asciidoc[Community Beats] and https://help.github.com/articles/using-pull-requests[open a pull request] in the https://github.com/elastic/beats[Beats GitHub repository] to get your change merged. When you're ready, go ahead and https://discuss.elastic.co/c/annoucements[announce] your new Beat in the Elastic discussion forum. +ifndef::dev-guide[] +Want to contribute? See <>. +endif::[] + NOTE: Elastic provides no warranty or support for community-sourced Beats. [horizontal] @@ -23,6 +27,7 @@ https://github.com/awormuth/amazonbeat[amazonbeat]:: Reads data from a specified https://github.com/radoondas/apachebeat[apachebeat]:: Reads status from Apache HTTPD server-status. https://github.com/verticle-io/apexbeat[apexbeat]:: Extracts configurable contextual data and metrics from Java applications via the http://toolkits.verticle.io[APEX] toolkit. https://github.com/goomzee/burrowbeat[burrowbeat]:: Monitors Kafka consumer lag using Burrow. +https://github.com/hsngerami/hsnburrowbeat[hsnburrowbeat]:: Monitors Kafka consumer lag for Burrow V1.0.0(API V3). https://github.com/goomzee/cassandrabeat[cassandrabeat]:: Uses Cassandra's nodetool cfstats utility to monitor Cassandra database nodes and lag. https://github.com/hartfordfive/cloudflarebeat[cloudflarebeat]:: Indexes log entries from the Cloudflare Enterprise Log Share API. 
https://github.com/jarl-tornroos/cloudfrontbeat[cloudfrontbeat]:: Reads log events from Amazon Web Services https://aws.amazon.com/cloudfront/[CloudFront]. @@ -39,10 +44,13 @@ https://github.com/gamegos/etcdbeat[etcdbeat]:: Reads stats from the Etcd v2 API https://github.com/christiangalsterer/execbeat[execbeat]:: Periodically executes shell commands and sends the standard output and standard error to Logstash or Elasticsearch. https://github.com/jarpy/factbeat[factbeat]:: Collects facts from https://puppetlabs.com/facter[Facter]. +https://github.com/ctindel/fastcombeat[fastcombeat]:: Periodically gather internet download speed from https://fast.com[fast.com]. https://github.com/FStelzer/flowbeat[flowbeat]:: Collects, parses, and indexes http://www.sflow.org/index.php[sflow] samples. https://github.com/GeneralElectric/GABeat[gabeat]:: Collects data from Google Analytics Realtime API. +https://github.com/GoogleCloudPlatform/gcsbeat[gcsbeat]:: Reads data from https://cloud.google.com/storage/[Google Cloud Storage] buckets. https://github.com/jlevesy/githubbeat[githubbeat]:: Easily monitors GitHub repository activity. https://github.com/hpcugent/gpfsbeat[gpfsbeat]:: Collects GPFS metric and quota information. +https://github.com/ullaakut/hackerbeat[hackerbeat]:: Indexes the top stories of HackerNews into an ElasticSearch instance. https://github.com/YaSuenag/hsbeat[hsbeat]:: Reads all performance counters in Java HotSpot VM. https://github.com/christiangalsterer/httpbeat[httpbeat]:: Polls multiple HTTP(S) endpoints and sends the data to Logstash or Elasticsearch. Supports all HTTP methods and proxies. @@ -62,6 +70,7 @@ https://github.com/adibendahan/mysqlbeat[mysqlbeat]:: Run any query on MySQL and https://github.com/PhaedrusTheGreek/nagioscheckbeat[nagioscheckbeat]:: For Nagios checks and performance data. https://github.com/mrkschan/nginxbeat[nginxbeat]:: Reads status from Nginx. https://github.com/2Fast2BCn/nginxupstreambeat[nginxupstreambeat]:: Reads upstream status from nginx upstream module. +https://github.com/mschneider82/nsqbeat[nsqbeat]:: Reads data from a NSQ topic. https://github.com/deepujain/nvidiagpubeat/[nvidiagpubeat]:: Uses nvidia-smi to grab metrics of NVIDIA GPUs. https://github.com/aristanetworks/openconfigbeat[openconfigbeat]:: Streams data from http://openconfig.net[OpenConfig]-enabled network devices https://github.com/joehillen/packagebeat[packagebeat]:: Collects information about system packages from package @@ -72,6 +81,7 @@ of targets and stores the round trip time (RTT) in Elasticsearch. https://github.com/carlpett/prombeat[prombeat]:: Indexes https://prometheus.io[Prometheus] metrics. https://github.com/infonova/prometheusbeat[prometheusbeat]:: Send Prometheus metrics to Elasticsearch via the remote write feature. https://github.com/hartfordfive/protologbeat[protologbeat]:: Accepts structured and unstructured logs via UDP or TCP. Can also be used to receive syslog messages or GELF formatted messages. (To be used as a successor to udplogbeat) +https://github.com/GoogleCloudPlatform/pubsubbeat[pubsubbeat]:: Reads data from https://cloud.google.com/pubsub/[Google Cloud Pub/Sub]. https://github.com/voigt/redditbeat[redditbeat]:: Collects new Reddit Submissions of one or multiple Subreddits. https://github.com/chrsblck/redisbeat[redisbeat]:: Used for Redis monitoring. 
https://github.com/consulthys/retsbeat[retsbeat]:: Collects counts of http://www.reso.org[RETS] resource/class records from https://en.wikipedia.org/wiki/Multiple_listing_service[Multiple Listing Service] (MLS) servers. @@ -88,4 +98,5 @@ network intrusion detection software and indexes the records in Elasticsearch. https://github.com/mrkschan/uwsgibeat[uwsgibeat]:: Reads stats from uWSGI. https://github.com/phenomenes/varnishlogbeat[varnishlogbeat]:: Reads log data from a Varnish instance and ships it to Elasticsearch. https://github.com/phenomenes/varnishstatbeat[varnishstatbeat]:: Reads stats data from a Varnish instance and ships it to Elasticsearch. +https://gitlab.com/msvechla/vaultbeat[vaultbeat]:: Collects performance metrics and statistics from Hashicorp's Vault. https://github.com/eskibars/wmibeat[wmibeat]:: Uses WMI to grab your favorite, configurable Windows metrics. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/config-file-format.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/config-file-format.asciidoc index 18e96aa7..c4af4afe 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/config-file-format.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/config-file-format.asciidoc @@ -34,7 +34,7 @@ Lists and dictionaries are used in beats to build structured configurations. ["source","yaml",subs="attributes"] ------------------------------------------------------------------------------ filebeat: - prospectors: + inputs: - type: log paths: - /var/log/*.log @@ -92,12 +92,12 @@ For example this filebeat setting: ------------------------------------------------------------------------------ filebeat: - prospectors: + inputs: - type: log ------------------------------------------------------------------------------ -Gets collapsed into `filebeat.prospectors.0.type: log`. +Gets collapsed into `filebeat.inputs.0.type: log`. Alternatively to using indentation, setting names can be used in collapsed form too. @@ -109,7 +109,7 @@ Simple filebeat example with partially collapsed setting names and use of compac ["source","yaml",subs="attributes"] ------------------------------------------------------------------------------ -filebeat.prospectors: +filebeat.inputs: - type: log paths: ["/var/log/*.log"] multiline.pattern: '^[' diff --git a/vendor/github.com/elastic/beats/libbeat/docs/contributing-to-beats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/contributing-to-beats.asciidoc index 0f0ab628..7e568148 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/contributing-to-beats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/contributing-to-beats.asciidoc @@ -9,8 +9,8 @@ //// include::../../libbeat/docs/contributing-to-beats.asciidoc[] ////////////////////////////////////////////////////////////////////////// -[[contributing-to-beats]] -== Contributing to Beats +["appendix",id="contributing-to-beats"] += Contributing to Beats The Beats are open source and we love to receive contributions from our community — you! 
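The config-file-format change above keeps the note that the nested `filebeat.inputs` example collapses to `filebeat.inputs.0.type: log`: nested maps collapse into dotted setting names and list elements contribute their index. The sketch below models that flattening rule in isolation; it is a simplification of how settings are actually resolved, and the function name is illustrative.

----
package main

import "fmt"

// flatten collapses nested maps and lists into dotted setting names, for
// example {"filebeat": {"inputs": [{"type": "log"}]}} becomes
// "filebeat.inputs.0.type": "log".
func flatten(prefix string, value interface{}, out map[string]interface{}) {
    switch v := value.(type) {
    case map[string]interface{}:
        for key, child := range v {
            name := key
            if prefix != "" {
                name = prefix + "." + key
            }
            flatten(name, child, out)
        }
    case []interface{}:
        for i, child := range v {
            flatten(fmt.Sprintf("%s.%d", prefix, i), child, out)
        }
    default:
        out[prefix] = value
    }
}

func main() {
    cfg := map[string]interface{}{
        "filebeat": map[string]interface{}{
            "inputs": []interface{}{
                map[string]interface{}{"type": "log"},
            },
        },
    }
    out := map[string]interface{}{}
    flatten("", cfg, out)
    fmt.Println(out) // map[filebeat.inputs.0.type:log]
}
----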
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc index 5547343f..b82bf1fd 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc @@ -9,27 +9,30 @@ //// include::../../libbeat/docs/dashboards.asciidoc[] ////////////////////////////////////////////////////////////////////////// - {beatname_uc} comes packaged with example Kibana dashboards, visualizations, and searches for visualizing {beatname_uc} data in Kibana. Before you can use -the dashboards, you need to create the index pattern, +{beatname_lc}-*+, and +the dashboards, you need to create the index pattern, +{beat_default_index_prefix}-*+, and load the dashboards into Kibana. To do this, you can either run the `setup` command (as described here) or <> in the +{beatname_lc}.yml+ config file. -NOTE: Starting with Beats 6.0.0, the dashboards are loaded via the Kibana API. -This requires a Kibana endpoint configuration. You should have configured the -endpoint earlier when you -<<{beatname_lc}-configuration,configured {beatname_uc}>>. If you didn't, -configure it now. +This requires a Kibana endpoint configuration. If you didn't already configure +a Kibana endpoint, see <<{beatname_lc}-configuration,configure {beatname_uc}>>. Make sure Kibana is running before you perform this step. If you are accessing a secured Kibana instance, make sure you've configured credentials as described in <<{beatname_lc}-configuration>>. To set up the Kibana dashboards for {beatname_uc}, use the appropriate command -for your system. +for your system. The command shown here loads the dashboards from the {beatname_uc} +package. For more options, such as loading customized dashboards, see +{beatsdevguide}/import-dashboards.html[Importing Existing Beat Dashboards] in +the _Beats Developer Guide_. +ifndef::only-elasticsearch[] +If you've configured the Logstash output, see +<>. +endif::[] ifdef::allplatforms[] @@ -55,8 +58,6 @@ endif::[] ---------------------------------------------------------------------- -ifeval::["{beatname_lc}"!="auditbeat"] - *docker:* ["source","sh",subs="attributes"] @@ -64,20 +65,95 @@ ifeval::["{beatname_lc}"!="auditbeat"] docker run {dockerimage} setup --dashboards ---------------------------------------------------------------------- -endif::[] - *win:* endif::allplatforms[] Open a PowerShell prompt as an Administrator (right-click the PowerShell icon -and select *Run As Administrator*). If you are running Windows XP, you may need -to download and install PowerShell. +and select *Run As Administrator*). From the PowerShell prompt, change to the directory where you installed {beatname_uc}, and run: -["source","sh",subs="attributes,callouts"] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -PS > {beatname_lc} setup --dashboards +PS > .{backslash}{beatname_lc}.exe setup --dashboards ---------------------------------------------------------------------- + +ifndef::only-elasticsearch[] +[[load-dashboards-logstash]] +==== Set up dashboards for Logstash output + +During dashboard loading, {beatname_uc} connects to Elasticsearch to check +version information. To load dashboards when the Logstash output is enabled, you +need to temporarily disable the Logstash output and enable Elasticsearch. To +connect to a secured Elasticsearch cluster, you also need to pass Elasticsearch +credentials. 
+ +TIP: The example shows a hard-coded password, but you should store sensitive +values in the <>. + +ifdef::allplatforms[] + +*deb and rpm:* + +["source","sh",subs="attributes"] +---- +{beatname_lc} setup -e \ + -E output.logstash.enabled=false \ + -E output.elasticsearch.hosts=['localhost:9200'] \ + -E output.elasticsearch.username={beat_default_index_prefix}_internal \ + -E output.elasticsearch.password={pwd} \ + -E setup.kibana.host=localhost:5601 +---- + + +*mac:* + +["source","sh",subs="attributes"] +---- +./{beatname_lc} setup -e \ + -E output.logstash.enabled=false \ + -E output.elasticsearch.hosts=['localhost:9200'] \ + -E output.elasticsearch.username={beat_default_index_prefix}_internal \ + -E output.elasticsearch.password={pwd} \ + -E setup.kibana.host=localhost:5601 +---- + + +*docker:* + +["source","sh",subs="attributes"] +---- +docker run {dockerimage} setup -e \ + -E output.logstash.enabled=false \ + -E output.elasticsearch.hosts=['localhost:9200'] \ + -E output.elasticsearch.username={beat_default_index_prefix}_internal \ + -E output.elasticsearch.password={pwd} \ + -E setup.kibana.host=localhost:5601 +---- + + +*win:* + +endif::allplatforms[] + +Open a PowerShell prompt as an Administrator (right-click the PowerShell icon +and select *Run As Administrator*). + +From the PowerShell prompt, change to the directory where you installed {beatname_uc}, +and run: + +["source","sh",subs="attributes"] +---- +PS > .{backslash}{beatname_lc}.exe setup -e ` + -E output.logstash.enabled=false ` + -E output.elasticsearch.hosts=['localhost:9200'] ` + -E output.elasticsearch.username={beat_default_index_prefix}_internal ` + -E output.elasticsearch.password={pwd} ` + -E setup.kibana.host=localhost:5601 +---- + + +endif::only-elasticsearch[] + diff --git a/vendor/github.com/elastic/beats/libbeat/docs/dashboardsconfig.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/dashboardsconfig.asciidoc index 03a24c41..fbc9f1a2 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/dashboardsconfig.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/dashboardsconfig.asciidoc @@ -20,7 +20,7 @@ To load the dashboards, you can either enable dashboard loading in the run the `setup` command. Dashboard loading is disabled by default. When dashboard loading is enabled, {beatname_uc} uses the Kibana API to load the -sample dashboards. Dashboard loading is only attempted at Beat startup. +sample dashboards. Dashboard loading is only attempted when {beatname_uc} starts up. If Kibana is not available at startup, {beatname_uc} will stop with an error. To enable dashboard loading, add the following setting to the config file: @@ -40,9 +40,14 @@ You can specify the following options in the `setup.dashboards` section of the ==== `setup.dashboards.enabled` If this option is set to true, {beatname_uc} loads the sample Kibana dashboards -automatically on startup. If no other options are set, the dashboard are loaded -from the local `kibana` directory in the home path of the installation. +from the local `kibana` directory in the home path of the {beatname_uc} installation. +NOTE: When dashboard loading is enabled, {beatname_uc} overwrites any existing +dashboards that match the names of the dashboards you are loading. This happens +every time {beatname_uc} starts. + +If no other options are set, the dashboard are loaded +from the local `kibana` directory in the home path of the {beatname_uc} installation. 
To load dashboards from a different location, you can configure one of the following options: <>, <>, or @@ -92,8 +97,27 @@ is `".kibana"` The Elasticsearch index name. This setting overwrites the index name defined in the dashboards and index pattern. Example: `"testbeat-*"` +NOTE: This setting only works for Kibana 6.0 and newer. + [float] ==== `setup.dashboards.always_kibana` Force loading of dashboards using the Kibana API without querying Elasticsearch for the version The default is `false`. + +[float] +==== `setup.dashboards.retry.enabled` + +If this option is set to true, and Kibana is not reachable at the time when dashboards are loaded, + {beatname_uc} will retry to reconnect to Kibana instead of exiting with an error. Disabled by default. + +[float] +==== `setup.dashboards.retry.interval` + +Duration interval between Kibana connection retries. Defaults to 1 second. + +[float] +==== `setup.dashboards.retry.maximum` + +Maximum number of retries before exiting with an error. Set to 0 for unlimited retrying. +Default is unlimited. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/getting-help.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/getting-help.asciidoc index 2deddac7..2404de19 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/getting-help.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/getting-help.asciidoc @@ -9,7 +9,7 @@ //// include::../../libbeat/docs/getting-help.asciidoc[] ////////////////////////////////////////////////////////////////////////// -Start by searching the https://discuss.elastic.co/c/beats/{beatname_lc}[{beatname_uc} discussion forum] for your issue. If you can't find a resolution, open a new issue or add a comment to an existing one. Make sure you provide the following information, and we'll help +Start by searching the https://discuss.elastic.co/c/{discuss_forum}[{beatname_uc} discussion forum] for your issue. If you can't find a resolution, open a new issue or add a comment to an existing one. Make sure you provide the following information, and we'll help you troubleshoot the problem: * {beatname_uc} version @@ -19,6 +19,6 @@ you troubleshoot the problem: problem. See <> for more details. If you're sure you found a bug, you can open a ticket on -https://github.com/elastic/beats/issues?state=open[GitHub]. Note, however, +https://github.com/elastic/{github_repo_name}/issues?state=open[GitHub]. Note, however, that we close GitHub issues containing questions or requests for help if they -don't indicate the presence of a bug. \ No newline at end of file +don't indicate the presence of a bug. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc index 59ea4f40..2ed0572f 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc @@ -1,492 +1,17 @@ [[getting-started]] -== Getting started with Beats and the Elastic Stack +== Getting started with {beats} -Looking for an "ELK tutorial" that shows how to set up the Elastic stack for Beats? You've -come to the right place. The topics in this section describe how to install and configure -the Elastic stack for Beats. +Each Beat is a separately installable product. Before installing Beats, you need +to install and configure the {stack}. To learn how to get up and running +quickly, see {stack-ov}/get-started-elastic-stack.html[Getting started with the +{stack}]. 
-A regular _Beats setup_ consists of: +After installing the {stack}, see the {beats} getting started guides: - * Elasticsearch for storage and indexing. See <>. - * Logstash (optional) for inserting data into Elasticsearch. See <>. - * Kibana for the UI. See <>. - * One or more Beats. You install the Beats on your servers to capture operational data. See <>. - * Kibana dashboards for visualizing the data. - -See the https://www.elastic.co/support/matrix[Elastic Support Matrix] for information -about supported operating systems and product compatibility. - -NOTE: To get started, you can install Elasticsearch and Kibana on a -single VM or even on your laptop. The only condition is that the machine must be -accessible from the servers you want to monitor. As you add more Beats and -your traffic grows, you'll want to replace the single Elasticsearch instance with -a cluster. You'll probably also want to automate the installation process. - - -[[elasticsearch-installation]] -=== Install Elasticsearch - -https://www.elastic.co/products/elasticsearch[Elasticsearch] is a real-time, -distributed storage, search, and analytics engine. It can be used for many -purposes, but one context where it excels is indexing streams of semi-structured -data, such as logs or decoded network packets. - -The binary packages of Elasticsearch have only one dependency: Java. The minimum -supported version is Java 8. To download and install -Elasticsearch, use the commands that work with your system -(<> for Debian/Ubuntu, <> for Redhat/Centos/Fedora, <> for OS X, and <> for Windows): - -[[deb]]*deb:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Elasticsearch has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -sudo apt-get install openjdk-8-jre -curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{ES-version}.deb -sudo dpkg -i elasticsearch-{ES-version}.deb -sudo /etc/init.d/elasticsearch start ----------------------------------------------------------------------- - -endif::[] - -[[rpm]]*rpm:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Elasticsearch has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -sudo yum install java-1.8.0-openjdk -curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{ES-version}.rpm -sudo rpm -i elasticsearch-{ES-version}.rpm -sudo service elasticsearch start ----------------------------------------------------------------------- - -endif::[] - -[[mac]]*mac:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Elasticsearch has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -# install Java, e.g. 
from: https://www.java.com/en/download/manual.jsp -curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{ES-version}.zip -unzip elasticsearch-{ES-version}.zip -cd elasticsearch-{ES-version} -./bin/elasticsearch ----------------------------------------------------------------------- - -endif::[] - -[[win]]*win:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Elasticsearch has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -. If necessary, download and install the latest version of the Java from https://www.java.com[www.java.com]. - -. Download the Elasticsearch {ES-version} Windows zip file from the -https://www.elastic.co/downloads/elasticsearch[downloads page]. - -. Extract the contents of the zip file to a directory on your computer, for example, `C:\Program Files`. - -. Open a command prompt as an Administrator and navigate to the directory that contains the extracted files, for example: -+ -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -cd C:\Program Files\elasticsearch-{ES-version} ----------------------------------------------------------------------- - -. Run the following command to start Elasticsearch: -+ -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -bin\elasticsearch.bat ----------------------------------------------------------------------- - -endif::[] - -You can learn more about installing, configuring, and running Elasticsearch in the -https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[Elasticsearch Reference]. - -==== Make sure Elasticsearch is up and running - - -To test that the Elasticsearch daemon is up and running, try sending an HTTP GET -request on port 9200. - -[source,shell] ----------------------------------------------------------------------- -curl http://127.0.0.1:9200 ----------------------------------------------------------------------- - -On Windows, if you don't have cURL installed, simply point your browser to the URL. - -You should see a response similar to this: - -["source","sh",subs="attributes"] ----------------------------------------------------------------------- -{ - "name" : "ofgAtrJ", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "3h9xSrVlRJmDHgQ8FLnByA", - "version" : { - "number" : "{ES-version}", - "build_hash" : "db0d481", - "build_date" : "2017-02-09T22:05:32.386Z", - "build_snapshot" : false, - "lucene_version" : "6.4.1" - }, - "tagline" : "You Know, for Search" -} ----------------------------------------------------------------------- - -[[logstash-installation]] -=== Install Logstash (Optional) - -The simplest architecture for the Beats platform setup consists of one or more Beats, -Elasticsearch, and Kibana. This architecture is easy to get started -with and sufficient for networks with low traffic. It also uses the minimum amount of -servers: a single machine running Elasticsearch and Kibana. The Beats -insert the transactions directly into the Elasticsearch instance. - -If you want to perform additional processing or buffering on the data, however, -you'll want to install Logstash. - -An important advantage to this approach is that you can -use Logstash to modify the data captured by Beats in any way you like. You can also -use Logstash's many output plugins to integrate with other systems. 
- -To download and install Logstash, use the commands that work -with your system: - -*deb:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Logstash has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -sudo apt-get install openjdk-8-jre -curl -L -O https://artifacts.elastic.co/downloads/logstash/logstash-{LS-version}.deb -sudo dpkg -i logstash-{LS-version}.deb ----------------------------------------------------------------------- - -endif::[] - -*rpm:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Logstash has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -sudo yum install java-1.8.0-openjdk -curl -L -O https://artifacts.elastic.co/downloads/logstash/logstash-{LS-version}.rpm -sudo rpm -i logstash-{LS-version}.rpm ----------------------------------------------------------------------- - -endif::[] - -*mac:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Logstash has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -# install Java, e.g. from: https://www.java.com/en/download/manual.jsp -curl -L -O https://artifacts.elastic.co/downloads/logstash/logstash-{LS-version}.zip -unzip logstash-{LS-version}.zip ----------------------------------------------------------------------- - -endif::[] - -*win:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Logstash has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -. If necessary, download and install the latest version of the Java from https://www.java.com[www.java.com]. - -. Download the Logstash {LS-version} Windows zip file from the -https://www.elastic.co/downloads/logstash[downloads page]. - -. Extract the contents of the zip file to a directory on your computer, for example, `C:\Program Files`. - -Don't start Logstash yet. You need to set a couple of configuration options first. - -endif::[] - -[[logstash-setup]] -==== Set up Logstash - -In this setup, the Beat sends events to Logstash. Logstash receives -these events by using the -{logstashdoc}/plugins-inputs-beats.html[Beats input plugin for Logstash] -and then sends the transaction to Elasticsearch by using the -{logstashdoc}/plugins-outputs-elasticsearch.html[Elasticsearch output plugin for Logstash]. -The Elasticsearch output plugin uses the bulk API, making indexing very efficient. - -To set up Logstash, you create a Logstash pipeline configuration file that -configures Logstash to listen on port 5044 for incoming Beats connections -and to index into Elasticsearch. For example, you can save the following -example configuration to a file called `logstash.conf`: - -[source,ruby] ------------------------------------------------------------------------------- -input { - beats { - port => 5044 - } -} - -# The filter part of this file is commented out to indicate that it is -# optional. 
-# filter { -# -# } - -output { - elasticsearch { - hosts => "localhost:9200" - manage_template => false - index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" <1> - document_type => "%{[@metadata][type]}" <2> - } -} ------------------------------------------------------------------------------- -<1> `%{[@metadata][beat]}` sets the first part of the index name to the value -of the `beat` metadata field, `%{[@metadata][version]}` sets the second part of -the name to the beat's version, and `%{+YYYY.MM.dd}` sets the third part of the -name to a date based on the Logstash `@timestamp` field. For example: -+{beatname_lc}-2017.03.29+. -<2> `%{[@metadata][type]}` sets the document type based on the value of the `type` -metadata field. For Beats, this value resolves to `doc`. - -NOTE: Starting with Logstash 6.0, the `document_type` option is deprecated due to the -https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal of types in Logstash 6.0]. -It will be removed in the next major version of Logstash. If you are running -Logstash 6.0 or later, you do not need to set `document_type` in your -configuration because Logstash sets the type to `doc` by default. - -When you run Logstash with this configuration, it indexes events into -Elasticsearch in the same way that the Beat would, but you get access to other -capabilities provided by Logstash for collecting, enriching, and transforming -data. See the {logstashdoc}/introduction.html[Logstash introduction] for more -information about these capabilities. - -To use this setup, you'll also need to configure your Beat to use Logstash. -For more information, see the documentation for the Beat. - -[[logstash-input-update]] -===== Update the Beats input plugin for Logstash - -Plugins have their own release cycle and are often released independent of -Logstash’s core release cycle. To ensure that you have the latest version of -the https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[Beats input plugin for Logstash], -run the following command from your Logstash installation: - -*deb, rpm, and mac:* - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -./bin/logstash-plugin update logstash-input-beats ----------------------------------------------------------------------- - -*win:* - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -bin\logstash-plugin update logstash-input-beats ----------------------------------------------------------------------- - -Keep in mind that you can update to the latest version of the plugin without -having to upgrade to a newer version of Logstash. More details about working -with input plugins in Logstash are available -https://www.elastic.co/guide/en/logstash/current/working-with-plugins.html[here]. - - -==== Start Logstash - -Now you can start Logstash. Use the command that works with your system. If you -installed Logstash as a deb or rpm package, make sure the config file is in the -expected directory. 
- -*deb:* - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -sudo /etc/init.d/logstash start ----------------------------------------------------------------------- - -*rpm:* - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -sudo service logstash start ----------------------------------------------------------------------- - -*mac:* - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -./bin/logstash -f logstash.conf ----------------------------------------------------------------------- - -*win:* - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -bin\logstash.bat -f logstash.conf ----------------------------------------------------------------------- - -NOTE: The default configuration for Beats and Logstash uses plain TCP. For -encryption you must explicitly enable SSL when you configure Beats and Logstash. - -You can learn more about installing, configuring, and running Logstash -https://www.elastic.co/guide/en/logstash/current/getting-started-with-logstash.html[here]. - -[[kibana-installation]] -=== Install Kibana - -https://www.elastic.co/products/kibana[Kibana] is a visualization application -that gets its data from Elasticsearch. It provides a customizable and -user-friendly UI in which you can combine various widget types to create your -own dashboards. The dashboards can be easily saved, shared, and linked. - -For getting started, we recommend installing Kibana on the same server as -Elasticsearch, but it is not required. If you install the products on different servers, -you'll need to change the URL (IP:PORT) of the Elasticsearch server in the -Kibana configuration file, `config/kibana.yml`, before starting Kibana. - -Use the following commands to download and run Kibana. - -*deb or rpm:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Kibana has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -curl -L -O https://artifacts.elastic.co/downloads/kibana/kibana-{Kibana-version}-linux-x86_64.tar.gz -tar xzvf kibana-{Kibana-version}-linux-x86_64.tar.gz -cd kibana-{Kibana-version}-linux-x86_64/ -./bin/kibana ----------------------------------------------------------------------- - -endif::[] - -*mac:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Kibana has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -curl -L -O https://artifacts.elastic.co/downloads/kibana/kibana-{Kibana-version}-darwin-x86_64.tar.gz -tar xzvf kibana-{Kibana-version}-darwin-x86_64.tar.gz -cd kibana-{Kibana-version}-darwin-x86_64/ -./bin/kibana ----------------------------------------------------------------------- - -endif::[] - -*win:* - -ifeval::["{release-state}"=="unreleased"] - -Version {stack-version} of Kibana has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -. Download the Kibana {Kibana-version} Windows zip file from the -https://www.elastic.co/downloads/kibana[downloads page]. - -. 
Extract the contents of the zip file to a directory on your computer, for example, `C:\Program Files`. - -. Open a command prompt as an Administrator and navigate to the directory that -contains the extracted files, for example: -+ -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -cd C:\Program Files\kibana-{Kibana-version}-windows ----------------------------------------------------------------------- - -. Run the following command to start Kibana: -+ -["source","sh",subs="attributes,callouts"] ----------------------------------------------------------------------- -bin\kibana.bat ----------------------------------------------------------------------- - -endif::[] - -You can find Kibana binaries for other operating systems on the -https://www.elastic.co/downloads/kibana[Kibana downloads page]. - -==== Launch the Kibana web interface - -To launch the Kibana web interface, point your browser to port 5601. For example, -http://127.0.0.1:5601[http://127.0.0.1:5601]. - -You can learn more about Kibana in the -http://www.elastic.co/guide/en/kibana/current/index.html[Kibana User Guide]. +* {auditbeat-ref}/auditbeat-getting-started.html[Auditbeat] +* {filebeat-ref}/filebeat-getting-started.html[Filebeat] +* {heartbeat-ref}/heartbeat-getting-started.html[Heartbeat] +* {metricbeat-ref}/metricbeat-getting-started.html[Metricbeat] +* {packetbeat-ref}/packetbeat-getting-started.html[Packetbeat] +* {winlogbeat-ref}/winlogbeat-getting-started.html[Winlogbeat] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/https.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/https.asciidoc index 24ca6e91..30570e64 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/https.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/https.asciidoc @@ -10,8 +10,13 @@ //// This content is structured to be included as a whole file. ////////////////////////////////////////////////////////////////////////// -To secure the communication between {beatname_uc} and Elasticsearch, you can use HTTPS -and basic authentication. Here is a sample configuration: +To secure the communication between {beatname_uc} and Elasticsearch, you can use +HTTPS and basic authentication. Basic authentication for Elasticsearch is +available when you enable {security} (see +{securitydoc}/xpack-security.html[Securing the {stack}] and <>). +If you aren't using {security}, you can use a web proxy instead. + +Here is a sample configuration: ["source","yaml",subs="attributes,callouts"] ---------------------------------------------------------------------- @@ -29,11 +34,6 @@ output.elasticsearch: TIP: To obfuscate passwords and other sensitive settings, use the <>. -Elasticsearch doesn't have built-in basic authentication, but you can achieve it -either by using a web proxy or by using X-Pack to secure Elasticsearch. For more -information, see the X-Pack documentation about -{securitydoc}/xpack-security.html[securing Elasticsearch] and <>. - {beatname_uc} verifies the validity of the server certificates and only accepts trusted certificates. Creating a correct SSL/TLS infrastructure is outside the scope of this document. 
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/images/beats-platform.png b/vendor/github.com/elastic/beats/libbeat/docs/images/beats-platform.png index 4fa42274..362c9f0a 100644 Binary files a/vendor/github.com/elastic/beats/libbeat/docs/images/beats-platform.png and b/vendor/github.com/elastic/beats/libbeat/docs/images/beats-platform.png differ diff --git a/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc index f78c1ed1..b78451f1 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc @@ -3,11 +3,15 @@ include::./version.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes62.asciidoc[] +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :beatname_lc: beatname :beatname_uc: a Beat :beatname_pkg: {beatname_lc} +:github_repo_name: beats +:discuss_forum: beats/{beatname_lc} +:beat_default_index_prefix: {beatname_lc} +:has_ml_jobs: yes include::../../libbeat/docs/shared-beats-attributes.asciidoc[] @@ -15,12 +19,8 @@ include::./overview.asciidoc[] include::./communitybeats.asciidoc[] -include::../../libbeat/docs/contributing-to-beats.asciidoc[] - include::./gettingstarted.asciidoc[] -include::./installing-beats.asciidoc[] - include::./breaking.asciidoc[] include::./upgrading.asciidoc[] @@ -28,3 +28,5 @@ include::./upgrading.asciidoc[] include::./config-file-format.asciidoc[] include::./release.asciidoc[] + +include::../../libbeat/docs/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/installing-beats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/installing-beats.asciidoc deleted file mode 100644 index 01e9c024..00000000 --- a/vendor/github.com/elastic/beats/libbeat/docs/installing-beats.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ - -//////////////////////////////////////////////////////////////////// -///// The content about individual configuration options has been -///// moved to the following files: -///// generalconfig.asciidoc for General options -///// outputconfig.asciidoc for Output options -///// loggingconfig.asciidoc for Logging options -///// runconfig.asciidoc for Run Configuration options -///// The content now appears in the guides for each Beat. You can -///// include the content in the guide for your Beat by using the -///// following asciidoc include statements: -///// include::../../libbeat/docs/outputconfig.asciidoc[] -///// include::../../libbeat/docs/generalconfig.asciidoc[] -///// include::../../libbeat/docs/loggingconfig.asciidoc[] -//////////////////////////////////////////////////////////////////// - -[[installing-beats]] -=== Install Beats - -After <> the Elastic stack, you need to install and configure your Beat. - -Each Beat is a separately installable product. 
To get up and running quickly with a Beat, see the Getting Started information for your Beat: - -* {packetbeat}/packetbeat-getting-started.html[Packetbeat] -* {metricbeat}/metricbeat-getting-started.html[Metricbeat] -* {filebeat}/filebeat-getting-started.html[Filebeat] -* {winlogbeat}/winlogbeat-getting-started.html[Winlogbeat] -* {heartbeat}/heartbeat-getting-started.html[Heartbeat] - - diff --git a/vendor/github.com/elastic/beats/libbeat/docs/keystore.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/keystore.asciidoc index 7bdae056..d5588073 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/keystore.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/keystore.asciidoc @@ -10,7 +10,11 @@ ////////////////////////////////////////////////////////////////////////// [[keystore]] -=== Secrets keystore +=== Secrets keystore for secure settings + +++++ +Secrets keystore +++++ When you configure {beatname_uc}, you might need to specify sensitive settings, such as passwords. Rather than relying on file system permissions to protect @@ -31,7 +35,7 @@ For example, imagine that the keystore contains a key called `ES_PWD` with the value `yourelasticsearchpassword`: * In the configuration file, use `output.elasticsearch.password: "${ES_PWD}"` -* On the command line, use: `-E "output.elasticsearch.password=${ES_PWD}"` +* On the command line, use: `-E "output.elasticsearch.password=\${ES_PWD}"` When {beatname_uc} unpacks the configuration, it resolves keys before resolving environment variables and other variables. @@ -114,6 +118,6 @@ To remove a key from the keystore, use: ["source","sh",subs="attributes"] ---------------------------------------------------------------- -{beatname_lc} remove ES_PWD +{beatname_lc} keystore remove ES_PWD ---------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/loggingconfig.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/loggingconfig.asciidoc index 753a6641..eeaff84c 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/loggingconfig.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/loggingconfig.asciidoc @@ -14,7 +14,7 @@ == Configure logging The `logging` section of the +{beatname_lc}.yml+ config file contains options -for configuring the Beats logging output. The logging system can write logs to +for configuring the logging output. The logging system can write logs to the syslog or rotate log files. If logging is not explicitly configured the file output is used. @@ -67,7 +67,7 @@ Minimum log level. One of `debug`, `info`, `warning`, or `error`. The default log level is `info`. `debug`:: Logs debug messages, including a detailed printout of all events -flushed by the Beat. Also logs informational messages, warnings, errors, and +flushed. Also logs informational messages, warnings, errors, and critical errors. When the log level is `debug`, you can specify a list of <> to display debug messages for specific components. If no selectors are specified, the `*` selector is used to display debug messages @@ -84,9 +84,9 @@ published. Also logs any warnings, errors, or critical errors. [[selectors]] ==== `logging.selectors` -The list of debugging-only selector tags used by different Beats components. Use `*` -to enable debug output for all components. For example add `publish` to display -all the debug messages related to event publishing. When starting the Beat, +The list of debugging-only selector tags used by different {beatname_uc} components. 
+Use `*` to enable debug output for all components. For example add `publish` to display +all the debug messages related to event publishing. When starting {beatname_lc}, selectors can be overwritten using the `-d` command line option (`-d` also sets the debug log level). @@ -123,8 +123,7 @@ the <> section for details. [float] ==== `logging.files.name` -The name of the file that logs are written to. By default, the name of the Beat -is used. +The name of the file that logs are written to. The default is '{beatname_lc}'. [float] ==== `logging.files.rotateeverybytes` diff --git a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc index 23775b1b..a1a22fa1 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc @@ -16,6 +16,9 @@ [partintro] -- + +NOTE: {monitoring} for {beatname_uc} requires {es} 6.2 or later. + {monitoring} enables you to easily monitor {beatname_uc} from {kib}. For more information, see {xpack-ref}/xpack-monitoring.html[Monitoring the Elastic Stack] and @@ -24,26 +27,23 @@ information, see To configure {beatname_uc} to collect and send monitoring metrics: . Create a user that has appropriate authority to send system-level monitoring -data to {es}. For example, you can use the built-in `logstash_system` user or -assign the built-in `logstash_system` role to another user. For more +data to {es}. For example, you can use the built-in `beats_system` user or +assign the built-in `beats_system` role to another user. For more information, see {xpack-ref}/setting-up-authentication.html[Setting Up User Authentication] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -. Add the `xpack.monitoring` settings in the {beatname_uc} configuration file. -If you configured {es} output and you want to use the same {es} production -cluster and credentials, you can specify the following minimal configuration -options: +. Add the `xpack.monitoring` settings in the {beatname_uc} configuration file. If you +configured {es} output, specify the following minimal configuration: + -- [source, yml] -------------------- -xpack.monitoring: - enabled: true - elasticsearch: +xpack.monitoring.enabled: true -------------------- -Otherwise, you must specify additional configuration options. For example: +If you configured a different output, such as {ls}, you must specify additional +configuration options. For example: [source, yml] -------------------- @@ -51,10 +51,14 @@ xpack.monitoring: enabled: true elasticsearch: hosts: ["https://example.com:9200", "https://example2.com:9200"] - username: elastic - password: changeme + username: beats_system + password: beatspassword -------------------- +NOTE: Currently you must send monitoring data to the same cluster as all other events. +If you configured {es} output, do not specify additional hosts in the monitoring +configuration. + -- . {kibana-ref}/monitoring-xpack-kibana.html[Configure monitoring in {kib}]. 
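As a rough sketch of how the monitoring settings combine with a secured Elasticsearch output (the user name and keystore reference are illustrative, not prescribed here):

["source","yaml",subs="attributes"]
--------------------
output.elasticsearch:
  hosts: ["https://localhost:9200"]
  username: "{beatname_lc}_internal"   # user with the writer role
  password: "${ES_PWD}"

# Monitoring data goes to the same cluster as the output, so no hosts are
# listed under xpack.monitoring.elasticsearch here.
xpack.monitoring.enabled: true
--------------------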
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc index 04c7ba83..8e6f3b01 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc @@ -13,6 +13,12 @@ [[configuring-output]] == Configure the output +ifdef::only-elasticsearch[] +You configure {beatname_uc} to write to Elasticsearch by setting options +in the `output.elasticsearch` section of the +{beatname_lc}.yml+ config file +endif::[] + +ifndef::only-elasticsearch[] You configure {beatname_uc} to write to a specific output by setting options in the `output` section of the +{beatname_lc}.yml+ config file. Only a single output may be defined. @@ -26,6 +32,11 @@ The following topics describe how to configure each supported output: * <> * <> +If you've secured the {stack}, also read <> for more about +security-related configuration options. + +endif::[] + [[elasticsearch-output]] === Configure the Elasticsearch output @@ -33,7 +44,7 @@ The following topics describe how to configure each supported output: Elasticsearch ++++ -When you specify Elasticsearch for the output, the Beat sends the transactions directly to Elasticsearch by using the Elasticsearch HTTP API. +When you specify Elasticsearch for the output, {beatname_uc} sends the transactions directly to Elasticsearch by using the Elasticsearch HTTP API. Example configuration: @@ -41,13 +52,15 @@ Example configuration: ------------------------------------------------------------------------------ output.elasticsearch: - hosts: ["http://localhost:9200"] + hosts: ["https://localhost:9200"] index: "{beatname_lc}-%{[beat.version]}-%{+yyyy.MM.dd}" ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] ssl.certificate: "/etc/pki/client/cert.pem" ssl.key: "/etc/pki/client/cert.key" ------------------------------------------------------------------------------ +// + To enable SSL, just add `https` to all URLs defined under __hosts__. ["source","yaml",subs="attributes,callouts"] @@ -55,8 +68,8 @@ To enable SSL, just add `https` to all URLs defined under __hosts__. output.elasticsearch: hosts: ["https://localhost:9200"] - username: "admin" - password: "s3cr3t" + username: "{beatname_lc}_internal" + password: "{pwd}" ------------------------------------------------------------------------------ If the Elasticsearch nodes are defined by `IP:PORT`, then add `protocol: https` to the yaml file. @@ -66,14 +79,20 @@ If the Elasticsearch nodes are defined by `IP:PORT`, then add `protocol: https` output.elasticsearch: hosts: ["localhost"] protocol: "https" - username: "admin" - password: "s3cr3t" + username: "{beatname_lc}_internal" + password: "{pwd}" ------------------------------------------------------------------------------ + +For more information about securing {beatname_uc}, see +<>. + ==== Compatibility -This output works with all compatible versions of Elasticsearch. See "Supported Beats Versions" in the https://www.elastic.co/support/matrix#show_compatibility[Elastic Support Matrix]. +This output works with all compatible versions of Elasticsearch. See the +https://www.elastic.co/support/matrix#matrix_compatibility[Elastic Support +Matrix]. 
==== Configuration options @@ -222,6 +241,7 @@ output.elasticsearch: message: "ERR" ------------------------------------------------------------------------------ +ifndef::no-pipeline[] ===== `pipeline` A format string value that specifies the ingest node pipeline to write events to. @@ -247,11 +267,13 @@ Example elasticsearch output with `pipelines`: ["source","yaml"] ------------------------------------------------------------------------------ -filebeat.prospectors: -- paths: ["/var/log/app/normal/*.log"] +filebeat.inputs: +- type: log + paths: ["/var/log/app/normal/*.log"] fields: type: "normal" -- paths: ["/var/log/app/critical/*.log"] +- type: log + paths: ["/var/log/app/critical/*.log"] fields: type: "critical" @@ -266,25 +288,34 @@ output.elasticsearch: when.equals: fields.type: "normal" ------------------------------------------------------------------------------ +endif::[] ===== `max_retries` +ifeval::[("{beatname_lc}"=="filebeat") or ("{beatname_lc}"=="winlogbeat")] + +{beatname_uc} ignores the `max_retries` setting and retries indefinitely. + +endif::[] + +ifeval::[("{beatname_lc}"!="filebeat") and ("{beatname_lc}"!="winlogbeat")] + The number of times to retry publishing an event after a publishing failure. After the specified number of retries, the events are typically dropped. -Some Beats, such as Filebeat, ignore the `max_retries` setting and retry until all -events are published. Set `max_retries` to a value less than 0 to retry until all events are published. The default is 3. +endif::[] + + ===== `bulk_max_size` The maximum number of events to bulk in a single Elasticsearch bulk API index request. The default is 50. -If the Beat sends single events, the events are collected into batches. If the Beat publishes -a large batch of events (larger than the value specified by `bulk_max_size`), the batch is -split. +Events can be collected into batches. {beatname_uc} will split batches larger than `bulk_max_size` +into multiple batches. Specifying a larger batch size can improve performance by lowering the overhead of sending events. However big batch sizes can also increase processing times, which might result in @@ -295,6 +326,20 @@ Setting `bulk_max_size` to values less than or equal to 0 disables the splitting of batches. When splitting is disabled, the queue decides on the number of events to be contained in a batch. +===== `backoff.init` + +The number of seconds to wait before trying to reconnect to Elasticsearch after +a network error. After waiting `backoff.init` seconds, {beatname_uc} tries to +reconnect. If the attempt fails, the backoff timer is increased exponentially up +to `backoff.max`. After a successful connection, the backoff timer is reset. The +default is 1s. + + +===== `backoff.max` + +The maximum number of seconds to wait before attempting to connect to +Elasticsearch after a network error. The default is 60s. + ===== `timeout` The http request timeout in seconds for the Elasticsearch request. The default is 90. @@ -307,6 +352,7 @@ Elasticsearch. See <> for more information. +ifndef::only-elasticsearch[] [[logstash-output]] === Configure the Logstash output @@ -385,7 +431,9 @@ will be similar to events directly indexed by Beats into Elasticsearch. ==== Compatibility -This output works with all compatible versions of Logstash. See "Supported Beats Versions" in the https://www.elastic.co/support/matrix#show_compatibility[Elastic Support Matrix]. +This output works with all compatible versions of Logstash. 
See the +https://www.elastic.co/support/matrix#matrix_compatibility[Elastic Support +Matrix]. ==== Configuration options @@ -432,6 +480,14 @@ load balances published events onto all Logstash hosts. If set to false, the output plugin sends all events to only one host (determined at random) and will switch to another host if the selected one becomes unresponsive. The default value is false. +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------ +output.logstash: + hosts: ["localhost:5044", "localhost:5045"] + loadbalance: true + index: {beatname_lc} +------------------------------------------------------------------------------ + ===== `ttl` Time to live for a connection to Logstash after which the connection will be re-established. @@ -444,14 +500,6 @@ The default value is 0. NOTE: The "ttl" option is not yet supported on an async Logstash client (one with the "pipelining" option set). -["source","yaml",subs="attributes"] ------------------------------------------------------------------------------- -output.logstash: - hosts: ["localhost:5044", "localhost:5045"] - loadbalance: true - index: {beatname_lc} ------------------------------------------------------------------------------- - ===== `pipelining` Configures number of batches to be sent asynchronously to logstash while waiting @@ -513,15 +561,23 @@ The number of seconds to wait for responses from the Logstash server before timi ===== `max_retries` +ifeval::[("{beatname_lc}"=="filebeat") or ("{beatname_lc}"=="winlogbeat")] + +{beatname_uc} ignores the `max_retries` setting and retries indefinitely. + +endif::[] + +ifeval::[("{beatname_lc}"!="filebeat") and ("{beatname_lc}"!="winlogbeat")] + The number of times to retry publishing an event after a publishing failure. After the specified number of retries, the events are typically dropped. -Some Beats, such as Filebeat, ignore the `max_retries` setting and retry until all -events are published. Set `max_retries` to a value less than 0 to retry until all events are published. The default is 3. +endif::[] + ===== `bulk_max_size` The maximum number of events to bulk in a single Logstash request. The default is 2048. @@ -548,6 +604,19 @@ On error the number of events per transaction is reduced again. The default is `false`. +===== `backoff.init` + +The number of seconds to wait before trying to reconnect to Logstash after +a network error. After waiting `backoff.init` seconds, {beatname_uc} tries to +reconnect. If the attempt fails, the backoff timer is increased exponentially up +to `backoff.max`. After a successful connection, the backoff timer is reset. The +default is 1s. + +===== `backoff.max` + +The maximum number of seconds to wait before attempting to connect to +Logstash after a network error. The default is 60s. + [[kafka-output]] === Configure the Kafka output @@ -704,15 +773,23 @@ brokers, topics, partition, and active leaders to use for publishing. ===== `max_retries` +ifeval::[("{beatname_lc}"=="filebeat") or ("{beatname_lc}"=="winlogbeat")] + +{beatname_uc} ignores the `max_retries` setting and retries indefinitely. + +endif::[] + +ifeval::[("{beatname_lc}"!="filebeat") and ("{beatname_lc}"!="winlogbeat")] + The number of times to retry publishing an event after a publishing failure. After the specified number of retries, the events are typically dropped. -Some Beats, such as Filebeat, ignore the `max_retries` setting and retry until all -events are published. 
Set `max_retries` to a value less than 0 to retry until all events are published. The default is 3. +endif::[] + ===== `bulk_max_size` The maximum number of events to bulk in a single Kafka request. The default is 2048. @@ -926,15 +1003,24 @@ The Redis connection timeout in seconds. The default is 5 seconds. ===== `max_retries` +ifeval::[("{beatname_lc}"=="filebeat") or ("{beatname_lc}"=="winlogbeat")] + +{beatname_uc} ignores the `max_retries` setting and retries indefinitely. + +endif::[] + +ifeval::["{beatname_lc}"!="filebeat" and "{beatname_lc}"!="winlogbeat"] + The number of times to retry publishing an event after a publishing failure. After the specified number of retries, the events are typically dropped. -Some Beats, such as Filebeat, ignore the `max_retries` setting and retry until all -events are published. Set `max_retries` to a value less than 0 to retry until all events are published. The default is 3. +endif::[] + + ===== `bulk_max_size` The maximum number of events to bulk in a single Redis request or pipeline. The default is 2048. @@ -1028,8 +1114,8 @@ rotated. The default value is 10240 KB. ===== `number_of_files` The maximum number of files to save under <>. When this number of files is reached, the -oldest file is deleted, and the rest of the files are shifted from last to first. The default -is 7 files. +oldest file is deleted, and the rest of the files are shifted from last to first. +The number of files must be between 2 and 1024. The default is 7. ===== `permissions` @@ -1162,3 +1248,5 @@ When specified, the `cloud.auth` overwrites the `output.elasticsearch.username` `output.elasticsearch.password` settings. Because the Kibana settings inherit the username and password from the Elasticsearch output, this can also be used to set the `setup.kibana.username` and `setup.kibana.password` options. + +endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc index 48088777..332115a4 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc @@ -4,22 +4,34 @@ Beats overview ++++ -The _Beats_ are open source data shippers that you install as _agents_ on -your servers to send different types of operational data to -https://www.elastic.co/products/elasticsearch[Elasticsearch]. Beats can -send data directly to Elasticsearch or send it to Elasticsearch via Logstash, which -you can use to parse and transform the data. - -_Packetbeat_, _Filebeat_, _Metricbeat_, and _Winlogbeat_ are a few examples of _Beats_. Packetbeat -is a network packet analyzer that ships information about the transactions -exchanged between your application servers. Filebeat ships log files from your servers. Metricbeat is a server monitoring agent -that periodically collects metrics from the operating systems and services -running on your servers. And Winlogbeat ships Windows event logs. +{beats} are open source data shippers that you install as agents on your +servers to send operational data to +https://www.elastic.co/products/elasticsearch[{es}]. 
Elastic provides {beats} +for capturing: + +[horizontal] +Audit data:: https://www.elastic.co/products/beats/auditbeat[Auditbeat] +Log files:: https://www.elastic.co/products/beats/filebeat[Filebeat] +Availability:: https://www.elastic.co/products/beats/heartbeat[Heartbeat] +Metrics:: https://www.elastic.co/products/beats/metricbeat[Metricbeat] +Network traffic:: https://www.elastic.co/products/beats/packetbeat[Packetbeat] +Windows event logs:: https://www.elastic.co/products/beats/winlogbeat[Winlogbeat] + +{beats} can send data directly to {es} or via +https://www.elastic.co/products/logstash[{ls}], where you can further process +and enhance the data, before visualizing it in +https://www.elastic.co/products/kibana[{kib}]. image:./images/beats-platform.png[Beats Platform] -If you have a specific use case to solve, we encourage you to create your own -Beat. We created an infrastructure to simplify the process. The _libbeat_ -library, written entirely in Golang, offers the API that all Beats use to -ship data to Elasticsearch, configure the input options, implement logging, -and more. +To get started, see <>. + +[float] +=== Need to capture other kinds of data? + +If you have a specific use case to solve, we encourage you to create a +<>. We've created an infrastructure to simplify +the process. The _libbeat_ library, written entirely in Golang, offers the API +that all Beats use to ship data to Elasticsearch, configure the input options, +implement logging, and more. To learn how to create a new Beat, see the +{beatsdevguide}/index.html[Beats Developer Guide]. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc index c8b64059..deb3c8e5 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc @@ -1,25 +1,24 @@ [[defining-processors]] === Define processors -You define processors in the +{beatname_lc}.yml+ file to filter and enhance the -data before sending events to the configured output. - -To define a processor, you specify the processor name, an optional condition, -and a set of parameters: +You can use processors to filter and enhance data before sending it to the +configured output. To define a processor, you specify the processor name, an +optional condition, and a set of parameters: [source,yaml] ------ processors: - - : - - when: - - - : - - when: - -... +- : + when: + + + +- : + when: + + +... ------ Where: @@ -27,11 +26,134 @@ Where: * `` specifies a <> that performs some kind of action, such as selecting the fields that are exported or adding metadata to the event. -* `` specifies an optional <>. If the +* `` specifies an optional <>. If the condition is present, then the action is executed only if the condition is fulfilled. If no condition is passed, then the action is always executed. * `` is the list of parameters to pass to the processor. + +[[where-valid]] +==== Where are processors valid? + +// TODO: ANY NEW BEATS THAT RE-USE THIS TOPIC NEED TO DEFINE processor-scope.
+ +ifeval::["{beatname_lc}"=="filebeat"] +:processor-scope: input +endif::[] + +ifeval::["{beatname_lc}"=="auditbeat" or "{beatname_lc}"=="metricbeat"] +:processor-scope: module +endif::[] + +ifeval::["{beatname_lc}"=="packetbeat"] +:processor-scope: protocol +endif::[] + +ifeval::["{beatname_lc}"=="heartbeat"] +:processor-scope: monitor +endif::[] + +ifeval::["{beatname_lc}"=="winlogbeat"] +:processor-scope: event log shipper +endif::[] + +Processors are valid: + +* At the top-level in the configuration. The processor is applied to all data +collected by {beatname_uc}. +* Under a specific {processor-scope}. The processor is applied to the data +collected for that {processor-scope}. For example: ++ +ifeval::["{beatname_lc}"=="filebeat"] +[source,yaml] +------ +- type: + processors: + - : + when: + + +... +------ ++ +Similarly, for {beatname_uc} modules, you can define processors under the +`input` section of the module definition. +endif::[] +ifeval::["{beatname_lc}"=="metricbeat"] +[source,yaml] +---- +- module: + metricsets: [""] + processors: + - : + when: + + +---- +endif::[] +ifeval::["{beatname_lc}"=="auditbeat"] +[source,yaml] +---- +auditbeat.modules: +- module: + processors: + - : + when: + + +---- +endif::[] +ifeval::["{beatname_lc}"=="packetbeat"] +[source,yaml] +---- +packetbeat.protocols: +- type: + processors: + - : + when: + + +---- + +* Under `packetbeat.flows`. The processor is applied to the data in +<>: ++ +[source,yaml] +---- +packetbeat.flows: + processors: + - : + when: + + +---- +endif::[] +ifeval::["{beatname_lc}"=="heartbeat"] +[source,yaml] +---- +heartbeat.monitors: +- type: + processors: + - : + when: + + +---- +endif::[] +ifeval::["{beatname_lc}"=="winlogbeat"] +[source,yaml] +---- +winlogbeat.event_logs: +- name: + processors: + - : + when: + + +---- +endif::[] + + [[processors]] ==== Processors @@ -43,8 +165,10 @@ The supported processors are: * <> * <> * <> + * <> * <> * <> + * <> [[conditions]] ==== Conditions @@ -53,8 +177,8 @@ Each condition receives a field to compare. You can specify multiple fields under the same condition by using `AND` between the fields (for example, `field1 AND field2`). -For each field, you can specify a simple field name or a nested map, for -example `dns.question.name`. +For each field, you can specify a simple field name or a nested map, for example +`dns.question.name`. See <> for a list of all the fields that are exported by {beatname_uc}. @@ -65,6 +189,7 @@ The supported conditions are: * <> * <> * <> +* <> * <> * <> * <> @@ -156,6 +281,23 @@ range: ------ +[float] +[[condition-has_fields]] +===== `has_fields` + +The `has_fields` condition checks if all the given fields exist in the +event. The condition accepts a list of string values denoting the field names. + +For example, the following condition checks if the `http.response.code` field +is present in the event. + + +[source,yaml] +------ +has_fields: ['http.response.code'] +------ + + [float] [[condition-or]] ===== `or` @@ -512,6 +654,49 @@ section. NOTE: If you define an empty list of fields under `include_fields`, then only the required fields, `@timestamp` and `type`, are exported. +[[rename-fields]] +=== Rename fields from events + +The `rename` processor specifies a list of fields to rename. Under the `fields` +key each entry contains a `from: old-key` and a `to: new-key` pair. `from` is +the origin and `to` the target name of the field. + +Renaming fields can be useful in cases where field names cause conflicts. 
For +example, if an event has two fields, `c` and `c.b`, that are both assigned scalar +values (e.g. `{"c": 1, "c.b": 2}`), this will result in an Elasticsearch error at +ingest time. This is because the value of `c` cannot simultaneously be a scalar +and an object. To prevent this, the `rename` processor can be used to rename `c` to +`c.value`. + +The `rename` processor cannot be used to overwrite fields. To overwrite a field, either +first rename the target field or use the `drop_fields` processor to drop the +field and then rename the field. + +[source,yaml] +------- +processors: +- rename: + fields: + - from: "a.g" + to: "e.d" + ignore_missing: false + fail_on_error: true +------- + +The `rename` processor has the following configuration settings: + +`ignore_missing`:: (Optional) If set to true, no error is logged in case a key +which should be renamed is missing. Default is `false`. + +`fail_on_error`:: (Optional) If set to true, in case of an error the renaming of +fields is stopped and the original event is returned. If set to false, renaming +continues even if an error occurs during renaming. Default is `true`. + +See <> for a list of supported conditions. + +You can specify multiple `rename` processors under the `processors` +section. + [[add-kubernetes-metadata]] === Add Kubernetes metadata @@ -543,7 +728,7 @@ default. For example, FileBeat enables the `container` indexer, which indexes pod metadata based on all container IDs, and a `logs_path` matcher, which takes the `source` field, extracts the container ID, and uses it to retrieve metadata. -The configuration below enables the processor when the Beat is run as a pod in +The configuration below enables the processor when {beatname_lc} is run as a pod in Kubernetes. [source,yaml] @@ -619,6 +804,7 @@ processors: #match_pids: ["process.pid", "process.ppid"] #match_source: true #match_source_index: 4 + #match_short_id: true #cleanup_timeout: 60 # To connect to Docker over TLS you must specify a client and CA certificate. #ssl: @@ -645,9 +831,44 @@ is `["process.pid", "process.ppid"]`. `match_source`:: (Optional) Match container ID from a log path present in the `source` field. Enabled by default. +`match_short_id`:: (Optional) Match container short ID from a log path present +in the `source` field. Disabled by default. +This allows matching directory names that contain the first 12 characters +of the container ID. For example, `/var/log/containers/b7e3460e2b21/*.log`. + `match_source_index`:: (Optional) Index in the source path split by `/` to look for container ID. It defaults to 4 to match `/var/lib/docker/containers//*.log` `cleanup_timeout`:: (Optional) Time of inactivity to consider we can clean and forget metadata for a container, 60s by default. + + +[[add-host-metadata]] +=== Add Host metadata + +beta[] + +The `add_host_metadata` processor annotates each event with relevant metadata from the host machine. +The fields added to the event look like the following: + +[source,json] +------------------------------------------------------------------------------- +{ + "host":{ + "architecture":"x86_64", + "name":"example-host", + "id":"", + "os":{ + "family":"darwin", + "build":"16G1212", + "platform":"darwin", + "version":"10.12.6" + }, + "ip": ["192.168.0.1", "10.0.0.1"], + "mac": ["00:25:96:12:34:56", "72:00:06:ff:79:f1"] + } +} +------------------------------------------------------------------------------- + +NOTE: The host information is refreshed every 5 minutes.
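The section above describes the fields the processor adds but not how to enable it; a minimal sketch, assuming no options are required, is to list it under `processors` with an empty configuration:

["source","yaml"]
-------------------------------------------------------------------------------
processors:
- add_host_metadata: ~
-------------------------------------------------------------------------------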
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc index eefa9727..8531ece9 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc @@ -6,10 +6,12 @@ queue is responsible for buffering and combining events into batches that can be consumed by the outputs. The outputs will use bulk operations to send a batch of events in one transaction. -You can configure the type and behavior of the internal queue by setting options in the `queue` section of the +{beatname_lc}.yml+ config file. +You can configure the type and behavior of the internal queue by setting +options in the `queue` section of the +{beatname_lc}.yml+ config file. Only one +queue type can be configured. -Example configuration: +This sample configuration sets the memory queue to buffer up to 4096 events: [source,yaml] ------------------------------------------------------------------------------ @@ -21,19 +23,22 @@ queue.mem: [[configuration-internal-queue-memory]] === Configure the memory queue -The memory queue keeps all events in memory. It is the only queue type -supported right now. By default no flush interval is configured. All events -published to this queue will be directly consumed by the outputs. +The memory queue keeps all events in memory. + +If neither a flush interval nor a minimum number of events to flush is configured, +all events published to this queue will be directly consumed by the outputs. +To enforce spooling in the queue, set the `flush.min_events` and `flush.timeout` options. + +By default, `flush.min_events` is set to 2048 and `flush.timeout` is set to 1s. + The output's `bulk_max_size` setting limits the number of events being processed at once. The memory queue waits for the output to acknowledge or drop events. If -the queue is full, no new events can be inserted into the memeory queue. Only +the queue is full, no new events can be inserted into the memory queue. Only after the signal from the output will the queue free up space for more events to be accepted. -To enforce spooling in the queue, set the `flush.min_events` and `flush.timeout` options. - This sample configuration forwards events to the output if 512 events are -available or the oldest available event is already waiting for 5s in the queue: +available or the oldest available event has been waiting for 5s in the queue: [source,yaml] ------------------------------------------------------------------------------ @@ -62,7 +67,7 @@ Minimum number of events required for publishing. If this value is set to 0, the output can start publishing events without additional waiting times. Otherwise the output has to wait for more events to become available. -The default value is 0. +The default value is 2048. [float] ===== `flush.timeout` @@ -70,5 +75,164 @@ The default value is 0. Maximum wait time for `flush.min_events` to be fulfilled. If set to 0s, events will be immediately available for consumption. -The default values is 0s. +The default value is 1s. + +[float] +[[configuration-internal-queue-spool]] +=== Configure the file spool queue + +beta[] + +The file spool queue stores all events in an on-disk ring buffer. The spool +has a write buffer to which new events are written. Events written to the +spool are forwarded to the outputs only after the write buffer has been +flushed successfully. + +The spool waits for the output to acknowledge or drop events.
If the spool is +full, no new events can be inserted. The spool will block. Space is freed only +after a signal from the output has been received. + +On disk, the spool divides a file into pages. The `file.page_size` setting +configures the file's page size at file creation time. The optimal page size depends +on the effective block size used by the underlying file system. + +This sample configuration enables the spool with all default settings (see +<> for defaults) and the +default file path: + +[source,yaml] +------------------------------------------------------------------------------ +queue.spool: ~ +------------------------------------------------------------------------------ + +This sample configuration creates a spool of 512MiB with 16KiB pages. The +write buffer is flushed if 10MiB of content or 1024 events have been +written. If the oldest available event has been waiting for 5s in the write +buffer, the buffer will be flushed as well: + +[source,yaml] +------------------------------------------------------------------------------ +queue.spool: + file: + path: "${path.data}/spool.dat" + size: 512MiB + page_size: 16KiB + write: + buffer_size: 10MiB + flush.timeout: 5s + flush.events: 1024 +------------------------------------------------------------------------------ + +[float] +[[configuration-internal-queue-spool-reference]] +==== Configuration options + +You can specify the following options in the `queue.spool` section of the ++{beatname_lc}.yml+ config file: + +[float] +===== `file.path` + +The spool file path. The file is created on startup if it does not exist. + +The default value is "${path.data}/spool.dat". + +[float] +===== `file.permissions` + +The file permissions. The permissions are applied when the file is +created. In case the file already exists, the file permissions are compared +with `file.permissions`. The spool file is not opened if the actual file +permissions are more permissive than configured. + +The default value is 0600. + + +[float] +===== `file.size` + +Spool file size. + +The default value is 100 MiB. + +NOTE: The size should be much larger than the expected event sizes +and write buffer size. Otherwise, the queue will block because it does not have +enough space. + +NOTE: The file size cannot be changed once the file has been generated. This +limitation will be removed in the future. + +[float] +===== `file.page_size` + +The file's page size. + +The spool file is split into pages of `page_size`. All I/O +operations operate on complete pages. + +The default value is 4096 (4KiB). + +NOTE: This setting should match the file system's minimum block size. If the +`page_size` is not a multiple of the file system's block size, the file system +might create additional read operations on writes. + +NOTE: The page size is only set at file creation time. It cannot be changed +afterwards. + +[float] +===== `file.prealloc` + +If `prealloc` is set to `true`, truncate is used to reserve the space up to +`file.size`. This setting is only used when the file is created. + +The file will grow dynamically if `prealloc` is set to `false`. The spool +blocks if `prealloc` is `false` and the system is out of disk space. + +The default value is `true`. + +[float] +===== `write.buffer_size` + +The write buffer size. The write buffer is flushed once the buffer size is exceeded. + +Very big events are allowed to be bigger than the configured buffer size, but +the write buffer will be flushed right after the event has been serialized. + +The default value is 1MiB.
+ +[float] +===== `write.codec` + +The event encoding used for serialized events. Valid values are `json` and `cbor`. + +The default value is `cbor`. + +[float] +===== `write.flush.timeout` + +Maximum wait time for the oldest event in the write buffer. If set to 0, the +write buffer will only be flushed once `write.flush.events` or `write.buffer_size` is fulfilled. + +The default value is 1s. + +[float] +===== `write.flush.events` + +Number of buffered events. The write buffer is flushed once the limit is reached. + +The default value is 16384. + +[float] +===== `read.flush.timeout` + +The spool reader tries to read up to the output's `bulk_max_size` events at once. + +If `read.flush.timeout` is set to 0s, all available events are forwarded +immediately to the output. + +If `read.flush.timeout` is set to a value greater than 0s, the spool will wait +for more events to be flushed. Events are forwarded to the output if +`bulk_max_size` events have been read or the oldest read event has been waiting +for the configured duration. +The default value is 0s. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/reference-yml.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/reference-yml.asciidoc index 0b04a14a..e869825a 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/reference-yml.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/reference-yml.asciidoc @@ -5,22 +5,10 @@ The following reference file is available with your {beatname_uc} installation. shows all non-deprecated {beatname_uc} options. You can copy from this file and paste configurations into the +{beatname_lc}.yml+ file to customize it. -ifeval::["{beatname_lc}"!="auditbeat"] - -TIP: For rpm and deb, you'll find the reference configuration file at +/etc/{beatname_lc}/{beatname_lc}.reference.yml+. Under -Docker, it's located at +/usr/share/{beatname_lc}/{beatname_lc}.reference.yml+. For mac and win, -look in the archive that you just extracted. - -endif::[] - -ifeval::["{beatname_lc}"=="auditbeat"] - TIP: For rpm and deb, you'll find the reference configuration file at +/etc/{beatname_lc}/{beatname_lc}.reference.yml+. Under Docker, it's located at +/usr/share/{beatname_lc}/{beatname_lc}.reference.yml+. For mac and win, look in the archive that you just extracted. -endif::[] - The contents of the file are included here for your convenience. [source,yaml] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/regexp.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/regexp.asciidoc index 469931de..24fc6f1c 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/regexp.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/regexp.asciidoc @@ -17,11 +17,9 @@ ifeval::["{beatname_lc}"=="filebeat"] {beatname_uc} has several configuration options that accept regular expressions. -For example, <>, -<>, <>, and -<> all accept regular expressions. Some options, -however, such as the prospector <> option, accept only -glob-based paths. +For example, `multiline.pattern`, `include_lines`, `exclude_lines`, and +`exclude_files` all accept regular expressions. Some options, however, such as +the input `paths` option, accept only glob-based paths.
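As a quick illustration of the distinction (the paths and patterns here are invented for this example):

["source","yaml"]
-------------------------------------------------------------------------------
filebeat.inputs:
- type: log
  paths:
    - /var/log/app/*.log      # glob-based path, not a regular expression
  exclude_lines: ['^DBG']     # regular expression
  exclude_files: ['\.gz$']    # regular expression
-------------------------------------------------------------------------------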
endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc index ad1fbeaf..d2b2b312 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc @@ -8,6 +8,9 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. +* <> +* <> +* <> * <> * <> * <> @@ -25,11 +28,6 @@ upgrade. * <> * <> * <> -* <> -* <> -* <> -* <> -* <> * <> * <> * <> diff --git a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc index 7221a5b4..64e9058b 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc @@ -88,7 +88,7 @@ install {beatname_uc} by running: sudo apt-get update && sudo apt-get install {beatname_pkg} -------------------------------------------------- -. To configure the Beat to start automatically during boot, run: +. To configure {beatname_uc} to start automatically during boot, run: + ["source","sh",subs="attributes"] -------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/security/basic-auth.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/security/basic-auth.asciidoc index 85fd6dec..1fbb0306 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/security/basic-auth.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/security/basic-auth.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[beats-basic-auth]] -=== Configuring Authentication Credentials for {beatname_uc} +=== Configure authentication credentials When sending data to a secured cluster through the `elasticsearch` output, {beatname_uc} must either provide basic authentication credentials @@ -8,88 +8,141 @@ or present a client certificate. To configure authentication credentials for {beatname_uc}: -. Create a role that has the `manage_index_templates` and -`monitor` cluster privileges, and `read`, `write`, and `create_index` -privileges for the indices that {beatname_uc} creates. You can create roles from the -**Management / Roles** UI in {kib} or through the `role` API. -For example, the following request creates a ++{beatname_lc}_writer++ role: +. Create a writer role that has the following privileges: + +-- +ifeval::["{beatname_lc}"!="filebeat"] +* *Cluster*: `manage_index_templates` and `monitor` +endif::[] +ifeval::["{beatname_lc}"=="filebeat"] +* *Cluster*: `manage_index_templates`, `monitor`, and +`manage_ingest_pipelines` +endif::[] +* *Index*: `write` and `create_index` on the {beatname_uc} indices +-- ++ +You can create roles from the **Management / Roles** UI in {kib} or through the +`role` API. 
For example, the following request creates a role named +++{beat_default_index_prefix}_writer++: ++ +-- +ifeval::["{beatname_lc}"!="filebeat"] ["source","sh",subs="attributes,callouts"] --------------------------------------------------------------- -POST _xpack/security/role/{beatname_lc}_writer +POST _xpack/security/role/{beat_default_index_prefix}_writer { - "cluster": ["manage_index_templates", "monitor"], + "cluster": ["manage_index_templates","monitor"], "indices": [ { - "names": [ "{beatname_lc}-*" ], <1> + "names": [ "{beat_default_index_prefix}-*" ], <1> "privileges": ["write","create_index"] } ] } --------------------------------------------------------------- <1> If you use a custom {beatname_uc} index pattern, specify that pattern -instead of the default ++{beatname_lc}-*++ pattern. +instead of the default ++{beat_default_index_prefix}-*++ pattern. +endif::[] +ifeval::["{beatname_lc}"=="filebeat"] +["source","sh",subs="attributes,callouts"] +--------------------------------------------------------------- +POST _xpack/security/role/{beat_default_index_prefix}_writer +{ + "cluster": ["manage_index_templates","monitor","manage_ingest_pipelines"], <1> + "indices": [ + { + "names": [ "{beat_default_index_prefix}-*" ], <2> + "privileges": ["write","create_index"] + } + ] +} +--------------------------------------------------------------- +// CONSOLE +<1> The `manage_ingest_pipelines` cluster privilege is required to run +{beatname_uc} modules. +<2> If you use a custom {beatname_uc} index pattern, specify that pattern +instead of the default ++{beat_default_index_prefix}-*++ pattern. +endif::[] +-- . Assign the writer role to the user that {beatname_uc} will use to connect to -{es}: +{es}. If you plan to load the pre-built {kib} dashboards, also assign the +`kibana_user` role. +ifdef::has_ml_jobs[] +If you plan to load machine learning jobs, assign the `machine_learning_admin` +role. +endif::[] -.. To authenticate as a native user, create a user for the {beatname_uc} to use -internally and assign it the writer role. You can create users from the -**Management / Users** UI in {kib} or through the `user` API. For example, the -following request creates a ++{beatname_lc}_internal++ user that has the -++{beatname_lc}_writer++ role: +.. To authenticate as a native user, create a user for {beatname_uc} to use +internally and assign it the writer role, plus any other roles that are +needed. + +You can create users from the **Management / Users** UI in {kib} or through the +`user` API. For example, the following request creates a user +named ++{beat_default_index_prefix}_internal++ that has the +++{beat_default_index_prefix}_writer++ and `kibana_user` roles: ++ +-- ["source","sh",subs="attributes,callouts"] --------------------------------------------------------------- -POST /_xpack/security/user/{beatname_lc}_internal +POST /_xpack/security/user/{beat_default_index_prefix}_internal { - "password" : "x-pack-test-password", - "roles" : [ "{beatname_lc}_writer"], + "password" : "{pwd}", + "roles" : [ "{beat_default_index_prefix}_writer","kibana_user"], "full_name" : "Internal {beatname_uc} User" } --------------------------------------------------------------- +// CONSOLE + +-- -.. To authenticate using PKI authentication, assign the writer role -to the internal {beatname_uc} user in the `role_mapping.yml` configuration file. Specify -the user by the distinguished name that appears in its certificate. +.. 
To use PKI authentication, assign the writer role, plus any other roles that are +needed, in the `role_mapping.yml` configuration file. Specify the user by the +distinguished name that appears in its certificate: + -- ["source","yaml",subs="attributes,callouts"] --------------------------------------------------------------- -{beatname_lc}_writer: +{beat_default_index_prefix}_writer: + - "cn=Internal {beatname_uc} User,ou=example,o=com" +kibana_user: - "cn=Internal {beatname_uc} User,ou=example,o=com" --------------------------------------------------------------- + + For more information, see {xpack-ref}/mapping-roles.html#mapping-roles-file[Using Role Mapping Files]. -- -. Configure authentication credentials for the `elasticsearch` output -in the {beatname_uc} configuration file: +. In the {beatname_uc} configuration file, specify authentication credentials +for the `elasticsearch` output: + -.. To use basic authentication, configure the `username` and `password` -settings. For example, the following {beatname_uc} output configuration -uses the native ++{beatname_lc}_internal++ user to connect to {es}: +.. To use basic authentication, configure the `username` and `password` settings. +For example, the following {beatname_uc} output configuration uses the native +++{beat_default_index_prefix}_internal++ user to connect to {es}: + ["source","js",subs="attributes,callouts"] -------------------------------------------------- output.elasticsearch: - hosts: ["localhost:9200"] - index: "{beatname_lc}" - username: "{beatname_lc}_internal" - password: "x-pack-test-password" + hosts: ["localhost:9200"] + username: "{beat_default_index_prefix}_internal" <1> + password: "{pwd}" <2> -------------------------------------------------- +<1> You created this user earlier. +<2> The example shows a hard-coded password, but you should store sensitive +values in the <>. -.. To use PKI authentication, configure the `certificate` and -`key` settings: +.. To use PKI authentication, configure the `certificate` and `key` settings: + ["source","js",subs="attributes,callouts"] -------------------------------------------------- output.elasticsearch: - hosts: ["localhost:9200"] - index: "{beatname_lc}" - ssl.certificate: "/etc/pki/client/cert.pem" <1> - ssl.key: "/etc/pki/client/cert.key" + hosts: ["localhost:9200"] + ssl.certificate: "/etc/pki/client/cert.pem" <1> + ssl.key: "/etc/pki/client/cert.key" -------------------------------------------------- <1> The distinguished name (DN) in the certificate must be mapped to -the writer role in the `role_mapping.yml` configuration file on each -node in the {es} cluster. +the ++{beat_default_index_prefix}_writer++ and `kibana_user` roles in the +`role_mapping.yml` configuration file on each node in the {es} cluster. + diff --git a/vendor/github.com/elastic/beats/libbeat/docs/security/beats-system.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/security/beats-system.asciidoc new file mode 100644 index 00000000..63064292 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/security/beats-system.asciidoc @@ -0,0 +1,23 @@ +[role="xpack"] +[[beats-system-user]] +=== Set the password for the `beats_system` built-in user + +{security} provides built-in user credentials in {es} that have a fixed set of +privileges. In 6.3.0 and later releases, there is a `beats_system` built-in user, +which {beatname_uc} uses to store monitoring information in {es}. + +The initial passwords for all of the built-in users are set by using the +`setup-passwords` tool in {es}. 
Thereafter, you can change the passwords by +using the *Management > Users* page in Kibana or the +{ref}/security-api-change-password.html[Change Password API]. + +IMPORTANT: If you upgraded from {es} version 6.2 or earlier, you will not +have set a password for the `beats_system` user. A user with the +`manage_security` privilege must change the password for this built-in user. + +For more +information, see: + +* {xpack-ref}/setting-up-authentication.html[Setting Up User Authentication] +* {xpack-ref}/built-in-roles.html[Built-in Roles] +* <> diff --git a/vendor/github.com/elastic/beats/libbeat/docs/security/securing-beats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/security/securing-beats.asciidoc index f2fdc546..e6b90036 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/security/securing-beats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/security/securing-beats.asciidoc @@ -1,25 +1,81 @@ [role="xpack"] [[securing-beats]] -== {beatname_uc} and {security} +== Configure {beatname_uc} to use {security} -If you want {beatname_uc} to connect to a cluster that has {security} enabled, -there are extra configuration steps. +++++ +Use {security} +++++ +If you want {beatname_uc} to connect to a cluster that has +{securitydoc}/xpack-security.html[{security}] enabled, there are extra +configuration steps: + +. <>. ++ +ifeval::["{beatname_lc}"=="filebeat"] +To send data to a secured cluster through the `elasticsearch` output, +{beatname_uc} needs to authenticate as a user who can manage index templates, +monitor the cluster, create indices, read and write to the indices +it creates, and manage ingest pipelines. +endif::[] +ifeval::["{beatname_lc}"!="filebeat"] To send data to a secured cluster through the `elasticsearch` output, {beatname_uc} needs to authenticate as a user who can manage index templates, -monitor the cluster, create indices, and read, and write to the indices -it creates. See <>. +monitor the cluster, create indices, and read and write to the indices +it creates. +endif::[] -If encryption is enabled on the cluster, you also need to enable HTTPS in the -{beatname_uc} configuration. See <>. +. <>. ++ +To search the indexed {beatname_uc} data and visualize it in {kib}, users need +access to the indices {beatname_uc} creates. -In addition to configuring authentication credentials for the {beatname_uc} -itself, you need to grant authorized users permission to access the indices it -creates. See <>. +. <>. ++ +If encryption is enabled on the cluster, you need to enable HTTPS in the +{beatname_uc} configuration. + +ifeval::["{beatname_lc}"!="apm-server"] +. <>. ++ +{beatname_uc} uses the `beats_system` user to send monitoring data to {es}. If +you plan to monitor {beatname_uc} in {kib} and have not yet set up the +password, set it up now. +endif::[] For more information about {security}, see -{xpack-ref}/xpack-security.html[Securing {es} and {kib}]. +{xpack-ref}/xpack-security.html[Securing the {stack}]. + +[float] +=== {beatname_uc} features that require authorization + +After securing {beatname_uc}, make sure your users have the roles (or associated +privileges) required to use these {beatname_uc} features. You must create the +++{beat_default_index_prefix}_writer++ and +++{beat_default_index_prefix}_reader++ roles (see <> and +<>). The `machine_learning_admin` and `kibana_user` roles are +{xpack-ref}/built-in-roles.html[built-in]. 
+ +[options="header"] +|======= +|Feature | Role +|Send data to a secured cluster | ++{beat_default_index_prefix}_writer++ +ifeval::["{beatname_lc}"=="filebeat"] +|Run Filebeat modules | ++{beat_default_index_prefix}_writer++ +endif::[] +|Load index templates | ++{beat_default_index_prefix}_writer++ and `kibana_user` +|Load {beatname_uc} dashboards into {kib} | ++{beat_default_index_prefix}_writer++ and `kibana_user` +|Load machine learning jobs | `machine_learning_admin` +|Read indices created by {beatname_uc} | ++{beat_default_index_prefix}_reader++ +|View {beatname_uc} dashboards in {kib} | `kibana_user` +|======= include::basic-auth.asciidoc[] + include::user-access.asciidoc[] + include::tls.asciidoc[] + +ifeval::["{beatname_lc}"!="apm-server"] +include::beats-system.asciidoc[] +endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/security/tls.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/security/tls.asciidoc index 1e31ed26..10f7a606 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/security/tls.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/security/tls.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[beats-tls]] -=== Configuring {beatname_uc} to use Encrypted Connections +=== Configure {beatname_uc} to use encrypted connections If encryption is enabled on the {es} cluster, you need to connect to {es} via HTTPS. If the certificate authority (CA) that signed your node certificates @@ -14,9 +14,9 @@ protocol to all host URLs: ["source","js",subs="attributes,callouts"] -------------------------------------------------- output.elasticsearch: - hosts: ["https://localhost:9200"] <1> - index: "{beatname_lc}" - ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] <2> + hosts: ["https://localhost:9200"] <1> + index: "{beatname_lc}" + ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] <2> -------------------------------------------------- <1> Specify the `https` protocol to connect the {es} cluster. <2> Specify the path to the local `.pem` file that contains your Certificate diff --git a/vendor/github.com/elastic/beats/libbeat/docs/security/user-access.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/security/user-access.asciidoc index 28626afc..98088121 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/security/user-access.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/security/user-access.asciidoc @@ -1,61 +1,71 @@ [role="xpack"] [[beats-user-access]] -=== Granting Users Access to {beatname_uc} Indices +=== Grant users access to {beatname_uc} indices -To enable users to access the indices a {beatname_uc} creates, grant them `read` -and `view_index_metadata` privileges on the {beatname_uc} indices: +To enable users to access the indices {beatname_uc} creates, grant them `read` +and `view_index_metadata` privileges on the {beatname_uc} indices. If they're +using {kib}, they also need the `kibana_user` role. -. Create a role that has the `read` and `view_index_metadata` -privileges for the {beatname_uc} indices. You can create roles from the -**Management > Roles** UI in {kib} or through the `role` API. -For example, the following request creates a ++{beatname_lc}_reader++ -role: +. Create a reader role that has the `read` and `view_index_metadata` privileges +on the {beatname_uc} indices. ++ +You can create roles from the **Management > Roles** UI in {kib} or through the +`role` API. 
For example, the following request creates a role named +++{beat_default_index_prefix}_reader++: + -- ["source","sh",subs="attributes,callouts"] --------------------------------------------------------------- -POST _xpack/security/role/{beatname_lc}_reader +POST _xpack/security/role/{beat_default_index_prefix}_reader { "indices": [ { - "names": [ "{beatname_lc}-*" ], <1> + "names": [ "{beat_default_index_prefix}-*" ], <1> "privileges": ["read","view_index_metadata"] } ] } --------------------------------------------------------------- +// CONSOLE <1> If you use a custom {beatname_uc} index pattern, specify that pattern -instead of the default ++{beatname_lc}-*++ pattern. +instead of the default ++{beat_default_index_prefix}-*++ pattern. -- -. Assign your users the reader role so they can access the {beatname_uc} indices: + +. Assign your users the reader role so they can access the {beatname_uc} +indices. For {kib} users who need to visualize the data, also assign the +`kibana_user` role: .. If you're using the `native` realm, you can assign roles with the -**Management > Users** UI in {kib} or through the `user` API. For -example, the following request grants ++{beatname_lc}_user++ the -++{beatname_lc}_reader++ role: +**Management > Users** UI in {kib} or through the `user` API. For example, the +following request grants ++{beat_default_index_prefix}_user++ the +++{beat_default_index_prefix}_reader++ and `kibana_user` roles: + -- ["source", "sh", subs="attributes,callouts"] --------------------------------------------------------------- -POST /_xpack/security/user/{beatname_lc}_user +POST /_xpack/security/user/{beat_default_index_prefix}_user { - "password" : "x-pack-test-password", - "roles" : [ "{beatname_lc}_reader"], + "password" : "{pwd}", + "roles" : [ "{beat_default_index_prefix}_reader","kibana_user"], "full_name" : "{beatname_uc} User" } --------------------------------------------------------------- +// CONSOLE -- -.. If you're using the LDAP, Active Directory, or PKI realms, you -assign the roles in the `role_mapping.yml` configuration -file. For example, the following snippet grants ++{beatname_uc} User++ -the ++{beatname_lc}_reader++ role: +.. If you're using the LDAP, Active Directory, or PKI realms, you assign the +roles in the `role_mapping.yml` configuration file. For example, the following +snippet grants ++{beatname_uc} User++ the ++{beat_default_index_prefix}_reader++ +and `kibana_user` roles: + -- ["source", "yaml", subs="attributes,callouts"] --------------------------------------------------------------- -{beatname_lc}_reader: +{beat_default_index_prefix}_reader: + - "cn={beatname_uc} User,dc=example,dc=com" +kibana_user: - "cn={beatname_uc} User,dc=example,dc=com" --------------------------------------------------------------- + For more information, see {xpack-ref}/mapping-roles.html#mapping-roles-file[Using Role Mapping Files]. -- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc index faf96ff3..8b7686c7 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc @@ -1,14 +1,9 @@ [[configuration-autodiscover]] == Autodiscover -Autodiscover allows you to watch for system changes and dynamically adapt settings to them, as they happen. -This is especially useful when running your infrastructure on containers. 
- -When you run an application on containers, it becomes a moving target to the monitoring system. Autodiscover -allows you to automatically detect what's running and update settings to monitor it. - -You can define configuration templates for different containers. The autodiscover subsystem will use them -to monitor services as they start running. +When you run applications on containers, they become moving targets to the monitoring system. Autodiscover +allows you to track them and adapt settings as changes happen. By defining configuration templates, the +autodiscover subsystem can monitor services as they start running. You define autodiscover settings in the +{beatname_lc}.autodiscover+ section of the +{beatname_lc}.yml+ config file. To enable autodiscover, you specify a list of providers. @@ -20,6 +15,9 @@ Autodiscover providers work by watching for events on the system and translating events with a common format. When you configure the provider, you can use fields from the autodiscover event to set conditions that, when met, launch specific configurations. +On start, {beatname_uc} will scan existing containers and launch the proper configs for them. Then it will watch for new +start/stop events. This ensures you don't need to worry about state, but only define your desired configs. + [float] ===== Docker @@ -67,6 +65,51 @@ For example, with the example event, "`${data.port}`" resolves to `6379`. include::../../{beatname_lc}/docs/autodiscover-docker-config.asciidoc[] + +ifeval::["{beatname_lc}"=="filebeat"] +[WARNING] +======================================= +When using autodiscover, you have to be careful when defining config templates, especially if they are +reading from places holding information for several containers. For instance, under this file structure: + +`/mnt/logs/<container_id>/*.log` + +You can define a config template like this: + +**Wrong settings**: + +[source,yaml] +------------------------------------------------------------------------------------- +autodiscover.providers: + - type: docker + templates: + - condition.contains: + docker.container.image: nginx + config: + - type: log + paths: + - "/mnt/logs/*/*.log" +------------------------------------------------------------------------------------- + +That would read all the files under the given path several times (one per nginx container). What you really +want is to scope your template to the container that matched the autodiscover condition. **Good settings**: + +[source,yaml] +------------------------------------------------------------------------------------- +autodiscover.providers: + - type: docker + templates: + - condition.contains: + docker.container.image: nginx + config: + - type: log + paths: + - "/mnt/logs/${data.docker.container.id}/*.log" +------------------------------------------------------------------------------------- + +======================================= +endif::[] + [float] ===== Kubernetes @@ -117,4 +160,63 @@ For example: The configuration of templates and conditions is similar to that of the Docker provider. Configuration templates can contain variables from the autodiscover event. They can be accessed under data namespace. +The `kubernetes` autodiscover provider has the following configuration settings: + +`in_cluster`:: (Optional) Use in-cluster settings for the Kubernetes client, `true` + by default. +`host`:: (Optional) If `in_cluster` is false, use this host to connect to + the Kubernetes API. +`kube_config`:: (Optional) Use the given config file as configuration for the Kubernetes + client.
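For orientation only, the sketch below is not part of this patch: it shows how the three settings above are commonly translated into Kubernetes client construction when using the standard `k8s.io/client-go` packages. The helper name and control flow are illustrative assumptions, not the provider's actual implementation.

["source","go"]
----
package autodiscoverexample

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// buildK8sClient is a hypothetical helper illustrating how in_cluster,
// host, and kube_config typically map onto a Kubernetes client config.
func buildK8sClient(inCluster bool, host, kubeConfig string) (*kubernetes.Clientset, error) {
	var cfg *rest.Config
	var err error
	switch {
	case inCluster:
		// in_cluster: true -> use the service account mounted into the pod.
		cfg, err = rest.InClusterConfig()
	case kubeConfig != "":
		// kube_config -> load endpoint and credentials from a config file.
		cfg, err = clientcmd.BuildConfigFromFlags("", kubeConfig)
	default:
		// host -> talk to the given API endpoint directly.
		cfg = &rest.Config{Host: host}
	}
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(cfg)
}
----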
+ include::../../{beatname_lc}/docs/autodiscover-kubernetes-config.asciidoc[] + +[[configuration-autodiscover-hints]] +=== Hints based autodiscover + +include::../../{beatname_lc}/docs/autodiscover-hints.asciidoc[] + +[[configuration-autodiscover-advanced]] +=== Advanced usage + +//// +Builders are not exposed in docs, as we only have `hints` builder by the moment, +and that's covered in previous section + +[float] +==== Builders +Builders allow users to pass hints to autodiscover for it to be able to make decisions +on how and what kind of configuration should be generated. Each Beat can define its own +Builders that it can use. Hints are generated based on all information that is passed to +the provider using a prefix "co.elastic.*". The Kubernetes provider uses annotations and +the Docker provider uses labels to achieve the same. + +//// + +[float] +==== Appenders +Appenders allow users to append configuration that is already built with the help of either templates +or builders. Appenders can be configured to be applied only when a required condition is matched. The kind +of configuration that is applied is specific to each appender. + +[float] +===== Config +The config appender can apply a config on top of the config that was generated by +templates or builders. The config is applied whenever a provided condition is matched. It is always +applied if there is no condition provided. + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +metricbeat.autodiscover: + providers: + - type: kubernetes + templates: + ... + appenders: + - type: config + condition.equals: + kubernetes.labels.app: "prometheus" + config: + fields: + type: monitoring +------------------------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc index 881405a7..ad9a8eaa 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc @@ -10,9 +10,8 @@ :logstashdoc: https://www.elastic.co/guide/en/logstash/{doc-branch} :elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/{doc-branch} :elasticsearch-plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/{doc-branch} -:securitydoc: https://www.elastic.co/guide/en/x-pack/{doc-branch} -:monitoringdoc: https://www.elastic.co/guide/en/x-pack/{doc-branch} -:security: X-Pack Security +:securitydoc: https://www.elastic.co/guide/en/elastic-stack-overview/{doc-branch} +:monitoringdoc: https://www.elastic.co/guide/en/elastic-stack-overview/{doc-branch} :dashboards: https://artifacts.elastic.co/downloads/beats/beats-dashboards/beats-dashboards-{stack-version}.zip :dockerimage: docker.elastic.co/beats/{beatname_lc}:{version} :dockergithub: https://github.com/elastic/beats-docker/tree/{doc-branch} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-config-ingest.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-config-ingest.asciidoc index 6004158d..5157918a 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-config-ingest.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-config-ingest.asciidoc @@ -10,18 +10,20 @@ ////////////////////////////////////////////////////////////////////////// [[configuring-ingest-node]] -== Parse logs by using ingest 
node +== Parse data by using ingest node When you use Elasticsearch for output, you can configure {beatname_uc} to use -{elasticsearch}/ingest.html[ingest node] to pre-process documents -before the actual indexing takes place in Elasticsearch. Ingest node is a convenient processing option when you -want to do some extra processing on your data, but you do not require the full power of Logstash. For -example, you can create an ingest node pipeline in Elasticsearch that consists of one processor -that removes a field in a document followed by another processor that renames a field. +{elasticsearch}/ingest.html[ingest node] to pre-process documents before the +actual indexing takes place in Elasticsearch. Ingest node is a convenient +processing option when you want to do some extra processing on your data, but +you do not require the full power of Logstash. For example, you can create an +ingest node pipeline in Elasticsearch that consists of one processor that +removes a field in a document followed by another processor that renames a +field. -After defining the pipeline in Elasticsearch, you simply configure your Beat to use the pipeline. To configure -{beatname_uc}, you specify the pipeline ID in the `parameters` option under `elasticsearch` in the -+{beatname_lc}.yml+ file: +After defining the pipeline in Elasticsearch, you simply configure {beatname_uc} +to use the pipeline. To configure {beatname_uc}, you specify the pipeline ID in +the `parameters` option under `elasticsearch` in the +{beatname_lc}.yml+ file: [source,yaml] ------------------------------------------------------------------------------ @@ -30,7 +32,8 @@ output.elasticsearch: pipeline: my_pipeline_id ------------------------------------------------------------------------------ -For example, let's say that you've defined the following pipeline in a file named `pipeline.json`: +For example, let's say that you've defined the following pipeline in a file +named `pipeline.json`: [source,json] ------------------------------------------------------------------------------ @@ -64,5 +67,5 @@ output.elasticsearch: When you run {beatname_uc}, the value of `beat.name` is converted to lowercase before indexing. -For more information about defining a pre-processing pipeline, see the {elasticsearch}/ingest.html[Ingest Node] -documentation. +For more information about defining a pre-processing pipeline, see the +{elasticsearch}/ingest.html[Ingest Node] documentation. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-configuring.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-configuring.asciidoc index b1d6695a..d1d2c942 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-configuring.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-configuring.asciidoc @@ -1,26 +1,13 @@ -//Added conditional coding to support Beats that don't offer all of these install options - -ifeval::["{beatname_lc}"!="auditbeat"] To configure {beatname_uc}, you edit the configuration file. For rpm and deb, you'll find the configuration file at +/etc/{beatname_lc}/{beatname_lc}.yml+. Under Docker, it's located at +/usr/share/{beatname_lc}/{beatname_lc}.yml+. For mac and win, -look in the archive that you just extracted. There’s also a full example -configuration file called +{beatname_lc}.reference.yml+ that shows all non-deprecated -options. - -endif::[] - -ifeval::["{beatname_lc}"=="auditbeat"] - -To configure {beatname_uc}, you edit the configuration file. 
For rpm and deb, -you'll find the configuration file at +/etc/{beatname_lc}/{beatname_lc}.yml+. -For mac and win, look in the archive that you just extracted. There’s also a -full example configuration file called +{beatname_lc}.reference.yml+ that shows -all non-deprecated options. - +look in the archive that you just extracted. +ifeval::["{beatname_lc}"!="apm-server"] +There’s also a full example configuration file called +{beatname_lc}.reference.yml+ +that shows all non-deprecated options. endif::[] -See the +TIP: See the {libbeat}/config-file-format.html[Config File Format] section of the _Beats Platform Reference_ for more about the structure of the config file. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-directory-layout.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-directory-layout.asciidoc index 0199a9ca..8dce21cc 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-directory-layout.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-directory-layout.asciidoc @@ -59,7 +59,7 @@ Otherwise the paths might be set incorrectly. | bin | The location for the binary files. | /usr/share/{beatname_lc} | config | The location for configuration files. | /usr/share/{beatname_lc} | data | The location for persistent data files. | /usr/share/{beatname_lc}/data -| logs | The location for the logs created by {beatname_uc}. | /usr/share//{beatname_lc}/logs +| logs | The location for the logs created by {beatname_uc}. | /usr/share/{beatname_lc}/logs |======================================================================= endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc index 0460dd4a..0969d73b 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc @@ -2,7 +2,23 @@ === Running {beatname_uc} on Docker Docker images for {beatname_uc} are available from the Elastic Docker -registry. You can retrieve an image with a `docker pull` command. +registry. The base image is https://hub.docker.com/_/centos/[centos:7]. + +A list of all published Docker images and tags is available at +https://www.docker.elastic.co[www.docker.elastic.co]. The source code is in +{dockergithub}[GitHub]. + +These images are free to use under the Elastic license. They contain open source +and free commercial features and access to paid commercial features. +{xpack-ref}/license-management.html[Start a 30-day trial] to try out all of the +paid commercial features. See the +https://www.elastic.co/subscriptions[Subscriptions] page for information about +Elastic license levels. + +==== Pulling the image + +Obtaining Beats for Docker is as simple as issuing a +docker pull+ command +against the Elastic Docker registry. ifeval::["{release-state}"=="unreleased"] @@ -18,30 +34,30 @@ ifeval::["{release-state}"!="unreleased"] docker pull {dockerimage} ------------------------------------------------ -endif::[] +Alternatively, you can download other Docker images that contain only features +available under the Apache 2.0 license. To download the images, go to +https://www.docker.elastic.co[www.docker.elastic.co]. -The base image is https://hub.docker.com/_/centos/[centos:7] and the source -code can be found on -{dockergithub}[GitHub]. +endif::[] [float] ==== Configure {beatname_uc} on Docker The Docker image provides several methods for configuring {beatname_uc}. 
The -conventional approach is to provide a configuration file via a bind-mounted -volume, but it's also possible to create a custom image with your +conventional approach is to provide a configuration file via a bind mount, but +it's also possible to create a custom image with your configuration included. [float] ===== Bind-mounted configuration -One way to configure {beatname_uc} on Docker is to provide +{beatname_lc}.yml+ via bind-mounting. -With +docker run+, the bind-mount can be specified like this: +One way to configure {beatname_uc} on Docker is to provide +{beatname_lc}.yml+ via a bind mount. +With +docker run+, the bind mount can be specified like this: ["source", "sh", subs="attributes"] -------------------------------------------- docker run \ - -v ~/{beatname_lc}.yml:/usr/share/{beatname_lc}/{beatname_lc}.yml \ + --mount type=bind,source="$(pwd)"/{beatname_lc}.yml,target=/usr/share/{beatname_lc}/{beatname_lc}.yml \ {dockerimage} -------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-download-and-install.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-download-and-install.asciidoc index 480b2f9f..d2c25c56 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-download-and-install.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-download-and-install.asciidoc @@ -1,22 +1,12 @@ -//Added conditional coding to support Beats that don't offer all of these install options -ifeval::["{beatname_lc}"!="auditbeat"] +*Before you begin*: If you haven't installed the {stack}, do that now. See +{stack-ov}/get-started-elastic-stack.html[Getting started with the {stack}]. To download and install {beatname_uc}, use the commands that work with your system (<> for Debian/Ubuntu, <> for Redhat/Centos/Fedora, <> for OS X, <> for any Docker platform, and <> for Windows). -endif::[] - -ifeval::["{beatname_lc}"=="auditbeat"] - -To download and install {beatname_uc}, use the commands that work with your system -(<> for Debian/Ubuntu, <> for Redhat/Centos/Fedora, <> for OS X, and <> for Windows). - -endif::[] - [NOTE] ================================================== If you use Apt or Yum, you can < 5044 ---------------------------------------------------------------------- * Verify that the config file for {beatname_uc} specifies the correct port where Logstash is running. * Make sure that the Elasticsearch output is commented out in the config file and the Logstash output is uncommented. -* Confirm that the most recent Beats input plugin for Logstash is installed and configured. Note that Beats will not connect -to the Lumberjack input plugin. See -{libbeat}/logstash-installation.html#logstash-input-update[Updating the Beats Input Plugin for Logstash]. +* Confirm that the most recent {logstash-ref}/plugins-inputs-beats.html[Beats +input plugin for Logstash] is installed and configured. Note that Beats will not +connect to the Lumberjack input plugin. To learn how to install and update +plugins, see {logstash-ref}/working-with-plugins.html[Working with plugins]. 
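If you prefer to script the connectivity check described in the list above rather than run it by hand, a plain TCP dial against the Beats input port is enough to confirm that Logstash is listening. This is a minimal, self-contained sketch; the host and port are placeholders for whatever you configured in `output.logstash.hosts`.

["source","go"]
----
package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	// Placeholder address: substitute the host and port from
	// output.logstash.hosts (5044 is the conventional Beats input port).
	addr := "localhost:5044"
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot reach the Beats input at %s: %v\n", addr, err)
		os.Exit(1)
	}
	defer conn.Close()
	fmt.Printf("TCP connection to %s succeeded\n", addr)
}
----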
[float] [[metadata-missing]] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-getting-started-intro.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-getting-started-intro.asciidoc new file mode 100644 index 00000000..0339ad90 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-getting-started-intro.asciidoc @@ -0,0 +1,13 @@ + +To get started with your own {beatname_uc} setup, install and configure these +related products: + +* Elasticsearch for storing and indexing the data. +* Kibana for the UI. +* Logstash (optional) for parsing and enhancing the data. + +See {stack-ov}/get-started-elastic-stack.html[Getting started with the {stack}] +for more information. + +After installing the {stack}, read the following topics to learn how to +install, configure, and run {beatname_uc}: \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-kibana-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-kibana-config.asciidoc index 41e05868..d373a5bc 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-kibana-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-kibana-config.asciidoc @@ -12,8 +12,16 @@ [[setup-kibana-endpoint]] == Set up the Kibana endpoint -Starting with Beats 6.0.0, the Kibana dashboards are loaded into Kibana +ifeval::["{beatname_lc}" == "apm-server"] +The Kibana dashboards are loaded into Kibana via the Kibana API. +This requires a Kibana endpoint configuration. +endif::[] + +ifeval::["{beatname_lc}" != "apm-server"] +Starting with {beatname_uc} 6.0.0, the Kibana dashboards are loaded into Kibana via the Kibana API. This requires a Kibana endpoint configuration. +endif::[] + You configure the endpoint in the `setup.kibana` section of the +{beatname_lc}.yml+ config file. @@ -89,7 +97,7 @@ under a custom prefix. ==== `setup.kibana.ssl.enabled` Enables {beatname_uc} to use SSL settings when connecting to Kibana via HTTPS. -If you configure the Beat to connect over HTTPS, this setting defaults to +If you configure {beatname_uc} to connect over HTTPS, this setting defaults to `true` and {beatname_uc} uses the default SSL settings. Example configuration: diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-logstash-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-logstash-config.asciidoc index 4b3c176a..d94ca85a 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-logstash-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-logstash-config.asciidoc @@ -11,15 +11,19 @@ [IMPORTANT] .Prerequisite -To use Logstash as an output, you must -{libbeat}/logstash-installation.html#logstash-setup[install and configure] the Beats input -plugin for Logstash. +To send events to {ls}, you also need to create a {ls} configuration pipeline +that listens for incoming Beats connections and indexes the received events into +{es}. For more information, see the section about +{stack-ov}/get-started-elastic-stack.html#logstash-setup[configuring {ls}] in +the {stack} getting started tutorial. Also see the documentation for the +{logstash-ref}/plugins-inputs-beats.html[{beats} input] and +{logstash-ref}/plugins-outputs-elasticsearch.html[{es} output] plugins. -If you want to use Logstash to perform additional processing on the data collected by -{beatname_uc}, you need to configure {beatname_uc} to use Logstash. 
+If you want to use {ls} to perform additional processing on the data collected by +{beatname_uc}, you need to configure {beatname_uc} to use {ls}. To do this, you edit the {beatname_uc} configuration file to disable the Elasticsearch -output by commenting it out and enable the Logstash output by uncommenting the +output by commenting it out and enable the {ls} output by uncommenting the logstash section: [source,yaml] @@ -29,7 +33,7 @@ output.logstash: hosts: ["127.0.0.1:5044"] ------------------------------------------------------------------------------ -The `hosts` option specifies the Logstash server and the port (`5044`) where Logstash is configured to listen for incoming +The `hosts` option specifies the {ls} server and the port (`5044`) where {ls} is configured to listen for incoming Beats connections. For this configuration, you must <> diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-path-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-path-config.asciidoc index f08d2810..a5e1bb65 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-path-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-path-config.asciidoc @@ -14,10 +14,12 @@ == Set up project paths The `path` section of the +{beatname_lc}.yml+ config file contains configuration -options that define where the Beat looks for its files. For example, all Beats -look for the Elasticsearch template file in the configuration path, Filebeat and -Winlogbeat look for their registry files in the data path, and all Beats write -their log files in the logs path. +options that define where {beatname_uc} looks for its files. For example, {beatname_uc} +looks for the Elasticsearch template file in the configuration path and writes +log files in the logs path. +ifeval::["{beatname_lc}"=="filebeat" or "{beatname_lc}"=="winlogbeat"] +{beatname_uc} looks for its registry files in the data path. +endif::[] Please see the <> section for more details. @@ -87,7 +89,7 @@ path.data: /var/lib/beats [float] ==== `logs` -The logs path for a {beatname_uc} installation. This is the default location for the Beat's +The logs path for a {beatname_uc} installation. This is the default location for {beatname_uc}'s log files. If not set by a CLI flag or in the configuration file, the default for the logs path is a `logs` subdirectory inside the home path. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-shutdown.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-shutdown.asciidoc new file mode 100644 index 00000000..bbe14b01 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-shutdown.asciidoc @@ -0,0 +1,24 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by all Elastic Beats. Make sure you keep the +//// descriptions here generic enough to work for all Beats that include +//// this file. When using cross references, make sure that the cross +//// references resolve correctly for any files that include this one. +//// Use the appropriate variables defined in the index.asciidoc file to +//// resolve Beat names: beatname_uc and beatname_lc. 
+//// Use the following include to pull this content into a doc file: +//// include::../../libbeat/docs/shared-shutdown.asciidoc[] +////////////////////////////////////////////////////////////////////////// + +[[shutdown]] +=== Stopping {beatname_uc} + +An orderly shutdown of {beatname_uc} ensures that it has a chance to clean up +and close outstanding resources. You can help ensure an orderly shutdown by +stopping {beatname_uc} properly. + +If you’re running {beatname_uc} as a service, you can stop it via the service +management functionality provided by your installation. + +If you’re running {beatname_uc} directly in the console, you can stop it by +entering *Ctrl-C*. Alternatively, send SIGTERM to the {beatname_uc} process on a +POSIX system. \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-config.asciidoc index 2aa6f3b8..80f87056 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-config.asciidoc @@ -1,9 +1,13 @@ [[configuration-ssl]] == Specify SSL settings -You can specify SSL options for any <> that supports -SSL. You can also specify SSL options when you -<>. +You can specify SSL options when you configure: + +* <> that support SSL +* the <> +ifeval::["{beatname_lc}"=="heartbeat"] +* <> that support SSL +endif::[] Example output config with SSL enabled: @@ -15,7 +19,9 @@ output.elasticsearch.ssl.certificate: "/etc/pki/client/cert.pem" output.elasticsearch.ssl.key: "/etc/pki/client/cert.key" ---- +ifndef::only-elasticsearch[] Also see <>. +endif::[] Example Kibana endpoint config with SSL enabled: @@ -26,9 +32,27 @@ setup.kibana.protocol: "https" setup.kibana.ssl.enabled: true setup.kibana.ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] setup.kibana.ssl.certificate: "/etc/pki/client/cert.pem" -setup.kibana.ssl.key: "/etc/pki/client/cert.key +setup.kibana.ssl.key: "/etc/pki/client/cert.key" ---- + +ifeval::["{beatname_lc}"=="heartbeat"] +Example monitor with SSL enabled: + +[source,yaml] +------------------------------------------------------------------------------- +heartbeat.monitors: +- type: tcp + schedule: '@every 5s' + hosts: ["myhost"] + ports: [80, 9200, 5044] + ssl: + certificate_authorities: ['/etc/ca.crt'] + supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] +------------------------------------------------------------------------------- +endif::[] + + [float] === Configuration options @@ -58,10 +82,10 @@ might fail if the server requests client authentication. If the SSL server does require client authentication, the certificate will be loaded, but not requested or used by the server. -When this option is configured, the <> option is also required. +When this option is configured, the <> option is also required. [float] -[[certificate_key]] +[[key]] ==== `key: "/etc/pki/client/cert.key"` The client certificate key used for client authentication. This option is required if <> is specified. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-logstash-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-logstash-config.asciidoc index d800836a..dd9c9e85 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-logstash-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-ssl-logstash-config.asciidoc @@ -19,8 +19,8 @@ To use SSL mutual authentication: {beatname_uc} and Logstash. 
Creating a correct SSL/TLS infrastructure is outside the scope of this document. There are many online resources available that describe how to create certificates. + -TIP: If you are using X-Pack, you can use the -{elasticsearch}/certutil.html[certutil tool] to generate certificates. +TIP: If you are using {security}, you can use the +{elasticsearch}/certutil.html[elasticsearch-certutil tool] to generate certificates. . Configure {beatname_uc} to use SSL. In the +{beatname_lc}.yml+ config file, specify the following settings under `ssl`: @@ -51,7 +51,7 @@ For more information about these configuration options, see < * `ssl_certificate` and `ssl_key`: Specify the certificate and key that Logstash uses to authenticate with the client. * `ssl_verify_mode`: Specifies whether the Logstash server verifies the client certificate against the CA. You need to specify either `peer` or `force_peer` to make the server ask for the certificate and validate it. If you -specify `force_peer`, and {beatname_uc} doesn't provide a certificate, the Logstash connection will be closed. +specify `force_peer`, and {beatname_uc} doesn't provide a certificate, the Logstash connection will be closed. If you choose not to use {elasticsearch}/certutil.html[certutil], the certificates that you obtain must allow for both `clientAuth` and `serverAuth` if the extended key usage extension is present. + For example: + diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc index 20fb21e4..88aeec5c 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc @@ -31,14 +31,23 @@ configuring template loading options in the {beatname_uc} configuration file. You can also set options to change the name of the index and index template. +ifndef::only-elasticsearch[] NOTE: A connection to Elasticsearch is required to load the index template. If the output is Logstash, you must <>. +endif::[] For more information, see: -* <> +ifdef::only-elasticsearch[] +* <> +* <> +endif::[] + +ifndef::only-elasticsearch[] +* <> * <> - required for Logstash output +endif::[] [[load-template-auto]] ==== Configure template loading @@ -104,10 +113,13 @@ See <> for the full list of configuration options. ==== Load the template manually To load the template manually, run the <> command. A -connection to Elasticsearch is required. If Logstash output is enabled, you need +connection to Elasticsearch is required. +ifndef::only-elasticsearch[] +If Logstash output is enabled, you need to temporarily disable the Logstash output and enable Elasticsearch by using the `-E` option. The examples here assume that Logstash output is enabled. You can omit the `-E` flags if Elasticsearch output is already enabled. +endif::[] If you are connecting to a secured Elasticsearch cluster, make sure you've configured credentials as described in <<{beatname_lc}-configuration>>. @@ -117,6 +129,14 @@ Elasticsearch, see <>. To load the template, use the appropriate command for your system. 
+ifndef::only-elasticsearch[] +:disable_logstash: {sp}-E output.logstash.enabled=false +endif::[] + +ifdef::only-elasticsearch[] +:disable_logstash: +endif::[] + ifdef::allplatforms[] ifeval::["{requires-sudo}"=="yes"] @@ -126,46 +146,40 @@ include::./shared-note-sudo.asciidoc[] endif::[] *deb and rpm:* - ["source","sh",subs="attributes"] ---- -{beatname_lc} setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=["localhost:9200"]' +{beatname_lc} setup --template{disable_logstash} -E 'output.elasticsearch.hosts=["localhost:9200"]' ---- *mac:* ["source","sh",subs="attributes"] ---- -./{beatname_lc} setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=["localhost:9200"]' +./{beatname_lc} setup --template{disable_logstash} -E 'output.elasticsearch.hosts=["localhost:9200"]' ---- -ifeval::["{beatname_lc}"!="auditbeat"] - *docker:* ["source","sh",subs="attributes"] ---------------------------------------------------------------------- -docker run {dockerimage} setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=["localhost:9200"]' +docker run {dockerimage} setup --template{disable_logstash} -E 'output.elasticsearch.hosts=["localhost:9200"]' ---------------------------------------------------------------------- -endif::[] - *win:* endif::allplatforms[] Open a PowerShell prompt as an Administrator (right-click the PowerShell icon -and select *Run As Administrator*). If you are running Windows XP, you may need -to download and install PowerShell. +and select *Run As Administrator*). From the PowerShell prompt, change to the directory where you installed {beatname_uc}, and run: -["source","sh",subs="attributes,callouts"] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -PS > {beatname_lc} setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=["localhost:9200"]' +PS > .{backslash}{beatname_lc}.exe setup --template{disable_logstash} -E 'output.elasticsearch.hosts=["localhost:9200"]' ---------------------------------------------------------------------- @@ -192,7 +206,7 @@ PS > Invoke-RestMethod -Method Delete "http://localhost:9200/{beatname_lc}-*" ---------------------------------------------------------------------- -This command deletes all indices that match the pattern +{beatname_lc}-*+. +This command deletes all indices that match the pattern +{beat_default_index_prefix}-*+. Before running this command, make sure you want to delete all indices that match the pattern. @@ -220,12 +234,12 @@ ifdef::allplatforms[] ./{beatname_lc} export template > {beatname_lc}.template.json ---- + -*win*: +*win:* + endif::allplatforms[] ["source","sh",subs="attributes"] ---- -PS> .{backslash}{beatname_lc}.exe export template --es.version {stack-version} | Out-File -Encoding UTF8 {beatname_lc}.template.json +PS > .{backslash}{beatname_lc}.exe export template --es.version {stack-version} | Out-File -Encoding UTF8 {beatname_lc}.template.json ---- . 
Install the template: @@ -237,7 +251,7 @@ PS> .{backslash}{beatname_lc}.exe export template --es.version {stack-version} | curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{stack-version} -d@{beatname_lc}.template.json ---- + -*win*: +*win:* + ["source","sh",subs="attributes"] ---- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/step-configure-credentials.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/step-configure-credentials.asciidoc index 46800f8b..e00db3cf 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/step-configure-credentials.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/step-configure-credentials.asciidoc @@ -7,17 +7,22 @@ in the config file before you run the commands that set up and start ---- output.elasticsearch: hosts: ["myEShost:9200"] - username: "elastic" - password: "elastic" + username: "filebeat_internal" + password: "{pwd}" <1> setup.kibana: host: "mykibanahost:5601" - username: "elastic" <1> - password: "elastic" + username: "my_kibana_user" <2> <3> + password: "{pwd}" ---- -<1> The `username` and `password` settings for Kibana are optional. If you don't +<1> This example shows a hard-coded password, but you should store sensitive +values in the <>. +<2> The `username` and `password` settings for Kibana are optional. If you don't specify credentials for Kibana, {beatname_uc} uses the `username` and `password` specified for the Elasticsearch output. +<3> If you are planning to <>, the user must have the `kibana_user` +{xpack-ref}/built-in-roles.html[built-in role] or equivalent privileges. + -- + -Also see the security-related options described in <> and -<>. +For more information, see <>. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc index 85b79fc3..967aeec4 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc @@ -8,10 +8,14 @@ mappings in Elasticsearch. If template loading is enabled (the default), {beatname_uc} loads the index template automatically after successfully connecting to Elasticsearch. +ifndef::only-elasticsearch[] + NOTE: A connection to Elasticsearch is required to load the index template. If the output is Logstash, you must <>. +endif::[] + You can adjust the following settings to load your own template or overwrite an existing one. @@ -19,7 +23,7 @@ existing one. you must <>. *`setup.template.name`*:: The name of the template. The default is -+{beatname_lc}+. The Beat version is always appended to the given ++{beatname_lc}+. The {beatname_uc} version is always appended to the given name, so the final name is +{beatname_lc}-%\{[beat.version]\}+. // Maintainers: a backslash character is required to escape curly braces and // the example as expected. *`setup.template.pattern`*:: The template pattern to apply to the default index -settings. The default pattern is +{beatname_lc}-\*+. The Beat version is always +settings. The default pattern is +{beat_default_index_prefix}-\*+. The {beatname_uc} version is always included in the pattern, so the final pattern is -+{beatname_lc}-%\{[beat.version]\}-*+. The wildcard character `-*` is used to ++{beat_default_index_prefix}-%\{[beat.version]\}-*+. The wildcard character `-*` is used to match all daily indices.
+ Example: @@ -39,7 +43,7 @@ Example: ["source","yaml",subs="attributes"] ---------------------------------------------------------------------- setup.template.name: "{beatname_lc}" -setup.template.pattern: "{beatname_lc}-*" +setup.template.pattern: "{beat_default_index_prefix}-*" ---------------------------------------------------------------------- *`setup.template.fields`*:: The path to the YAML file describing the fields. The default is +fields.yml+. If a @@ -78,3 +82,8 @@ setup.template.overwrite: false setup.template.settings: _source.enabled: false ---------------------------------------------------------------------- +ifeval::["{beatname_lc}"!="apm-server"] +*`setup.template.append_fields`*:: A list of fields to be added to the template and Kibana index pattern. experimental[] + +NOTE: With append_fields, only new fields can be added and no existing ones can be overwritten or changed. This is especially useful if data is collected through the http/json metricset where the data structure is not known in advance. Changing the config of append_fields means the template has to be overwritten and only applies to new indices. If there are two Beats with different append_fields configs, the last one writing the template will win. Any changes will also have an effect on the Kibana index pattern. +endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc index 09cd7db9..9e522d9b 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc @@ -1,8 +1,8 @@ -:stack-version: 6.2.3 -:doc-branch: 6.2 -:go-version: 1.9.2 +:stack-version: 6.3.2 +:doc-branch: 6.3 +:go-version: 1.9.4 :release-state: released :python: 2.7.9 :docker: 1.12 :docker-compose: 1.11 -:branch: 6.2 +:branch: 6.3 diff --git a/vendor/github.com/elastic/beats/libbeat/docs/yaml.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/yaml.asciidoc index 5d535d9d..1266115b 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/yaml.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/yaml.asciidoc @@ -43,13 +43,14 @@ simply uncomment the line and change the values. You can test your configuration file to verify that the structure is valid. Simply change to the directory where the binary is installed, and run -the Beat in the foreground with the `-configtest` flag specified. For example: +the Beat in the foreground with the `test config` command specified.
For +example: ifdef::allplatforms[] ["source","shell",subs="attributes"] ---------------------------------------------------------------------- -{beatname_lc} -c {beatname_lc}.yml -configtest +{beatname_lc} test config -c {beatname_lc}.yml ---------------------------------------------------------------------- endif::allplatforms[] @@ -58,7 +59,7 @@ ifdef::win[] ["source","shell",subs="attributes"] ---------------------------------------------------------------------- -.\winlogbeat.exe -c .\winlogbeat.yml -configtest -e +.\winlogbeat.exe test config -c .\winlogbeat.yml -e ---------------------------------------------------------------------- endif::win[] diff --git a/vendor/github.com/elastic/beats/libbeat/logp/config.go b/vendor/github.com/elastic/beats/libbeat/logp/config.go index d2617c76..da7ffc1b 100644 --- a/vendor/github.com/elastic/beats/libbeat/logp/config.go +++ b/vendor/github.com/elastic/beats/libbeat/logp/config.go @@ -35,6 +35,7 @@ var defaultConfig = Config{ ToFiles: true, Files: FileConfig{ MaxSize: 10 * 1024 * 1024, + MaxBackups: 7, Permissions: 0600, }, addCaller: true, diff --git a/vendor/github.com/elastic/beats/libbeat/logp/core.go b/vendor/github.com/elastic/beats/libbeat/logp/core.go index 917c0a42..c5e4d616 100644 --- a/vendor/github.com/elastic/beats/libbeat/logp/core.go +++ b/vendor/github.com/elastic/beats/libbeat/logp/core.go @@ -27,14 +27,16 @@ func init() { selectors: map[string]struct{}{}, rootLogger: zap.NewNop(), globalLogger: zap.NewNop(), + logger: newLogger(zap.NewNop(), ""), }) } type coreLogger struct { - selectors map[string]struct{} - rootLogger *zap.Logger - globalLogger *zap.Logger - observedLogs *observer.ObservedLogs + selectors map[string]struct{} // Set of enabled debug selectors. + rootLogger *zap.Logger // Root logger without any options configured. + globalLogger *zap.Logger // Logger used by legacy global functions (e.g. logp.Info). + logger *Logger // Logger that is the basis for all logp.Loggers. + observedLogs *observer.ObservedLogs // Contains events generated while in observation mode (a testing mode). } // Configure configures the logp package. @@ -92,6 +94,7 @@ func Configure(cfg Config) error { selectors: selectors, rootLogger: root, globalLogger: root.WithOptions(zap.AddCallerSkip(1)), + logger: newLogger(root, ""), observedLogs: observedLogs, }) return nil diff --git a/vendor/github.com/elastic/beats/libbeat/logp/core_test.go b/vendor/github.com/elastic/beats/libbeat/logp/core_test.go index 56d072a4..0fab81b3 100644 --- a/vendor/github.com/elastic/beats/libbeat/logp/core_test.go +++ b/vendor/github.com/elastic/beats/libbeat/logp/core_test.go @@ -148,7 +148,7 @@ func TestLoggerLevel(t *testing.T) { } func TestRecover(t *testing.T) { - const recoveryExplanation = "Something went wrong." + const recoveryExplanation = "Something went wrong" const cause = "unexpected condition" DevelopmentSetup(ToObserverOutput()) @@ -160,8 +160,9 @@ func TestRecover(t *testing.T) { assert.Equal(t, zap.ErrorLevel, log.Level) assert.Equal(t, "logp/core_test.go", strings.Split(log.Caller.TrimmedPath(), ":")[0]) - assert.Contains(t, log.Message, recoveryExplanation) - assert.Contains(t, log.Message, cause) + assert.Contains(t, log.Message, recoveryExplanation+ + ". 
Recovering, but please report this.") + assert.Contains(t, log.ContextMap(), "panic") } }() @@ -186,3 +187,29 @@ func TestIsDebug(t *testing.T) { assert.False(t, IsDebug("all")) assert.True(t, IsDebug("only_this")) } + +func TestL(t *testing.T) { + if err := DevelopmentSetup(ToObserverOutput()); err != nil { + t.Fatal(err) + } + + L().Infow("infow", "rate", 2) + logs := ObserverLogs().TakeAll() + if assert.Len(t, logs, 1) { + log := logs[0] + assert.Equal(t, zap.InfoLevel, log.Level) + assert.Equal(t, "", log.LoggerName) + assert.Equal(t, "infow", log.Message) + assert.Contains(t, log.ContextMap(), "rate") + } + + const loggerName = "tester" + L().Named(loggerName).Warnf("warning %d", 1) + logs = ObserverLogs().TakeAll() + if assert.Len(t, logs, 1) { + log := logs[0] + assert.Equal(t, zap.WarnLevel, log.Level) + assert.Equal(t, loggerName, log.LoggerName) + assert.Equal(t, "warning 1", log.Message) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/logp/global.go b/vendor/github.com/elastic/beats/libbeat/logp/global.go index 068c532c..17481041 100644 --- a/vendor/github.com/elastic/beats/libbeat/logp/global.go +++ b/vendor/github.com/elastic/beats/libbeat/logp/global.go @@ -83,8 +83,8 @@ func WTF(format string, v ...interface{}) { // Recover stops a panicking goroutine and logs an Error. func Recover(msg string) { if r := recover(); r != nil { - msg := fmt.Sprintf("%s. Recovering, but please report this: %s.", msg, r) + msg := fmt.Sprintf("%s. Recovering, but please report this.", msg) globalLogger().WithOptions(zap.AddCallerSkip(2)). - Error(msg, zap.Stack("stack")) + Error(msg, zap.Any("panic", r), zap.Stack("stack")) } } diff --git a/vendor/github.com/elastic/beats/libbeat/logp/logger.go b/vendor/github.com/elastic/beats/libbeat/logp/logger.go index 04cfc91a..24cc7ad7 100644 --- a/vendor/github.com/elastic/beats/libbeat/logp/logger.go +++ b/vendor/github.com/elastic/beats/libbeat/logp/logger.go @@ -12,23 +12,35 @@ type Logger struct { sugar *zap.SugaredLogger } -// NewLogger returns a new Logger labeled with the name of the selector. This -// should never be used from any global contexts (instead create "per instance" -// loggers). -func NewLogger(selector string, options ...LogOption) *Logger { - log := loadLogger().rootLogger. +func newLogger(rootLogger *zap.Logger, selector string, options ...LogOption) *Logger { + log := rootLogger. WithOptions(zap.AddCallerSkip(1)). WithOptions(options...). Named(selector) return &Logger{log.Sugar()} } +// NewLogger returns a new Logger labeled with the name of the selector. This +// should never be used from any global contexts, otherwise you will receive a +// no-op Logger. This is because the logp package needs to be initialized first. +// Instead create new Logger instance that your object reuses. Or if you need to +// log from a static context then you may use logp.L().Infow(), for example. +func NewLogger(selector string, options ...LogOption) *Logger { + return newLogger(loadLogger().rootLogger, selector, options...) +} + // With creates a child logger and adds structured context to it. Fields added // to the child don't affect the parent, and vice versa. func (l *Logger) With(args ...interface{}) *Logger { return &Logger{l.sugar.With(args...)} } +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. +func (l *Logger) Named(name string) *Logger { + return &Logger{l.sugar.Named(name)} +} + // Sprint // Debug uses fmt.Sprint to construct and log a message. 
@@ -51,6 +63,11 @@ func (l *Logger) Error(args ...interface{}) { l.sugar.Error(args...) } +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit(1). +func (l *Logger) Fatal(args ...interface{}) { + l.sugar.Fatal(args...) +} + // Panic uses fmt.Sprint to construct and log a message, then panics. func (l *Logger) Panic(args ...interface{}) { l.sugar.Panic(args...) @@ -84,6 +101,11 @@ func (l *Logger) Errorf(format string, args ...interface{}) { l.sugar.Errorf(format, args...) } +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit(1). +func (l *Logger) Fatalf(format string, args ...interface{}) { + l.sugar.Fatalf(format, args...) +} + // Panicf uses fmt.Sprintf to log a templated message, then panics. func (l *Logger) Panicf(format string, args ...interface{}) { l.sugar.Panicf(format, args...) @@ -129,6 +151,15 @@ func (l *Logger) Errorw(msg string, keysAndValues ...interface{}) { l.sugar.Errorw(msg, keysAndValues...) } +// Fatalw logs a message with some additional context, then calls os.Exit(1). +// The additional context is added in the form of key-value pairs. The optimal +// way to write the value to the log message will be inferred by the value's +// type. To explicitly specify a type you can pass a Field such as +// logp.Stringer. +func (l *Logger) Fatalw(msg string, keysAndValues ...interface{}) { + l.sugar.Fatalw(msg, keysAndValues...) +} + // Panicw logs a message with some additional context, then panics. The // additional context is added in the form of key-value pairs. The optimal way // to write the value to the log message will be inferred by the value's type. @@ -145,3 +176,8 @@ func (l *Logger) Panicw(msg string, keysAndValues ...interface{}) { func (l *Logger) DPanicw(msg string, keysAndValues ...interface{}) { l.sugar.DPanicw(msg, keysAndValues...) } + +// L returns an unnamed global logger. 
+func L() *Logger { + return loadLogger().logger +} diff --git a/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go b/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go index 02f6d4b8..b31819a0 100644 --- a/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go +++ b/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go @@ -87,3 +87,35 @@ func AddSwapPercentage(s *SwapStat) { perc := float64(s.Swap.Used) / float64(s.Swap.Total) s.UsedPercent = common.Round(perc, common.DefaultDecimalPlacesCount) } + +// HugeTLBPagesStat includes metrics about huge pages usage +type HugeTLBPagesStat struct { + sigar.HugeTLBPages + UsedPercent float64 `json:"used_p"` +} + +// GetHugeTLBPages returns huge pages usage metrics +func GetHugeTLBPages() (*HugeTLBPagesStat, error) { + pages := sigar.HugeTLBPages{} + err := pages.Get() + + if err == nil { + return &HugeTLBPagesStat{HugeTLBPages: pages}, nil + } + + if sigar.IsNotImplemented(err) { + return nil, nil + } + + return nil, err +} + +// AddHugeTLBPagesPercentage calculates ratio of used huge pages +func AddHugeTLBPagesPercentage(s *HugeTLBPagesStat) { + if s.Total == 0 { + return + } + + perc := float64(s.Total-s.Free+s.Reserved) / float64(s.Total) + s.UsedPercent = common.Round(perc, common.DefaultDecimalPlacesCount) +} diff --git a/vendor/github.com/elastic/beats/libbeat/metric/system/process/process.go b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process.go index 7d89125b..66f3387c 100644 --- a/vendor/github.com/elastic/beats/libbeat/metric/system/process/process.go +++ b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process.go @@ -46,7 +46,7 @@ type Process struct { cpuTotalPctNorm float64 } -// Stats stores the stats of preocesses on the host. +// Stats stores the stats of processes on the host. 
type Stats struct { Procs []string ProcsMap ProcsMap diff --git a/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_linux.go b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_linux.go new file mode 100644 index 00000000..9400a43d --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_linux.go @@ -0,0 +1,20 @@ +package process + +import ( + "os" + "path" + "strconv" + + "github.com/elastic/gosigar" +) + +// GetSelfPid returns the PID for this process +func GetSelfPid() (int, error) { + pid, err := os.Readlink(path.Join(gosigar.Procd, "self")) + + if err != nil { + return 0, err + } + + return strconv.Atoi(pid) +} diff --git a/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_other.go b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_other.go new file mode 100644 index 00000000..f829eac1 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_other.go @@ -0,0 +1,10 @@ +// +build !linux + +package process + +import "os" + +// GetSelfPid returns the PID for this process +func GetSelfPid() (int, error) { + return os.Getpid(), nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_test.go b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_test.go index fe989d46..4bd6bcf9 100644 --- a/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_test.go +++ b/vendor/github.com/elastic/beats/libbeat/metric/system/process/process_test.go @@ -66,6 +66,13 @@ func TestGetProcess(t *testing.T) { } } +// See https://github.com/elastic/beats/issues/6620 +func TestGetSelfPid(t *testing.T) { + pid, err := GetSelfPid() + assert.NoError(t, err) + assert.Equal(t, os.Getpid(), pid) +} + func TestProcState(t *testing.T) { assert.Equal(t, getProcState('R'), "running") assert.Equal(t, getProcState('S'), "sleeping") diff --git a/vendor/github.com/elastic/beats/libbeat/ml-importer/importer.go b/vendor/github.com/elastic/beats/libbeat/ml-importer/importer.go index 5062fb7b..1f08f3b6 100644 --- a/vendor/github.com/elastic/beats/libbeat/ml-importer/importer.go +++ b/vendor/github.com/elastic/beats/libbeat/ml-importer/importer.go @@ -4,14 +4,26 @@ package mlimporter import ( "encoding/json" "fmt" + "io" "io/ioutil" + "net/url" + "strings" + "github.com/joeshaw/multierror" "github.com/pkg/errors" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" ) +var ( + esDataFeedURL = "/_xpack/ml/datafeeds/datafeed-%s" + esJobURL = "/_xpack/ml/anomaly_detectors/%s" + kibanaGetModuleURL = "/api/ml/modules/get_module/%s" + kibanaRecognizeURL = "/api/ml/modules/recognize/%s" + kibanaSetupModuleURL = "/api/ml/modules/setup/%s" +) + // MLConfig contains the required configuration for loading one job and the associated // datafeed. type MLConfig struct { @@ -29,6 +41,56 @@ type MLLoader interface { GetVersion() string } +// MLSetupper is a subset of the Kibana client API capable of setting up ML objects. +type MLSetupper interface { + Request(method, path string, params url.Values, body io.Reader) (int, []byte, error) + GetVersion() string +} + +// MLResponse stores the relevant parts of the response from Kibana to check for errors. 
+type MLResponse struct { + Datafeeds []struct { + ID string + Success bool + Error struct { + Msg string + } + } + Jobs []struct { + ID string + Success bool + Error struct { + Msg string + } + } + Kibana struct { + Dashboard []struct { + Success bool + ID string + Exists bool + Error struct { + Message string + } + } + Search []struct { + Success bool + ID string + Exists bool + Error struct { + Message string + } + } + Visualization []struct { + Success bool + ID string + Exists bool + Error struct { + Message string + } + } + } +} + func readJSONFile(path string) (common.MapStr, error) { file, err := ioutil.ReadFile(path) if err != nil { @@ -41,8 +103,8 @@ func readJSONFile(path string) (common.MapStr, error) { // ImportMachineLearningJob uploads the job and datafeed configuration to ES/xpack. func ImportMachineLearningJob(esClient MLLoader, cfg *MLConfig) error { - jobURL := fmt.Sprintf("/_xpack/ml/anomaly_detectors/%s", cfg.ID) - datafeedURL := fmt.Sprintf("/_xpack/ml/datafeeds/datafeed-%s", cfg.ID) + jobURL := fmt.Sprintf(esJobURL, cfg.ID) + datafeedURL := fmt.Sprintf(esDataFeedURL, cfg.ID) if len(cfg.MinVersion) > 0 { esVersion, err := common.NewVersion(esClient.GetVersion()) @@ -121,3 +183,76 @@ func HaveXpackML(esClient MLLoader) (bool, error) { } return xpack.Features.ML.Available && xpack.Features.ML.Enabled, nil } + +// SetupModule creates ML jobs, data feeds and dashboards for modules. +func SetupModule(kibanaClient MLSetupper, module, prefix string) error { + setupURL := fmt.Sprintf(kibanaSetupModuleURL, module) + prefixPayload := fmt.Sprintf("{\"prefix\": \"%s\"}", prefix) + status, response, err := kibanaClient.Request("POST", setupURL, nil, strings.NewReader(prefixPayload)) + if status != 200 { + return errors.Errorf("cannot set up ML with prefix: %s", prefix) + } + if err != nil { + return err + } + + return checkResponse(response) +} + +func checkResponse(r []byte) error { + var errs multierror.Errors + + var resp MLResponse + err := json.Unmarshal(r, &resp) + if err != nil { + return err + } + + for _, feed := range resp.Datafeeds { + if !feed.Success { + if strings.HasPrefix(feed.Error.Msg, "[resource_already_exists_exception]") { + logp.Debug("machine-learning", "Datafeed already exists: %s, error: %s", feed.ID, feed.Error.Msg) + continue + } + errs = append(errs, errors.Errorf(feed.Error.Msg)) + } + } + for _, job := range resp.Jobs { + if strings.HasPrefix(job.Error.Msg, "[resource_already_exists_exception]") { + logp.Debug("machine-learning", "Job already exists: %s, error: %s", job.ID, job.Error.Msg) + continue + } + if !job.Success { + errs = append(errs, errors.Errorf(job.Error.Msg)) + } + } + for _, dashboard := range resp.Kibana.Dashboard { + if !dashboard.Success { + if dashboard.Exists || strings.Contains(dashboard.Error.Message, "version conflict, document already exists") { + logp.Debug("machine-learning", "Dashboard already exists: %s, error: %s", dashboard.ID, dashboard.Error.Message) + } else { + errs = append(errs, errors.Errorf("error while setting up dashboard: %s", dashboard.ID)) + } + } + } + for _, search := range resp.Kibana.Search { + if !search.Success { + if search.Exists || strings.Contains(search.Error.Message, "version conflict, document already exists") { + logp.Debug("machine-learning", "Search already exists: %s", search.ID) + } else { + errs = append(errs, errors.Errorf("error while setting up search: %s, error: %s", search.ID, search.Error.Message)) + } + } + } + for _, visualization := range resp.Kibana.Visualization { + if 
!visualization.Success { + if visualization.Exists || strings.Contains(visualization.Error.Message, "version conflict, document already exists") { + logp.Debug("machine-learning", "Visualization already exists: %s", visualization.ID) + } else { + errs = append(errs, errors.Errorf("error while setting up visualization: %s, error: %s", visualization.ID, visualization.Error.Message)) + } + } + } + + return errs.Err() +} diff --git a/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go b/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go index 661f648f..93b9943c 100644 --- a/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go +++ b/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go @@ -46,14 +46,11 @@ func (c *publishClient) Connect() error { } status, body, err := c.es.Request("GET", "/_xpack", "", params, nil) if err != nil { - debugf("XPack capabilities query failed with: %v", err) - return err + return fmt.Errorf("X-Pack capabilities query failed with: %v", err) } if status != 200 { - err := fmt.Errorf("XPack capabilities query failed with status code: %v", status) - debugf("%s", err) - return err + return fmt.Errorf("X-Pack capabilities query failed with status code: %v", status) } resp := struct { diff --git a/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/elasticsearch.go b/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/elasticsearch.go index 056c54c8..a78513b8 100644 --- a/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/elasticsearch.go +++ b/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/elasticsearch.go @@ -151,8 +151,10 @@ func (r *reporter) Stop() { } func (r *reporter) initLoop() { - logp.Info("Start monitoring endpoint init loop.") - defer logp.Info("Stop monitoring endpoint init loop.") + debugf("Start monitoring endpoint init loop.") + defer debugf("Finish monitoring endpoint init loop.") + + logged := false for { // Select one configured endpoint by random and check if xpack is available @@ -162,7 +164,11 @@ func (r *reporter) initLoop() { closing(client) break } else { - logp.Err("Monitoring could not connect to elasticsearch, failed with %v", err) + if !logged { + logp.Info("Failed to connect to Elastic X-Pack Monitoring. Either Elasticsearch X-Pack monitoring is not enabled or Elasticsearch is not available. Will keep retrying.") + logged = true + } + debugf("Monitoring could not connect to elasticsearch, failed with %v", err) } select { @@ -172,6 +178,8 @@ func (r *reporter) initLoop() { } } + logp.Info("Successfully connected to X-Pack Monitoring endpoint.") + // Start collector and send loop if monitoring endpoint has been found. 
go r.snapshotLoop() } diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/codec/common.go b/vendor/github.com/elastic/beats/libbeat/outputs/codec/common.go index aab6bbd7..7c6885e2 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/codec/common.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/codec/common.go @@ -3,7 +3,7 @@ package codec import ( "time" - structform "github.com/urso/go-structform" + "github.com/elastic/go-structform" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/dtfmt" diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/codec/json/json.go b/vendor/github.com/elastic/beats/libbeat/outputs/codec/json/json.go index a016cf10..fe9a4367 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/codec/json/json.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/codec/json/json.go @@ -4,8 +4,8 @@ import ( "bytes" stdjson "encoding/json" - "github.com/urso/go-structform/gotype" - "github.com/urso/go-structform/json" + "github.com/elastic/go-structform/gotype" + "github.com/elastic/go-structform/json" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/enc.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/enc.go index a64510a9..ec073880 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/enc.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/enc.go @@ -7,8 +7,8 @@ import ( "net/http" "time" - "github.com/urso/go-structform/gotype" - "github.com/urso/go-structform/json" + "github.com/elastic/go-structform/gotype" + "github.com/elastic/go-structform/json" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/estest/estest.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/estest/estest.go index ca1239cc..6285457f 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/estest/estest.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/estest/estest.go @@ -14,7 +14,7 @@ func GetTestingElasticsearch(t internal.TestLogger) *elasticsearch.Client { URL: internal.GetURL(), Index: outil.MakeSelector(), Username: internal.GetUser(), - Password: internal.GetUser(), + Password: internal.GetPass(), Timeout: 60 * time.Second, CompressionLevel: 3, }, nil) diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go index c9018cd1..3126606f 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go @@ -1,6 +1,7 @@ package kafka import ( + "errors" "fmt" "sync" "sync/atomic" @@ -39,6 +40,10 @@ type msgRef struct { err error } +var ( + errNoTopicsSelected = errors.New("no topic could be selected") +) + func newKafkaClient( observer outputs.Observer, hosts []string, @@ -140,6 +145,9 @@ func (c *client) getEventMessage(data *publisher.Event) (*message, error) { if err != nil { return nil, fmt.Errorf("setting kafka topic failed with %v", err) } + if topic == "" { + return nil, errNoTopicsSelected + } msg.topic = topic if event.Meta == nil { event.Meta = map[string]interface{}{} diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/log.go b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/log.go index 673ee956..8ab83c6d 100644 
--- a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/log.go
+++ b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/log.go
@@ -9,11 +9,11 @@ import (
 type kafkaLogger struct{}
 
 func (kl kafkaLogger) Print(v ...interface{}) {
-	kl.Log("kafka message: %v", v)
+	kl.Log("kafka message: %v", v...)
 }
 
 func (kl kafkaLogger) Printf(format string, v ...interface{}) {
-	kl.Log(format, v)
+	kl.Log(format, v...)
 }
 
 func (kl kafkaLogger) Println(v ...interface{}) {
@@ -31,8 +31,8 @@ func (kafkaLogger) Log(format string, v ...interface{}) {
 		}
 	}
 	if warn {
-		logp.Warn(format, v)
+		logp.Warn(format, v...)
 	} else {
-		logp.Info(format, v)
+		logp.Info(format, v...)
 	}
 }
diff --git a/vendor/github.com/elastic/beats/libbeat/plugin/cli.go b/vendor/github.com/elastic/beats/libbeat/plugin/cli.go
index 0df4589e..7e7c15ac 100644
--- a/vendor/github.com/elastic/beats/libbeat/plugin/cli.go
+++ b/vendor/github.com/elastic/beats/libbeat/plugin/cli.go
@@ -1,4 +1,5 @@
-//+build linux,go1.8,cgo
+//+build linux,go1.8 darwin,go1.10
+//+build cgo
 
 package plugin
 
diff --git a/vendor/github.com/elastic/beats/libbeat/plugin/cli_stub.go b/vendor/github.com/elastic/beats/libbeat/plugin/cli_stub.go
index 4253aae3..6ee9af43 100644
--- a/vendor/github.com/elastic/beats/libbeat/plugin/cli_stub.go
+++ b/vendor/github.com/elastic/beats/libbeat/plugin/cli_stub.go
@@ -1,4 +1,4 @@
-//+build !linux !go1.8 !cgo
+//+build linux,!go1.8 darwin,!go1.10 !linux,!darwin !cgo
 
 package plugin
 
diff --git a/vendor/github.com/elastic/beats/libbeat/plugin/load.go b/vendor/github.com/elastic/beats/libbeat/plugin/load.go
index dd6f839d..d68a1183 100644
--- a/vendor/github.com/elastic/beats/libbeat/plugin/load.go
+++ b/vendor/github.com/elastic/beats/libbeat/plugin/load.go
@@ -1,4 +1,5 @@
-//+build linux,go1.8,cgo
+//+build linux,go1.8 darwin,go1.10
+//+build cgo
 
 package plugin
 
diff --git a/vendor/github.com/elastic/beats/libbeat/plugin/load_stub.go b/vendor/github.com/elastic/beats/libbeat/plugin/load_stub.go
index b369aa16..d6e917b3 100644
--- a/vendor/github.com/elastic/beats/libbeat/plugin/load_stub.go
+++ b/vendor/github.com/elastic/beats/libbeat/plugin/load_stub.go
@@ -1,4 +1,4 @@
-//+build !linux !go1.8 !cgo
+//+build linux,!go1.8 darwin,!go1.10 !linux,!darwin !cgo
 
 package plugin
 
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field.go
index 95af9ae4..68ee10f3 100644
--- a/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field.go
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field.go
@@ -58,18 +58,18 @@ func NewExtractField(c *common.Config) (processors.Processor, error) {
 func (f *extract_field) Run(event *beat.Event) (*beat.Event, error) {
 	fieldValue, err := event.GetValue(f.Field)
 	if err != nil {
-		return nil, fmt.Errorf("error getting field '%s' from event", f.Field)
+		return event, fmt.Errorf("error getting field '%s' from event", f.Field)
 	}
 
 	value, ok := fieldValue.(string)
 	if !ok {
-		return nil, fmt.Errorf("could not get a string from field '%s'", f.Field)
+		return event, fmt.Errorf("could not get a string from field '%s'", f.Field)
 	}
 
 	parts := strings.Split(value, f.Separator)
 	parts = deleteEmpty(parts)
 	if len(parts) < f.Index+1 {
-		return nil, fmt.Errorf("index is out of range for field '%s'", f.Field)
+		return event, fmt.Errorf("index is out of range for field '%s'", f.Field)
 	}
 
 	event.PutValue(f.Target, parts[f.Index])
diff --git
a/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field_test.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field_test.go index f20e393a..e7fcf679 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/extract_field_test.go @@ -14,6 +14,7 @@ func TestCommonPaths(t *testing.T) { var tests = []struct { Value, Field, Separator, Target, Result string Index int + Error bool }{ // Common docker case { @@ -48,6 +49,15 @@ func TestCommonPaths(t *testing.T) { Index: 0, Result: "var", }, + { + Value: "/var/lib/foo/bar", + Field: "source", + Separator: "*", + Target: "destination", + Index: 10, // out of range + Result: "var", + Error: true, + }, } for _, test := range tests { @@ -63,17 +73,25 @@ func TestCommonPaths(t *testing.T) { test.Field: test.Value, } - actual := runExtractField(t, testConfig, input) + event, err := runExtractField(t, testConfig, input) + if test.Error { + assert.NotNil(t, err) + } else { - result, err := actual.GetValue(test.Target) - if err != nil { - t.Fatalf("could not get target field: %s", err) + assert.Nil(t, err) + result, err := event.Fields.GetValue(test.Target) + if err != nil { + t.Fatalf("could not get target field: %s", err) + } + assert.Equal(t, result.(string), test.Result) } - assert.Equal(t, result.(string), test.Result) + + // Event must be present, even on error + assert.NotNil(t, event) } } -func runExtractField(t *testing.T, config *common.Config, input common.MapStr) common.MapStr { +func runExtractField(t *testing.T, config *common.Config, input common.MapStr) (*beat.Event, error) { logp.TestingSetup() p, err := NewExtractField(config) @@ -81,10 +99,5 @@ func runExtractField(t *testing.T, config *common.Config, input common.MapStr) c t.Fatalf("error initializing extract_field: %s", err) } - actual, err := p.Run(&beat.Event{Fields: input}) - if err != nil { - t.Fatalf("error running extract_field: %s", err) - } - - return actual.Fields + return p.Run(&beat.Event{Fields: input}) } diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/include_fields_test.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/include_fields_test.go new file mode 100644 index 00000000..5c581ab5 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/include_fields_test.go @@ -0,0 +1,59 @@ +package actions + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" +) + +func TestIncludeFields(t *testing.T) { + + var tests = []struct { + Fields []string + Input common.MapStr + Output common.MapStr + }{ + { + Fields: []string{"test"}, + Input: common.MapStr{ + "hello": "world", + "test": 17, + }, + Output: common.MapStr{ + "test": 17, + }, + }, + { + Fields: []string{"test", "a.b"}, + Input: common.MapStr{ + "a.b": "b", + "a.c": "c", + "test": 17, + }, + Output: common.MapStr{ + "test": 17, + "a": common.MapStr{ + "b": "b", + }, + }, + }, + } + + for _, test := range tests { + p := includeFields{ + Fields: test.Fields, + } + + event := &beat.Event{ + Fields: test.Input, + } + + newEvent, err := p.Run(event) + assert.NoError(t, err) + + assert.Equal(t, test.Output, newEvent.Fields) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/rename.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/rename.go new file mode 100644 index 00000000..dac988aa --- 
/dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/rename.go @@ -0,0 +1,104 @@ +package actions + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/processors" +) + +type renameFields struct { + config renameFieldsConfig +} + +type renameFieldsConfig struct { + Fields []fromTo `config:"fields"` + IgnoreMissing bool `config:"ignore_missing"` + FailOnError bool `config:"fail_on_error"` +} + +type fromTo struct { + From string `config:"from"` + To string `config:"to"` +} + +func init() { + processors.RegisterPlugin("rename", + configChecked(newRenameFields, + requireFields("fields"))) +} + +func newRenameFields(c *common.Config) (processors.Processor, error) { + + cfgwarn.Beta("Beta rename processor is used.") + config := renameFieldsConfig{ + IgnoreMissing: false, + FailOnError: true, + } + err := c.Unpack(&config) + if err != nil { + return nil, fmt.Errorf("failed to unpack the rename configuration: %s", err) + } + + f := &renameFields{ + config: config, + } + return f, nil +} + +func (f *renameFields) Run(event *beat.Event) (*beat.Event, error) { + var backup common.MapStr + // Creates a copy of the event to revert in case of failure + if f.config.FailOnError { + backup = event.Fields.Clone() + } + + for _, field := range f.config.Fields { + err := f.renameField(field.From, field.To, event.Fields) + if err != nil && f.config.FailOnError { + logp.Debug("rename", "Failed to rename fields, revert to old event: %s", err) + event.Fields = backup + return event, err + } + } + + return event, nil +} + +func (f *renameFields) renameField(from string, to string, fields common.MapStr) error { + // Fields cannot be overwritten. 
Either the target field has to be dropped first or renamed first + exists, _ := fields.HasKey(to) + if exists { + return fmt.Errorf("target field %s already exists, drop or rename this field first", to) + } + + value, err := fields.GetValue(from) + if err != nil { + // Ignore ErrKeyNotFound errors + if f.config.IgnoreMissing && errors.Cause(err) == common.ErrKeyNotFound { + return nil + } + return fmt.Errorf("could not fetch value for key: %s, Error: %s", to, err) + } + + // Deletion must happen first to support cases where a becomes a.b + err = fields.Delete(from) + if err != nil { + return fmt.Errorf("could not delete key: %s, %+v", from, err) + } + + _, err = fields.Put(to, value) + if err != nil { + return fmt.Errorf("could not put value: %s: %v, %+v", to, value, err) + } + return nil +} + +func (f *renameFields) String() string { + return "rename=" + fmt.Sprintf("%+v", f.config.Fields) +} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/rename_test.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/rename_test.go new file mode 100644 index 00000000..55bc17d0 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/rename_test.go @@ -0,0 +1,343 @@ +package actions + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "reflect" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" +) + +func TestRenameRun(t *testing.T) { + var tests = []struct { + description string + Fields []fromTo + IgnoreMissing bool + FailOnError bool + Input common.MapStr + Output common.MapStr + error bool + }{ + { + description: "simple field renaming", + Fields: []fromTo{ + { + From: "a", + To: "b", + }, + }, + Input: common.MapStr{ + "a": "c", + }, + Output: common.MapStr{ + "b": "c", + }, + IgnoreMissing: false, + FailOnError: true, + error: false, + }, + { + description: "Add one more hierarchy to event", + Fields: []fromTo{ + { + From: "a.b", + To: "a.b.c", + }, + }, + Input: common.MapStr{ + "a.b": 1, + }, + Output: common.MapStr{ + "a": common.MapStr{ + "b": common.MapStr{ + "c": 1, + }, + }, + }, + IgnoreMissing: false, + FailOnError: true, + error: false, + }, + { + description: "overwrites an existing field which is not allowed", + Fields: []fromTo{ + { + From: "a", + To: "b", + }, + }, + Input: common.MapStr{ + "a": 2, + "b": "q", + }, + Output: common.MapStr{ + "a": 2, + "b": "q", + }, + error: true, + FailOnError: true, + IgnoreMissing: false, + }, + { + description: "overwrites existing field but renames it first, order matters", + Fields: []fromTo{ + { + From: "b", + To: "c", + }, + { + From: "a", + To: "b", + }, + }, + Input: common.MapStr{ + "a": 2, + "b": "q", + }, + Output: common.MapStr{ + "b": 2, + "c": "q", + }, + error: false, + FailOnError: true, + IgnoreMissing: false, + }, + { + description: "take an invalid ES event with key / object conflict and convert it to a valid event", + Fields: []fromTo{ + { + From: "a", + To: "a.value", + }, + }, + Input: common.MapStr{ + "a": 5, + "a.b": 6, + }, + Output: common.MapStr{ + "a.b": 6, + "a": common.MapStr{ + "value": 5, + }, + }, + error: false, + FailOnError: true, + IgnoreMissing: false, + }, + { + description: "renames two fields into the same namespace. 
order matters as a is first key and then object", + Fields: []fromTo{ + { + From: "a", + To: "a.value", + }, + { + From: "c", + To: "a.c", + }, + }, + Input: common.MapStr{ + "a": 7, + "c": 8, + }, + Output: common.MapStr{ + "a": common.MapStr{ + "value": 7, + "c": 8, + }, + }, + error: false, + IgnoreMissing: false, + FailOnError: true, + }, + { + description: "rename two fields into the same name space. this fails because a is already a key, renaming of a needs to happen first", + Fields: []fromTo{ + { + From: "c", + To: "a.c", + }, + { + From: "a", + To: "a.value", + }, + }, + Input: common.MapStr{ + "a": 9, + "c": 10, + }, + Output: common.MapStr{ + "a": 9, + "c": 10, + }, + error: true, + IgnoreMissing: false, + FailOnError: true, + }, + { + description: "renames conflicting keys. partially works because fail_on_error is false", + Fields: []fromTo{ + { + From: "c", + To: "a.c", + }, + { + From: "a", + To: "a.value", + }, + }, + Input: common.MapStr{ + "a": 9, + "c": 10, + }, + Output: common.MapStr{ + "a": common.MapStr{ + "value": 9, + }, + }, + error: false, + IgnoreMissing: false, + FailOnError: false, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + f := &renameFields{ + config: renameFieldsConfig{ + Fields: test.Fields, + IgnoreMissing: test.IgnoreMissing, + FailOnError: test.FailOnError, + }, + } + event := &beat.Event{ + Fields: test.Input, + } + + newEvent, err := f.Run(event) + if !test.error { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + } + + assert.True(t, reflect.DeepEqual(newEvent.Fields, test.Output)) + }) + } +} + +func TestRenameField(t *testing.T) { + var tests = []struct { + From string + To string + ignoreMissing bool + failOnError bool + Input common.MapStr + Output common.MapStr + error bool + description string + }{ + { + description: "simple rename of field", + From: "a", + To: "c", + Input: common.MapStr{ + "a": "b", + }, + Output: common.MapStr{ + "c": "b", + }, + error: false, + failOnError: true, + ignoreMissing: false, + }, + { + description: "Add hierarchy to event", + From: "a.b", + To: "a.b.c", + Input: common.MapStr{ + "a.b": 1, + }, + Output: common.MapStr{ + "a": common.MapStr{ + "b": common.MapStr{ + "c": 1, + }, + }, + }, + error: false, + failOnError: true, + ignoreMissing: false, + }, + { + description: "overwrite an existing field that should lead to an error", + From: "a", + To: "b", + Input: common.MapStr{ + "a": 2, + "b": "q", + }, + Output: common.MapStr{ + "a": 2, + "b": "q", + }, + error: true, + failOnError: true, + ignoreMissing: false, + }, + { + description: "resolve dotted event conflict", + From: "a", + To: "a.value", + Input: common.MapStr{ + "a": 5, + "a.b": 6, + }, + Output: common.MapStr{ + "a.b": 6, + "a": common.MapStr{ + "value": 5, + }, + }, + error: false, + failOnError: true, + ignoreMissing: false, + }, + { + description: "try to rename no existing field with failOnError true", + From: "a", + To: "b", + Input: common.MapStr{ + "c": 5, + }, + Output: common.MapStr{ + "c": 5, + }, + failOnError: true, + ignoreMissing: false, + error: true, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + + f := &renameFields{ + config: renameFieldsConfig{ + IgnoreMissing: test.ignoreMissing, + FailOnError: test.failOnError, + }, + } + + err := f.renameField(test.From, test.To, test.Input) + if err != nil { + assert.Equal(t, test.error, true) + } + + assert.True(t, reflect.DeepEqual(test.Input, test.Output)) + }) + } +} diff --git 
a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go index 8ae941ce..b936b4b2 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go @@ -4,7 +4,6 @@ import ( "fmt" "os" "path/filepath" - "strconv" "strings" "time" @@ -13,6 +12,7 @@ import ( "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/docker" + "github.com/elastic/beats/libbeat/common/safemapstr" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/actions" @@ -54,7 +54,7 @@ func buildDockerMetadataProcessor(cfg *common.Config, watcherConstructor docker. return nil, errors.Wrapf(err, "fail to unpack the %v configuration", processorName) } - watcher, err := watcherConstructor(config.Host, config.TLS) + watcher, err := watcherConstructor(config.Host, config.TLS, config.MatchShortID) if err != nil { return nil, err } @@ -156,7 +156,7 @@ func (d *addDockerMetadata) Run(event *beat.Event) (*beat.Event, error) { if len(container.Labels) > 0 { labels := common.MapStr{} for k, v := range container.Labels { - labels.Put(k, v) + safemapstr.Put(labels, k, v) } meta.Put("container.labels", labels) } @@ -187,7 +187,7 @@ func (d *addDockerMetadata) lookupContainerIDByPID(event *beat.Event) string { continue } - pid, ok := tryToInt(v) + pid, ok := common.TryToInt(v) if !ok { d.log.Debugf("field %v is not a PID (type=%T, value=%v)", field, v, v) continue @@ -241,40 +241,3 @@ func getContainerIDFromCgroups(cgroups map[string]string) string { return "" } - -// tryToInt tries to coerce the given interface to an int. On success it returns -// the int value and true. 
-func tryToInt(number interface{}) (int, bool) { - var rtn int - switch v := number.(type) { - case int: - rtn = int(v) - case int8: - rtn = int(v) - case int16: - rtn = int(v) - case int32: - rtn = int(v) - case int64: - rtn = int(v) - case uint: - rtn = int(v) - case uint8: - rtn = int(v) - case uint16: - rtn = int(v) - case uint32: - rtn = int(v) - case uint64: - rtn = int(v) - case string: - var err error - rtn, err = strconv.Atoi(v) - if err != nil { - return 0, false - } - default: - return 0, false - } - return rtn, true -} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go index 2769fdb4..4ab3132b 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go @@ -96,8 +96,9 @@ func TestMatchContainer(t *testing.T) { Image: "image", Name: "name", Labels: map[string]string{ - "a": "1", - "b": "2", + "a.x": "1", + "b": "2", + "b.foo": "3", }, }, })) @@ -115,8 +116,13 @@ func TestMatchContainer(t *testing.T) { "id": "container_id", "image": "image", "labels": common.MapStr{ - "a": "1", - "b": "2", + "a": common.MapStr{ + "x": "1", + }, + "b": common.MapStr{ + "value": "2", + "foo": "3", + }, }, "name": "name", }, @@ -298,7 +304,7 @@ func MockWatcherFactory(containers map[string]*docker.Container) docker.WatcherC if containers == nil { containers = make(map[string]*docker.Container) } - return func(host string, tls *docker.TLSConfig) (docker.Watcher, error) { + return func(host string, tls *docker.TLSConfig, shortID bool) (docker.Watcher, error) { return &mockWatcher{containers: containers}, nil } } diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go index 529175ca..5755bc07 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go @@ -8,13 +8,14 @@ import ( // Config for docker processor. type Config struct { - Host string `config:"host"` // Docker socket (UNIX or TCP socket). - TLS *docker.TLSConfig `config:"ssl"` // TLS settings for connecting to Docker. - Fields []string `config:"match_fields"` // A list of fields to match a container ID. - MatchSource bool `config:"match_source"` // Match container ID from a log path present in source field. - SourceIndex int `config:"match_source_index"` // Index in the source path split by / to look for container ID. - MatchPIDs []string `config:"match_pids"` // A list of fields containing process IDs (PIDs). - HostFS string `config:"system.hostfs"` // Specifies the mount point of the host’s filesystem for use in monitoring a host from within a container. + Host string `config:"host"` // Docker socket (UNIX or TCP socket). + TLS *docker.TLSConfig `config:"ssl"` // TLS settings for connecting to Docker. + Fields []string `config:"match_fields"` // A list of fields to match a container ID. + MatchSource bool `config:"match_source"` // Match container ID from a log path present in source field. + MatchShortID bool `config:"match_short_id"` // Match to container short ID from a log path present in source field. 
+ SourceIndex int `config:"match_source_index"` // Index in the source path split by / to look for container ID. + MatchPIDs []string `config:"match_pids"` // A list of fields containing process IDs (PIDs). + HostFS string `config:"system.hostfs"` // Specifies the mount point of the host’s filesystem for use in monitoring a host from within a container. // Annotations are kept after container is killed, until they haven't been // accessed for a full `cleanup_timeout`: diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/_meta/fields.yml b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/_meta/fields.yml new file mode 100644 index 00000000..9897d770 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/_meta/fields.yml @@ -0,0 +1,33 @@ +- key: host + title: Host + description: > + Info collected for the host machine. + anchor: host-processor + fields: + - name: host + type: group + fields: + - name: name + type: keyword + description: > + Hostname. + - name: id + type: keyword + description: > + Unique host id. + - name: architecture + type: keyword + description: > + Host architecture (e.g. x86_64, arm, ppc, mips). + - name: os.platform + type: keyword + description: > + OS platform (e.g. centos, ubuntu, windows). + - name: os.version + type: keyword + description: > + OS version. + - name: os.family + type: keyword + description: > + OS family (e.g. redhat, debian, freebsd, windows). diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata.go b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata.go new file mode 100644 index 00000000..65d8dbb6 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata.go @@ -0,0 +1,80 @@ +package add_host_metadata + +import ( + "time" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/processors" + "github.com/elastic/go-sysinfo" + "github.com/elastic/go-sysinfo/types" +) + +func init() { + processors.RegisterPlugin("add_host_metadata", newHostMetadataProcessor) +} + +type addHostMetadata struct { + info types.HostInfo + lastUpdate time.Time + data common.MapStr +} + +const ( + cacheExpiration = time.Minute * 5 +) + +func newHostMetadataProcessor(_ *common.Config) (processors.Processor, error) { + h, err := sysinfo.Host() + if err != nil { + return nil, err + } + p := &addHostMetadata{ + info: h.Info(), + } + return p, nil +} + +// Run enriches the given event with the host meta data +func (p *addHostMetadata) Run(event *beat.Event) (*beat.Event, error) { + p.loadData() + event.Fields.DeepUpdate(p.data) + return event, nil +} + +func (p *addHostMetadata) loadData() { + + // Check if cache is expired + if p.lastUpdate.Add(cacheExpiration).Before(time.Now()) { + p.data = common.MapStr{ + "host": common.MapStr{ + "name": p.info.Hostname, + "architecture": p.info.Architecture, + "os": common.MapStr{ + "platform": p.info.OS.Platform, + "version": p.info.OS.Version, + "family": p.info.OS.Family, + }, + }, + } + + // Optional params + if p.info.UniqueID != "" { + p.data.Put("host.id", p.info.UniqueID) + } + if p.info.Containerized != nil { + p.data.Put("host.containerized", *p.info.Containerized) + } + if p.info.OS.Codename != "" { + p.data.Put("host.os.codename", p.info.OS.Codename) + } + if p.info.OS.Build != "" { + p.data.Put("host.os.build", p.info.OS.Build) + } + p.lastUpdate = 
time.Now() + } +} + +func (p addHostMetadata) String() string { + return "add_host_metadata=[]" +} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata_test.go b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata_test.go new file mode 100644 index 00000000..0192f57b --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata_test.go @@ -0,0 +1,34 @@ +package add_host_metadata + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "runtime" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/go-sysinfo/types" +) + +func TestRun(t *testing.T) { + event := &beat.Event{ + Fields: common.MapStr{}, + Timestamp: time.Now(), + } + p, err := newHostMetadataProcessor(nil) + if runtime.GOOS != "windows" && runtime.GOOS != "darwin" && runtime.GOOS != "linux" { + assert.IsType(t, types.ErrNotImplemented, err) + return + } + assert.NoError(t, err) + + newEvent, err := p.Run(event) + assert.NoError(t, err) + + v, err := newEvent.GetValue("host.os.family") + assert.NoError(t, err) + assert.NotNil(t, v) +} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/cache.go b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/cache.go new file mode 100644 index 00000000..279b1aa2 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/cache.go @@ -0,0 +1,62 @@ +package add_kubernetes_metadata + +import ( + "sync" + "time" + + "github.com/elastic/beats/libbeat/common" +) + +type cache struct { + sync.Mutex + timeout time.Duration + deleted map[string]time.Time // key -> when should this obj be deleted + metadata map[string]common.MapStr +} + +func newCache(cleanupTimeout time.Duration) *cache { + c := &cache{ + timeout: cleanupTimeout, + deleted: make(map[string]time.Time), + metadata: make(map[string]common.MapStr), + } + go c.cleanup() + return c +} + +func (c *cache) get(key string) common.MapStr { + c.Lock() + defer c.Unlock() + // add lifecycle if key was queried + if t, ok := c.deleted[key]; ok { + c.deleted[key] = t.Add(c.timeout) + } + return c.metadata[key] +} + +func (c *cache) delete(key string) { + c.Lock() + defer c.Unlock() + c.deleted[key] = time.Now().Add(c.timeout) +} + +func (c *cache) set(key string, data common.MapStr) { + c.Lock() + defer c.Unlock() + delete(c.deleted, key) + c.metadata[key] = data +} + +func (c *cache) cleanup() { + ticker := time.Tick(timeout) + for now := range ticker { + c.Lock() + for k, t := range c.deleted { + if now.After(t) { + delete(c.deleted, k) + delete(c.metadata, k) + } + } + c.Unlock() + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go index dabd22e2..2455d7b0 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go @@ -3,12 +3,10 @@ package add_kubernetes_metadata import ( "errors" "fmt" - "sync" "time" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/bus" "github.com/elastic/beats/libbeat/common/kubernetes" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" @@ 
-18,19 +16,11 @@ const ( timeout = time.Second * 5 ) -var ( - fatalError = errors.New("Unable to create kubernetes processor") -) - type kubernetesAnnotator struct { - sync.RWMutex - watcher kubernetes.Watcher - startListener bus.Listener - stopListener bus.Listener - updateListener bus.Listener - indexers *Indexers - matchers *Matchers - metadata map[string]common.MapStr + watcher kubernetes.Watcher + indexers *Indexers + matchers *Matchers + cache *cache } func init() { @@ -89,36 +79,46 @@ func newKubernetesAnnotator(cfg *common.Config) (processors.Processor, error) { return nil, err } - config.Host = kubernetes.DiscoverKubernetesNode(config.Host, client) + config.Host = kubernetes.DiscoverKubernetesNode(config.Host, config.InCluster, client) logp.Debug("kubernetes", "Using host ", config.Host) logp.Debug("kubernetes", "Initializing watcher") - if client != nil { - watcher := kubernetes.NewWatcher(client.CoreV1(), config.SyncPeriod, config.CleanupTimeout, config.Host) - start := watcher.ListenStart() - stop := watcher.ListenStop() - update := watcher.ListenUpdate() - - processor := &kubernetesAnnotator{ - watcher: watcher, - indexers: indexers, - matchers: matchers, - metadata: make(map[string]common.MapStr, 0), - startListener: start, - stopListener: stop, - updateListener: update, - } - // Start worker - go processor.worker() + watcher, err := kubernetes.NewWatcher(client, &kubernetes.Pod{}, kubernetes.WatchOptions{ + SyncTimeout: config.SyncPeriod, + Node: config.Host, + Namespace: config.Namespace, + }) + if err != nil { + logp.Err("kubernetes: Couldn't create watcher for %t", &kubernetes.Pod{}) + return nil, err + } - if err := watcher.Start(); err != nil { - return nil, err - } - return processor, nil + processor := &kubernetesAnnotator{ + watcher: watcher, + indexers: indexers, + matchers: matchers, + cache: newCache(config.CleanupTimeout), } - return nil, fatalError + watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ + AddFunc: func(obj kubernetes.Resource) { + processor.addPod(obj.(*kubernetes.Pod)) + }, + UpdateFunc: func(obj kubernetes.Resource) { + processor.removePod(obj.(*kubernetes.Pod)) + processor.addPod(obj.(*kubernetes.Pod)) + }, + DeleteFunc: func(obj kubernetes.Resource) { + processor.removePod(obj.(*kubernetes.Pod)) + }, + }) + + if err := watcher.Start(); err != nil { + return nil, err + } + + return processor, nil } func (k *kubernetesAnnotator) Run(event *beat.Event) (*beat.Event, error) { @@ -127,69 +127,29 @@ func (k *kubernetesAnnotator) Run(event *beat.Event) (*beat.Event, error) { return event, nil } - k.RLock() - metadata := k.metadata[index] - k.RUnlock() + metadata := k.cache.get(index) if metadata == nil { return event, nil } - meta := common.MapStr{} - metaIface, ok := event.Fields["kubernetes"] - if !ok { - event.Fields["kubernetes"] = common.MapStr{} - } else { - meta = metaIface.(common.MapStr) - } - - meta.Update(metadata) - event.Fields["kubernetes"] = meta + event.Fields.DeepUpdate(common.MapStr{ + "kubernetes": metadata, + }) return event, nil } -// worker watches pod events and keeps a map of metadata -func (k *kubernetesAnnotator) worker() { - for { - select { - case event := <-k.startListener.Events(): - processEvent(k.addPod, event) - - case event := <-k.stopListener.Events(): - processEvent(k.removePod, event) - - case event := <-k.updateListener.Events(): - processEvent(k.removePod, event) - processEvent(k.addPod, event) - } - } -} - -// Run pod actions while handling errors -func processEvent(f func(pod *kubernetes.Pod), event 
bus.Event) { - pod, ok := event["pod"].(*kubernetes.Pod) - if !ok || pod == nil { - logp.Err("Couldn't get a pod from watcher event: %v", event) - return - } - f(pod) -} - func (k *kubernetesAnnotator) addPod(pod *kubernetes.Pod) { metadata := k.indexers.GetMetadata(pod) - k.Lock() - defer k.Unlock() for _, m := range metadata { - k.metadata[m.Index] = m.Data + k.cache.set(m.Index, m.Data) } } func (k *kubernetesAnnotator) removePod(pod *kubernetes.Pod) { indexes := k.indexers.GetIndexes(pod) - k.Lock() - defer k.Unlock() for _, idx := range indexes { - delete(k.metadata, idx) + k.cache.delete(idx) } } diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes_test.go b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes_test.go new file mode 100644 index 00000000..de45c7ea --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes_test.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package add_kubernetes_metadata + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" +) + +// Test metadata updates don't replace existing pod metrics +func TestAnnotatorDeepUpdate(t *testing.T) { + cfg, err := common.NewConfigFrom(map[string]interface{}{ + "lookup_fields": []string{"kubernetes.pod.name"}, + }) + if err != nil { + t.Fatal(err) + } + matcher, err := NewFieldMatcher(*cfg) + if err != nil { + t.Fatal(err) + } + + processor := kubernetesAnnotator{ + cache: newCache(10 * time.Second), + matchers: &Matchers{ + matchers: []Matcher{matcher}, + }, + } + + processor.cache.set("foo", common.MapStr{ + "pod": common.MapStr{ + "labels": common.MapStr{ + "dont": "replace", + "original": "fields", + }, + }, + }) + + event, err := processor.Run(&beat.Event{ + Fields: common.MapStr{ + "kubernetes": common.MapStr{ + "pod": common.MapStr{ + "name": "foo", + "id": "pod_id", + "metrics": common.MapStr{ + "a": 1, + "b": 2, + }, + }, + }, + }, + }) + assert.NoError(t, err) + + assert.Equal(t, common.MapStr{ + "kubernetes": common.MapStr{ + "pod": common.MapStr{ + "name": "foo", + "id": "pod_id", + "metrics": common.MapStr{ + "a": 1, + "b": 2, + }, + "labels": common.MapStr{ + "dont": "replace", + "original": "fields", + }, + }, + }, + }, event.Fields) +} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/condition.go b/vendor/github.com/elastic/beats/libbeat/processors/condition.go index 4e8d28b5..7e8bfd94 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/condition.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/condition.go @@ -21,8 +21,9 @@ type RangeValue struct { } type EqualsValue struct { - Int uint64 - Str string + Int uint64 + Str string + Bool bool } type Condition struct { @@ -31,10 +32,11 @@ type Condition struct { name string filters map[string]match.Matcher } - rangexp map[string]RangeValue - or []Condition - and []Condition - not *Condition + hasfields []string + rangexp map[string]RangeValue + or []Condition + and []Condition + not *Condition } type WhenProcessor struct { @@ -81,6 +83,8 @@ func NewCondition(config *ConditionConfig) (*Condition, error) { c.matches.filters, err = compileMatches(config.Regexp.fields, match.Compile) case config.Range != nil: err = c.setRange(config.Range) + case config.HasFields != nil: + c.hasfields = config.HasFields case len(config.OR) > 0: c.or, err = NewConditionList(config.OR) case len(config.AND) > 0: @@ -118,13 +122,22 @@ func (c *Condition) setEquals(cfg *ConditionFields) error { uintValue, err := extractInt(value) if err == nil { c.equals[field] = EqualsValue{Int: uintValue} - } else { - sValue, err := extractString(value) - if err != nil { - return err - } + continue + } + + sValue, err := extractString(value) + if err == nil { c.equals[field] = EqualsValue{Str: sValue} + continue + } + + bValue, err := extractBool(value) + if err == nil { + c.equals[field] = EqualsValue{Bool: bValue} + continue } + + return fmt.Errorf("unexpected type %T in equals condition", value) } return nil @@ -216,7 +229,8 @@ func (c *Condition) Check(event ValuesMap) bool { return c.checkEquals(event) && c.checkMatches(event) && - c.checkRange(event) + c.checkRange(event) && + c.checkHasFields(event) } func (c *Condition) checkOR(event ValuesMap) bool { @@ -257,16 +271,30 @@ func (c *Condition) checkEquals(event ValuesMap) bool { if intValue != equalValue.Int { return false } - } else { - sValue, err := 
extractString(value) - if err != nil { - logp.Warn("unexpected type %T in equals condition as it accepts only integers and strings. ", value) + + continue + } + + sValue, err := extractString(value) + if err == nil { + if sValue != equalValue.Str { return false } - if sValue != equalValue.Str { + + continue + } + + bValue, err := extractBool(value) + if err == nil { + if bValue != equalValue.Bool { return false } + + continue } + + logp.Err("unexpected type %T in equals condition as it accepts only integers, strings or bools. ", value) + return false } return true @@ -375,6 +403,16 @@ func (c *Condition) checkRange(event ValuesMap) bool { return true } +func (c *Condition) checkHasFields(event ValuesMap) bool { + for _, field := range c.hasfields { + _, err := event.GetValue(field) + if err != nil { + return false + } + } + return true +} + func (c Condition) String() string { s := "" @@ -387,6 +425,9 @@ func (c Condition) String() string { if len(c.rangexp) > 0 { s = s + fmt.Sprintf("range: %v", c.rangexp) } + if len(c.hasfields) > 0 { + s = s + fmt.Sprintf("has_fields: %v", c.hasfields) + } if len(c.or) > 0 { for _, cond := range c.or { s = s + cond.String() + " or " diff --git a/vendor/github.com/elastic/beats/libbeat/processors/condition_test.go b/vendor/github.com/elastic/beats/libbeat/processors/condition_test.go index 22d19dbd..f797b396 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/condition_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/condition_test.go @@ -129,6 +129,40 @@ func TestCondition(t *testing.T) { }, result: true, }, + { + config: ConditionConfig{ + Equals: &ConditionFields{fields: map[string]interface{}{ + "final": true, + }}, + }, + result: false, + }, + { + config: ConditionConfig{ + Equals: &ConditionFields{fields: map[string]interface{}{ + "final": false, + }}, + }, + result: true, + }, + { + config: ConditionConfig{ + HasFields: []string{"proc.cmdline", "type"}, + }, + result: true, + }, + { + config: ConditionConfig{ + HasFields: []string{"cpu"}, + }, + result: false, + }, + { + config: ConditionConfig{ + HasFields: []string{"proc", "beat"}, + }, + result: false, + }, } event := &beat.Event{ @@ -150,7 +184,8 @@ func TestCondition(t *testing.T) { "username": "monica", "keywords": []string{"foo", "bar"}, }, - "type": "process", + "type": "process", + "final": false, }, } @@ -564,6 +599,18 @@ func TestWhenProcessor(t *testing.T) { []common.MapStr{{"i": 10}}, 1, }, + { + "condition_matches", + config{"when.has_fields": []string{"i"}}, + []common.MapStr{{"i": 10}}, + 1, + }, + { + "condition_fails", + config{"when.has_fields": []string{"j"}}, + []common.MapStr{{"i": 10}}, + 0, + }, } for i, test := range tests { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/config.go b/vendor/github.com/elastic/beats/libbeat/processors/config.go index 4d59b188..b5668dfe 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/config.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/config.go @@ -9,13 +9,14 @@ import ( ) type ConditionConfig struct { - Equals *ConditionFields `config:"equals"` - Contains *ConditionFields `config:"contains"` - Regexp *ConditionFields `config:"regexp"` - Range *ConditionFields `config:"range"` - OR []ConditionConfig `config:"or"` - AND []ConditionConfig `config:"and"` - NOT *ConditionConfig `config:"not"` + Equals *ConditionFields `config:"equals"` + Contains *ConditionFields `config:"contains"` + Regexp *ConditionFields `config:"regexp"` + Range *ConditionFields `config:"range"` + 
HasFields []string `config:"has_fields"` + OR []ConditionConfig `config:"or"` + AND []ConditionConfig `config:"and"` + NOT *ConditionConfig `config:"not"` } type ConditionFields struct { @@ -130,3 +131,12 @@ func extractString(unk interface{}) (string, error) { return "", fmt.Errorf("unknown type %T passed to extractString", unk) } } + +func extractBool(unk interface{}) (bool, error) { + switch b := unk.(type) { + case bool: + return b, nil + default: + return false, fmt.Errorf("unknown type %T passed to extractBool", unk) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/processors/config_test.go b/vendor/github.com/elastic/beats/libbeat/processors/config_test.go index 85315218..f8eb23c5 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/config_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/config_test.go @@ -15,3 +15,13 @@ func TestExtractString(t *testing.T) { } assert.Equal(t, input, v) } + +func TestExtractBool(t *testing.T) { + input := true + + v, err := extractBool(input) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, input, v) +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/includes/includes.go b/vendor/github.com/elastic/beats/libbeat/publisher/includes/includes.go index e36aed52..99b65e09 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/includes/includes.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/includes/includes.go @@ -1,6 +1,10 @@ package includes import ( + // import queue types + _ "github.com/elastic/beats/libbeat/publisher/queue/memqueue" + _ "github.com/elastic/beats/libbeat/publisher/queue/spool" + // load supported output plugins _ "github.com/elastic/beats/libbeat/outputs/console" _ "github.com/elastic/beats/libbeat/outputs/elasticsearch" diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/client_ack.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/client_ack.go index f5d3749e..a9dfd6e7 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/client_ack.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/client_ack.go @@ -13,7 +13,7 @@ type clientACKer struct { } func (p *Pipeline) makeACKer( - withProcessors bool, + canDrop bool, cfg *beat.ClientConfig, waitClose time.Duration, ) acker { @@ -25,17 +25,17 @@ func (p *Pipeline) makeACKer( sema := p.eventSema switch { case cfg.ACKCount != nil: - acker = bld.createCountACKer(withProcessors, sema, cfg.ACKCount) + acker = bld.createCountACKer(canDrop, sema, cfg.ACKCount) case cfg.ACKEvents != nil: - acker = bld.createEventACKer(withProcessors, sema, cfg.ACKEvents) + acker = bld.createEventACKer(canDrop, sema, cfg.ACKEvents) case cfg.ACKLastEvent != nil: cb := lastEventACK(cfg.ACKLastEvent) - acker = bld.createEventACKer(withProcessors, sema, cb) + acker = bld.createEventACKer(canDrop, sema, cb) default: if waitClose <= 0 { - return bld.createPipelineACKer(withProcessors, sema) + return bld.createPipelineACKer(canDrop, sema) } - acker = bld.createCountACKer(withProcessors, sema, func(_ int) {}) + acker = bld.createCountACKer(canDrop, sema, func(_ int) {}) } if waitClose <= 0 { diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/log.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/log.go deleted file mode 100644 index 5ed7c74a..00000000 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/log.go +++ /dev/null @@ -1,5 +0,0 @@ -package pipeline - -import "github.com/elastic/beats/libbeat/logp" - -var defaultLogger = 
logp.NewLogger("publish") diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/module.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/module.go index 594ce572..6bcefa0a 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/module.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/module.go @@ -50,10 +50,15 @@ func Load( Processors: processors, Annotations: Annotations{ Event: config.EventMetadata, - Beat: common.MapStr{ - "name": name, - "hostname": beatInfo.Hostname, - "version": beatInfo.Version, + Builtin: common.MapStr{ + "beat": common.MapStr{ + "name": name, + "hostname": beatInfo.Hostname, + "version": beatInfo.Version, + }, + "host": common.MapStr{ + "name": name, + }, }, }, } diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/pipeline.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/pipeline.go index bb060324..8ffa1911 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/pipeline.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/pipeline.go @@ -62,9 +62,9 @@ type pipelineProcessors struct { // The pipeline its processor settings for // constructing the clients complete processor // pipeline on connect. - beatsMeta common.MapStr - fields common.MapStr - tags []string + builtinMeta common.MapStr + fields common.MapStr + tags []string processors beat.Processor @@ -91,8 +91,8 @@ type Settings struct { // processors, so all processors configured with the pipeline or client will see // the same/complete event. type Annotations struct { - Beat common.MapStr - Event common.EventMetadata + Event common.EventMetadata + Builtin common.MapStr } // WaitCloseMode enumerates the possible behaviors of WaitClose in a pipeline. @@ -141,7 +141,7 @@ func New( ) (*Pipeline, error) { var err error - log := defaultLogger + log := logp.NewLogger("publish") annotations := settings.Annotations processors := settings.Processors disabledOutput := settings.Disabled @@ -173,7 +173,18 @@ func New( if err != nil { return nil, err } - p.eventSema = newSema(p.queue.BufferConfig().Events) + + if count := p.queue.BufferConfig().Events; count > 0 { + p.eventSema = newSema(count) + } + + maxEvents := p.queue.BufferConfig().Events + if maxEvents <= 0 { + // Maximum number of events until acker starts blocking. + // Only active if pipeline can drop events. 
+ maxEvents = 64000 + } + p.eventSema = newSema(maxEvents) p.output = newOutputController(log, p.observer, p.queue) p.output.Set(out) @@ -390,8 +401,8 @@ func makePipelineProcessors( p.processors = tmp } - if meta := annotations.Beat; meta != nil { - p.beatsMeta = common.MapStr{"beat": meta} + if meta := annotations.Builtin; meta != nil { + p.builtinMeta = meta } if em := annotations.Event; len(em.Fields) > 0 { diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor.go index 26925f2a..5f058cdd 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor.go @@ -55,8 +55,10 @@ func newProcessorPipeline( needsCopy := global.alwaysCopy || localProcessors != nil || global.processors != nil - // setup 1: generalize/normalize output (P) - processors.add(generalizeProcessor) + if !config.SkipNormalization { + // setup 1: generalize/normalize output (P) + processors.add(generalizeProcessor) + } // setup 2: add Meta from client config (C) if m := clientMeta; len(m) > 0 { @@ -79,18 +81,26 @@ func newProcessorPipeline( } if len(fields) > 0 { - processors.add(makeAddFieldsProcessor("fields", fields, needsCopy)) + // Enforce a copy of fields if dynamic fields are configured or beats + // metadata will be merged into the fields. + // With dynamic fields potentially changing at any time, we need to copy, + // so we do not change shared structures be accident. + fieldsNeedsCopy := needsCopy || config.DynamicFields != nil || fields["beat"] != nil + processors.add(makeAddFieldsProcessor("fields", fields, fieldsNeedsCopy)) } if config.DynamicFields != nil { - processors.add(makeAddDynMetaProcessor("dynamicFields", config.DynamicFields, needsCopy)) + checkCopy := func(m common.MapStr) bool { + return needsCopy || hasKey(m, "beat") + } + processors.add(makeAddDynMetaProcessor("dynamicFields", config.DynamicFields, checkCopy)) } // setup 5: client processor list processors.add(localProcessors) - // setup 6: add beats metadata - if meta := global.beatsMeta; len(meta) > 0 { + // setup 6: add beats and host metadata + if meta := global.builtinMeta; len(meta) > 0 { processors.add(makeAddFieldsProcessor("beatsMeta", meta, needsCopy)) } @@ -248,13 +258,19 @@ func makeAddFieldsProcessor(name string, fields common.MapStr, copy bool) *proce return newAnnotateProcessor(name, fn) } -func makeAddDynMetaProcessor(name string, meta *common.MapStrPointer, copy bool) *processorFn { - fn := func(event *beat.Event) { event.Fields.DeepUpdate(meta.Get()) } - if copy { - fn = func(event *beat.Event) { event.Fields.DeepUpdate(meta.Get().Clone()) } - } +func makeAddDynMetaProcessor( + name string, + meta *common.MapStrPointer, + checkCopy func(m common.MapStr) bool, +) *processorFn { + return newAnnotateProcessor(name, func(event *beat.Event) { + dynFields := meta.Get() + if checkCopy(dynFields) { + dynFields = dynFields.Clone() + } - return newAnnotateProcessor(name, fn) + event.Fields.DeepUpdate(dynFields) + }) } func debugPrintProcessor(info beat.Info) *processorFn { @@ -288,3 +304,8 @@ func makeClientProcessors(config beat.ClientConfig) processors.Processor { list: procs.All(), } } + +func hasKey(m common.MapStr, key string) bool { + _, exists := m[key] + return exists +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor_test.go 
b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor_test.go index d35aadf7..3666e852 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor_test.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/processor_test.go @@ -34,13 +34,29 @@ func TestProcessors(t *testing.T) { []local{ { config: beat.ClientConfig{}, - events: []common.MapStr{{"value": "abc"}}, + events: []common.MapStr{{"value": "abc", "user": nil}}, expected: []common.MapStr{ {"value": "abc", "global": 1, "tags": []string{"tag"}}, }, }, }, }, + { + "no normalization", + pipelineProcessors{ + fields: common.MapStr{"global": 1}, + tags: []string{"tag"}, + }, + []local{ + { + config: beat.ClientConfig{SkipNormalization: true}, + events: []common.MapStr{{"value": "abc", "user": nil}}, + expected: []common.MapStr{ + {"value": "abc", "user": nil, "global": 1, "tags": []string{"tag"}}, + }, + }, + }, + }, { "beat local fields", pipelineProcessors{}, diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go index b15905bb..d2a40b0f 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go @@ -2,7 +2,6 @@ package pipeline import ( "github.com/elastic/beats/libbeat/logp" - "github.com/elastic/beats/libbeat/publisher" ) // retryer is responsible for accepting and managing failed send attempts. It @@ -206,7 +205,7 @@ func decBatch(batch *Batch) { // filter for evens with guaranteed send flags events := batch.events[:0] for _, event := range batch.events { - if (event.Flags & publisher.GuaranteedSend) == publisher.GuaranteedSend { + if event.Guaranteed() { events = append(events, event) } } diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/default.yml b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/default_mem.yml similarity index 75% rename from vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/default.yml rename to vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/default_mem.yml index f6dc45cc..ead9e17e 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/default.yml +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/default_mem.yml @@ -1,4 +1,4 @@ -queue.mem: +pipeline.queue.mem: events: 4096 flush: min_events: 2048 diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/direct.yml b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/direct_mem.yml similarity index 74% rename from vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/direct.yml rename to vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/direct_mem.yml index 7ab3b82d..cf8fdc2f 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/direct.yml +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/direct_mem.yml @@ -1,4 +1,4 @@ -queue.mem: +pipeline.queue.mem: events: 4096 flush: min_events: 0 diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/small_spool.yml 
b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/small_spool.yml new file mode 100644 index 00000000..d5f99944 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/configs/pipeline/small_spool.yml @@ -0,0 +1,11 @@ +pipeline.queue.spool: + file: + path: ${test.tmpdir}/${test.name}-spool.dat + size: 1MiB + page_size: 4KiB + prealloc: true + write: + buffer_size: 16KiB + flush_timeout: 100ms + read: + flush_timeout: 0 diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/gen.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/gen.go index e0afdfd2..e3c7bd30 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/gen.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/gen.go @@ -1,7 +1,9 @@ package stress import ( + "bytes" "fmt" + "runtime/pprof" "sync" "time" @@ -25,7 +27,7 @@ var defaultGenerateConfig = generateConfig{ ACK: false, MaxEvents: 0, WaitClose: 0, - Watchdog: 1 * time.Second, + Watchdog: 2 * time.Second, } var publishModes = map[string]beat.PublishMode{ @@ -92,7 +94,7 @@ func generate( // start generator watchdog withWG(&wg, func() { last := uint64(0) - ticker := time.NewTicker(config.Watchdog) // todo: make ticker interval configurable + ticker := time.NewTicker(config.Watchdog) defer ticker.Stop() for { select { @@ -105,7 +107,11 @@ func generate( current := count.Load() if last == current { - err := fmt.Errorf("no progress in generators (last=%v, current=%v)", last, current) + // collect all active go-routines stack-traces: + var buf bytes.Buffer + pprof.Lookup("goroutine").WriteTo(&buf, 2) + + err := fmt.Errorf("no progress in generator %v (last=%v, current=%v):\n%s", id, last, current, buf.Bytes()) errors(err) } last = current diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/run.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/run.go index ce60355d..27125a9d 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/run.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/run.go @@ -1,6 +1,7 @@ package stress import ( + "fmt" "sync" "time" @@ -36,13 +37,13 @@ func RunTests( ) error { config := defaultConfig if err := cfg.Unpack(&config); err != nil { - return err + return fmt.Errorf("unpacking config failed: %v", err) } // reg := monitoring.NewRegistry() pipeline, err := pipeline.Load(info, nil, config.Pipeline, config.Output) if err != nil { - return err + return fmt.Errorf("loading pipeline failed: %v", err) } defer func() { logp.Info("Stop pipeline") diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/stress_test.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/stress_test.go index c019096a..93f30214 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/stress_test.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/stress/stress_test.go @@ -5,7 +5,10 @@ package stress_test import ( "flag" "fmt" + "io/ioutil" + "os" "path/filepath" + "strings" "testing" "time" @@ -15,6 +18,7 @@ import ( // import queue types "github.com/elastic/beats/libbeat/publisher/pipeline/stress" _ "github.com/elastic/beats/libbeat/publisher/queue/memqueue" + _ "github.com/elastic/beats/libbeat/publisher/queue/spool" ) // additional flags @@ -39,19 +43,46 @@ func TestPipeline(t *testing.T) { } if duration == 0 { - duration = 10 * time.Second + duration = 15 * 
time.Second } - // TODO: if verbose, enable logging - configTest(t, "gen", genConfigs, func(t *testing.T, gen string) { configTest(t, "pipeline", pipelineConfigs, func(t *testing.T, pipeline string) { configTest(t, "out", outConfigs, func(t *testing.T, out string) { + + if testing.Verbose() { + start := time.Now() + fmt.Printf("%v Start stress test %v\n", start.Format(time.RFC3339), t.Name()) + defer func() { + end := time.Now() + fmt.Printf("%v Finished stress test %v. Duration=%v\n", end.Format(time.RFC3339), t.Name(), end.Sub(start)) + }() + } + config, err := common.LoadFiles(gen, pipeline, out) if err != nil { t.Fatal(err) } + name := t.Name() + name = strings.Replace(name, "/", "-", -1) + name = strings.Replace(name, "\\", "-", -1) + + dir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + // Merge test info into config object + config.Merge(map[string]interface{}{ + "test": map[string]interface{}{ + "tmpdir": dir, + "name": name, + }, + }) + + // check if the pipeline configuration allows for parallel tests onErr := func(err error) { t.Error(err) } diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/broker.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/broker.go index 9a27a36e..4f02e38a 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/broker.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/broker.go @@ -5,6 +5,7 @@ import ( "time" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/publisher/queue" ) @@ -100,10 +101,9 @@ func NewBroker( minEvents = sz } - logger := defaultLogger b := &Broker{ done: make(chan struct{}), - logger: logger, + logger: logp.NewLogger("memqueue"), // broker API channels events: make(chan pushRequest, chanSize), diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/log.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/log.go index 4f3be49f..8d17b56d 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/log.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/log.go @@ -1,12 +1,6 @@ package memqueue -import ( - "github.com/elastic/beats/libbeat/logp" -) - type logger interface { Debug(...interface{}) Debugf(string, ...interface{}) } - -var defaultLogger logger = logp.NewLogger("memqueue") diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/queue_test.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/queue_test.go index 9da54611..cea01228 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/queue_test.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/memqueue/queue_test.go @@ -54,7 +54,7 @@ func TestProducerCancelRemovesEvents(t *testing.T) { } func makeTestQueue(sz, minEvents int, flushTimeout time.Duration) queuetest.QueueFactory { - return func() queue.Queue { + return func(_ *testing.T) queue.Queue { return NewBroker(Settings{ Events: sz, FlushMinEvents: minEvents, diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/producer_cancel.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/producer_cancel.go index 4340c122..8633201c 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/producer_cancel.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/producer_cancel.go @@ -25,7 +25,7 
@@ func TestProducerCancelRemovesEvents(t *testing.T, factory QueueFactory) { ) log := NewTestLogger(t) - b := factory() + b := factory(t) defer b.Close() log.Debug("create first producer") diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/queuetest.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/queuetest.go index 38c4b0c6..835da001 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/queuetest.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/queuetest/queuetest.go @@ -8,7 +8,8 @@ import ( "github.com/elastic/beats/libbeat/publisher/queue" ) -type QueueFactory func() queue.Queue +// QueueFactory is used to create a per test queue instance. +type QueueFactory func(t *testing.T) queue.Queue type workerFactory func(*sync.WaitGroup, interface{}, *TestLogger, queue.Queue) func() @@ -49,7 +50,7 @@ func TestSingleProducerConsumer( log := NewTestLogger(t) log.Debug("run test: ", test.name) - queue := factory() + queue := factory(t) defer func() { err := queue.Close() if err != nil { @@ -192,7 +193,7 @@ func TestMultiProducerConsumer( log := NewTestLogger(t) log.Debug("run test: ", test.name) - queue := factory() + queue := factory(t) defer func() { err := queue.Close() if err != nil { @@ -263,6 +264,7 @@ func makeProducer( ACK: ackCB, }) for i := 0; i < maxEvents; i++ { + log.Debug("publish event", i) producer.Publish(makeEvent(makeFields(i))) } @@ -288,6 +290,7 @@ func multiConsumer(numConsumers, maxEvents, batchSize int) workerFactory { consumers[i] = b.Consumer() } + log.Debugf("consumer: wait for %v events\n", maxEvents) events.Add(maxEvents) for _, c := range consumers { @@ -303,7 +306,10 @@ func multiConsumer(numConsumers, maxEvents, batchSize int) workerFactory { return } - for range batch.Events() { + collected := batch.Events() + log.Debug("consumer: process batch", len(collected)) + + for range collected { events.Done() } batch.ACK() diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/codec.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/codec.go new file mode 100644 index 00000000..051ae656 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/codec.go @@ -0,0 +1,179 @@ +package spool + +import ( + "bytes" + "fmt" + "time" + + "github.com/elastic/go-structform" + "github.com/elastic/go-structform/cborl" + "github.com/elastic/go-structform/gotype" + "github.com/elastic/go-structform/json" + "github.com/elastic/go-structform/ubjson" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/publisher" +) + +type encoder struct { + buf bytes.Buffer + folder *gotype.Iterator + codec codecID +} + +type decoder struct { + buf []byte + + json *json.Parser + cborl *cborl.Parser + ubjson *ubjson.Parser + unfolder *gotype.Unfolder +} + +type codecID uint8 + +type entry struct { + Timestamp int64 + Flags uint8 + Meta common.MapStr + Fields common.MapStr +} + +const ( + // Note: Never change order. Codec IDs must be not change in the future. Only + // adding new IDs is allowed. 
+ codecUnknown codecID = iota + codecJSON + codecUBJSON + codecCBORL + + flagGuaranteed uint8 = 1 << 0 +) + +func newEncoder(codec codecID) (*encoder, error) { + switch codec { + case codecJSON, codecCBORL, codecUBJSON: + break + default: + return nil, fmt.Errorf("unknown codec type '%v'", codec) + } + + e := &encoder{codec: codec} + e.reset() + return e, nil +} + +func (e *encoder) reset() { + e.folder = nil + + var visitor structform.Visitor + switch e.codec { + case codecJSON: + visitor = json.NewVisitor(&e.buf) + case codecCBORL: + visitor = cborl.NewVisitor(&e.buf) + case codecUBJSON: + visitor = ubjson.NewVisitor(&e.buf) + default: + panic("no codec configured") + } + + folder, err := gotype.NewIterator(visitor) + if err != nil { + panic(err) + } + + e.folder = folder +} + +func (e *encoder) encode(event *publisher.Event) ([]byte, error) { + e.buf.Reset() + e.buf.WriteByte(byte(e.codec)) + + var flags uint8 + if (event.Flags & publisher.GuaranteedSend) == publisher.GuaranteedSend { + flags = flagGuaranteed + } + + err := e.folder.Fold(entry{ + Timestamp: event.Content.Timestamp.UTC().UnixNano(), + Flags: flags, + Meta: event.Content.Meta, + Fields: event.Content.Fields, + }) + if err != nil { + e.reset() + return nil, err + } + + return e.buf.Bytes(), nil +} + +func newDecoder() *decoder { + d := &decoder{} + d.reset() + return d +} + +func (d *decoder) reset() { + unfolder, err := gotype.NewUnfolder(nil) + if err != nil { + panic(err) // can not happen + } + + d.unfolder = unfolder + d.json = json.NewParser(unfolder) + d.cborl = cborl.NewParser(unfolder) + d.ubjson = ubjson.NewParser(unfolder) +} + +// Buffer prepares the read buffer to hold the next event of n bytes. +func (d *decoder) Buffer(n int) []byte { + if cap(d.buf) > n { + d.buf = d.buf[:n] + } else { + d.buf = make([]byte, n) + } + return d.buf +} + +func (d *decoder) Decode() (publisher.Event, error) { + var ( + to entry + err error + codec = codecID(d.buf[0]) + contents = d.buf[1:] + ) + + d.unfolder.SetTarget(&to) + switch codec { + case codecJSON: + err = d.json.Parse(contents) + case codecUBJSON: + err = d.ubjson.Parse(contents) + case codecCBORL: + err = d.cborl.Parse(contents) + default: + return publisher.Event{}, fmt.Errorf("unknown codec type '%v'", codec) + } + + if err != nil { + d.reset() // reset parser just in case + return publisher.Event{}, err + } + + var flags publisher.EventFlags + if (to.Flags & flagGuaranteed) != 0 { + flags |= publisher.GuaranteedSend + } + + return publisher.Event{ + Flags: flags, + Content: beat.Event{ + Timestamp: time.Unix(0, to.Timestamp), + Fields: to.Fields, + Meta: to.Meta, + }, + }, nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/config.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/config.go new file mode 100644 index 00000000..48d56957 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/config.go @@ -0,0 +1,112 @@ +package spool + +import ( + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/dustin/go-humanize" + "github.com/joeshaw/multierror" + + "github.com/elastic/beats/libbeat/common/cfgtype" +) + +type config struct { + File pathConfig `config:"file"` + Write writeConfig `config:"write"` + Read readConfig `config:"read"` +} + +type pathConfig struct { + Path string `config:"path"` + Permissions os.FileMode `config:"permissions"` + MaxSize cfgtype.ByteSize `config:"size"` + PageSize cfgtype.ByteSize `config:"page_size"` + Prealloc bool `config:"prealloc"` +} + +type 
writeConfig struct { + BufferSize cfgtype.ByteSize `config:"buffer_size"` + FlushEvents time.Duration `config:"flush.events"` + FlushTimeout time.Duration `config:"flush.timeout"` + Codec codecID `config:"codec"` +} + +type readConfig struct { + FlushTimeout time.Duration `config:"flush.timeout"` +} + +func defaultConfig() config { + return config{ + File: pathConfig{ + Path: "", + Permissions: 0600, + MaxSize: 100 * humanize.MiByte, + PageSize: 4 * humanize.KiByte, + Prealloc: true, + }, + Write: writeConfig{ + BufferSize: 1 * humanize.MiByte, + FlushTimeout: 1 * time.Second, + FlushEvents: 16 * 1024, + Codec: codecCBORL, + }, + Read: readConfig{ + FlushTimeout: 0, + }, + } +} + +func (c *pathConfig) Validate() error { + var errs multierror.Errors + + if c.MaxSize < humanize.MiByte { + errs = append(errs, errors.New("max size must be larger 1MiB")) + } + + if !c.Permissions.IsRegular() { + errs = append(errs, fmt.Errorf("permissions %v are not regular file permissions", c.Permissions.String())) + } else { + m := c.Permissions.Perm() + if (m & 0400) == 0 { + errs = append(errs, errors.New("file must be readable by current user")) + } + if (m & 0200) == 0 { + errs = append(errs, errors.New("file must be writable by current user")) + } + } + + // TODO: good 'limit' on pageSize? + + if c.PageSize >= c.MaxSize { + errs = append(errs, fmt.Errorf("page_size (%v) must be less then size (%v)", c.PageSize, c.MaxSize)) + } + + return errs.Err() +} + +func (c *writeConfig) Validate() error { + return nil +} + +func (c *readConfig) Validate() error { + return nil +} + +func (c *codecID) Unpack(value string) error { + ids := map[string]codecID{ + "json": codecJSON, + "ubjson": codecUBJSON, + "cbor": codecCBORL, + } + + id, exists := ids[strings.ToLower(value)] + if !exists { + return fmt.Errorf("codec '%v' not available", value) + } + + *c = id + return nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/consume.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/consume.go new file mode 100644 index 00000000..5e543fc2 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/consume.go @@ -0,0 +1,122 @@ +package spool + +import ( + "errors" + "io" + + "github.com/elastic/beats/libbeat/common/atomic" + "github.com/elastic/beats/libbeat/publisher" + "github.com/elastic/beats/libbeat/publisher/queue" +) + +type consumer struct { + ctx *spoolCtx + closed atomic.Bool + done chan struct{} + + resp chan getResponse + requ chan getRequest +} + +type batch struct { + events []publisher.Event + state ackState + ack chan batchAckMsg +} + +type ackState uint8 + +const ( + batchActive ackState = iota + batchACK +) + +func newConsumer(ctx *spoolCtx, requ chan getRequest) *consumer { + return &consumer{ + ctx: ctx, + closed: atomic.MakeBool(false), + done: make(chan struct{}), + + // internal API + resp: make(chan getResponse), + requ: requ, + } +} + +func (c *consumer) Close() error { + if c.closed.Swap(true) { + return errors.New("already closed") + } + + close(c.done) + return nil +} + +func (c *consumer) Closed() bool { + return c.closed.Load() || c.ctx.Closed() +} + +func (c *consumer) Get(sz int) (queue.Batch, error) { + log := c.ctx.logger + + if c.Closed() { + return nil, io.EOF + } + + var resp getResponse + for { + select { + case <-c.ctx.Done(): + return nil, io.EOF + + case <-c.done: + return nil, io.EOF + + case c.requ <- getRequest{sz: sz, resp: c.resp}: + } + + resp = <-c.resp + err := resp.err + if err == nil { + break + } + + if 
err != errRetry { + log.Debug("consumer: error response:", err) + return nil, err + } + } + + log.Debug("consumer: received batch:", len(resp.buf)) + return &batch{ + events: resp.buf, + state: batchActive, + ack: resp.ack, + }, nil +} + +func (b *batch) Events() []publisher.Event { + if b.state != batchActive { + panic("Get Events from inactive batch") + } + return b.events +} + +func (b *batch) ACK() { + if b.state != batchActive { + switch b.state { + case batchACK: + panic("Can not acknowledge already acknowledged batch") + default: + panic("inactive batch") + } + } + + b.report() +} + +func (b *batch) report() { + if b.ack != nil { + b.ack <- batchAckMsg{} + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/inbroker.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/inbroker.go new file mode 100644 index 00000000..a3fcc8d3 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/inbroker.go @@ -0,0 +1,522 @@ +package spool + +import ( + "fmt" + "math" + "time" + + "github.com/elastic/beats/libbeat/publisher/queue" + "github.com/elastic/go-txfile/pq" +) + +type inBroker struct { + ctx *spoolCtx + eventer queue.Eventer + + // active state handler + state func(*inBroker) bool + + // api channels + events chan pushRequest + pubCancel chan producerCancelRequest + + // queue signaling + sigACK chan struct{} + sigFlush chan uint + ackDone chan struct{} + + // queue state + queue *pq.Queue + clientStates clientStates + + // Event contents, that still needs to be send to the queue. An event is + // pending if it has been serialized, but not added to the write buffer in + // full, as some I/O operation on the write buffer failed. + // => + // - keep pointer to yet unwritten event contents + // - do not accept any events if pending is not nil + // - wait for signal from reader/queue-gc to retry writing the pending + // events contents + pending []byte + + bufferedEvents uint // number of buffered events + + // flush settings + timer *timer + flushEvents uint + + enc *encoder +} + +const ( + inSigChannelSize = 3 + inEventChannelSize = 20 +) + +func newInBroker( + ctx *spoolCtx, + eventer queue.Eventer, + qu *pq.Queue, + codec codecID, + flushTimeout time.Duration, + flushEvents uint, +) (*inBroker, error) { + enc, err := newEncoder(codec) + if err != nil { + return nil, err + } + + b := &inBroker{ + ctx: ctx, + eventer: eventer, + state: (*inBroker).stateEmpty, + + // API + events: make(chan pushRequest, inEventChannelSize), + pubCancel: make(chan producerCancelRequest), + sigACK: make(chan struct{}, inSigChannelSize), + sigFlush: make(chan uint, inSigChannelSize), + ackDone: make(chan struct{}), + + // queue state + queue: qu, + clientStates: clientStates{}, + pending: nil, + bufferedEvents: 0, + + // internal + timer: newTimer(flushTimeout), + flushEvents: flushEvents, + enc: enc, + } + + ctx.Go(b.eventLoop) + ctx.Go(b.ackLoop) + return b, nil +} + +func (b *inBroker) Producer(cfg queue.ProducerConfig) queue.Producer { + return newProducer(b.ctx, b.pubCancel, b.events, cfg.ACK, cfg.OnDrop, cfg.DropOnCancel) +} + +// onFlush is run whenever the queue flushes it's write buffer. The callback is +// run in the same go-routine as the Flush was executed from. +// Only the (*inBroker).eventLoop triggers a flush. 
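
The consumer contract defined above (Get returning a batch, Events exposing the decoded publisher events, ACK releasing them) is easiest to see in a small usage sketch. This is illustrative only and the drainOnce helper is hypothetical; the real wiring between queue consumers and outputs lives in the pipeline's output controller.

package example

import (
	"github.com/elastic/beats/libbeat/publisher/queue"
)

// drainOnce is a hypothetical helper showing the consumer contract above:
// request up to sz events, hand them to an output, then ACK the batch so the
// spool can eventually release the pages backing those events.
func drainOnce(c queue.Consumer, sz int) error {
	batch, err := c.Get(sz) // blocks until events are available or the consumer is closed
	if err != nil {
		return err // io.EOF signals a closed consumer or spool context
	}
	for range batch.Events() {
		// forward each publisher.Event to the output here
	}
	batch.ACK() // must be called exactly once per batch
	return nil
}
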
+func (b *inBroker) onFlush(n uint) { + if n == 0 { + return + } + + if b.eventer != nil { + b.eventer.OnACK(int(n)) + } + b.ctx.logger.Debug("inbroker: flushed events:", n) + b.bufferedEvents -= n + b.sigFlush <- n +} + +// onACK is run whenever the queue releases ACKed events. The number of acked +// events and freed pages will is reported. +// Flush events are forward to the brokers eventloop, so to give the broker a +// chance to retry writing in case it has been blocked on a full queue. +func (b *inBroker) onACK(events, pages uint) { + if pages > 0 { + b.sigACK <- struct{}{} + } +} + +func (b *inBroker) ackLoop() { + log := b.ctx.logger + + log.Debug("start flush ack loop") + defer log.Debug("stop flush ack loop") + + for { + var n uint + select { + case <-b.ackDone: + return + + case n = <-b.sigFlush: + log.Debug("inbroker: receive flush", n) + states := b.clientStates.Pop(int(n)) + b.sendACKs(states) + } + } +} + +// sendACKs returns the range of ACKed/Flushed events to the individual +// producers ACK handlers. +func (b *inBroker) sendACKs(states []clientState) { + log := b.ctx.logger + + // reverse iteration on client states, so to report ranges of ACKed events + // only once. + N := len(states) + total := 0 + for i := N - 1; i != -1; i-- { + st := &states[i] + if st.state == nil { + continue + } + + count := (st.seq - st.state.lastACK) + if count == 0 || count > math.MaxUint32/2 { + // seq number comparison did underflow. This happens only if st.seq has + // already been acknowledged + // log.Debug("seq number already acked: ", st.seq) + + st.state = nil + continue + } + + log.Debugf("broker ACK events: count=%v, start-seq=%v, end-seq=%v\n", + count, + st.state.lastACK+1, + st.seq, + ) + + total += int(count) + if total > N { + panic(fmt.Sprintf("Too many events acked (expected=%v, total=%v)", + N, total, + )) + } + + // report range of ACKed events + st.state.ackCB(int(count)) + st.state.lastACK = st.seq + st.state = nil + } +} + +func (b *inBroker) eventLoop() { + log := b.ctx.logger + log.Info("spool input eventloop start") + defer log.Info("spool input eventloop stop") + + // notify ackLoop to stop only after eventLoop has finished (after last flush) + defer close(b.ackDone) + + for { + ok := b.state(b) + if !ok { + break + } + } + + // try to flush events/buffers on shutdown. + if b.bufferedEvents == 0 { + return + } + + // try to append pending events + for len(b.pending) > 0 { + n, err := b.queue.Writer().Write(b.pending) + b.pending = b.pending[n:] + if err != nil { + return + } + } + + // final flush + b.queue.Writer().Flush() +} + +// stateEmpty is the brokers active state if the write buffer is empty and the +// queue did not block on write or flush operations. +// ACKs from the output are ignored, as events can still be added to the write +// buffer. 
+// +// stateEmpty transitions: +// -> stateEmpty if serializing the event failed +// -> stateWithTimer if event is written to buffer without flush +// => start timer +// -> stateBlocked if queue did return an error on write (Flush failed) +func (b *inBroker) stateEmpty() bool { + log := b.ctx.logger + + select { + case <-b.ctx.Done(): + return false + + case req := <-b.events: + log.Debug("inbroker (stateEmpty): new event") + + buf, st, err := b.encodeEvent(&req) + if err != nil { + log.Debug(" inbroker (stateEmpty): encode failed") + b.respondDrop(&req) + break + } + + // write/flush failed -> block until space in file becomes available + err = b.addEvent(buf, st) + if err != nil { + log.Debug(" inbroker: append failed, blocking") + b.state = (*inBroker).stateBlocked + break + } + + // start flush timer + if b.flushEvents > 0 && b.bufferedEvents == b.flushEvents { + log.Debug(" inbroker (stateEmpty): flush events") + err := b.flushBuffer() + if err != nil { + log.Debug(" inbroker (stateEmpty): flush failed, blocking") + b.state = (*inBroker).stateBlocked + } + break + + } else if b.bufferedEvents > 0 { + log.Debug(" inbroker (stateEmpty): start flush timer") + b.timer.Start() + b.state = (*inBroker).stateWithTimer + } + + case req := <-b.pubCancel: + b.handleCancel(&req) + + case <-b.sigACK: + // ignore ACKs as long as we can write without blocking + } + + return true +} + +// stateWithTimer is the brokers active state, if the write buffer is not empty. +// The flush timer is enabled as long as the broker is in this state. +// ACKs from the output are ignored, as events can still be added to the write +// buffer. +// +// stateWithTimer transitions: +// -> stateWithTimer +// - if serializing failed +// - if event is added to buffer, without flush +// - flush, but more events are available in the buffer (might reset timer) +// -> stateEmpty if all events have been flushed +// -> stateBlocked if queue did return an error on write/flush (Flush failed) +func (b *inBroker) stateWithTimer() bool { + log := b.ctx.logger + + select { + case <-b.ctx.Done(): + return false + + case req := <-b.events: + log.Debug("inbroker (stateWithTimer): new event") + + buf, st, err := b.encodeEvent(&req) + if err != nil { + log.Debug(" inbroker (stateWithTimer): encode failed") + b.respondDrop(&req) + break + } + + count := b.bufferedEvents + err = b.addEvent(buf, st) + if err != nil { + log.Debug(" inbroker (stateWithTimer): append failed, blocking") + b.state = (*inBroker).stateBlocked + break + } + + flushed := b.bufferedEvents < count + if !flushed && b.flushEvents > 0 && b.bufferedEvents == b.flushEvents { + err := b.flushBuffer() + if err != nil { + log.Debug(" inbroker (stateWithTimer): flush failed, blocking") + b.state = (*inBroker).stateBlocked + break + } + + flushed = true + } + + if !flushed { + break + } + + // write buffer has been flushed, reset timer and broker state + log.Debug(" inbroker (stateWithTimer): buffer flushed") + if b.bufferedEvents == 0 { + b.timer.Stop(false) + b.state = (*inBroker).stateEmpty + } else { + // restart timer, as new event is most likely the only event buffered + // -> reduce IO + log.Debug(" inbroker (stateWithTimer): start flush timer") + b.timer.Restart() + } + + case req := <-b.pubCancel: + b.handleCancel(&req) + + case <-b.timer.C: + log.Debug("inbroker (stateWithTimer): flush timeout") + + b.timer.Stop(true) + + err := b.flushBuffer() + if err != nil { + log.Debug(" inbroker (stateWithTimer): flush failed, blocking") + b.state = (*inBroker).stateBlocked + break 
+ } + + log.Debug(" inbroker (stateWithTimer): flush succeeded") + + if b.bufferedEvents > 0 { + // flush did not push all events? Restart timer. + log.Debug(" inbroker (stateWithTimer): start flush timer") + b.timer.Start() + break + } + + b.state = (*inBroker).stateEmpty + + case <-b.sigACK: + // ignore ACKs as long as we can write without blocking + } + + return true +} + +// stateBlocked is the brokers active state if the write buffer can not accept +// any new events. +// The broker will wait for an ACK signal from the outputs and retry flushing, +// in the hope of enough memory being available to flush the buffers. +// If flush did succeed, we try to add the pending event. +// For the time the broker is in this state, no events from any producers will +// be accepted. Thusly all producers will block. Closing a producer, unblocks +// the producer. The producers event (after close) might be processed or +// ignored in the future. +// +// stateBlocked transitions: +// -> stateEmpty if flush was successfull and write buffer is empty +// -> stateWithTimer if flush was successfull, but we still have some pending events +// -> stateBlocked if flush failed (still not enough space) +func (b *inBroker) stateBlocked() bool { + log := b.ctx.logger + + select { + case <-b.ctx.Done(): + return false + + case req := <-b.pubCancel: + b.handleCancel(&req) + + case <-b.sigACK: + // TODO: + // Have write buffer report number of unallocated pages and take number + // of freed pages into account before retrying. This way no transaction + // must be created if it's already clear the flush will not succeed. + + log.Debug("inbroker (stateBlocked): ACK event from queue -> try to unblock") + + err := b.flushBuffer() + if err != nil { + log.Debug(" inbroker (stateBlocked): flush failed, blocking") + break + } + + if len(b.pending) > 0 { + tmp := b.pending + b.pending = nil + err := b.writeEvent(tmp) + if err != nil || len(b.pending) > 0 { + log.Debug("writing pending event failed: ", err) + break + } + } + + if b.bufferedEvents == 0 { + b.state = (*inBroker).stateEmpty + break + } + + b.timer.Start() + log.Debug(" inbroker (stateBlocked): start flush timer") + b.state = (*inBroker).stateWithTimer + } + + return true +} + +func (b *inBroker) handleCancel(req *producerCancelRequest) { + // mark state as cancelled, so to not accept any new events + // from the state object. 
+ if st := req.state; st != nil { + st.cancelled = true + } + + if req.resp != nil { + req.resp <- producerCancelResponse{removed: 0} + } +} + +func (b *inBroker) encodeEvent(req *pushRequest) ([]byte, clientState, error) { + buf, err := b.enc.encode(&req.event) + if err != nil { + return nil, clientState{}, err + } + + if req.state == nil { + return buf, clientState{}, nil + } + + return buf, clientState{seq: req.seq, state: req.state}, nil +} + +func (b *inBroker) respondDrop(req *pushRequest) { + if req.state != nil { + if cb := req.state.dropCB; cb != nil { + cb(req.event.Content) + } + } +} + +func (b *inBroker) addEvent(buf []byte, st clientState) error { + log := b.ctx.logger + + b.bufferedEvents++ + log.Debug(" inbroker: add event of size", len(buf), b.bufferedEvents) + + count := b.clientStates.Add(st) + log.Debug(" add event -> active:", count) + + err := b.writeEvent(buf) + log.Debug(" inbroker write ->", err, b.bufferedEvents) + + return err +} + +func (b *inBroker) writeEvent(buf []byte) error { + log := b.ctx.logger + + // append event to queue + queueWriter := b.queue.Writer() + n, err := queueWriter.Write(buf) + buf = buf[n:] + if len(buf) > 0 { + b.pending = buf + } else if err == nil { + log.Debug("writer: finalize event in buffer") + err = queueWriter.Next() + } + + if err != nil { + log := b.ctx.logger + log.Debugf("appending event content to write buffer failed with %v", err) + } + return err +} + +func (b *inBroker) flushBuffer() error { + err := b.queue.Writer().Flush() + if err != nil { + log := b.ctx.logger + log.Debugf("spool flush failed with: %v", err) + } + return err +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/internal_api.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/internal_api.go new file mode 100644 index 00000000..28a1a99a --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/internal_api.go @@ -0,0 +1,44 @@ +package spool + +import ( + "github.com/elastic/beats/libbeat/publisher" +) + +// producer -> broker API +type ( + pushRequest struct { + event publisher.Event + seq uint32 + state *produceState + } + + producerCancelRequest struct { + state *produceState + resp chan producerCancelResponse + } + + producerCancelResponse struct { + removed int + } +) + +// consumer -> broker API + +type ( + getRequest struct { + sz int // request sz events from the broker + resp chan getResponse // channel to send response to + } + + getResponse struct { + ack chan batchAckMsg + err error + buf []publisher.Event + } + + batchAckMsg struct{} + + batchCancelRequest struct { + // ack *ackChan + } +) diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/log.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/log.go new file mode 100644 index 00000000..5dd17048 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/log.go @@ -0,0 +1,54 @@ +package spool + +import ( + "fmt" + "sync" + + "github.com/elastic/beats/libbeat/logp" +) + +type logger interface { + Debug(...interface{}) + Debugf(string, ...interface{}) + + Info(...interface{}) + Infof(string, ...interface{}) + + Error(...interface{}) + Errorf(string, ...interface{}) +} + +var _defaultLogger struct { + singleton logger + init sync.Once +} + +func defaultLogger() logger { + _defaultLogger.init.Do(func() { + _defaultLogger.singleton = logp.NewLogger("spool") + }) + return _defaultLogger.singleton +} + +// func defaultLogger() logger { return (*outLogger)(nil) } + +type 
outLogger struct{} + +func (l *outLogger) Debug(vs ...interface{}) { l.report("Debug", vs) } +func (l *outLogger) Debugf(fmt string, vs ...interface{}) { l.reportf("Debug: ", fmt, vs) } + +func (l *outLogger) Info(vs ...interface{}) { l.report("Info", vs) } +func (l *outLogger) Infof(fmt string, vs ...interface{}) { l.reportf("Info", fmt, vs) } + +func (l *outLogger) Error(vs ...interface{}) { l.report("Error", vs) } +func (l *outLogger) Errorf(fmt string, vs ...interface{}) { l.reportf("Error", fmt, vs) } + +func (l *outLogger) report(level string, vs []interface{}) { + args := append([]interface{}{level, ":"}, vs...) + fmt.Println(args...) +} + +func (*outLogger) reportf(level string, str string, vs []interface{}) { + str = level + ": " + str + fmt.Printf(str, vs...) +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/module.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/module.go new file mode 100644 index 00000000..78930f8b --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/module.go @@ -0,0 +1,48 @@ +package spool + +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/paths" + "github.com/elastic/beats/libbeat/publisher/queue" + "github.com/elastic/go-txfile" +) + +func init() { + queue.RegisterType("spool", create) +} + +func create(eventer queue.Eventer, cfg *common.Config) (queue.Queue, error) { + cfgwarn.Beta("Spooling to disk is beta") + + config := defaultConfig() + if err := cfg.Unpack(&config); err != nil { + return nil, err + } + + path := config.File.Path + if path == "" { + path = paths.Resolve(paths.Data, "spool.dat") + } + + flushEvents := uint(0) + if count := config.Write.FlushEvents; count > 0 { + flushEvents = uint(count) + } + + return NewSpool(defaultLogger(), path, Settings{ + Eventer: eventer, + Mode: config.File.Permissions, + WriteBuffer: uint(config.Write.BufferSize), + WriteFlushTimeout: config.Write.FlushTimeout, + WriteFlushEvents: flushEvents, + ReadFlushTimeout: config.Read.FlushTimeout, + Codec: config.Write.Codec, + File: txfile.Options{ + MaxSize: uint64(config.File.MaxSize), + PageSize: uint32(config.File.PageSize), + Prealloc: config.File.Prealloc, + Readonly: false, + }, + }) +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/outbroker.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/outbroker.go new file mode 100644 index 00000000..cc4d74fe --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/outbroker.go @@ -0,0 +1,493 @@ +package spool + +import ( + "errors" + "sync" + "time" + + "github.com/elastic/beats/libbeat/publisher" + "github.com/elastic/go-txfile/pq" +) + +type outBroker struct { + ctx *spoolCtx + state func(*outBroker) bool + + // internal API + sigFlushed chan uint + get chan getRequest + + // ack signaling + pendingACKs chanList // list of pending batches to be forwarded to the ackLoop + scheduledACKs chan chanList // shared channel for forwarding batches to ackLoop + schedACKs chan chanList // active ack forwarding channel, as used by broker (nil if pendingACKs is empty) + + // queue state + queue *pq.Queue + available uint // number of available events. 
getRequests are only accepted if available > 0 + events []publisher.Event + required int + total int + active getRequest + + // internal + timer *timer + dec *decoder +} + +type chanList struct { + head *ackChan + tail *ackChan +} + +type ackChan struct { + next *ackChan + ch chan batchAckMsg + total int // total number of events to ACK with this batch +} + +const ( + // maximum number of events if getRequest size is <0 + maxEvents = 2048 + + outSigChannelSize = 3 +) + +var ackChanPool = sync.Pool{ + New: func() interface{} { + return &ackChan{ + ch: make(chan batchAckMsg, 1), + } + }, +} + +var errRetry = errors.New("retry") + +func newOutBroker(ctx *spoolCtx, qu *pq.Queue, flushTimeout time.Duration) (*outBroker, error) { + b := &outBroker{ + ctx: ctx, + state: nil, + + // API + sigFlushed: make(chan uint, outSigChannelSize), + get: make(chan getRequest), + + // ack signaling + pendingACKs: chanList{}, + scheduledACKs: make(chan chanList), + schedACKs: nil, + + // queue state + queue: qu, + available: qu.Reader().Available(), + events: nil, + required: 0, + total: 0, + active: getRequest{}, + + // internal + timer: newTimer(flushTimeout), + dec: newDecoder(), + } + + b.initState() + ctx.Go(b.eventLoop) + ctx.Go(b.ackLoop) + return b, nil +} + +func (b *outBroker) Consumer() *consumer { + return newConsumer(b.ctx, b.get) +} + +// onFlush is run whenever the queue flushes it's write buffer. The callback is +// run in the same go-routine as the Flush was executed from. +func (b *outBroker) onFlush(n uint) { + if n > 0 { + select { + case <-b.ctx.Done(): // ignore flush messages on shutdown + + case b.sigFlushed <- n: + + } + } +} + +// onACK is run whenever the queue releases ACKed events. The number of acked +// events and freed pages will is reported. +func (b *outBroker) onACK(events, pages uint) { +} + +func (b *outBroker) ackLoop() { + log := b.ctx.logger + + log.Debug("start output ack loop") + defer log.Debug("stop output ack loop") + + var ackList chanList // list of pending acks + for { + select { + case <-b.ctx.Done(): + return + + case lst := <-b.scheduledACKs: + ackList.concat(&lst) + + case <-ackList.channel(): + ackCh := ackList.pop() + + for { + log.Debugf("receive ACK of %v events\n", ackCh.total) + err := b.queue.ACK(uint(ackCh.total)) + if err != nil { + log.Debug("ack failed with:", err) + time.Sleep(1 * time.Second) + continue + } + + log.Debug("ACK succeeded") + break + } + + releaseACKChan(ackCh) + } + } +} + +func (b *outBroker) eventLoop() { + for { + ok := b.state(b) + if !ok { + break + } + } +} + +// initState resets the brokers state to the initial state and clears +// buffers/points from last state updates. +func (b *outBroker) initState() { + b.events = nil + b.required = 0 + b.total = 0 + b.active = getRequest{} + if b.available == 0 { + b.state = (*outBroker).stateWaitEvents + } else { + b.state = (*outBroker).stateActive + } +} + +// stateWaitEvents is the brokers state if the queue is empty. +// The broker waits for new events and does not accept and consumer requests. 
+// +// stateWaitEvents transitions: +// -> stateActive: if a queue flush signal has been received +func (b *outBroker) stateWaitEvents() bool { + log := b.ctx.logger + log.Debug("outbroker (stateWaitEvents): waiting for new events") + + select { + case <-b.ctx.Done(): + return false + + case n := <-b.sigFlushed: + log.Debug("outbroker (stateWaitEvents): flush event", n) + b.available += n + b.state = (*outBroker).stateActive + + case b.schedACKs <- b.pendingACKs: + b.handleACKsScheduled() + } + + return true +} + +// stateActive is the brokers initial state, waiting for consumer to request +// new events. +// Flush signals from the input are ignored. +// +// stateActive transitions: +// -> stateActive: if consumer event get request has been fulfilled (N events +// copied or 0 timeout) +// -> stateWaitEvents: if queue is empty after read +// -> stateWithTimer: if only small number of events are available and flush +// timeout is configured. +func (b *outBroker) stateActive() bool { + log := b.ctx.logger + + select { + case <-b.ctx.Done(): + return false + + case n := <-b.sigFlushed: + b.available += n + + case b.schedACKs <- b.pendingACKs: + b.handleACKsScheduled() + + case req := <-b.get: + var events []publisher.Event + required := maxEvents + if req.sz > 0 { + events = make([]publisher.Event, 0, req.sz) + required = req.sz + } + + log.Debug("outbroker (stateActive): get request", required) + + var err error + var total int + events, total, err = b.collectEvents(events, required) + required -= len(events) + b.available -= uint(total) + + log.Debug(" outbroker (stateActive): events collected", len(events), total, err) + + // forward error to consumer and continue with current state + if err != nil { + log.Debug(" outbroker (stateActive): return error") + b.returnError(req, events, total, err) + b.initState() + break + } + + // enough events? Return + if required == 0 || (len(events) > 0 && b.timer.Zero()) { + log.Debug(" outbroker (stateActive): return events") + b.returnEvents(req, events, total) + b.initState() // prepare for next request + break + } + + // If no events have been decoded, signal an error to the consumer to retry. + // Meanwhile reinitialize state, waiting for more events. + if len(events) == 0 { + b.returnError(req, nil, total, errRetry) + b.initState() + break + } + + // not enough events -> start timer and try to collect more + b.events = events + b.required = required + b.active = req + b.total = total + b.timer.Start() + log.Debug(" outbroker (stateActive): switch to stateWithTimer") + b.state = (*outBroker).stateWithTimer + } + + return true +} + +// stateWithTimer is the brokers active state, if the events read is less then +// the minimal number of requested events. +// Once the timer triggers or more events have been consumed, the get response +// will be send to the consumer. 
+// +// stateWithTimer transitions: +// -> stateWithTimer: if some, but not enough events have been read from the +// queue +// -> stateActive: if the timer triggers or enough events have been returned +// to the consumer +func (b *outBroker) stateWithTimer() bool { + log := b.ctx.logger + + select { + case <-b.ctx.Done(): + return false + + case b.schedACKs <- b.pendingACKs: + b.handleACKsScheduled() + + case <-b.timer.C: + b.timer.Stop(true) + log.Debug("outbroker (stateWithTimer): flush timer") + b.returnEvents(b.active, b.events, b.total) + + log.Debug("outbroker (stateWithTimer): switch to stateActive") + b.initState() + + case n := <-b.sigFlushed: + // yay, more events \o/ + + b.available += n + + L := len(b.events) + required := b.required + events, total, err := b.collectEvents(b.events, required) + b.available -= uint(total) + collected := len(events) - L + required -= collected + total += b.total + + log.Debug(" outbroker (stateWithTimer): events collected", len(events), total, err) + + // continue with stateWithTimer? + if err == nil && required > 0 { + b.events = events + b.total = total + b.required = required + log.Debug(" outbroker (stateWithTimer): switch to stateWithTimer") + break + } + + // done serving consumer request + b.timer.Stop(false) + if err != nil { + log.Debug(" outbroker (stateWithTimer): return error") + b.returnError(b.active, events, total, err) + } else { + log.Debug(" outbroker (stateWithTimer): return events") + b.returnEvents(b.active, events, total) + } + + log.Debug("outbroker (stateWithTimer): switch to stateActive") + b.initState() + } + + return true +} + +func (b *outBroker) handleACKsScheduled() { + b.schedACKs = nil + b.pendingACKs = chanList{} +} + +func (b *outBroker) newACKChan(total int) *ackChan { + ackCh := newACKChan(total) + b.pendingACKs.append(ackCh) + b.schedACKs = b.scheduledACKs + return ackCh +} + +// signalDrop forwards an ACK of total events to the ackloop. +// The batch is marked as ACKed by the output. +// signalDrop is used to free space in the queue, in case +// a continuous set of events has been dropped due to decoding errors. 
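
Both brokers drive their event loops the same way: the active state is stored as a method value of type func(*broker) bool, each handler performs one select and returns whether the loop should continue, and a transition is just an assignment to b.state. A minimal standalone sketch of that pattern (names are illustrative, not taken from this diff):

package main

import "fmt"

// machine mirrors the broker pattern: the current state is a method value and
// each state both does one unit of work and chooses the next state.
type machine struct {
	state   func(*machine) bool
	pending int
}

func (m *machine) loop() {
	for m.state(m) {
	}
}

func (m *machine) stateIdle() bool {
	if m.pending == 0 {
		return false // nothing to do; stop the loop
	}
	m.state = (*machine).stateDraining // transition, like b.state = (*outBroker).stateWithTimer
	return true
}

func (m *machine) stateDraining() bool {
	m.pending--
	fmt.Println("drained one item,", m.pending, "left")
	if m.pending == 0 {
		m.state = (*machine).stateIdle
	}
	return true
}

func main() {
	m := &machine{state: (*machine).stateIdle, pending: 3}
	m.loop()
}
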
+func (b *outBroker) signalDrop(total int) { + ackCh := b.newACKChan(total) + ackCh.ch <- batchAckMsg{} +} + +func (b *outBroker) returnEvents(req getRequest, events []publisher.Event, total int) { + ackCh := b.newACKChan(total) + req.resp <- getResponse{ + ack: ackCh.ch, + err: nil, + buf: events, + } +} + +func (b *outBroker) returnError( + req getRequest, + events []publisher.Event, + total int, + err error, +) { + var ch chan batchAckMsg + + if len(events) == 0 && total > 0 { + b.signalDrop(total) + } + if len(events) > 0 { + ackCh := b.newACKChan(total) + ch = ackCh.ch + } + + req.resp <- getResponse{ + ack: ch, + err: err, + buf: events, + } +} + +func (b *outBroker) collectEvents( + events []publisher.Event, + N int, +) ([]publisher.Event, int, error) { + log := b.ctx.logger + reader := b.queue.Reader() + + count := 0 + for N > 0 { + sz, err := reader.Next() + if sz <= 0 || err != nil { + return events, count, err + } + + count++ + + buf := b.dec.Buffer(sz) + _, err = reader.Read(buf) + if err != nil { + return events, count, err + } + + event, err := b.dec.Decode() + if err != nil { + log.Debug("Failed to decode event from spool: %v", err) + continue + } + + events = append(events, event) + N-- + } + + return events, count, nil +} + +func newACKChan(total int) *ackChan { + c := ackChanPool.Get().(*ackChan) + c.next = nil + c.total = total + return c +} + +func releaseACKChan(c *ackChan) { + c.next = nil + ackChanPool.Put(c) +} + +func (l *chanList) append(ch *ackChan) { + if l.head == nil { + l.head = ch + } else { + l.tail.next = ch + } + l.tail = ch +} + +func (l *chanList) concat(other *chanList) { + if other.head == nil { + return + } + + if l.head == nil { + *l = *other + return + } + + l.tail.next = other.head + l.tail = other.tail +} + +func (l *chanList) channel() chan batchAckMsg { + if l.head == nil { + return nil + } + return l.head.ch +} + +func (l *chanList) pop() *ackChan { + ch := l.head + if ch != nil { + l.head = ch.next + if l.head == nil { + l.tail = nil + } + } + + ch.next = nil + return ch +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/produce.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/produce.go new file mode 100644 index 00000000..839dac06 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/produce.go @@ -0,0 +1,190 @@ +package spool + +import ( + "sync" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common/atomic" + "github.com/elastic/beats/libbeat/publisher" + "github.com/elastic/beats/libbeat/publisher/queue" +) + +// forgetfullProducer forwards event to the inBroker. The forgetfullProducer +// provides no event ACK handling and no callbacks. +type forgetfullProducer struct { + openState openState +} + +// ackProducer forwards events to the inBroker. The ackBroker provides +// functionality for ACK/Drop callbacks. +type ackProducer struct { + cancel bool + seq uint32 + state produceState + openState openState + pubCancel chan producerCancelRequest +} + +// openState tracks the producer->inBroker connection state. +type openState struct { + ctx *spoolCtx + isOpen atomic.Bool + done chan struct{} + events chan pushRequest +} + +// produceState holds the ackProducer internal callback and event ACK state +// shared between ackProducer instances and inBroker instances. +// The state is used to compute the number of per producer ACKed events and +// executing locally configured callbacks. 
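
The producer side described above comes in two flavors: a producer created with an ACK callback gets per-producer ACK accounting, while one created without callbacks is fire-and-forget. A hedged usage sketch of that API, assuming only the queue.ProducerConfig fields already referenced in this diff (ACK, OnDrop, DropOnCancel); the helper name is illustrative:

package example

import (
	"fmt"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/publisher"
	"github.com/elastic/beats/libbeat/publisher/queue"
)

// publishWithACKs is an illustrative helper: configuring an ACK callback turns
// the spool producer into an ackProducer, so the broker reports back how many
// of this producer's events were flushed to disk and acknowledged.
func publishWithACKs(q queue.Queue) {
	p := q.Producer(queue.ProducerConfig{
		ACK:    func(count int) { fmt.Println("acked", count, "events") },
		OnDrop: func(ev beat.Event) { fmt.Println("dropped event") },
		// DropOnCancel: true would remove still-pending events on Cancel.
	})

	ev := publisher.Event{Content: beat.Event{Fields: common.MapStr{"message": "hello"}}}
	if !p.Publish(ev) { // blocks until accepted; TryPublish would drop instead
		fmt.Println("queue closed before the event was accepted")
	}
	p.Cancel() // close this producer; returns the number of removed events
}
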
+type produceState struct { + ackCB ackHandler + dropCB func(beat.Event) + cancelled bool + lastACK uint32 +} + +type ackHandler func(count int) + +type clientStates struct { + mux sync.Mutex + clients []clientState +} + +type clientState struct { + seq uint32 // event sequence number + state *produceState // the producer it's state used to compute and signal the ACK count +} + +func newProducer( + ctx *spoolCtx, + pubCancel chan producerCancelRequest, + events chan pushRequest, + ackCB ackHandler, + dropCB func(beat.Event), + dropOnCancel bool, +) queue.Producer { + openState := openState{ + ctx: ctx, + isOpen: atomic.MakeBool(true), + done: make(chan struct{}), + events: events, + } + + if ackCB == nil { + return &forgetfullProducer{openState: openState} + } + + p := &ackProducer{ + seq: 1, + cancel: dropOnCancel, + openState: openState, + pubCancel: pubCancel, + } + p.state.ackCB = ackCB + p.state.dropCB = dropCB + return p +} + +func (p *forgetfullProducer) Publish(event publisher.Event) bool { + return p.openState.publish(p.makeRequest(event)) +} + +func (p *forgetfullProducer) TryPublish(event publisher.Event) bool { + return p.openState.tryPublish(p.makeRequest(event)) +} + +func (p *forgetfullProducer) makeRequest(event publisher.Event) pushRequest { + return pushRequest{event: event} +} + +func (p *forgetfullProducer) Cancel() int { + p.openState.Close() + return 0 +} + +func (p *ackProducer) Publish(event publisher.Event) bool { + return p.updSeq(p.openState.publish(p.makeRequest(event))) +} + +func (p *ackProducer) TryPublish(event publisher.Event) bool { + return p.updSeq(p.openState.tryPublish(p.makeRequest(event))) +} + +func (p *ackProducer) Cancel() int { + p.openState.Close() + + if p.cancel { + ch := make(chan producerCancelResponse) + p.pubCancel <- producerCancelRequest{ + state: &p.state, + resp: ch, + } + + // wait for cancel to being processed + resp := <-ch + return resp.removed + } + return 0 +} + +func (p *ackProducer) updSeq(ok bool) bool { + if ok { + p.seq++ + } + return ok +} + +func (p *ackProducer) makeRequest(event publisher.Event) pushRequest { + return pushRequest{event: event, seq: p.seq, state: &p.state} +} + +func (st *openState) Close() { + st.isOpen.Store(false) + close(st.done) +} + +func (st *openState) publish(req pushRequest) bool { + select { + case st.events <- req: + return true + case <-st.done: + st.events = nil + return false + } +} + +func (st *openState) tryPublish(req pushRequest) bool { + select { + case st.events <- req: + return true + case <-st.done: + st.events = nil + return false + default: + log := st.ctx.logger + log.Debugf("Dropping event, queue is blocked (seq=%v) ", req.seq) + return false + } +} + +func (s *clientStates) Add(st clientState) int { + s.mux.Lock() + s.clients = append(s.clients, st) + l := len(s.clients) + s.mux.Unlock() + return l +} + +func (s *clientStates) RemoveLast() { + s.mux.Lock() + s.clients = s.clients[:len(s.clients)-1] + s.mux.Unlock() +} + +func (s *clientStates) Pop(n int) (states []clientState) { + s.mux.Lock() + states, s.clients = s.clients[:n], s.clients[n:] + s.mux.Unlock() + return states +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/spool.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/spool.go new file mode 100644 index 00000000..222b9034 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/spool.go @@ -0,0 +1,231 @@ +package spool + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/pkg/errors" + + 
"github.com/elastic/beats/libbeat/common/atomic" + "github.com/elastic/beats/libbeat/publisher/queue" + "github.com/elastic/go-txfile" + "github.com/elastic/go-txfile/pq" +) + +// Spool implements an on-disk queue.Queue. +type Spool struct { + // producer/input support + inCtx *spoolCtx + inBroker *inBroker + + // consumer/output support + outCtx *spoolCtx + outBroker *outBroker + + queue *pq.Queue + file *txfile.File +} + +type spoolCtx struct { + logger logger + wg sync.WaitGroup + active atomic.Bool + done chan struct{} +} + +// Settings configure a new spool to be created. +type Settings struct { + Mode os.FileMode + + File txfile.Options + + // Queue write buffer size. If a single event is bigger then the + // write-buffer, the write-buffer will grow. In this case will the write + // buffer be flushed and reset to its original size. + WriteBuffer uint + + Eventer queue.Eventer + + WriteFlushTimeout time.Duration + WriteFlushEvents uint + ReadFlushTimeout time.Duration + + Codec codecID +} + +const minInFlushTimeout = 100 * time.Millisecond +const minOutFlushTimeout = 0 * time.Millisecond + +// NewSpool creates and initializes a new file based queue. +func NewSpool(logger logger, path string, settings Settings) (*Spool, error) { + mode := settings.Mode + if mode == 0 { + mode = os.ModePerm + } + + ok := false + inCtx := newSpoolCtx(logger) + outCtx := newSpoolCtx(logger) + defer ifNotOK(&ok, inCtx.Close) + defer ifNotOK(&ok, outCtx.Close) + + if info, err := os.Lstat(path); err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + perm := info.Mode().Perm() + cfgPerm := settings.Mode.Perm() + + // check if file has permissions set, that must not be set via config + if (perm | cfgPerm) != cfgPerm { + return nil, fmt.Errorf("file permissions must be more strict (required permissions: %v, actual permissions: %v)", + cfgPerm, perm) + } + } + + f, err := txfile.Open(path, mode, settings.File) + if err != nil { + return nil, errors.Wrapf(err, "spool queue: failed to open file at path '%s'", path) + } + defer ifNotOK(&ok, ignoreErr(f.Close)) + + queueDelegate, err := pq.NewStandaloneDelegate(f) + if err != nil { + return nil, err + } + + spool := &Spool{ + inCtx: inCtx, + outCtx: outCtx, + } + + queue, err := pq.New(queueDelegate, pq.Settings{ + WriteBuffer: settings.WriteBuffer, + Flushed: spool.onFlush, + ACKed: spool.onACK, + }) + if err != nil { + return nil, err + } + defer ifNotOK(&ok, ignoreErr(queue.Close)) + + inFlushTimeout := settings.WriteFlushTimeout + if inFlushTimeout < minInFlushTimeout { + inFlushTimeout = minInFlushTimeout + } + inBroker, err := newInBroker(inCtx, settings.Eventer, queue, settings.Codec, + inFlushTimeout, settings.WriteFlushEvents) + if err != nil { + return nil, err + } + + outFlushTimeout := settings.ReadFlushTimeout + if outFlushTimeout < minOutFlushTimeout { + outFlushTimeout = minOutFlushTimeout + } + outBroker, err := newOutBroker(outCtx, queue, outFlushTimeout) + if err != nil { + return nil, err + } + + ok = true + spool.queue = queue + spool.inBroker = inBroker + spool.outBroker = outBroker + spool.file = f + return spool, nil +} + +// Close shuts down the queue and closes the used file. +func (s *Spool) Close() error { + // stop all workers (waits for all workers to be finished) + s.outCtx.Close() + s.inCtx.Close() + + // close queue (potentially flushing write buffer) + err := s.queue.Close() + + // finally unmap and close file + s.file.Close() + + return err +} + +// BufferConfig returns the queue initial buffer settings. 
+func (s *Spool) BufferConfig() queue.BufferConfig { + return queue.BufferConfig{Events: -1} +} + +// Producer creates a new queue producer for publishing events. +func (s *Spool) Producer(cfg queue.ProducerConfig) queue.Producer { + return s.inBroker.Producer(cfg) +} + +// Consumer creates a new queue consumer for consuming and acking events. +func (s *Spool) Consumer() queue.Consumer { + return s.outBroker.Consumer() +} + +// onFlush is run whenever the queue signals it's write buffer being flushed. +// Flush events are forwarded to all workers. +// The onFlush callback is directly called by the queue writer (same go-routine) +// on Write or Flush operations. +func (s *Spool) onFlush(n uint) { + s.inBroker.onFlush(n) + s.outBroker.onFlush(n) +} + +// onACK is run whenever the queue signals events being acked and removed from +// the queue. +// ACK events are forwarded to all workers. +func (s *Spool) onACK(events, pages uint) { + s.inBroker.onACK(events, pages) +} + +func newSpoolCtx(logger logger) *spoolCtx { + return &spoolCtx{ + logger: logger, + active: atomic.MakeBool(true), + done: make(chan struct{}), + } +} + +func (ctx *spoolCtx) Close() { + if ctx.active.CAS(true, false) { + close(ctx.done) + ctx.wg.Wait() + } +} + +func (ctx *spoolCtx) Done() <-chan struct{} { + return ctx.done +} + +func (ctx *spoolCtx) Open() bool { + return ctx.active.Load() +} + +func (ctx *spoolCtx) Closed() bool { + return !ctx.Open() +} + +func (ctx *spoolCtx) Go(fn func()) { + ctx.wg.Add(1) + go func() { + defer ctx.wg.Done() + fn() + }() +} + +func ifNotOK(b *bool, fn func()) { + if !(*b) { + fn() + } +} + +func ignoreErr(fn func() error) func() { + return func() { fn() } +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/spool_test.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/spool_test.go new file mode 100644 index 00000000..c661e0e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/spool_test.go @@ -0,0 +1,130 @@ +package spool + +import ( + "flag" + "fmt" + "math/rand" + "testing" + "time" + + humanize "github.com/dustin/go-humanize" + + "github.com/elastic/beats/libbeat/publisher/queue" + "github.com/elastic/beats/libbeat/publisher/queue/queuetest" + "github.com/elastic/go-txfile" + "github.com/elastic/go-txfile/txfiletest" +) + +var seed int64 +var debug bool + +type testQueue struct { + *Spool + teardown func() +} + +type testLogger struct { + t *testing.T +} + +func init() { + flag.Int64Var(&seed, "seed", time.Now().UnixNano(), "test random seed") + flag.BoolVar(&debug, "noisy", false, "print test logs to console") +} + +func TestProduceConsumer(t *testing.T) { + maxEvents := 4096 + minEvents := 32 + + rand.Seed(seed) + events := rand.Intn(maxEvents-minEvents) + minEvents + batchSize := rand.Intn(events-8) + 4 + + t.Log("seed: ", seed) + t.Log("events: ", events) + t.Log("batchSize: ", batchSize) + + testWith := func(factory queuetest.QueueFactory) func(t *testing.T) { + return func(test *testing.T) { + t.Run("single", func(t *testing.T) { + queuetest.TestSingleProducerConsumer(t, events, batchSize, factory) + }) + t.Run("multi", func(t *testing.T) { + queuetest.TestMultiProducerConsumer(t, events, batchSize, factory) + }) + } + } + + testWith(makeTestQueue( + 128*humanize.KiByte, 4*humanize.KiByte, 16*humanize.KiByte, + 100*time.Millisecond, + ))(t) +} + +func makeTestQueue( + maxSize, pageSize, writeBuffer uint, + flushTimeout time.Duration, +) func(*testing.T) queue.Queue { + return func(t *testing.T) 
queue.Queue { + if debug { + fmt.Println("Test:", t.Name()) + } + + ok := false + path, cleanPath := txfiletest.SetupPath(t, "") + defer func() { + if !ok { + cleanPath() + } + }() + + spool, err := NewSpool(&testLogger{t}, path, Settings{ + WriteBuffer: writeBuffer, + WriteFlushTimeout: flushTimeout, + Codec: codecCBORL, + File: txfile.Options{ + MaxSize: uint64(maxSize), + PageSize: uint32(pageSize), + Prealloc: true, + Readonly: false, + }, + }) + if err != nil { + t.Fatal(err) + } + + tq := &testQueue{Spool: spool, teardown: cleanPath} + return tq + } +} + +func (t *testQueue) Close() error { + err := t.Spool.Close() + t.teardown() + return err +} + +func (l *testLogger) Debug(vs ...interface{}) { l.report("Debug", vs) } +func (l *testLogger) Debugf(fmt string, vs ...interface{}) { l.reportf("Debug: ", fmt, vs) } + +func (l *testLogger) Info(vs ...interface{}) { l.report("Info", vs) } +func (l *testLogger) Infof(fmt string, vs ...interface{}) { l.reportf("Info", fmt, vs) } + +func (l *testLogger) Error(vs ...interface{}) { l.report("Error", vs) } +func (l *testLogger) Errorf(fmt string, vs ...interface{}) { l.reportf("Error", fmt, vs) } + +func (l *testLogger) report(level string, vs []interface{}) { + args := append([]interface{}{level, ":"}, vs...) + l.t.Log(args...) + if debug { + fmt.Println(args...) + } +} + +func (l *testLogger) reportf(level string, str string, vs []interface{}) { + str = level + ": " + str + l.t.Logf(str, vs...) + if debug { + fmt.Printf(str, vs...) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/timer.go b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/timer.go new file mode 100644 index 00000000..8c4650b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/publisher/queue/spool/timer.go @@ -0,0 +1,55 @@ +package spool + +import ( + "time" +) + +type timer struct { + // flush timer + timer *time.Timer + C <-chan time.Time + duration time.Duration +} + +func newTimer(duration time.Duration) *timer { + stdtimer := time.NewTimer(duration) + if !stdtimer.Stop() { + <-stdtimer.C + } + + return &timer{ + timer: stdtimer, + C: nil, + duration: duration, + } +} + +func (t *timer) Zero() bool { + return t.duration == 0 +} + +func (t *timer) Restart() { + t.Stop(false) + t.Start() +} + +func (t *timer) Start() { + if t.C != nil { + return + } + + t.timer.Reset(t.duration) + t.C = t.timer.C +} + +func (t *timer) Stop(triggered bool) { + if t.C == nil { + return + } + + if !triggered && !t.timer.Stop() { + <-t.C + } + + t.C = nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile index 0b0c2765..f87f70d2 100755 --- a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile +++ b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile @@ -15,7 +15,8 @@ ES_BEATS?=..## @community_beat Must be set to ./vendor/github.com/elastic/beats. GOPACKAGES?=$(shell go list ${BEAT_PATH}/... 
| grep -v /vendor/ | grep -v /scripts/cmd/ ) PACKER_TEMPLATES_DIR?=${ES_BEATS}/dev-tools/packer ## @Building Directory of templates that are used by "make package" NOTICE_FILE?=../NOTICE.txt -LICENSE_FILE?=../LICENSE.txt +LICENSE_FILE?=../licenses/APACHE-LICENSE-2.0.txt +ELASTIC_LICENSE_FILE?=../licenses/ELASTIC-LICENSE.txt space:=$() # comma:=, @@ -31,12 +32,15 @@ SHELL=bash ES_HOST?="elasticsearch" PWD=$(shell pwd) BUILD_DIR?=$(shell pwd)/build +PKG_BUILD_DIR?=$(BUILD_DIR)/package${PKG_SUFFIX} +PKG_UPLOAD_DIR?=$(BUILD_DIR)/upload COVERAGE_DIR?=${BUILD_DIR}/coverage COVERAGE_TOOL?=${BEAT_GOPATH}/bin/gotestcover COVERAGE_TOOL_REPO?=github.com/elastic/beats/vendor/github.com/pierrre/gotestcover -TESTIFY_TOOL_REPO?=github.com/elastic/beats/vendor/github.com/stretchr/testify +TESTIFY_TOOL_REPO?=github.com/elastic/beats/vendor/github.com/stretchr/testify/assert LIBCOMPOSE_TOOL_REPO?=github.com/docker/libcompose -GOBUILD_FLAGS?=-i +NOW=$(shell date -u '+%Y-%m-%dT%H:%M:%SZ') +GOBUILD_FLAGS?=-i -ldflags "-X github.com/elastic/beats/libbeat/version.buildTime=$(NOW) -X github.com/elastic/beats/libbeat/version.commit=$(COMMIT_ID)" GOIMPORTS=goimports GOIMPORTS_REPO?=golang.org/x/tools/cmd/goimports GOIMPORTS_LOCAL_PREFIX?=github.com/elastic @@ -47,17 +51,21 @@ REVIEWDOG_OPTIONS?=-diff "git diff master" REVIEWDOG_REPO?=github.com/haya14busa/reviewdog/cmd/reviewdog PROCESSES?= 4 TIMEOUT?= 90 +PYTHON_TEST_FILES?=$(shell find . -type f -name 'test_*.py' -not -path "*/build/*" -not -path "*/vendor/*") NOSETESTS_OPTIONS?=--process-timeout=$(TIMEOUT) --with-timer -v --with-xunit --xunit-file=${BUILD_DIR}/TEST-system.xml ## @testing the options to pass when calling nosetests TEST_ENVIRONMENT?=false ## @testing if true, "make testsuite" runs integration tests and system tests in a dockerized test environment SYSTEM_TESTS?=false ## @testing if true, "make test" and "make testsuite" run unit tests and system tests STRESS_TESTS?=false ## @testing if true, "make test" and "make testsuite" run also run the stress tests -GOX_OS?=linux darwin windows solaris freebsd netbsd openbsd ## @Building List of all OS to be supported by "make crosscompile". +STRESS_TEST_OPTIONS?=-timeout=20m -race -v +GOX_OS?=linux darwin windows freebsd netbsd openbsd ## @Building List of all OS to be supported by "make crosscompile". GOX_OSARCH?=!darwin/arm !darwin/arm64 !darwin/386 ## @building Space separated list of GOOS/GOARCH pairs to build by "make crosscompile". GOX_FLAGS?= ## @building Additional flags to append to the gox command used by "make crosscompile". +# XXX: Should be switched back to `snapshot` once the Elasticsearch +# snapshots are working. 
https://github.com/elastic/beats/pull/6416 TESTING_ENVIRONMENT?=snapshot## @testing The name of the environment under test BEAT_VERSION=$(shell head -n 1 ${ES_BEATS}/libbeat/docs/version.asciidoc | cut -c 17- ) COMMIT_ID=$(shell git rev-parse HEAD) -DOCKER_COMPOSE_PROJECT_NAME?=${BEAT_NAME}${TESTING_ENVIRONMENT}${BEAT_VERSION}${COMMIT_ID} ## @testing The name of the docker-compose project used by the integration and system tests +DOCKER_COMPOSE_PROJECT_NAME?=${BEAT_NAME}${TESTING_ENVIRONMENT//-}${BEAT_VERSION//-}${COMMIT_ID} ## @testing The name of the docker-compose project used by the integration and system tests DOCKER_COMPOSE?=TESTING_ENVIRONMENT=${TESTING_ENVIRONMENT} ES_BEATS=${ES_BEATS} docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} -f docker-compose.yml DOCKER_CACHE?=1 ## @miscellaneous If set to 0, all docker images are created without cache GOPACKAGES_COMMA_SEP=$(subst $(space),$(comma),$(strip ${GOPACKAGES})) @@ -183,7 +191,7 @@ integration-tests-environment: prepare-tests build-image .PHONY: system-tests system-tests: ## @testing Runs the system tests system-tests: prepare-tests ${BEAT_NAME}.test python-env ${ES_BEATS}/dev-tools/cmd/dashboards/export_dashboards - . ${PYTHON_ENV}/bin/activate; INTEGRATION_TESTS=${INTEGRATION_TESTS} TESTING_ENVIRONMENT=${TESTING_ENVIRONMENT} DOCKER_COMPOSE_PROJECT_NAME=${DOCKER_COMPOSE_PROJECT_NAME} nosetests -w tests/system ${NOSETESTS_OPTIONS} + . ${PYTHON_ENV}/bin/activate; INTEGRATION_TESTS=${INTEGRATION_TESTS} TESTING_ENVIRONMENT=${TESTING_ENVIRONMENT} DOCKER_COMPOSE_PROJECT_NAME=${DOCKER_COMPOSE_PROJECT_NAME} nosetests ${PYTHON_TEST_FILES} ${NOSETESTS_OPTIONS} python ${ES_BEATS}/dev-tools/aggregate_coverage.py -o ${COVERAGE_DIR}/system.cov ./build/system-tests/run # Runs the system tests @@ -196,14 +204,14 @@ system-tests-environment: prepare-tests build-image .PHONY: fast-system-tests fast-system-tests: ## @testing Runs system tests without coverage reports and in parallel fast-system-tests: ${BEAT_NAME}.test python-env - . ${PYTHON_ENV}/bin/activate; nosetests -w tests/system ${NOSETESTS_OPTIONS} + . ${PYTHON_ENV}/bin/activate; nosetests ${PYTHON_TEST_FILES} ${NOSETESTS_OPTIONS} # Runs the go based stress tests .PHONY: stress-tests stress-tests: ## @testing Runs the stress tests with race detector enabled stress-tests: if [ -n '${GOPACKAGES_STRESSTESTS}' ]; then \ - go test -race --tags=stresstest -v ${GOPACKAGES_STRESSTESTS}; \ + go test --tags=stresstest ${STRESS_TEST_OPTIONS} ${GOPACKAGES_STRESSTESTS}; \ fi # Run benchmark tests @@ -215,7 +223,7 @@ benchmark-tests: ## @testing Runs benchmarks (NOT YET IMPLEMENTED) # Run load tests .PHONY: load-tests load-tests: ## @testing Runs load tests - . ${PYTHON_ENV}/bin/activate; LOAD_TESTS=1 nosetests -w tests/system --processes=$(PROCESSES) --process-timeout=$(TIMEOUT) -a 'load' + . ${PYTHON_ENV}/bin/activate; LOAD_TESTS=1 nosetests ${PYTHON_TEST_FILES} --processes=$(PROCESSES) --process-timeout=$(TIMEOUT) -a 'load' # Sets up the virtual python environment .PHONY: python-env @@ -227,7 +235,8 @@ python-env: ${ES_BEATS}/libbeat/tests/system/requirements.txt else \ . ${PYTHON_ENV}/bin/activate && pip install ${PIP_INSTALL_PARAMS} -qUr ${ES_BEATS}/libbeat/tests/system/requirements.txt ; \ fi - + @# Work around pip bug. 
See: https://github.com/pypa/pip/issues/4464 + @find ${PYTHON_ENV} -type d -name 'dist-packages' -exec sh -c "echo dist-packages > {}.pth" ';' .PHONY: test test: ## @testing Runs unit and system tests without coverage reports @@ -258,8 +267,8 @@ testsuite: clean update # Runs system and system integration tests if SYSTEM_TESTS is set to true if [ $(SYSTEM_TESTS) = true ]; then \ if [ $(TEST_ENVIRONMENT) = true ]; then \ - $(MAKE) system-tests-environment; \ - else \ + $(MAKE) system-tests-environment; \ + else \ $(MAKE) system-tests; \ fi \ fi @@ -348,7 +357,7 @@ import-dashboards: update ${BEAT_NAME} # Builds the environment to test beat .PHONY: build-image build-image: write-environment - ${DOCKER_COMPOSE} build ${DOCKER_NOCACHE} + ${DOCKER_COMPOSE} build ${DOCKER_NOCACHE} --pull --force-rm # Runs the environment so the redis and elasticsearch can also be used for local development # To use it for running the test, set ES_HOST and REDIS_HOST environment variable to the ip of your docker-machine. @@ -385,7 +394,7 @@ install-home: install -m 644 ${NOTICE_FILE} ${HOME_PREFIX}/; \ fi if [ -a ${LICENSE_FILE} ]; then \ - install -m 644 ${LICENSE_FILE} ${HOME_PREFIX}/; \ + install -m 644 ${LICENSE_FILE} ${HOME_PREFIX}/LICENSE.txt; \ fi if [ -d _meta/module.generated ]; then \ install -d -m 755 ${HOME_PREFIX}/module; \ @@ -405,7 +414,7 @@ prepare-package: -v $(abspath ${ES_BEATS}/dev-tools/packer/xgo-scripts):/scripts \ -v $(abspath ${PACKER_TEMPLATES_DIR}):/templates \ -v $(abspath ../):/source \ - -v $(BUILD_DIR):/build \ + -v $(PKG_BUILD_DIR):/build \ -e PUREGO="yes" \ -e PACK=${BEAT_NAME} \ -e BEFORE_BUILD=before_build.sh \ @@ -415,6 +424,7 @@ prepare-package: -e ES_BEATS=${ES_BEATS} \ -e BEAT_PATH=${BEAT_PATH} \ -e BEAT_NAME=${BEAT_NAME} \ + -e LICENSE_FILE=${LICENSE_FILE} \ ${BEATS_BUILDER_IMAGE} # Prepares for packaging. Builds binaries with cgo @@ -426,7 +436,7 @@ prepare-package-cgo: -v $(abspath ${ES_BEATS}/dev-tools/packer/xgo-scripts):/scripts \ -v $(abspath ${PACKER_TEMPLATES_DIR}):/templates \ -v $(abspath ../):/source \ - -v $(BUILD_DIR):/build \ + -v $(PKG_BUILD_DIR):/build \ -e PACK=${BEAT_NAME} \ -e BEFORE_BUILD=before_build.sh \ -e SOURCE=/source \ @@ -435,14 +445,15 @@ prepare-package-cgo: -e ES_BEATS=${ES_BEATS} \ -e BEAT_PATH=${BEAT_PATH} \ -e BEAT_NAME=${BEAT_NAME} \ + -e LICENSE_FILE=${LICENSE_FILE} \ ${BEATS_BUILDER_IMAGE} # linux builds on older debian for compatibility docker run --rm \ - -v ${BUILD_DIR}:/build \ -v $(abspath ${ES_BEATS}/dev-tools/packer/xgo-scripts):/scripts \ -v $(abspath ${PACKER_TEMPLATES_DIR}):/templates \ -v $(abspath ..):/source \ + -v ${PKG_BUILD_DIR}:/build \ -e PACK=${BEAT_NAME} \ -e BEFORE_BUILD=before_build.sh \ -e SOURCE=/source \ @@ -451,6 +462,7 @@ prepare-package-cgo: -e ES_BEATS=${ES_BEATS} \ -e BEAT_PATH=${BEAT_PATH} \ -e BEAT_NAME=${BEAT_NAME} \ + -e LICENSE_FILE=${LICENSE_FILE} \ ${BEATS_BUILDER_DEB_IMAGE} # Prepares images for packaging @@ -460,26 +472,28 @@ package-setup: .PHONY: package package: ## @packaging Create binary packages for the beat. 
-package: clean update package-setup - +package: update package-setup echo "Start building packages for ${BEAT_NAME}" - mkdir -p ${BUILD_DIR}/upload + rm -rf ${PKG_BUILD_DIR} + mkdir -p ${PKG_BUILD_DIR} + mkdir -p ${PKG_UPLOAD_DIR} # Generates the package.yml file with all information needed to create packages - echo "beat_name: ${BEAT_NAME}" > ${BUILD_DIR}/package.yml - echo "beat_url: ${BEAT_URL}" >> ${BUILD_DIR}/package.yml - echo "beat_repo: ${BEAT_PATH}" >> ${BUILD_DIR}/package.yml - echo "beat_pkg_name: ${BEAT_PACKAGE_NAME}" >> ${BUILD_DIR}/package.yml - echo "beat_description: ${BEAT_DESCRIPTION}" >> ${BUILD_DIR}/package.yml - echo "beat_vendor: ${BEAT_VENDOR}" >> ${BUILD_DIR}/package.yml - echo "beat_license: ${BEAT_LICENSE}" >> ${BUILD_DIR}/package.yml - echo "beat_doc_url: ${BEAT_DOC_URL}" >> ${BUILD_DIR}/package.yml + echo "beat_name: ${BEAT_NAME}" > ${PKG_BUILD_DIR}/package.yml + echo "beat_url: ${BEAT_URL}" >> ${PKG_BUILD_DIR}/package.yml + echo "beat_repo: ${BEAT_PATH}" >> ${PKG_BUILD_DIR}/package.yml + echo "beat_pkg_name: ${BEAT_PACKAGE_NAME}" >> ${PKG_BUILD_DIR}/package.yml + echo "beat_pkg_suffix: '${PKG_SUFFIX}'" >> ${PKG_BUILD_DIR}/package.yml + echo "beat_description: ${BEAT_DESCRIPTION}" >> ${PKG_BUILD_DIR}/package.yml + echo "beat_vendor: ${BEAT_VENDOR}" >> ${PKG_BUILD_DIR}/package.yml + echo "beat_license: ${BEAT_LICENSE}" >> ${PKG_BUILD_DIR}/package.yml + echo "beat_doc_url: ${BEAT_DOC_URL}" >> ${PKG_BUILD_DIR}/package.yml if [ -a version.yml ]; then \ - cat version.yml >> ${BUILD_DIR}/package.yml; \ + cat version.yml >> ${PKG_BUILD_DIR}/package.yml; \ else \ - cat ${ES_BEATS}/dev-tools/packer/version.yml >> ${BUILD_DIR}/package.yml; \ + cat ${ES_BEATS}/dev-tools/packer/version.yml >> ${PKG_BUILD_DIR}/package.yml; \ fi if [ $(CGO) = true ]; then \ @@ -488,10 +502,23 @@ package: clean update package-setup $(MAKE) prepare-package; \ fi - SNAPSHOT=${SNAPSHOT} BUILDID=${BUILDID} BEAT_PATH=${BEAT_PATH} BUILD_DIR=${BUILD_DIR} $(MAKE) -C ${ES_BEATS}/dev-tools/packer ${PACKAGES} ${BUILD_DIR}/upload/build_id.txt + SNAPSHOT=${SNAPSHOT} BUILDID=${BUILDID} BEAT_PATH=${BEAT_PATH} BUILD_DIR=${PKG_BUILD_DIR} UPLOAD_DIR=${PKG_UPLOAD_DIR} $(MAKE) -C ${ES_BEATS}/dev-tools/packer ${PACKAGES} ${BUILD_DIR}/upload/build_id.txt $(MAKE) fix-permissions echo "Finished packages for ${BEAT_NAME}" +# Packages the Beat without Elastic X-Pack content (OSS only). +.PHONY: package-oss +package-oss: + @$(MAKE) PKG_SUFFIX=-oss package + +# Packages the Beat with Elastic X-Pack content. 
+.PHONY: package-elastic +package-elastic: + @$(MAKE) BEAT_LICENSE="Elastic License" LICENSE_FILE=$(ELASTIC_LICENSE_FILE) package + +.PHONY: package-all +package-all: package-elastic package-oss + package-dashboards: package-setup mkdir -p ${BUILD_DIR} cp -r _meta/kibana ${BUILD_DIR}/dashboards diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/cmd/stress_pipeline/main.go b/vendor/github.com/elastic/beats/libbeat/scripts/cmd/stress_pipeline/main.go index 79e35fbb..fb65c327 100644 --- a/vendor/github.com/elastic/beats/libbeat/scripts/cmd/stress_pipeline/main.go +++ b/vendor/github.com/elastic/beats/libbeat/scripts/cmd/stress_pipeline/main.go @@ -2,6 +2,7 @@ package main import ( "flag" + "fmt" "log" "net/http" _ "net/http/pprof" @@ -9,13 +10,14 @@ import ( "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/logp" + logpcfg "github.com/elastic/beats/libbeat/logp/configure" "github.com/elastic/beats/libbeat/paths" "github.com/elastic/beats/libbeat/publisher/pipeline/stress" "github.com/elastic/beats/libbeat/service" // import queue types _ "github.com/elastic/beats/libbeat/publisher/queue/memqueue" + _ "github.com/elastic/beats/libbeat/publisher/queue/spool" // import outputs _ "github.com/elastic/beats/libbeat/outputs/console" @@ -31,7 +33,7 @@ var ( type config struct { Path paths.Path - Logging logp.Logging + Logging *common.Config } func main() { @@ -52,6 +54,8 @@ func run() error { flag.Parse() files := flag.Args() + fmt.Println("load config files:", files) + cfg, err := common.LoadFiles(files...) if err != nil { return err @@ -72,10 +76,11 @@ func run() error { if err := paths.InitPaths(&config.Path); err != nil { return err } - if err = logp.Init("test", time.Now(), &config.Logging); err != nil { + if err = logpcfg.Logging("test", config.Logging); err != nil { return err } - logp.SetStderr() + + cfg.PrintDebugf("input config:") return stress.RunTests(info, duration, cfg, nil) } diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/generate_fields_docs.py b/vendor/github.com/elastic/beats/libbeat/scripts/generate_fields_docs.py index 8fcbbf9e..d21c04ed 100644 --- a/vendor/github.com/elastic/beats/libbeat/scripts/generate_fields_docs.py +++ b/vendor/github.com/elastic/beats/libbeat/scripts/generate_fields_docs.py @@ -44,7 +44,7 @@ def document_field(output, field, path): if "path" not in field: field["path"] = path - output.write("[float]\n=== `{}`\n\n".format(field["path"])) + output.write("*`{}`*::\n+\n--\n".format(field["path"])) if "type" in field: output.write("type: {}\n\n".format(field["type"])) @@ -69,6 +69,7 @@ def document_field(output, field, path): if "multi_fields" in field: for subfield in field["multi_fields"]: document_field(output, subfield, path + "." + subfield["name"]) + output.write("--\n\n") def fields_to_asciidoc(input, output, beat): diff --git a/vendor/github.com/elastic/beats/libbeat/service/service.go b/vendor/github.com/elastic/beats/libbeat/service/service.go index 1fe39581..b05d20c2 100644 --- a/vendor/github.com/elastic/beats/libbeat/service/service.go +++ b/vendor/github.com/elastic/beats/libbeat/service/service.go @@ -1,6 +1,7 @@ package service import ( + "context" "expvar" "flag" "fmt" @@ -22,7 +23,7 @@ import ( // HandleSignals manages OS signals that ask the service/daemon to stop. // The stopFunction should break the loop in the Beat so that // the service shut downs gracefully. 
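The hunk below extends HandleSignals with a context.CancelFunc, so a context can be cancelled alongside the existing stop callback when SIGINT/SIGTERM arrives. A minimal caller sketch under that assumption follows; the stop channel and the surrounding wiring are hypothetical, only the HandleSignals signature is taken from the hunk.

```go
// Hypothetical caller of the updated HandleSignals(stopFunction, cancel)
// signature shown in the hunk below.
package main

import (
	"context"

	"github.com/elastic/beats/libbeat/service"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	stop := make(chan struct{})
	service.HandleSignals(func() { close(stop) }, cancel)

	// Wait for either the explicit stop signal or the cancelled context,
	// whichever is observed first after SIGINT/SIGTERM.
	select {
	case <-stop:
	case <-ctx.Done():
	}
}
```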
-func HandleSignals(stopFunction func()) { +func HandleSignals(stopFunction func(), cancel context.CancelFunc) { var callback sync.Once // On ^C or SIGTERM, gracefully stop the sniffer @@ -31,6 +32,7 @@ func HandleSignals(stopFunction func()) { go func() { <-sigc logp.Debug("service", "Received sigterm/sigint, stopping") + cancel() callback.Do(stopFunction) }() diff --git a/vendor/github.com/elastic/beats/libbeat/setup/kibana/client.go b/vendor/github.com/elastic/beats/libbeat/setup/kibana/client.go index 07baa9ef..493eb6d8 100644 --- a/vendor/github.com/elastic/beats/libbeat/setup/kibana/client.go +++ b/vendor/github.com/elastic/beats/libbeat/setup/kibana/client.go @@ -10,6 +10,8 @@ import ( "net/url" "strings" + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/outputs" @@ -38,6 +40,25 @@ func addToURL(_url, _path string, params url.Values) string { return strings.Join([]string{_url, _path, "?", params.Encode()}, "") } +func extractError(result []byte) error { + var kibanaResult struct { + Objects []struct { + Error struct { + Message string + } + } + } + if err := json.Unmarshal(result, &kibanaResult); err != nil { + return errors.Wrap(err, "parsing kibana response") + } + for _, o := range kibanaResult.Objects { + if o.Error.Message != "" { + return errors.New(kibanaResult.Objects[0].Error.Message) + } + } + return nil +} + func NewKibanaClient(cfg *common.Config) (*Client, error) { config := defaultKibanaConfig if err := cfg.Unpack(&config); err != nil { @@ -97,7 +118,7 @@ func NewKibanaClient(cfg *common.Config) (*Client, error) { } if err = client.SetVersion(); err != nil { - return nil, fmt.Errorf("fail to get the Kibana version:%v", err) + return nil, fmt.Errorf("fail to get the Kibana version: %v", err) } return client, nil @@ -140,6 +161,7 @@ func (conn *Connection) Request(method, extraPath string, return 0, nil, fmt.Errorf("fail to read response %s", err) } + retError = extractError(result) return resp.StatusCode, result, retError } @@ -157,8 +179,8 @@ func (client *Client) SetVersion() error { Version string `json:"version"` } - _, result, err := client.Connection.Request("GET", "/api/status", nil, nil) - if err != nil { + code, result, err := client.Connection.Request("GET", "/api/status", nil, nil) + if err != nil || code >= 400 { return fmt.Errorf("HTTP GET request to /api/status fails: %v. Response: %s.", err, truncateString(result)) } @@ -177,10 +199,6 @@ func (client *Client) SetVersion() error { client.Connection.URL, truncateString(result), err5x, err) } client.version = kibanaVersion5x.Version - - return fmt.Errorf("fail to unmarshal the response from GET %s/api/status: %v. 
Response: %s", - client.Connection.URL, err, truncateString(result)) - } else { client.version = kibanaVersion.Version.Number diff --git a/vendor/github.com/elastic/beats/libbeat/setup/kibana/client_test.go b/vendor/github.com/elastic/beats/libbeat/setup/kibana/client_test.go new file mode 100644 index 00000000..5a33ee18 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/setup/kibana/client_test.go @@ -0,0 +1,56 @@ +package kibana + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorJson(t *testing.T) { + // also common 200: {"objects":[{"id":"apm-*","type":"index-pattern","error":{"message":"[doc][index-pattern:test-*]: version conflict, document already exists (current version [1])"}}]} + kibanaTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(`{"objects":[{"id":"test-*","type":"index-pattern","error":{"message":"action [indices:data/write/bulk[s]] is unauthorized for user [test]"}}]}`)) + })) + defer kibanaTs.Close() + + conn := Connection{ + URL: kibanaTs.URL, + http: http.DefaultClient, + } + code, _, err := conn.Request(http.MethodPost, "", url.Values{}, nil) + assert.Equal(t, http.StatusOK, code) + assert.Error(t, err) +} + +func TestErrorBadJson(t *testing.T) { + kibanaTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(`{`)) + })) + defer kibanaTs.Close() + + conn := Connection{ + URL: kibanaTs.URL, + http: http.DefaultClient, + } + code, _, err := conn.Request(http.MethodPost, "", url.Values{}, nil) + assert.Equal(t, http.StatusOK, code) + assert.Error(t, err) +} + +func TestSuccess(t *testing.T) { + kibanaTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(`{"objects":[{"id":"test-*","type":"index-pattern","updated_at":"2018-01-24T19:04:13.371Z","version":1}]}`)) + })) + defer kibanaTs.Close() + + conn := Connection{ + URL: kibanaTs.URL, + http: http.DefaultClient, + } + code, _, err := conn.Request(http.MethodPost, "", url.Values{}, nil) + assert.Equal(t, http.StatusOK, code) + assert.NoError(t, err) +} diff --git a/vendor/github.com/elastic/beats/libbeat/template/config.go b/vendor/github.com/elastic/beats/libbeat/template/config.go index eec7e6da..2db4fda3 100644 --- a/vendor/github.com/elastic/beats/libbeat/template/config.go +++ b/vendor/github.com/elastic/beats/libbeat/template/config.go @@ -1,12 +1,15 @@ package template +import "github.com/elastic/beats/libbeat/common" + type TemplateConfig struct { - Enabled bool `config:"enabled"` - Name string `config:"name"` - Pattern string `config:"pattern"` - Fields string `config:"fields"` - Overwrite bool `config:"overwrite"` - Settings TemplateSettings `config:"settings"` + Enabled bool `config:"enabled"` + Name string `config:"name"` + Pattern string `config:"pattern"` + Fields string `config:"fields"` + AppendFields common.Fields `config:"append_fields"` + Overwrite bool `config:"overwrite"` + Settings TemplateSettings `config:"settings"` } type TemplateSettings struct { diff --git a/vendor/github.com/elastic/beats/libbeat/template/load.go b/vendor/github.com/elastic/beats/libbeat/template/load.go index d596cf68..3219ca03 100644 --- a/vendor/github.com/elastic/beats/libbeat/template/load.go +++ b/vendor/github.com/elastic/beats/libbeat/template/load.go @@ -67,7 +67,7 @@ func (l *Loader) Load() error { err = l.LoadTemplate(tmpl.GetName(), output) if err != nil { - return 
fmt.Errorf("could not load template: %v", err) + return fmt.Errorf("could not load template. Elasticsearh returned: %v. Template is: %s", err, output) } } else { logp.Info("Template already exists and will not be overwritten.") diff --git a/vendor/github.com/elastic/beats/libbeat/template/processor.go b/vendor/github.com/elastic/beats/libbeat/template/processor.go index 388fcda7..9732bb8b 100644 --- a/vendor/github.com/elastic/beats/libbeat/template/processor.go +++ b/vendor/github.com/elastic/beats/libbeat/template/processor.go @@ -2,6 +2,7 @@ package template import ( "errors" + "strings" "github.com/elastic/beats/libbeat/common" ) @@ -200,7 +201,17 @@ func (p *Processor) array(f *common.Field) common.MapStr { func (p *Processor) object(f *common.Field) common.MapStr { dynProperties := getDefaultProperties(f) + matchType := func(onlyType string) string { + if f.ObjectTypeMappingType != "" { + return f.ObjectTypeMappingType + } + return onlyType + } + switch f.ObjectType { + case "scaled_float": + dynProperties = p.scaledFloat(f) + addDynamicTemplate(f, dynProperties, matchType("*")) case "text": dynProperties["type"] = "text" @@ -208,13 +219,13 @@ func (p *Processor) object(f *common.Field) common.MapStr { dynProperties["type"] = "string" dynProperties["index"] = "analyzed" } - addDynamicTemplate(f, dynProperties, "string") + addDynamicTemplate(f, dynProperties, matchType("string")) case "long": dynProperties["type"] = f.ObjectType - addDynamicTemplate(f, dynProperties, "long") + addDynamicTemplate(f, dynProperties, matchType("long")) case "keyword": dynProperties["type"] = f.ObjectType - addDynamicTemplate(f, dynProperties, "string") + addDynamicTemplate(f, dynProperties, matchType("string")) } properties := getDefaultProperties(f) @@ -235,12 +246,16 @@ func addDynamicTemplate(f *common.Field, properties common.MapStr, matchType str if len(f.Path) > 0 { path = f.Path + "." 
} + pathMatch := path + f.Name + if !strings.ContainsRune(pathMatch, '*') { + pathMatch += ".*" + } template := common.MapStr{ // Set the path of the field as name path + f.Name: common.MapStr{ "mapping": properties, "match_mapping_type": matchType, - "path_match": path + f.Name + ".*", + "path_match": pathMatch, }, } diff --git a/vendor/github.com/elastic/beats/libbeat/template/processor_test.go b/vendor/github.com/elastic/beats/libbeat/template/processor_test.go index c3a8fc0d..af5e04b9 100644 --- a/vendor/github.com/elastic/beats/libbeat/template/processor_test.go +++ b/vendor/github.com/elastic/beats/libbeat/template/processor_test.go @@ -205,6 +205,32 @@ func TestDynamicTemplate(t *testing.T) { }, }, }, + { + field: common.Field{ + Type: "object", ObjectType: "long", ObjectTypeMappingType: "futuretype", + Path: "language", Name: "english", + }, + expected: common.MapStr{ + "language.english": common.MapStr{ + "mapping": common.MapStr{"type": "long"}, + "match_mapping_type": "futuretype", + "path_match": "language.english.*", + }, + }, + }, + { + field: common.Field{ + Type: "object", ObjectType: "long", ObjectTypeMappingType: "*", + Path: "language", Name: "english", + }, + expected: common.MapStr{ + "language.english": common.MapStr{ + "mapping": common.MapStr{"type": "long"}, + "match_mapping_type": "*", + "path_match": "language.english.*", + }, + }, + }, { field: common.Field{ Type: "object", ObjectType: "long", @@ -231,6 +257,38 @@ func TestDynamicTemplate(t *testing.T) { }, }, }, + { + field: common.Field{ + Type: "object", ObjectType: "scaled_float", + Name: "core.*.pct", + }, + expected: common.MapStr{ + "core.*.pct": common.MapStr{ + "mapping": common.MapStr{ + "type": "scaled_float", + "scaling_factor": defaultScalingFactor, + }, + "match_mapping_type": "*", + "path_match": "core.*.pct", + }, + }, + }, + { + field: common.Field{ + Type: "object", ObjectType: "scaled_float", + Name: "core.*.pct", ScalingFactor: 100, ObjectTypeMappingType: "float", + }, + expected: common.MapStr{ + "core.*.pct": common.MapStr{ + "mapping": common.MapStr{ + "type": "scaled_float", + "scaling_factor": 100, + }, + "match_mapping_type": "float", + "path_match": "core.*.pct", + }, + }, + }, } for _, test := range tests { diff --git a/vendor/github.com/elastic/beats/libbeat/template/template.go b/vendor/github.com/elastic/beats/libbeat/template/template.go index 31420d5f..a69d3b5d 100644 --- a/vendor/github.com/elastic/beats/libbeat/template/template.go +++ b/vendor/github.com/elastic/beats/libbeat/template/template.go @@ -2,10 +2,12 @@ package template import ( "fmt" + "sync" "time" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/common/fmtstr" ) @@ -20,11 +22,12 @@ var ( ) type Template struct { + sync.Mutex name string pattern string beatVersion common.Version esVersion common.Version - settings TemplateSettings + config TemplateConfig } // New creates a new template instance @@ -87,17 +90,32 @@ func New(beatVersion string, beatName string, esVersion string, config TemplateC name: name, beatVersion: *bV, esVersion: *esV, - settings: config.Settings, + config: config, }, nil } // Load the given input and generates the input based on it func (t *Template) Load(file string) (common.MapStr, error) { + fields, err := common.LoadFieldsYaml(file) if err != nil { return nil, err } + // Locking to make sure dynamicTemplates and defaultFields is not accessed in parallel + t.Lock() + defer 
t.Unlock() + + dynamicTemplates = nil + + if len(t.config.AppendFields) > 0 { + cfgwarn.Experimental("append_fields is used.") + fields, err = appendFields(fields, t.config.AppendFields) + if err != nil { + return nil, err + } + } + // Start processing at the root properties := common.MapStr{} processor := Processor{EsVersion: t.esVersion} @@ -155,7 +173,7 @@ func (t *Template) generate(properties common.MapStr, dynamicTemplates []common. indexSettings.Put("number_of_routing_shards", defaultNumberOfRoutingShards) } - indexSettings.DeepUpdate(t.settings.Index) + indexSettings.DeepUpdate(t.config.Settings.Index) var mappingName string if t.esVersion.Major >= 6 { @@ -182,9 +200,9 @@ func (t *Template) generate(properties common.MapStr, dynamicTemplates []common. }, } - if len(t.settings.Source) > 0 { + if len(t.config.Settings.Source) > 0 { key := fmt.Sprintf("mappings.%s._source", mappingName) - basicStructure.Put(key, t.settings.Source) + basicStructure.Put(key, t.config.Settings.Source) } // ES 6 moved from template to index_patterns: https://github.com/elastic/elasticsearch/pull/21009 @@ -200,3 +218,19 @@ func (t *Template) generate(properties common.MapStr, dynamicTemplates []common. return basicStructure } + +func appendFields(fields, appendFields common.Fields) (common.Fields, error) { + if len(appendFields) > 0 { + appendFieldKeys := appendFields.GetKeys() + + // Append is only allowed to add fields, not overwrite + for _, key := range appendFieldKeys { + if fields.HasNode(key) { + return nil, fmt.Errorf("append_fields contains an already existing key: %s", key) + } + } + // Appends fields to existing fields + fields = append(fields, appendFields...) + } + return fields, nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/template/template_test.go b/vendor/github.com/elastic/beats/libbeat/template/template_test.go index 2e5200f7..191d4b07 100644 --- a/vendor/github.com/elastic/beats/libbeat/template/template_test.go +++ b/vendor/github.com/elastic/beats/libbeat/template/template_test.go @@ -6,6 +6,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" ) func TestNumberOfRoutingShards(t *testing.T) { @@ -54,3 +56,106 @@ func TestNumberOfRoutingShardsOverwrite(t *testing.T) { assert.Equal(t, 5, shards.(int)) } + +func TestAppendFields(t *testing.T) { + tests := []struct { + fields common.Fields + appendFields common.Fields + error bool + }{ + { + fields: common.Fields{ + common.Field{ + Name: "a", + Fields: common.Fields{ + common.Field{ + Name: "b", + }, + }, + }, + }, + appendFields: common.Fields{ + common.Field{ + Name: "a", + Fields: common.Fields{ + common.Field{ + Name: "c", + }, + }, + }, + }, + error: false, + }, + { + fields: common.Fields{ + common.Field{ + Name: "a", + Fields: common.Fields{ + common.Field{ + Name: "b", + }, + common.Field{ + Name: "c", + }, + }, + }, + }, + appendFields: common.Fields{ + common.Field{ + Name: "a", + Fields: common.Fields{ + common.Field{ + Name: "c", + }, + }, + }, + }, + error: true, + }, + { + fields: common.Fields{ + common.Field{ + Name: "a", + }, + }, + appendFields: common.Fields{ + common.Field{ + Name: "a", + Fields: common.Fields{ + common.Field{ + Name: "c", + }, + }, + }, + }, + error: true, + }, + { + fields: common.Fields{ + common.Field{ + Name: "a", + Fields: common.Fields{ + common.Field{ + Name: "c", + }, + }, + }, + }, + appendFields: common.Fields{ + common.Field{ + Name: "a", + }, + }, + error: true, + }, + } + + for _, test := range tests { + _, err := 
appendFields(test.fields, test.appendFields) + if test.error { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go b/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go index 56b08b3a..fc4edfa1 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go +++ b/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go @@ -11,6 +11,8 @@ import ( "testing" "time" + "strconv" + "github.com/docker/libcompose/docker" "github.com/docker/libcompose/docker/ctx" "github.com/docker/libcompose/project" @@ -43,13 +45,22 @@ func EnsureUp(t *testing.T, services ...string) { // EnsureUpWithTimeout starts all the requested services (must be defined in docker-compose.yml) // Wait for `timeout` seconds for health func EnsureUpWithTimeout(t *testing.T, timeout int, services ...string) { + // The NO_COMPOSE env variables makes it possible to skip the starting of the environment. + // This is useful if the service is already running locally. + if noCompose, err := strconv.ParseBool(os.Getenv("NO_COMPOSE")); err == nil && noCompose { + return + } + compose, err := getComposeProject() if err != nil { t.Fatal(err) } // Kill no longer used containers - compose.KillOld(services) + err = compose.KillOld(services) + if err != nil { + t.Fatal(err) + } for _, service := range services { err = compose.Start(service) @@ -117,16 +128,22 @@ func (c *composeProject) Wait(seconds int, services ...string) error { return nil } -func (c *composeProject) Kill(service string) { +func (c *composeProject) Kill(service string) error { c.Lock() defer c.Unlock() - c.p.Kill(context.Background(), "KILL", service) + return c.p.Kill(context.Background(), "KILL", service) } func (c *composeProject) KillOld(except []string) error { - // Do not kill ourselves or elasticsearch :) - except = append(except, "beat", "elasticsearch") + // Do not kill ourselves ;) + except = append(except, "beat") + + // These services take very long to start up and stop. If they are stopped + // it can happen that an other package tries to start them at the same time + // which leads to a conflict. We need a better solution long term but that should + // solve the problem for now. 
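The NO_COMPOSE check added to EnsureUpWithTimeout above relies on strconv.ParseBool, so any value it parses as true skips bringing up the docker-compose environment. A standalone sketch of that opt-out pattern (the helper name is made up):

```go
// Illustration of the NO_COMPOSE opt-out added to EnsureUpWithTimeout above:
// any value strconv.ParseBool accepts as true ("1", "t", "true", "TRUE", ...)
// skips starting the compose environment.
package main

import (
	"fmt"
	"os"
	"strconv"
)

func skipCompose() bool {
	noCompose, err := strconv.ParseBool(os.Getenv("NO_COMPOSE"))
	return err == nil && noCompose
}

func main() {
	os.Setenv("NO_COMPOSE", "1")
	fmt.Println(skipCompose()) // true: services are assumed to be running locally

	os.Unsetenv("NO_COMPOSE")
	fmt.Println(skipCompose()) // false: ParseBool fails on the empty string
}
```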
+ except = append(except, "elasticsearch", "kibana", "logstash") servicesStatus, err := c.getServices() if err != nil { @@ -140,7 +157,10 @@ func (c *composeProject) KillOld(except []string) error { } if s.Old { - c.Kill(s.Name) + err = c.Kill(s.Name) + if err != nil { + return err + } } } diff --git a/vendor/github.com/elastic/beats/libbeat/dashboards/testdata/testbeat-dashboards.zip b/vendor/github.com/elastic/beats/libbeat/tests/files/testbeat-dashboards.zip similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/dashboards/testdata/testbeat-dashboards.zip rename to vendor/github.com/elastic/beats/libbeat/tests/files/testbeat-dashboards.zip diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py index 067989f8..fe0ad480 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/beat.py @@ -9,6 +9,7 @@ import sys import time import yaml +import hashlib from datetime import datetime, timedelta from .compose import ComposeMixin @@ -19,6 +20,8 @@ INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) +yaml_cache = {} + class TimeoutError(Exception): pass @@ -210,7 +213,7 @@ def render_config_template(self, template_name=None, output_path = os.path.join(self.working_dir, output) with open(output_path, "wb") as f: - os.chmod(output_path, 0600) + os.chmod(output_path, 0o600) f.write(output_str.encode('utf8')) # Returns output as JSON object with flattened fields (. notation) @@ -277,7 +280,10 @@ def copy_files(self, files, source_dir="files/"): def setUp(self): self.template_env = jinja2.Environment( - loader=jinja2.FileSystemLoader(self.beat_path) + loader=jinja2.FileSystemLoader([ + self.beat_path, + os.path.abspath(os.path.join(self.beat_path, "../libbeat")) + ]) ) # create working dir @@ -333,27 +339,30 @@ def get_log(self, logfile=None): def wait_log_contains(self, msg, logfile=None, max_timeout=10, poll_interval=0.1, - name="log_contains"): + name="log_contains", + ignore_case=False): self.wait_until( - cond=lambda: self.log_contains(msg, logfile), + cond=lambda: self.log_contains(msg, logfile, ignore_case=ignore_case), max_timeout=max_timeout, poll_interval=poll_interval, name=name) - def log_contains(self, msg, logfile=None): + def log_contains(self, msg, logfile=None, ignore_case=False): """ Returns true if the give logfile contains the given message. Note that the msg must be present in a single line. 
""" - return self.log_contains_count(msg, logfile) > 0 + return self.log_contains_count(msg, logfile, ignore_case=ignore_case) > 0 - def log_contains_count(self, msg, logfile=None): + def log_contains_count(self, msg, logfile=None, ignore_case=False): """ Returns the number of appearances of the given string in the log file """ counter = 0 + if ignore_case: + msg = msg.lower() # Init defaults if logfile is None: @@ -362,6 +371,8 @@ def log_contains_count(self, msg, logfile=None): try: with open(os.path.join(self.working_dir, logfile), "r") as f: for line in f: + if ignore_case: + line = line.lower() if line.find(msg) >= 0: counter = counter + 1 except IOError: @@ -484,6 +495,8 @@ def extract_fields(doc_list, name): if not os.path.isfile(fields_doc): fields_doc = self.beat_path + "/_meta/fields.yml" + global yaml_cache + # TODO: Make fields_doc path more generic to work with beat-generator with open(fields_doc, "r") as f: path = os.path.abspath(os.path.dirname(__file__) + "../../../../_meta/fields.generated.yml") @@ -492,9 +505,15 @@ def extract_fields(doc_list, name): with open(path) as f2: content = f2.read() - #content = "fields:\n" content += f.read() - doc = yaml.load(content) + + hash = hashlib.md5(content).hexdigest() + doc = "" + if hash in yaml_cache: + doc = yaml_cache[hash] + else: + doc = yaml.safe_load(content) + yaml_cache[hash] = doc fields = [] dictfields = [] @@ -516,7 +535,9 @@ def flatten_object(self, obj, dict_fields, prefix=""): result[prefix + key] = value return result - def copy_files(self, files, source_dir="files/", target_dir=""): + def copy_files(self, files, source_dir="", target_dir=""): + if not source_dir: + source_dir = self.beat_path + "/tests/files/" if target_dir: target_dir = os.path.join(self.working_dir, target_dir) else: diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py index 41047a2b..70e4f590 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py @@ -30,27 +30,54 @@ def compose_up(cls): """ Ensure *only* the services defined under `COMPOSE_SERVICES` are running and healthy """ - if INTEGRATION_TESTS and cls.COMPOSE_SERVICES: - cls.compose_project().up( + if not INTEGRATION_TESTS or not cls.COMPOSE_SERVICES: + return + + def print_logs(container): + print("---- " + container.name_without_project) + print(container.logs()) + print("----") + + def is_healthy(container): + return container.inspect()['State']['Health']['Status'] == 'healthy' + + project = cls.compose_project() + project.up( + service_names=cls.COMPOSE_SERVICES, + do_build=BuildAction.force, + timeout=30) + + # Wait for them to be healthy + start = time.time() + while True: + containers = project.containers( service_names=cls.COMPOSE_SERVICES, - do_build=BuildAction.force, - timeout=30) - - # Wait for them to be healthy - healthy = False - seconds = cls.COMPOSE_TIMEOUT - while not healthy and seconds > 0: - print("Seconds: %d".format(seconds)) - seconds -= 1 - time.sleep(1) - healthy = True - for container in cls.compose_project().containers(service_names=cls.COMPOSE_SERVICES): - if container.inspect()['State']['Health']['Status'] != 'healthy': - healthy = False - break - - if not healthy: - raise Exception('Timeout while waiting for healthy docker-compose services') + stopped=True) + + healthy = True + for container in containers: + if not container.is_running: + print_logs(container) + 
raise Exception( + "Container %s unexpectedly finished on startup" % + container.name_without_project) + if not is_healthy(container): + healthy = False + break + + if healthy: + break + + time.sleep(1) + timeout = time.time() - start > cls.COMPOSE_TIMEOUT + if timeout: + for container in containers: + if not is_healthy(container): + print_logs(container) + raise Exception( + "Timeout while waiting for healthy " + "docker-compose services: %s" % + ','.join(cls.COMPOSE_SERVICES)) @classmethod def compose_down(cls): @@ -60,6 +87,19 @@ def compose_down(cls): if INTEGRATION_TESTS and cls.COMPOSE_SERVICES: cls.compose_project().kill(service_names=cls.COMPOSE_SERVICES) + @classmethod + def compose_hosts(cls): + if not INTEGRATION_TESTS or not cls.COMPOSE_SERVICES: + return [] + + hosts = [] + for container in cls.compose_project().containers(service_names=cls.COMPOSE_SERVICES): + network_settings = container.inspect()['NetworkSettings'] + for network in network_settings['Networks'].values(): + if network['IPAddress']: + hosts.append(network['IPAddress']) + return hosts + @classmethod def compose_project(cls): return get_project(cls.COMPOSE_PROJECT_DIR, project_name=os.environ.get('DOCKER_COMPOSE_PROJECT_NAME')) diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/config/libbeat.yml.j2 b/vendor/github.com/elastic/beats/libbeat/tests/system/config/libbeat.yml.j2 new file mode 100644 index 00000000..a8e494e6 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/config/libbeat.yml.j2 @@ -0,0 +1,93 @@ +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +name: {{shipper_name}} + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +tags: [ + {%- if agent_tags -%} + {%- for tag in agent_tags -%} + "{{ tag }}" + {%- if not loop.last %}, {% endif -%} + {%- endfor -%} + {%- endif -%} +] + +{% if setup_template_name %} +setup.template.name: "{{setup_template_name}}" +setup.template.pattern: "{{setup_template_pattern}}" +{%- endif %} + +#================================ Processors ===================================== + +{%- if processors %} +processors: +{%- for processor in processors %} +{%- for name, settings in processor.items() %} +- {{name}}: + {%- if settings %} + {%- for k, v in settings.items() %} + {{k}}: + {{v | default([])}} + {%- endfor %} + {%- endif %} +{%- endfor %} +{%- endfor %} + +{%- endif %} + +#================================ Queue ===================================== + +queue.mem: + events: 4096 + flush.min_events: {{ flush_min_events|default(8) }} + flush.timeout: 0.1s + +{% if kibana -%} +setup.kibana.host: "{{ kibana.host }}" +{%- endif %} + +#================================ Outputs ===================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. 
+ +{% if elasticsearch -%} +output: + elasticsearch: + hosts: ["{{ elasticsearch.host }}"] + {% if elasticsearch.pipeline %} + pipeline: {{elasticsearch.pipeline}} + {% endif %} + {% if elasticsearch.index %} + index: {{elasticsearch.index}} + {% endif %} +{%- endif %} + +{% if logstash %} +output.logstash: + hosts: ["{{ logstash.host }}"] +{%- endif %} + +{% if not (console or elasticsearch or logstash) -%} +output.file: + path: {{ output_file_path|default(beat.working_dir + "/output") }} + filename: "{{ output_file_filename|default(beat.beat_name) }}" + rotate_every_kb: {{ rotate_every_kb | default(1000) }} + #number_of_files: 7 +{%- endif %} + +{% if path_data %} +#================================ Paths ===================================== +path: + data: {{path_data}} +{%endif%} + +{% if keystore_path %} +#================================ keystore ===================================== +keystore.path: {{keystore_path}} +{% endif %} diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt b/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt index b6445d94..355909db 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/requirements.txt @@ -1,11 +1,29 @@ -pip -nose -jinja2 -PyYAML -nose-timer -redis -elasticsearch -requests -autopep8 -six -docker-compose +autopep8==1.3.5 +backports.ssl-match-hostname==3.5.0.1 +cached-property==1.4.2 +certifi==2018.1.18 +chardet==3.0.4 +docker==3.2.1 +docker-compose==1.21.0 +docker-pycreds==0.2.2 +dockerpty==0.4.1 +docopt==0.6.2 +elasticsearch==6.2.0 +enum34==1.1.6 +functools32==3.2.3.post2 +idna==2.6 +ipaddress==1.0.19 +Jinja2==2.10 +jsonschema==2.6.0 +MarkupSafe==1.0 +nose==1.3.7 +nose-timer==0.7.1 +pycodestyle==2.4.0 +PyYAML==3.12 +redis==2.10.6 +requests==2.18.4 +six==1.11.0 +termcolor==1.1.0 +texttable==0.9.1 +urllib3==1.22 +websocket-client==0.47.0 diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/test_cmd.py b/vendor/github.com/elastic/beats/libbeat/tests/system/test_cmd.py index 4e72666f..ee9471b4 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/test_cmd.py +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/test_cmd.py @@ -90,7 +90,7 @@ def test_setup_flag(self): extra_args=["--setup", "--path.config", self.working_dir, "-E", "setup.dashboards.file=" + - os.path.join("../../dashboards/testdata", "testbeat-dashboards.zip"), + os.path.join(self.beat_path, "tests", "files", "testbeat-dashboards.zip"), "-E", "setup.dashboards.beat=testbeat", "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/test_dashboard.py b/vendor/github.com/elastic/beats/libbeat/tests/system/test_dashboard.py index adcc841e..8173c954 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/test_dashboard.py +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/test_dashboard.py @@ -23,7 +23,7 @@ def test_load_dashboard(self): extra_args=["setup", "--dashboards", "-E", "setup.dashboards.file=" + - os.path.join("../../dashboards/testdata", "testbeat-dashboards.zip"), + os.path.join(self.beat_path, "tests", "files", "testbeat-dashboards.zip"), "-E", "setup.dashboards.beat=testbeat", "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), @@ -36,6 +36,32 @@ def test_load_dashboard(self): assert self.log_contains("Kibana dashboards successfully loaded") is True + 
@unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_load_only_index_patterns(self): + """ + Test loading dashboards + """ + self.render_config_template() + beat = self.start_beat( + logging_args=["-e", "-d", "*"], + extra_args=["setup", + "--dashboards", + "-E", "setup.dashboards.file=" + + os.path.join(self.beat_path, "tests", "files", "testbeat-dashboards.zip"), + "-E", "setup.dashboards.beat=testbeat", + "-E", "setup.dashboards.only_index=true", + "-E", "setup.kibana.protocol=http", + "-E", "setup.kibana.host=" + self.get_kibana_host(), + "-E", "setup.kibana.port=" + self.get_kibana_port(), + "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", + "-E", "output.file.enabled=false"] + ) + + beat.check_wait(exit_code=0) + + assert self.log_contains("Kibana dashboards successfully loaded") is True + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") @attr('integration') def test_export_dashboard(self): @@ -45,11 +71,11 @@ def test_export_dashboard(self): self.test_load_dashboard() - command = "./../../../dev-tools/cmd/dashboards/export_dashboards -kibana http://" + \ + command = self.beat_path + "/../dev-tools/cmd/dashboards/export_dashboards -kibana http://" + \ self.get_kibana_host() + ":" + self.get_kibana_port() if os.name == "nt": - command = "..\..\..\dev-tools\cmd\dashboards\export_dashboards -kibana http://" + \ + command = self.beat_path + "\..\dev-tools\cmd\dashboards\export_dashboards -kibana http://" + \ self.get_kibana_host() + ":" + self.get_kibana_port() command = command + " -dashboard Metricbeat-system-overview" diff --git a/vendor/github.com/elastic/beats/libbeat/version/helper.go b/vendor/github.com/elastic/beats/libbeat/version/helper.go index f1080f42..b34ed5a2 100644 --- a/vendor/github.com/elastic/beats/libbeat/version/helper.go +++ b/vendor/github.com/elastic/beats/libbeat/version/helper.go @@ -1,7 +1,29 @@ package version +import "time" + // GetDefaultVersion returns the current libbeat version. // This method is in a separate file as the version.go file is auto generated func GetDefaultVersion() string { return defaultBeatVersion } + +var ( + buildTime = "unknown" + commit = "unknown" +) + +// BuildTime exposes the compile-time build time information. +// It will represent the zero time instant if parsing fails. +func BuildTime() time.Time { + t, err := time.Parse(time.RFC3339, buildTime) + if err != nil { + return time.Time{} + } + return t +} + +// Commit exposes the compile-time commit hash. +func Commit() string { + return commit +} diff --git a/vendor/github.com/elastic/beats/libbeat/version/version.go b/vendor/github.com/elastic/beats/libbeat/version/version.go index aba590ce..7283170e 100644 --- a/vendor/github.com/elastic/beats/libbeat/version/version.go +++ b/vendor/github.com/elastic/beats/libbeat/version/version.go @@ -1,3 +1,4 @@ +// Code generated by dev-tools/set_version package version -const defaultBeatVersion = "6.2.4" +const defaultBeatVersion = "6.3.3" diff --git a/vendor/github.com/elastic/beats/licenses/APACHE-LICENSE-2.0.txt b/vendor/github.com/elastic/beats/licenses/APACHE-LICENSE-2.0.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/beats/licenses/APACHE-LICENSE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/beats/licenses/ELASTIC-LICENSE.txt b/vendor/github.com/elastic/beats/licenses/ELASTIC-LICENSE.txt new file mode 100644 index 00000000..7376ffc3 --- /dev/null +++ b/vendor/github.com/elastic/beats/licenses/ELASTIC-LICENSE.txt @@ -0,0 +1,223 @@ +ELASTIC LICENSE AGREEMENT + +PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH +CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF +THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") +THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, +CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY +INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU +ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE +WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE +GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON +BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL +AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF +SUCH ENTITY. + +Posted Date: April 20, 2018 + +This Agreement is entered into by and between Elasticsearch BV ("Elastic") and +You, or the legal entity on behalf of whom You are acting (as applicable, +"You"). + +1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE +SOFTWARE + + 1.1 Object Code End User License. Subject to the terms and conditions of + Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and + for so long as you are not in breach of any provision of this Agreement, a + License to the Basic Features and Functions of the Elastic Software. + + 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. 
You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. 
Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. 
IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. + This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. + + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. 
+ + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. + + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. diff --git a/vendor/github.com/elastic/beats/metricbeat/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/Dockerfile index 8a1fad09..a4c4b427 100644 --- a/vendor/github.com/elastic/beats/metricbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.9.2 +FROM golang:1.9.4 MAINTAINER Nicolas Ruflin RUN set -x && \ diff --git a/vendor/github.com/elastic/beats/metricbeat/Makefile b/vendor/github.com/elastic/beats/metricbeat/Makefile index 76e1d2e2..cd076d91 100644 --- a/vendor/github.com/elastic/beats/metricbeat/Makefile +++ b/vendor/github.com/elastic/beats/metricbeat/Makefile @@ -1,17 +1,13 @@ -#!/bin/bash - # Name can be overwritten, as Metricbeat is also a library BEAT_NAME?=metricbeat BEAT_TITLE?=Metricbeat BEAT_DESCRIPTION?=Metricbeat is a lightweight shipper for metrics. SYSTEM_TESTS?=true TEST_ENVIRONMENT?=true -TESTING_ENVIRONMENT?=snapshot-noxpack -GOPACKAGES=$(shell go list ${BEAT_PATH}/... | grep -v /vendor/) ES_BEATS?=.. # Metricbeat can only be cross-compiled on platforms not requiring CGO. 
-GOX_OS=solaris netbsd linux windows +GOX_OS=netbsd linux windows GOX_FLAGS=-arch="amd64 386 arm ppc64 ppc64le" diff --git a/vendor/github.com/elastic/beats/metricbeat/NOTICE.txt b/vendor/github.com/elastic/beats/metricbeat/NOTICE.txt new file mode 100644 index 00000000..c2bca21b Binary files /dev/null and b/vendor/github.com/elastic/beats/metricbeat/NOTICE.txt differ diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/config.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/config.go new file mode 100644 index 00000000..7dbf2b2d --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/config.go @@ -0,0 +1,14 @@ +package token + +import "github.com/elastic/beats/libbeat/processors" + +type config struct { + TokenPath string `config:"token_path"` + ConditionConfig *processors.ConditionConfig `config:"condition"` +} + +func defaultConfig() config { + return config{ + TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token", + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/token.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/token.go new file mode 100644 index 00000000..acc81010 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/token.go @@ -0,0 +1,123 @@ +package token + +import ( + "fmt" + "io/ioutil" + + "github.com/elastic/beats/libbeat/autodiscover" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/processors" +) + +func init() { + autodiscover.Registry.AddAppender("kubernetes.token", NewTokenAppender) +} + +type tokenAppender struct { + TokenPath string + Condition *processors.Condition +} + +// NewTokenAppender creates a token appender that can append a bearer token required to authenticate with +// protected endpoints +func NewTokenAppender(cfg *common.Config) (autodiscover.Appender, error) { + cfgwarn.Deprecate("7.0.0", "token appender is deprecated in favor of bearer_token_file config parameter") + conf := defaultConfig() + + err := cfg.Unpack(&conf) + if err != nil { + return nil, fmt.Errorf("unable to unpack config due to error: %v", err) + } + + // Attempt to create a condition. If fails then report error + cond, err := processors.NewCondition(conf.ConditionConfig) + if err != nil { + return nil, fmt.Errorf("unable to create condition due to error: %v", err) + } + appender := tokenAppender{ + TokenPath: conf.TokenPath, + Condition: cond, + } + + return &appender, nil +} + +// Append picks up a token from a file and adds it to the headers.Authorization section of the metricbeat module +func (t *tokenAppender) Append(event bus.Event) { + cfgsRaw, ok := event["config"] + // There are no configs + if !ok { + return + } + + cfgs, ok := cfgsRaw.([]*common.Config) + // Config key doesnt have an array of config objects + if !ok { + return + } + + // Check if the condition is met. Attempt to append only if that is the case. 
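+	// When the condition passes, getAuthHeaderFromToken (below) reads the token file,
+	// strips a trailing newline, and the resulting "Bearer <token>" value is injected
+	// into each config's headers.Authorization.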
+ if t.Condition == nil || t.Condition.Check(common.MapStr(event)) == true { + tok := t.getAuthHeaderFromToken() + // If token is empty then just return + if tok == "" { + return + } + for i := 0; i < len(cfgs); i++ { + // Unpack the config + cfg := cfgs[i] + c := common.MapStr{} + err := cfg.Unpack(&c) + if err != nil { + logp.Debug("kubernetes.config", "unable to unpack config due to error: %v", err) + continue + } + var headers common.MapStr + if hRaw, ok := c["headers"]; ok { + // If headers is not a map then continue to next config + if headers, ok = hRaw.(common.MapStr); !ok { + continue + } + } else { + headers = common.MapStr{} + } + + // Assign authorization header and add it back to the config + headers["Authorization"] = tok + c["headers"] = headers + + // Repack the configuration + newCfg, err := common.NewConfigFrom(&c) + if err != nil { + logp.Debug("kubernetes.config", "unable to repack config due to error: %v", err) + continue + } + cfgs[i] = newCfg + } + + event["config"] = cfgs + } +} + +func (t *tokenAppender) getAuthHeaderFromToken() string { + var token string + + if t.TokenPath != "" { + b, err := ioutil.ReadFile(t.TokenPath) + if err != nil { + logp.Err("Reading token file failed with err: %v", err) + } + + if len(b) != 0 { + if b[len(b)-1] == '\n' { + b = b[0 : len(b)-1] + } + token = fmt.Sprintf("Bearer %s", string(b)) + } + } + + return token +} diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/token_test.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/token_test.go new file mode 100644 index 00000000..1721762b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token/token_test.go @@ -0,0 +1,91 @@ +package token + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" +) + +func TestTokenAppender(t *testing.T) { + tests := []struct { + eventConfig string + event bus.Event + result common.MapStr + config string + }{ + // Appender without a condition should apply the config regardless + // Empty event config should return a config with only the headers + { + event: bus.Event{}, + result: common.MapStr{ + "headers": map[string]interface{}{ + "Authorization": "Bearer foo bar", + }, + }, + eventConfig: "", + config: ` +token_path: "test" +`, + }, + // Metricbeat module config should return a config that has headers section + { + event: bus.Event{}, + result: common.MapStr{ + "module": "prometheus", + "hosts": []interface{}{"1.2.3.4:8080"}, + "headers": map[string]interface{}{ + "Authorization": "Bearer foo bar", + }, + }, + eventConfig: ` +module: prometheus +hosts: ["1.2.3.4:8080"] +`, + config: ` +token_path: "test" +`, + }, + } + + for _, test := range tests { + config, err := common.NewConfigWithYAML([]byte(test.config), "") + if err != nil { + t.Fatal(err) + } + + eConfig, err := common.NewConfigWithYAML([]byte(test.eventConfig), "") + if err != nil { + t.Fatal(err) + } + + test.event["config"] = []*common.Config{eConfig} + writeFile("test", "foo bar") + + appender, err := NewTokenAppender(config) + assert.Nil(t, err) + assert.NotNil(t, appender) + + appender.Append(test.event) + cfgs, _ := test.event["config"].([]*common.Config) + assert.Equal(t, len(cfgs), 1) + + out := common.MapStr{} + cfgs[0].Unpack(&out) + + assert.Equal(t, out, test.result) + deleteFile("test") + } +} + +func writeFile(name, message string) { 
+ ioutil.WriteFile(name, []byte(message), os.ModePerm) +} + +func deleteFile(name string) { + os.Remove(name) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/beater/autodiscover.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/autodiscover.go similarity index 98% rename from vendor/github.com/elastic/beats/metricbeat/beater/autodiscover.go rename to vendor/github.com/elastic/beats/metricbeat/autodiscover/autodiscover.go index 503e9f4e..b9ceffd9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/beater/autodiscover.go +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/autodiscover.go @@ -1,4 +1,4 @@ -package beater +package autodiscover import ( "errors" diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/config.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/config.go new file mode 100644 index 00000000..d1f8c99a --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/config.go @@ -0,0 +1,15 @@ +package hints + +import "github.com/elastic/beats/metricbeat/mb" + +type config struct { + Key string `config:"key"` + Registry *mb.Register +} + +func defaultConfig() config { + return config{ + Key: "metrics", + Registry: mb.Registry, + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics.go new file mode 100644 index 00000000..fc9e284e --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics.go @@ -0,0 +1,170 @@ +package hints + +import ( + "fmt" + + "strings" + + "github.com/elastic/beats/libbeat/autodiscover" + "github.com/elastic/beats/libbeat/autodiscover/builder" + "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" +) + +func init() { + autodiscover.Registry.AddBuilder("hints", NewMetricHints) +} + +const ( + module = "module" + namespace = "namespace" + hosts = "hosts" + metricsets = "metricsets" + period = "period" + timeout = "timeout" + ssl = "ssl" + + defaultTimeout = "3s" + defaultPeriod = "1m" +) + +type metricHints struct { + Key string + Registry *mb.Register +} + +// NewMetricHints builds a new metrics builder based on hints +func NewMetricHints(cfg *common.Config) (autodiscover.Builder, error) { + cfgwarn.Beta("The hints builder is beta") + config := defaultConfig() + err := cfg.Unpack(&config) + + if err != nil { + return nil, fmt.Errorf("unable to unpack hints config due to error: %v", err) + } + + return &metricHints{config.Key, config.Registry}, nil +} + +// Create configs based on hints passed from providers +func (m *metricHints) CreateConfig(event bus.Event) []*common.Config { + var config []*common.Config + host, _ := event["host"].(string) + if host == "" { + return config + } + + port, _ := common.TryToInt(event["port"]) + + hints, ok := event["hints"].(common.MapStr) + if !ok { + return config + } + + mod := m.getModule(hints) + if mod == "" { + return config + } + + hsts := m.getHostsWithPort(hints, port) + ns := m.getNamespace(hints) + msets := m.getMetricSets(hints, mod) + tout := m.getTimeout(hints) + ival := m.getPeriod(hints) + sslConf := m.getSSLConfig(hints) + + moduleConfig := common.MapStr{ + "module": mod, + "metricsets": msets, + 
"hosts": hsts, + "timeout": tout, + "period": ival, + "enabled": true, + "ssl": sslConf, + } + + if ns != "" { + moduleConfig["namespace"] = ns + } + + logp.Debug("hints.builder", "generated config: %v", moduleConfig.String()) + + // Create config object + cfg, err := common.NewConfigFrom(moduleConfig) + if err != nil { + logp.Debug("hints.builder", "config merge failed with error: %v", err) + } + logp.Debug("hints.builder", "generated config: %v", *cfg) + config = append(config, cfg) + + // Apply information in event to the template to generate the final config + // This especially helps in a scenario where endpoints are configured as: + // co.elastic.metrics/hosts= "${data.host}:9090" + config = template.ApplyConfigTemplate(event, config) + return config +} + +func (m *metricHints) getModule(hints common.MapStr) string { + return builder.GetHintString(hints, m.Key, module) +} + +func (m *metricHints) getMetricSets(hints common.MapStr, module string) []string { + var msets []string + var err error + msets = builder.GetHintAsList(hints, m.Key, metricsets) + + if len(msets) == 0 { + // If no metricset list is given, take module defaults + // fallback to all metricsets if module has no defaults + msets, err = m.Registry.DefaultMetricSets(module) + if err != nil || len(msets) == 0 { + msets = m.Registry.MetricSets(module) + } + } + + return msets +} + +func (m *metricHints) getHostsWithPort(hints common.MapStr, port int) []string { + var result []string + thosts := builder.GetHintAsList(hints, m.Key, hosts) + + // Only pick hosts that have ${data.port} or the port on current event. This will make + // sure that incorrect meta mapping doesn't happen + for _, h := range thosts { + if strings.Contains(h, "data.port") || strings.Contains(h, fmt.Sprintf(":%d", port)) || + // Use the event that has no port config if there is a ${data.host}:9090 like input + (port == 0 && strings.Contains(h, "data.host")) { + result = append(result, h) + } + } + + return result +} + +func (m *metricHints) getNamespace(hints common.MapStr) string { + return builder.GetHintString(hints, m.Key, namespace) +} + +func (m *metricHints) getPeriod(hints common.MapStr) string { + if ival := builder.GetHintString(hints, m.Key, period); ival != "" { + return ival + } + + return defaultPeriod +} + +func (m *metricHints) getTimeout(hints common.MapStr) string { + if tout := builder.GetHintString(hints, m.Key, timeout); tout != "" { + return tout + } + return defaultTimeout +} + +func (m *metricHints) getSSLConfig(hints common.MapStr) common.MapStr { + return builder.GetHintMapStr(hints, m.Key, ssl) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics_test.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics_test.go new file mode 100644 index 00000000..332bd88b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics_test.go @@ -0,0 +1,207 @@ +package hints + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/metricbeat/mb" +) + +func TestGenerateHints(t *testing.T) { + tests := []struct { + message string + event bus.Event + len int + result common.MapStr + }{ + { + message: "Empty event hints should return empty config", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "docker": 
common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + }, + len: 0, + result: common.MapStr{}, + }, + { + message: "Hints without host should return nothing", + event: bus.Event{ + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "mockmodule", + }, + }, + }, + len: 0, + result: common.MapStr{}, + }, + { + message: "Only module hint should return all metricsets", + event: bus.Event{ + "host": "1.2.3.4", + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "mockmodule", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "mockmodule", + "metricsets": []string{"one", "two"}, + "timeout": "3s", + "period": "1m", + "enabled": true, + }, + }, + { + message: "metricsets hint works", + event: bus.Event{ + "host": "1.2.3.4", + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "mockmodule", + "metricsets": "one", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "mockmodule", + "metricsets": []string{"one"}, + "timeout": "3s", + "period": "1m", + "enabled": true, + }, + }, + { + message: "Only module, it should return defaults", + event: bus.Event{ + "host": "1.2.3.4", + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "mockmoduledefaults", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "mockmoduledefaults", + "metricsets": []string{"default"}, + "timeout": "3s", + "period": "1m", + "enabled": true, + }, + }, + { + message: "Module, namespace, host hint should return valid config with port should return hosts for " + + "docker host network scenario", + event: bus.Event{ + "host": "1.2.3.4", + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "mockmoduledefaults", + "namespace": "test", + "hosts": "${data.host}:9090", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "mockmoduledefaults", + "namespace": "test", + "metricsets": []string{"default"}, + "timeout": "3s", + "period": "1m", + "enabled": true, + "hosts": []interface{}{"1.2.3.4:9090"}, + }, + }, + { + message: "Module, namespace, host hint should return valid config", + event: bus.Event{ + "host": "1.2.3.4", + "port": 9090, + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "mockmoduledefaults", + "namespace": "test", + "hosts": "${data.host}:9090", + }, + }, + }, + len: 1, + result: common.MapStr{ + "module": "mockmoduledefaults", + "namespace": "test", + "metricsets": []string{"default"}, + "hosts": []interface{}{"1.2.3.4:9090"}, + "timeout": "3s", + "period": "1m", + "enabled": true, + }, + }, + } + for _, test := range tests { + mockRegister := mb.NewRegister() + mockRegister.MustAddMetricSet("mockmodule", "one", NewMockMetricSet, mb.DefaultMetricSet()) + mockRegister.MustAddMetricSet("mockmodule", "two", NewMockMetricSet, mb.DefaultMetricSet()) + mockRegister.MustAddMetricSet("mockmoduledefaults", "default", NewMockMetricSet, mb.DefaultMetricSet()) + mockRegister.MustAddMetricSet("mockmoduledefaults", "other", NewMockMetricSet) + + m := metricHints{ + Key: defaultConfig().Key, + Registry: mockRegister, + } + cfgs := m.CreateConfig(test.event) + assert.Equal(t, len(cfgs), test.len) + + if test.len != 0 { + config := common.MapStr{} + err := cfgs[0].Unpack(&config) + assert.Nil(t, err, test.message) + + // metricests order is random, order it for tests + if v, err := config.GetValue("metricsets"); err == nil { + if msets, ok := v.([]interface{}); ok { + metricsets := make([]string, len(msets)) + for i, v := range msets { + metricsets[i] = v.(string) + } + 
sort.Strings(metricsets) + config["metricsets"] = metricsets + } + } + + assert.Equal(t, test.result, config, test.message) + } + + } +} + +type MockMetricSet struct { + mb.BaseMetricSet +} + +func NewMockMetricSet(base mb.BaseMetricSet) (mb.MetricSet, error) { + return &MockMetricSet{}, nil +} + +func (ms *MockMetricSet) Fetch(report mb.Reporter) { + +} diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/include.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/include.go new file mode 100644 index 00000000..b2f72b50 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/include.go @@ -0,0 +1,9 @@ +package autodiscover + +import ( + // include all metricbeat specific builders + _ "github.com/elastic/beats/metricbeat/autodiscover/builder/hints" + + // include all metricbeat specific appenders + _ "github.com/elastic/beats/metricbeat/autodiscover/appender/kubernetes/token" +) diff --git a/vendor/github.com/elastic/beats/metricbeat/beater/metricbeat.go b/vendor/github.com/elastic/beats/metricbeat/beater/metricbeat.go index 21882791..2217ae4a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/beater/metricbeat.go +++ b/vendor/github.com/elastic/beats/metricbeat/beater/metricbeat.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" + mbautodiscover "github.com/elastic/beats/metricbeat/autodiscover" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/module" @@ -146,7 +147,7 @@ func newMetricbeat(b *beat.Beat, c *common.Config, options ...Option) (*Metricbe if config.Autodiscover != nil { var err error factory := module.NewFactory(b.Publisher, metricbeat.moduleOptions...) - adapter := NewAutodiscoverAdapter(factory) + adapter := mbautodiscover.NewAutodiscoverAdapter(factory) metricbeat.autodiscover, err = autodiscover.NewAutodiscover("metricbeat", adapter, config.Autodiscover) if err != nil { return nil, err diff --git a/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml b/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml index c1a04e84..537ecaab 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml +++ b/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml @@ -1,12 +1,12 @@ version: '2' services: beat: - build: . + build: ${PWD}/. 
environment: - TEST_ENVIRONMENT=false working_dir: /go/src/github.com/elastic/beats/metricbeat volumes: - - ./..:/go/src/github.com/elastic/beats/ + - ${PWD}/..:/go/src/github.com/elastic/beats/ # This is required to on-demand launching the rest on containers for tests & also docker module tests: - /var/run/docker.sock:/var/run/docker.sock command: make @@ -27,6 +27,7 @@ services: - ./module/logstash/_meta/env - ./module/memcached/_meta/env - ./module/mongodb/_meta/env + - ./module/munin/_meta/env - ./module/mysql/_meta/env - ./module/nginx/_meta/env - ./module/php_fpm/_meta/env @@ -44,6 +45,11 @@ services: apache: build: ./module/apache/_meta + apache_2_4_12: + build: + context: ./module/apache/_meta + dockerfile: Dockerfile.2.4.12 + ceph: build: ./module/ceph/_meta @@ -68,6 +74,16 @@ services: haproxy: build: ./module/haproxy/_meta + haproxy_1_6: + build: + context: ./module/haproxy/_meta + dockerfile: Dockerfile.1.6 + + haproxy_1_7: + build: + context: ./module/haproxy/_meta + dockerfile: Dockerfile.1.7 + http: build: ./module/http/_meta @@ -107,6 +123,9 @@ services: mongodb: build: ./module/mongodb/_meta + munin: + build: ./module/munin/_meta + mysql: build: ./module/mysql/_meta diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-docker-config.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-docker-config.asciidoc index 75be8f34..e013b826 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-docker-config.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-docker-config.asciidoc @@ -8,7 +8,7 @@ metricbeat.autodiscover: templates: - condition: contains: - docker.container.image: "redis" + docker.container.image: redis config: - module: redis metricsets: ["info", "keyspace"] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-hints.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-hints.asciidoc new file mode 100644 index 00000000..4d321413 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-hints.asciidoc @@ -0,0 +1,112 @@ +{beatname_uc} supports autodiscover based on hints from the provider. The `hints` system looks for +hints in Kubernetes Pod annotations or Docker labels which have the prefix `co.elastic.metrics`. As soon as +the container starts, {beatname_uc} will check if it contains any hints and launch the proper config for +it. Hints tell {beatname_uc} how to get metrics for the given container. This is the full list of supported hints: + +[float] +===== `co.elastic.metrics/module` + +{beatname_uc} module to use to fetch metrics. See <> for the list of supported modules. + +[float] +===== `co.elastic.metrics/hosts` + +Hosts setting to use with the given module. Hosts can include `${data.host}` and `${data.port}` +values from the autodiscover event, ie: `${data.host}:80`. + +[float] +===== `co.elastic.metrics/metricsets` + +List of metricsets to use, comma separated. If no metricsets are provided, default metricsets for the module +are used. + +[float] +===== `co.elastic.metrics/period` + +The time interval for metrics retrieval, ie: 10s + +[float] +===== `co.elastic.metrics/timeout` + +Metrics retrieval timeout, default: 3s + +[float] +===== `co.elastic.metrics/ssl.*` + +SSL parameters, as seen in <>. + + + +[float] +=== Kubernetes + +Kubernetes autodiscover provider supports hints in Pod annotations. 
To enable it just set `hints.enabled`:
+
+["source","yaml",subs="attributes"]
+-------------------------------------------------------------------------------------
+metricbeat.autodiscover:
+  providers:
+    - type: kubernetes
+      hints.enabled: true
+-------------------------------------------------------------------------------------
+
+This configuration enables the `hints` autodiscover for Kubernetes.
+
+You can annotate Kubernetes Pods with useful info to spin up {beatname_uc} modules:
+
+["source","yaml",subs="attributes"]
+-------------------------------------------------------------------------------------
+annotations:
+  co.elastic.metrics/module: prometheus
+  co.elastic.metrics/metricsets: collector
+  co.elastic.metrics/hosts: '${data.host}:9090'
+  co.elastic.metrics/period: 1m
+-------------------------------------------------------------------------------------
+
+The above annotations spin up a Prometheus `collector` metricset that polls the
+parent container on port `9090` at a 1 minute interval.
+
+[float]
+===== Multiple containers
+
+When a Pod has multiple containers, these settings are shared unless you put the container name in the
+hint. For example, these hints configure a common behavior for all containers in the Pod, and set a specific
+`hosts` hint for the container called `sidecar`:
+
+["source","yaml",subs="attributes"]
+-------------------------------------------------------------------------------------
+annotations:
+  co.elastic.metrics/module: apache
+  co.elastic.metrics/hosts: '${data.host}:80'
+  co.elastic.metrics.sidecar/hosts: '${data.host}:8080'
+-------------------------------------------------------------------------------------
+
+[float]
+=== Docker
+
+Docker autodiscover provider supports hints in labels. To enable it just set `hints.enabled`:
+
+["source","yaml",subs="attributes"]
+-------------------------------------------------------------------------------------
+metricbeat.autodiscover:
+  providers:
+    - type: docker
+      hints.enabled: true
+-------------------------------------------------------------------------------------
+
+You can label Docker containers with useful info to spin up {beatname_uc} modules, for example:
+
+["source","yaml",subs="attributes"]
+-------------------------------------------------------------------------------------
+  co.elastic.metrics/module: nginx
+  co.elastic.metrics/metricsets: stubstatus
+  co.elastic.metrics/hosts: '${data.host}:80'
+  co.elastic.metrics/period: 10s
+-------------------------------------------------------------------------------------
+
+The above labels configure {beatname_uc} to launch the Nginx module with the `stubstatus` metricset,
+polling port `80` of the Docker container every 10 seconds.
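As a rough illustration of how the hints builder added in metricbeat/autodiscover/builder/hints/metrics.go consumes these labels, here is a minimal test-style sketch modeled on the package's own metrics_test.go. The event shape, the 172.17.0.2 address, and the TestDockerLabelHintsSketch name are illustrative assumptions, and NewMockMetricSet is the mock constructor already defined in that test file.

package hints

import (
	"testing"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/common/bus"
	"github.com/elastic/beats/metricbeat/mb"
)

// TestDockerLabelHintsSketch traces the Docker label example above through the
// hints builder: the co.elastic.metrics/* labels arrive on the autodiscover
// event under "hints", and CreateConfig turns them into a module config.
func TestDockerLabelHintsSketch(t *testing.T) {
	// A registry entry for the module named in the labels; NewMockMetricSet is
	// the mock constructor from metrics_test.go (assumed to be in scope because
	// this sketch lives in the same test package).
	reg := mb.NewRegister()
	reg.MustAddMetricSet("nginx", "stubstatus", NewMockMetricSet, mb.DefaultMetricSet())

	m := metricHints{Key: defaultConfig().Key, Registry: reg}

	// Roughly the event a Docker autodiscover provider would emit for a
	// container carrying the labels shown in the docs above.
	event := bus.Event{
		"host": "172.17.0.2",
		"port": 80,
		"hints": common.MapStr{
			"metrics": common.MapStr{
				"module":     "nginx",
				"metricsets": "stubstatus",
				"hosts":      "${data.host}:80",
				"period":     "10s",
			},
		},
	}

	cfgs := m.CreateConfig(event)
	if len(cfgs) != 1 {
		t.Fatalf("expected one generated config, got %d", len(cfgs))
	}

	out := common.MapStr{}
	if err := cfgs[0].Unpack(&out); err != nil {
		t.Fatal(err)
	}
	// Expected shape: module=nginx, metricsets=[stubstatus],
	// hosts=[172.17.0.2:80], period=10s, timeout=3s (the default).
	t.Logf("generated config: %v", out)
}

Run under `go test` in the hints package, this should yield a single config equivalent to a static `- module: nginx` entry with `metricsets: [stubstatus]`, `hosts: ["172.17.0.2:80"]`, and `period: 10s`, mirroring the existing cases in metrics_test.go.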
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-kubernetes-config.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-kubernetes-config.asciidoc index 51d2b4e8..27c222be 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-kubernetes-config.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-kubernetes-config.asciidoc @@ -17,3 +17,6 @@ metricbeat.autodiscover: ------------------------------------------------------------------------------------- This configuration launches a `prometheus` module for all containers of pods annotated `prometheus.io.scrape=true`. +There are cases where the PodSpec does not expose a port. In such cases the host can be provided as `${data.host}:9090` +directly. However, the metadata which is used to enrich the metric would not have information regarding the container since +the discovery mechanism would not have information on which container the port maps to. diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc index 3b89b0f5..4c0763fa 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc @@ -27,15 +27,18 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> * <> * <> * <> +* <> * <> * <> * <> +* <> * <> * <> * <> @@ -84,38 +87,46 @@ Client delete transactions stats. -[float] -=== `aerospike.namespace.client.delete.error` - +*`aerospike.namespace.client.delete.error`*:: ++ +-- type: long Number of client delete transactions that failed with an error. -[float] -=== `aerospike.namespace.client.delete.not_found` +-- +*`aerospike.namespace.client.delete.not_found`*:: ++ +-- type: long Number of client delete transactions that resulted in a not found. -[float] -=== `aerospike.namespace.client.delete.success` +-- +*`aerospike.namespace.client.delete.success`*:: ++ +-- type: long Number of successful client delete transactions. -[float] -=== `aerospike.namespace.client.delete.timeout` +-- +*`aerospike.namespace.client.delete.timeout`*:: ++ +-- type: long Number of client delete transactions that timed out. +-- + [float] == read fields @@ -123,38 +134,46 @@ Client read transactions stats. -[float] -=== `aerospike.namespace.client.read.error` - +*`aerospike.namespace.client.read.error`*:: ++ +-- type: long Number of client read transaction errors. -[float] -=== `aerospike.namespace.client.read.not_found` +-- +*`aerospike.namespace.client.read.not_found`*:: ++ +-- type: long Number of client read transaction that resulted in not found. -[float] -=== `aerospike.namespace.client.read.success` +-- +*`aerospike.namespace.client.read.success`*:: ++ +-- type: long Number of successful client read transactions. -[float] -=== `aerospike.namespace.client.read.timeout` +-- +*`aerospike.namespace.client.read.timeout`*:: ++ +-- type: long Number of client read transaction that timed out. +-- + [float] == write fields @@ -162,30 +181,36 @@ Client write transactions stats. -[float] -=== `aerospike.namespace.client.write.error` - +*`aerospike.namespace.client.write.error`*:: ++ +-- type: long Number of client write transactions that failed with an error. -[float] -=== `aerospike.namespace.client.write.success` +-- +*`aerospike.namespace.client.write.success`*:: ++ +-- type: long Number of successful client write transactions. 
-[float] -=== `aerospike.namespace.client.write.timeout` +-- +*`aerospike.namespace.client.write.timeout`*:: ++ +-- type: long Number of client write transactions that timed out. +-- + [float] == device fields @@ -193,9 +218,9 @@ Disk storage stats -[float] -=== `aerospike.namespace.device.available.pct` - +*`aerospike.namespace.device.available.pct`*:: ++ +-- type: scaled_float format: percent @@ -203,9 +228,11 @@ format: percent Measures the minimum contiguous disk space across all disks in a namespace. -[float] -=== `aerospike.namespace.device.free.pct` +-- +*`aerospike.namespace.device.free.pct`*:: ++ +-- type: scaled_float format: percent @@ -213,9 +240,11 @@ format: percent Percentage of disk capacity free for this namespace. -[float] -=== `aerospike.namespace.device.total.bytes` +-- +*`aerospike.namespace.device.total.bytes`*:: ++ +-- type: long format: bytes @@ -223,9 +252,11 @@ format: bytes Total bytes of disk space allocated to this namespace on this node. -[float] -=== `aerospike.namespace.device.used.bytes` +-- +*`aerospike.namespace.device.used.bytes`*:: ++ +-- type: long format: bytes @@ -233,14 +264,18 @@ format: bytes Total bytes of disk space used by this namespace on this node. -[float] -=== `aerospike.namespace.hwm_breached` +-- +*`aerospike.namespace.hwm_breached`*:: ++ +-- type: boolean If true, Aerospike has breached 'high-water-[disk|memory]-pct' for this namespace. +-- + [float] == memory fields @@ -248,9 +283,9 @@ Memory storage stats. -[float] -=== `aerospike.namespace.memory.free.pct` - +*`aerospike.namespace.memory.free.pct`*:: ++ +-- type: scaled_float format: percent @@ -258,9 +293,11 @@ format: percent Percentage of memory capacity free for this namespace on this node. -[float] -=== `aerospike.namespace.memory.used.data.bytes` +-- +*`aerospike.namespace.memory.used.data.bytes`*:: ++ +-- type: long format: bytes @@ -268,9 +305,11 @@ format: bytes Amount of memory occupied by data for this namespace on this node. -[float] -=== `aerospike.namespace.memory.used.index.bytes` +-- +*`aerospike.namespace.memory.used.index.bytes`*:: ++ +-- type: long format: bytes @@ -278,9 +317,11 @@ format: bytes Amount of memory occupied by the index for this namespace on this node. -[float] -=== `aerospike.namespace.memory.used.sindex.bytes` +-- +*`aerospike.namespace.memory.used.sindex.bytes`*:: ++ +-- type: long format: bytes @@ -288,9 +329,11 @@ format: bytes Amount of memory occupied by secondary indexes for this namespace on this node. -[float] -=== `aerospike.namespace.memory.used.total.bytes` +-- +*`aerospike.namespace.memory.used.total.bytes`*:: ++ +-- type: long format: bytes @@ -298,27 +341,35 @@ format: bytes Total bytes of memory used by this namespace on this node. -[float] -=== `aerospike.namespace.name` +-- +*`aerospike.namespace.name`*:: ++ +-- type: keyword Namespace name -[float] -=== `aerospike.namespace.node.host` +-- +*`aerospike.namespace.node.host`*:: ++ +-- type: keyword -[float] -=== `aerospike.namespace.node.name` +-- +*`aerospike.namespace.node.name`*:: ++ +-- type: keyword Node name +-- + [float] == objects fields @@ -326,30 +377,36 @@ Records stats. -[float] -=== `aerospike.namespace.objects.master` - +*`aerospike.namespace.objects.master`*:: ++ +-- type: long Number of records on this node which are active masters. -[float] -=== `aerospike.namespace.objects.total` +-- +*`aerospike.namespace.objects.total`*:: ++ +-- type: long Number of records in this namespace for this node. 
-[float] -=== `aerospike.namespace.stop_writes` +-- +*`aerospike.namespace.stop_writes`*:: ++ +-- type: boolean If true this namespace is currently not allowing writes. +-- + [[exported-fields-apache]] == Apache fields @@ -371,70 +428,86 @@ Apache HTTPD server metricsets collected from the Apache web server. -[float] -=== `apache.status.hostname` - +*`apache.status.hostname`*:: ++ +-- type: keyword Apache hostname. -[float] -=== `apache.status.total_accesses` +-- +*`apache.status.total_accesses`*:: ++ +-- type: long Total number of access requests. -[float] -=== `apache.status.total_kbytes` +-- +*`apache.status.total_kbytes`*:: ++ +-- type: long Total number of kilobytes served. -[float] -=== `apache.status.requests_per_sec` +-- +*`apache.status.requests_per_sec`*:: ++ +-- type: scaled_float Requests per second. -[float] -=== `apache.status.bytes_per_sec` +-- +*`apache.status.bytes_per_sec`*:: ++ +-- type: scaled_float Bytes per second. -[float] -=== `apache.status.bytes_per_request` +-- +*`apache.status.bytes_per_request`*:: ++ +-- type: scaled_float Bytes per request. -[float] -=== `apache.status.workers.busy` +-- +*`apache.status.workers.busy`*:: ++ +-- type: long Number of busy workers. -[float] -=== `apache.status.workers.idle` +-- +*`apache.status.workers.idle`*:: ++ +-- type: long Number of idle workers. +-- + [float] == uptime fields @@ -442,22 +515,26 @@ Uptime stats. -[float] -=== `apache.status.uptime.server_uptime` - +*`apache.status.uptime.server_uptime`*:: ++ +-- type: long Server uptime in seconds. -[float] -=== `apache.status.uptime.uptime` +-- +*`apache.status.uptime.uptime`*:: ++ +-- type: long Server uptime. +-- + [float] == cpu fields @@ -465,46 +542,56 @@ CPU stats. -[float] -=== `apache.status.cpu.load` - +*`apache.status.cpu.load`*:: ++ +-- type: scaled_float CPU Load. -[float] -=== `apache.status.cpu.user` +-- +*`apache.status.cpu.user`*:: ++ +-- type: scaled_float CPU user load. -[float] -=== `apache.status.cpu.system` +-- +*`apache.status.cpu.system`*:: ++ +-- type: scaled_float System cpu. -[float] -=== `apache.status.cpu.children_user` +-- +*`apache.status.cpu.children_user`*:: ++ +-- type: scaled_float CPU of children user. -[float] -=== `apache.status.cpu.children_system` +-- +*`apache.status.cpu.children_system`*:: ++ +-- type: scaled_float CPU of children system. +-- + [float] == connections fields @@ -512,38 +599,46 @@ Connection stats. -[float] -=== `apache.status.connections.total` - +*`apache.status.connections.total`*:: ++ +-- type: long Total connections. -[float] -=== `apache.status.connections.async.writing` +-- +*`apache.status.connections.async.writing`*:: ++ +-- type: long Async connection writing. -[float] -=== `apache.status.connections.async.keep_alive` +-- +*`apache.status.connections.async.keep_alive`*:: ++ +-- type: long Async keeped alive connections. -[float] -=== `apache.status.connections.async.closing` +-- +*`apache.status.connections.async.closing`*:: ++ +-- type: long Async closed connections. +-- + [float] == load fields @@ -551,30 +646,36 @@ Load averages. -[float] -=== `apache.status.load.1` - +*`apache.status.load.1`*:: ++ +-- type: scaled_float Load average for the last minute. -[float] -=== `apache.status.load.5` +-- +*`apache.status.load.5`*:: ++ +-- type: scaled_float Load average for the last 5 minutes. -[float] -=== `apache.status.load.15` +-- +*`apache.status.load.15`*:: ++ +-- type: scaled_float Load average for the last 15 minutes. +-- + [float] == scoreboard fields @@ -582,102 +683,126 @@ Scoreboard metrics. 
-[float] -=== `apache.status.scoreboard.starting_up` - +*`apache.status.scoreboard.starting_up`*:: ++ +-- type: long Starting up. -[float] -=== `apache.status.scoreboard.reading_request` +-- +*`apache.status.scoreboard.reading_request`*:: ++ +-- type: long Reading requests. -[float] -=== `apache.status.scoreboard.sending_reply` +-- +*`apache.status.scoreboard.sending_reply`*:: ++ +-- type: long Sending Reply. -[float] -=== `apache.status.scoreboard.keepalive` +-- +*`apache.status.scoreboard.keepalive`*:: ++ +-- type: long Keep alive. -[float] -=== `apache.status.scoreboard.dns_lookup` +-- +*`apache.status.scoreboard.dns_lookup`*:: ++ +-- type: long Dns Lookups. -[float] -=== `apache.status.scoreboard.closing_connection` +-- +*`apache.status.scoreboard.closing_connection`*:: ++ +-- type: long Closing connections. -[float] -=== `apache.status.scoreboard.logging` +-- +*`apache.status.scoreboard.logging`*:: ++ +-- type: long Logging -[float] -=== `apache.status.scoreboard.gracefully_finishing` +-- +*`apache.status.scoreboard.gracefully_finishing`*:: ++ +-- type: long Gracefully finishing. -[float] -=== `apache.status.scoreboard.idle_cleanup` +-- +*`apache.status.scoreboard.idle_cleanup`*:: ++ +-- type: long Idle cleanups. -[float] -=== `apache.status.scoreboard.open_slot` +-- +*`apache.status.scoreboard.open_slot`*:: ++ +-- type: long Open slots. -[float] -=== `apache.status.scoreboard.waiting_for_connection` +-- +*`apache.status.scoreboard.waiting_for_connection`*:: ++ +-- type: long Waiting for connections. -[float] -=== `apache.status.scoreboard.total` +-- +*`apache.status.scoreboard.total`*:: ++ +-- type: long Total. +-- + [[exported-fields-beat]] == Beat fields @@ -685,33 +810,41 @@ Contains common beat fields available in all event types. -[float] -=== `beat.name` - +*`beat.name`*:: ++ +-- The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. -[float] -=== `beat.hostname` +-- +*`beat.hostname`*:: ++ +-- The hostname as returned by the operating system on which the Beat is running. -[float] -=== `beat.timezone` +-- +*`beat.timezone`*:: ++ +-- The timezone as returned by the operating system on which the Beat is running. -[float] -=== `beat.version` +-- +*`beat.version`*:: ++ +-- The version of the beat that generated this event. -[float] -=== `@timestamp` +-- +*`@timestamp`*:: ++ +-- type: date example: August 26th 2016, 12:35:53.332 @@ -723,20 +856,26 @@ required: True The timestamp when the event log record was generated. -[float] -=== `tags` +-- +*`tags`*:: ++ +-- Arbitrary tags that can be set per Beat and per transaction type. -[float] -=== `fields` +-- +*`fields`*:: ++ +-- type: object Contains user configurable fields. +-- + [float] == error fields @@ -744,30 +883,36 @@ Error fields containing additional info in case of errors. -[float] -=== `error.message` - +*`error.message`*:: ++ +-- type: text Error message. -[float] -=== `error.code` +-- +*`error.code`*:: ++ +-- type: long Error code. -[float] -=== `error.type` +-- +*`error.type`*:: ++ +-- type: keyword Error type. 
+-- + [[exported-fields-ceph]] == Ceph fields @@ -789,9 +934,9 @@ cluster_disk -[float] -=== `ceph.cluster_disk.available.bytes` - +*`ceph.cluster_disk.available.bytes`*:: ++ +-- type: long format: bytes @@ -799,9 +944,11 @@ format: bytes Available bytes of the cluster -[float] -=== `ceph.cluster_disk.total.bytes` +-- +*`ceph.cluster_disk.total.bytes`*:: ++ +-- type: long format: bytes @@ -809,9 +956,11 @@ format: bytes Total bytes of the cluster -[float] -=== `ceph.cluster_disk.used.bytes` +-- +*`ceph.cluster_disk.used.bytes`*:: ++ +-- type: long format: bytes @@ -819,6 +968,8 @@ format: bytes Used bytes of the cluster +-- + [float] == cluster_health fields @@ -826,38 +977,46 @@ cluster_health -[float] -=== `ceph.cluster_health.overall_status` - +*`ceph.cluster_health.overall_status`*:: ++ +-- type: keyword Overall status of the cluster -[float] -=== `ceph.cluster_health.timechecks.epoch` +-- +*`ceph.cluster_health.timechecks.epoch`*:: ++ +-- type: long Map version -[float] -=== `ceph.cluster_health.timechecks.round.value` +-- +*`ceph.cluster_health.timechecks.round.value`*:: ++ +-- type: long timecheck round -[float] -=== `ceph.cluster_health.timechecks.round.status` +-- +*`ceph.cluster_health.timechecks.round.status`*:: ++ +-- type: keyword Status of the round +-- + [float] == cluster_status fields @@ -865,17 +1024,19 @@ cluster_status -[float] -=== `ceph.cluster_status.version` - +*`ceph.cluster_status.version`*:: ++ +-- type: long Ceph Status version -[float] -=== `ceph.cluster_status.traffic.read_bytes` +-- +*`ceph.cluster_status.traffic.read_bytes`*:: ++ +-- type: long format: bytes @@ -883,9 +1044,11 @@ format: bytes Cluster read throughput per second -[float] -=== `ceph.cluster_status.traffic.write_bytes` +-- +*`ceph.cluster_status.traffic.write_bytes`*:: ++ +-- type: long format: bytes @@ -893,41 +1056,51 @@ format: bytes Cluster write throughput per second -[float] -=== `ceph.cluster_status.traffic.read_op_per_sec` +-- +*`ceph.cluster_status.traffic.read_op_per_sec`*:: ++ +-- type: long Cluster read iops per second -[float] -=== `ceph.cluster_status.traffic.write_op_per_sec` +-- +*`ceph.cluster_status.traffic.write_op_per_sec`*:: ++ +-- type: long Cluster write iops per second -[float] -=== `ceph.cluster_status.misplace.total` +-- +*`ceph.cluster_status.misplace.total`*:: ++ +-- type: long Cluster misplace pg number -[float] -=== `ceph.cluster_status.misplace.objects` +-- +*`ceph.cluster_status.misplace.objects`*:: ++ +-- type: long Cluster misplace objects number -[float] -=== `ceph.cluster_status.misplace.ratio` +-- +*`ceph.cluster_status.misplace.ratio`*:: ++ +-- type: scaled_float format: percent @@ -935,25 +1108,31 @@ format: percent Cluster misplace ratio -[float] -=== `ceph.cluster_status.degraded.total` +-- +*`ceph.cluster_status.degraded.total`*:: ++ +-- type: long Cluster degraded pg number -[float] -=== `ceph.cluster_status.degraded.objects` +-- +*`ceph.cluster_status.degraded.objects`*:: ++ +-- type: long Cluster degraded objects number -[float] -=== `ceph.cluster_status.degraded.ratio` +-- +*`ceph.cluster_status.degraded.ratio`*:: ++ +-- type: scaled_float format: percent @@ -961,9 +1140,11 @@ format: percent Cluster degraded ratio -[float] -=== `ceph.cluster_status.pg.data_bytes` +-- +*`ceph.cluster_status.pg.data_bytes`*:: ++ +-- type: long format: bytes @@ -971,9 +1152,11 @@ format: bytes Cluster pg data bytes -[float] -=== `ceph.cluster_status.pg.avail_bytes` +-- +*`ceph.cluster_status.pg.avail_bytes`*:: ++ +-- type: long format: bytes @@ -981,9 +1164,11 @@ 
format: bytes Cluster available bytes -[float] -=== `ceph.cluster_status.pg.total_bytes` +-- +*`ceph.cluster_status.pg.total_bytes`*:: ++ +-- type: long format: bytes @@ -991,9 +1176,11 @@ format: bytes Cluster total bytes -[float] -=== `ceph.cluster_status.pg.used_bytes` +-- +*`ceph.cluster_status.pg.used_bytes`*:: ++ +-- type: long format: bytes @@ -1001,86 +1188,108 @@ format: bytes Cluster used bytes -[float] -=== `ceph.cluster_status.pg_state.state_name` +-- +*`ceph.cluster_status.pg_state.state_name`*:: ++ +-- type: long Pg state description -[float] -=== `ceph.cluster_status.pg_state.count` +-- +*`ceph.cluster_status.pg_state.count`*:: ++ +-- type: long Shows how many pgs are in state of pg_state.state_name -[float] -=== `ceph.cluster_status.pg_state.version` +-- +*`ceph.cluster_status.pg_state.version`*:: ++ +-- type: long Cluster status version -[float] -=== `ceph.cluster_status.osd.full` +-- +*`ceph.cluster_status.osd.full`*:: ++ +-- type: boolean Is osd full -[float] -=== `ceph.cluster_status.osd.nearfull` +-- +*`ceph.cluster_status.osd.nearfull`*:: ++ +-- type: boolean Is osd near full -[float] -=== `ceph.cluster_status.osd.num_osds` +-- +*`ceph.cluster_status.osd.num_osds`*:: ++ +-- type: long Shows how many osds in the cluster -[float] -=== `ceph.cluster_status.osd.num_up_osds` +-- +*`ceph.cluster_status.osd.num_up_osds`*:: ++ +-- type: long Shows how many osds are on the state of UP -[float] -=== `ceph.cluster_status.osd.num_in_osds` +-- +*`ceph.cluster_status.osd.num_in_osds`*:: ++ +-- type: long Shows how many osds are on the state of IN -[float] -=== `ceph.cluster_status.osd.num_remapped_pgs` +-- +*`ceph.cluster_status.osd.num_remapped_pgs`*:: ++ +-- type: long Shows how many osds are on the state of REMAPPED -[float] -=== `ceph.cluster_status.osd.epoch` +-- +*`ceph.cluster_status.osd.epoch`*:: ++ +-- type: long epoch number +-- + [float] == monitor_health fields @@ -1088,65 +1297,79 @@ monitor_health stats data -[float] -=== `ceph.monitor_health.available.pct` - +*`ceph.monitor_health.available.pct`*:: ++ +-- type: long Available percent of the MON -[float] -=== `ceph.monitor_health.health` +-- +*`ceph.monitor_health.health`*:: ++ +-- type: keyword Health of the MON -[float] -=== `ceph.monitor_health.available.kb` +-- +*`ceph.monitor_health.available.kb`*:: ++ +-- type: long Available KB of the MON -[float] -=== `ceph.monitor_health.total.kb` +-- +*`ceph.monitor_health.total.kb`*:: ++ +-- type: long Total KB of the MON -[float] -=== `ceph.monitor_health.used.kb` +-- +*`ceph.monitor_health.used.kb`*:: ++ +-- type: long Used KB of the MON -[float] -=== `ceph.monitor_health.last_updated` +-- +*`ceph.monitor_health.last_updated`*:: ++ +-- type: date Time when was updated -[float] -=== `ceph.monitor_health.name` +-- +*`ceph.monitor_health.name`*:: ++ +-- type: keyword Name of the MON -[float] -=== `ceph.monitor_health.store_stats.log.bytes` +-- +*`ceph.monitor_health.store_stats.log.bytes`*:: ++ +-- type: long format: bytes @@ -1154,9 +1377,11 @@ format: bytes Log bytes of MON -[float] -=== `ceph.monitor_health.store_stats.misc.bytes` +-- +*`ceph.monitor_health.store_stats.misc.bytes`*:: ++ +-- type: long format: bytes @@ -1164,9 +1389,11 @@ format: bytes Misc bytes of MON -[float] -=== `ceph.monitor_health.store_stats.sst.bytes` +-- +*`ceph.monitor_health.store_stats.sst.bytes`*:: ++ +-- type: long format: bytes @@ -1174,9 +1401,11 @@ format: bytes SST bytes of MON -[float] -=== `ceph.monitor_health.store_stats.total.bytes` +-- 
+*`ceph.monitor_health.store_stats.total.bytes`*:: ++ +-- type: long format: bytes @@ -1184,14 +1413,18 @@ format: bytes Total bytes of MON -[float] -=== `ceph.monitor_health.store_stats.last_updated` +-- +*`ceph.monitor_health.store_stats.last_updated`*:: ++ +-- type: long Last updated +-- + [float] == osd_df fields @@ -1199,33 +1432,39 @@ ceph osd disk usage information -[float] -=== `ceph.osd_df.id` - +*`ceph.osd_df.id`*:: ++ +-- type: long osd node id -[float] -=== `ceph.osd_df.name` +-- +*`ceph.osd_df.name`*:: ++ +-- type: text osd node name -[float] -=== `ceph.osd_df.device_class` +-- +*`ceph.osd_df.device_class`*:: ++ +-- type: keyword osd node type; valid types include hdd, ssd, etc. -[float] -=== `ceph.osd_df.total.byte` +-- +*`ceph.osd_df.total.byte`*:: ++ +-- type: long format: bytes @@ -1233,9 +1472,11 @@ format: bytes osd disk total volume -[float] -=== `ceph.osd_df.used.byte` +-- +*`ceph.osd_df.used.byte`*:: ++ +-- type: long format: bytes @@ -1243,9 +1484,11 @@ format: bytes osd disk usage volume -[float] -=== `ceph.osd_df.available.bytes` +-- +*`ceph.osd_df.available.bytes`*:: ++ +-- type: long format: bytes @@ -1253,17 +1496,21 @@ format: bytes osd disk available volume -[float] -=== `ceph.osd_df.pg_num` +-- +*`ceph.osd_df.pg_num`*:: ++ +-- type: long shows how many pgs are located on this osd -[float] -=== `ceph.osd_df.used.pct` +-- +*`ceph.osd_df.used.pct`*:: ++ +-- type: scaled_float format: percent @@ -1271,6 +1518,8 @@ format: percent osd disk usage percentage +-- + [float] == osd_tree fields @@ -1278,110 +1527,136 @@ ceph osd tree info -[float] -=== `ceph.osd_tree.id` - +*`ceph.osd_tree.id`*:: ++ +-- type: long osd or bucket node id -[float] -=== `ceph.osd_tree.name` +-- +*`ceph.osd_tree.name`*:: ++ +-- type: text osd or bucket node name -[float] -=== `ceph.osd_tree.type` +-- +*`ceph.osd_tree.type`*:: ++ +-- type: keyword osd or bucket node type; valid types include osd, host, root, etc. -[float] -=== `ceph.osd_tree.type_id` +-- +*`ceph.osd_tree.type_id`*:: ++ +-- type: long osd or bucket node typeID -[float] -=== `ceph.osd_tree.children` +-- +*`ceph.osd_tree.children`*:: ++ +-- type: text bucket children list, separated by commas. -[float] -=== `ceph.osd_tree.crush_weight` +-- +*`ceph.osd_tree.crush_weight`*:: ++ +-- type: float osd node crush weight -[float] -=== `ceph.osd_tree.depth` +-- +*`ceph.osd_tree.depth`*:: ++ +-- type: long node depth -[float] -=== `ceph.osd_tree.exists` +-- +*`ceph.osd_tree.exists`*:: ++ +-- type: boolean whether the node still exists (1-yes, 0-no) -[float] -=== `ceph.osd_tree.primary_affinity` +-- +*`ceph.osd_tree.primary_affinity`*:: ++ +-- type: float the weight of reading data from primary osd -[float] -=== `ceph.osd_tree.reweight` +-- +*`ceph.osd_tree.reweight`*:: ++ +-- type: long the reweight of osd -[float] -=== `ceph.osd_tree.status` +-- +*`ceph.osd_tree.status`*:: ++ +-- type: keyword status of the osd, either up or down -[float] -=== `ceph.osd_tree.device_class` +-- +*`ceph.osd_tree.device_class`*:: ++ +-- type: keyword the device class of osd, like hdd, ssd etc.
-[float] -=== `ceph.osd_tree.father` +-- +*`ceph.osd_tree.father`*:: ++ +-- type: keyword the parent node of this osd or bucket node +-- + [float] == pool_disk fields @@ -1389,25 +1664,29 @@ pool_disk -[float] -=== `ceph.pool_disk.id` - +*`ceph.pool_disk.id`*:: ++ +-- type: long Id of the pool -[float] -=== `ceph.pool_disk.name` +-- +*`ceph.pool_disk.name`*:: ++ +-- type: keyword Name of the pool -[float] -=== `ceph.pool_disk.stats.available.bytes` +-- +*`ceph.pool_disk.stats.available.bytes`*:: ++ +-- type: long format: bytes @@ -1415,17 +1694,21 @@ format: bytes Available bytes of the pool -[float] -=== `ceph.pool_disk.stats.objects` +-- +*`ceph.pool_disk.stats.objects`*:: ++ +-- type: long Number of objects of the pool -[float] -=== `ceph.pool_disk.stats.used.bytes` +-- +*`ceph.pool_disk.stats.used.bytes`*:: ++ +-- type: long format: bytes @@ -1433,14 +1716,18 @@ format: bytes Used bytes of the pool -[float] -=== `ceph.pool_disk.stats.used.kb` +-- +*`ceph.pool_disk.stats.used.kb`*:: ++ +-- type: long Used kb of the pool +-- + [[exported-fields-cloud]] == Cloud provider metadata fields @@ -1448,56 +1735,70 @@ Metadata from cloud providers added by the add_cloud_metadata processor. -[float] -=== `meta.cloud.provider` - +*`meta.cloud.provider`*:: ++ +-- example: ec2 Name of the cloud provider. Possible values are ec2, gce, or digitalocean. -[float] -=== `meta.cloud.instance_id` +-- +*`meta.cloud.instance_id`*:: ++ +-- Instance ID of the host machine. -[float] -=== `meta.cloud.instance_name` +-- +*`meta.cloud.instance_name`*:: ++ +-- Instance name of the host machine. -[float] -=== `meta.cloud.machine_type` +-- +*`meta.cloud.machine_type`*:: ++ +-- example: t2.medium Machine type of the host machine. -[float] -=== `meta.cloud.availability_zone` +-- +*`meta.cloud.availability_zone`*:: ++ +-- example: us-east-1c Availability zone in which this host is running. -[float] -=== `meta.cloud.project_id` +-- +*`meta.cloud.project_id`*:: ++ +-- example: project-x Name of the project in Google Cloud. -[float] -=== `meta.cloud.region` +-- +*`meta.cloud.region`*:: ++ +-- Region in which this host is running. +-- + [[exported-fields-common]] == Common fields @@ -1505,27 +1806,33 @@ Contains common fields available in all event types. -[float] -=== `metricset.module` - +*`metricset.module`*:: ++ +-- The name of the module that generated the event. -[float] -=== `metricset.name` +-- +*`metricset.name`*:: ++ +-- The name of the metricset that generated the event. -[float] -=== `metricset.host` +-- +*`metricset.host`*:: ++ +-- Hostname of the machine from which the metricset was collected. This field may not be present when the data was collected locally. -[float] -=== `metricset.rtt` +-- +*`metricset.rtt`*:: ++ +-- type: long required: True @@ -1533,17 +1840,21 @@ required: True Event round trip time in microseconds. -[float] -=== `metricset.namespace` +-- +*`metricset.namespace`*:: ++ +-- type: keyword Namespace of dynamic metricsets. -[float] -=== `type` +-- +*`type`*:: ++ +-- example: metricsets required: True @@ -1551,6 +1862,8 @@ required: True The document type. Always set to "doc". +-- + [[exported-fields-couchbase]] == Couchbase fields @@ -1572,25 +1885,29 @@ Couchbase bucket metrics. -[float] -=== `couchbase.bucket.name` - +*`couchbase.bucket.name`*:: ++ +-- type: keyword Name of the bucket. -[float] -=== `couchbase.bucket.type` +-- +*`couchbase.bucket.type`*:: ++ +-- type: keyword Type of the bucket. 
-[float] -=== `couchbase.bucket.data.used.bytes` +-- +*`couchbase.bucket.data.used.bytes`*:: ++ +-- type: long format: bytes @@ -1598,17 +1915,21 @@ format: bytes Size of user data within buckets of the specified state that are resident in RAM. -[float] -=== `couchbase.bucket.disk.fetches` +-- +*`couchbase.bucket.disk.fetches`*:: ++ +-- type: long Number of disk fetches. -[float] -=== `couchbase.bucket.disk.used.bytes` +-- +*`couchbase.bucket.disk.used.bytes`*:: ++ +-- type: long format: bytes @@ -1616,9 +1937,11 @@ format: bytes Amount of disk used (bytes). -[float] -=== `couchbase.bucket.memory.used.bytes` +-- +*`couchbase.bucket.memory.used.bytes`*:: ++ +-- type: long format: bytes @@ -1626,9 +1949,11 @@ format: bytes Amount of memory used by the bucket (bytes). -[float] -=== `couchbase.bucket.quota.ram.bytes` +-- +*`couchbase.bucket.quota.ram.bytes`*:: ++ +-- type: long format: bytes @@ -1636,9 +1961,11 @@ format: bytes Amount of RAM used by the bucket (bytes). -[float] -=== `couchbase.bucket.quota.use.pct` +-- +*`couchbase.bucket.quota.use.pct`*:: ++ +-- type: scaled_float format: percent @@ -1646,22 +1973,28 @@ format: percent Percentage of RAM used (for active objects) against the configured bucket size (%). -[float] -=== `couchbase.bucket.ops_per_sec` +-- +*`couchbase.bucket.ops_per_sec`*:: ++ +-- type: long Number of operations per second. -[float] -=== `couchbase.bucket.item_count` +-- +*`couchbase.bucket.item_count`*:: ++ +-- type: long Number of items associated with the bucket. +-- + [float] == cluster fields @@ -1669,9 +2002,9 @@ Couchbase cluster metrics. -[float] -=== `couchbase.cluster.hdd.free.bytes` - +*`couchbase.cluster.hdd.free.bytes`*:: ++ +-- type: long format: bytes @@ -1679,9 +2012,11 @@ format: bytes Free hard drive space in the cluster (bytes). -[float] -=== `couchbase.cluster.hdd.quota.total.bytes` +-- +*`couchbase.cluster.hdd.quota.total.bytes`*:: ++ +-- type: long format: bytes @@ -1689,9 +2024,11 @@ format: bytes Hard drive quota total for the cluster (bytes). -[float] -=== `couchbase.cluster.hdd.total.bytes` +-- +*`couchbase.cluster.hdd.total.bytes`*:: ++ +-- type: long format: bytes @@ -1699,9 +2036,11 @@ format: bytes Total hard drive space available to the cluster (bytes). -[float] -=== `couchbase.cluster.hdd.used.value.bytes` +-- +*`couchbase.cluster.hdd.used.value.bytes`*:: ++ +-- type: long format: bytes @@ -1709,9 +2048,11 @@ format: bytes Hard drive space used by the cluster (bytes). -[float] -=== `couchbase.cluster.hdd.used.by_data.bytes` +-- +*`couchbase.cluster.hdd.used.by_data.bytes`*:: ++ +-- type: long format: bytes @@ -1719,33 +2060,41 @@ format: bytes Hard drive space used by the data in the cluster (bytes). -[float] -=== `couchbase.cluster.max_bucket_count` +-- +*`couchbase.cluster.max_bucket_count`*:: ++ +-- type: long Max bucket count setting. -[float] -=== `couchbase.cluster.quota.index_memory.mb` +-- +*`couchbase.cluster.quota.index_memory.mb`*:: ++ +-- type: long Memory quota setting for the Index service (Mbyte). -[float] -=== `couchbase.cluster.quota.memory.mb` +-- +*`couchbase.cluster.quota.memory.mb`*:: ++ +-- type: long Memory quota setting for the cluster (Mbyte). -[float] -=== `couchbase.cluster.ram.quota.total.value.bytes` +-- +*`couchbase.cluster.ram.quota.total.value.bytes`*:: ++ +-- type: long format: bytes @@ -1753,9 +2102,11 @@ format: bytes RAM quota total for the cluster (bytes). 
-[float] -=== `couchbase.cluster.ram.quota.total.per_node.bytes` +-- +*`couchbase.cluster.ram.quota.total.per_node.bytes`*:: ++ +-- type: long format: bytes @@ -1763,9 +2114,11 @@ format: bytes RAM quota used by the current node in the cluster (bytes). -[float] -=== `couchbase.cluster.ram.quota.used.value.bytes` +-- +*`couchbase.cluster.ram.quota.used.value.bytes`*:: ++ +-- type: long format: bytes @@ -1773,9 +2126,11 @@ format: bytes RAM quota used by the cluster (bytes). -[float] -=== `couchbase.cluster.ram.quota.used.per_node.bytes` +-- +*`couchbase.cluster.ram.quota.used.per_node.bytes`*:: ++ +-- type: long format: bytes @@ -1783,9 +2138,11 @@ format: bytes Ram quota used by the current node in the cluster (bytes) -[float] -=== `couchbase.cluster.ram.total.bytes` +-- +*`couchbase.cluster.ram.total.bytes`*:: ++ +-- type: long format: bytes @@ -1793,9 +2150,11 @@ format: bytes Total RAM available to cluster (bytes). -[float] -=== `couchbase.cluster.ram.used.value.bytes` +-- +*`couchbase.cluster.ram.used.value.bytes`*:: ++ +-- type: long format: bytes @@ -1803,9 +2162,11 @@ format: bytes RAM used by the cluster (bytes). -[float] -=== `couchbase.cluster.ram.used.by_data.bytes` +-- +*`couchbase.cluster.ram.used.by_data.bytes`*:: ++ +-- type: long format: bytes @@ -1813,6 +2174,8 @@ format: bytes RAM used by the data in the cluster (bytes). +-- + [float] == node fields @@ -1820,17 +2183,19 @@ Couchbase node metrics. -[float] -=== `couchbase.node.cmd_get` - +*`couchbase.node.cmd_get`*:: ++ +-- type: long Number of get commands -[float] -=== `couchbase.node.couch.docs.disk_size.bytes` +-- +*`couchbase.node.couch.docs.disk_size.bytes`*:: ++ +-- type: long format: bytes @@ -1838,9 +2203,11 @@ format: bytes Amount of disk space used by Couch docs (bytes). -[float] -=== `couchbase.node.couch.docs.data_size.bytes` +-- +*`couchbase.node.couch.docs.data_size.bytes`*:: ++ +-- type: long format: bytes @@ -1848,89 +2215,111 @@ format: bytes Data size of Couch docs associated with a node (bytes). -[float] -=== `couchbase.node.couch.spatial.data_size.bytes` +-- +*`couchbase.node.couch.spatial.data_size.bytes`*:: ++ +-- type: long Size of object data for spatial views (bytes). -[float] -=== `couchbase.node.couch.spatial.disk_size.bytes` +-- +*`couchbase.node.couch.spatial.disk_size.bytes`*:: ++ +-- type: long Amount of disk space used by spatial views (bytes). -[float] -=== `couchbase.node.couch.views.disk_size.bytes` +-- +*`couchbase.node.couch.views.disk_size.bytes`*:: ++ +-- type: long Amount of disk space used by Couch views (bytes). -[float] -=== `couchbase.node.couch.views.data_size.bytes` +-- +*`couchbase.node.couch.views.data_size.bytes`*:: ++ +-- type: long Size of object data for Couch views (bytes). -[float] -=== `couchbase.node.cpu_utilization_rate.pct` +-- +*`couchbase.node.cpu_utilization_rate.pct`*:: ++ +-- type: scaled_float The CPU utilization rate (%). -[float] -=== `couchbase.node.current_items.value` +-- +*`couchbase.node.current_items.value`*:: ++ +-- type: long Number of current items. -[float] -=== `couchbase.node.current_items.total` +-- +*`couchbase.node.current_items.total`*:: ++ +-- type: long Total number of items associated with the node. -[float] -=== `couchbase.node.ep_bg_fetched` +-- +*`couchbase.node.ep_bg_fetched`*:: ++ +-- type: long Number of disk fetches performed since the server was started. -[float] -=== `couchbase.node.get_hits` +-- +*`couchbase.node.get_hits`*:: ++ +-- type: long Number of get hits. 
-[float] -=== `couchbase.node.hostname` +-- +*`couchbase.node.hostname`*:: ++ +-- type: keyword The hostname of the node. -[float] -=== `couchbase.node.mcd_memory.allocated.bytes` +-- +*`couchbase.node.mcd_memory.allocated.bytes`*:: ++ +-- type: long format: bytes @@ -1938,78 +2327,98 @@ format: bytes Amount of memcached memory allocated (bytes). -[float] -=== `couchbase.node.mcd_memory.reserved.bytes` +-- +*`couchbase.node.mcd_memory.reserved.bytes`*:: ++ +-- type: long Amount of memcached memory reserved (bytes). -[float] -=== `couchbase.node.memory.free.bytes` +-- +*`couchbase.node.memory.free.bytes`*:: ++ +-- type: long Amount of memory free for the node (bytes). -[float] -=== `couchbase.node.memory.total.bytes` +-- +*`couchbase.node.memory.total.bytes`*:: ++ +-- type: long Total memory available to the node (bytes). -[float] -=== `couchbase.node.memory.used.bytes` +-- +*`couchbase.node.memory.used.bytes`*:: ++ +-- type: long Memory used by the node (bytes). -[float] -=== `couchbase.node.ops` +-- +*`couchbase.node.ops`*:: ++ +-- type: long Number of operations performed on Couchbase. -[float] -=== `couchbase.node.swap.total.bytes` +-- +*`couchbase.node.swap.total.bytes`*:: ++ +-- type: long Total swap size allocated (bytes). -[float] -=== `couchbase.node.swap.used.bytes` +-- +*`couchbase.node.swap.used.bytes`*:: ++ +-- type: long Amount of swap space used (bytes). -[float] -=== `couchbase.node.uptime.sec` +-- +*`couchbase.node.uptime.sec`*:: ++ +-- type: long Time during which the node was in operation (sec). -[float] -=== `couchbase.node.vb_replica_curr_items` +-- +*`couchbase.node.vb_replica_curr_items`*:: ++ +-- type: long Number of items/documents that are replicas. +-- + [[exported-fields-docker-processor]] == Docker fields @@ -2018,38 +2427,46 @@ Docker stats collected from Docker. -[float] -=== `docker.container.id` - +*`docker.container.id`*:: ++ +-- type: keyword Unique container id. -[float] -=== `docker.container.image` +-- +*`docker.container.image`*:: ++ +-- type: keyword Name of the image the container was built on. -[float] -=== `docker.container.name` +-- +*`docker.container.name`*:: ++ +-- type: keyword Container name. -[float] -=== `docker.container.labels` +-- +*`docker.container.labels`*:: ++ +-- type: object Image labels. +-- + [[exported-fields-docker]] == Docker fields @@ -2071,38 +2488,46 @@ Docker container metrics. -[float] -=== `docker.container.command` - +*`docker.container.command`*:: ++ +-- type: keyword Command that was executed in the Docker container. -[float] -=== `docker.container.created` +-- +*`docker.container.created`*:: ++ +-- type: date Date when the container was created. -[float] -=== `docker.container.status` +-- +*`docker.container.status`*:: ++ +-- type: keyword Container status. -[float] -=== `docker.container.ip_addresses` +-- +*`docker.container.ip_addresses`*:: ++ +-- type: keyword Container IP addresses. +-- + [float] == size fields @@ -2110,30 +2535,36 @@ Container size metrics. -[float] -=== `docker.container.size.root_fs` - +*`docker.container.size.root_fs`*:: ++ +-- type: long Total size of all the files in the container. -[float] -=== `docker.container.size.rw` +-- +*`docker.container.size.rw`*:: ++ +-- type: long Size of the files that have been created or changed since creation. -[float] -=== `docker.container.tags` +-- +*`docker.container.tags`*:: ++ +-- type: array Image tags. +-- + [float] == cpu fields @@ -2141,61 +2572,75 @@ Runtime CPU metrics. 
-[float] -=== `docker.cpu.kernel.pct` - +*`docker.cpu.kernel.pct`*:: ++ +-- type: scaled_float format: percentage -The system kernel consumed by the Docker server. +Percentage of time in kernel space. -[float] -=== `docker.cpu.kernel.ticks` +-- +*`docker.cpu.kernel.ticks`*:: ++ +-- type: long -CPU kernel ticks. +CPU ticks in kernel space. -[float] -=== `docker.cpu.system.pct` +-- +*`docker.cpu.system.pct`*:: ++ +-- type: scaled_float format: percentage +Percentage of total CPU time in the system. -[float] -=== `docker.cpu.system.ticks` +-- +*`docker.cpu.system.ticks`*:: ++ +-- type: long CPU system ticks. -[float] -=== `docker.cpu.user.pct` +-- +*`docker.cpu.user.pct`*:: ++ +-- type: scaled_float format: percentage +Percentage of time in user space. -[float] -=== `docker.cpu.user.ticks` +-- +*`docker.cpu.user.ticks`*:: ++ +-- type: long -CPU user ticks +CPU ticks in user space. -[float] -=== `docker.cpu.total.pct` +-- +*`docker.cpu.total.pct`*:: ++ +-- type: scaled_float format: percentage @@ -2203,99 +2648,258 @@ format: percentage Total CPU usage. -[float] -== diskio fields - -Disk I/O metrics. - +-- +*`docker.cpu.core.*.pct`*:: ++ +-- +type: object -[float] -=== `docker.diskio.reads` +format: percentage -type: scaled_float +Percentage of CPU time in this core. -Number of reads. +-- -[float] -=== `docker.diskio.writes` +*`docker.cpu.core.*.ticks`*:: ++ +-- +type: object -type: scaled_float +Number of CPU ticks in this core. -Number of writes. +-- [float] -=== `docker.diskio.total` +== diskio fields -type: scaled_float +Disk I/O metrics. -Number of reads and writes combined. [float] -== healthcheck fields +== read fields -Docker container metrics. +Accumulated reads during the life of the container -[float] -=== `docker.healthcheck.failingstreak` +*`docker.diskio.read.ops`*:: ++ +-- +type: long -type: integer +Number of reads during the life of the container -concurent failed check +-- -[float] -=== `docker.healthcheck.status` +*`docker.diskio.read.bytes`*:: ++ +-- +type: long -type: keyword +format: bytes -Healthcheck status code +Bytes read during the life of the container -[float] -== event fields +-- -event fields. 
+*`docker.diskio.read.rate`*:: ++ +-- +type: long +Number of current reads per second -[float] -=== `docker.healthcheck.event.end_date` +-- -type: date +*`docker.diskio.reads`*:: ++ +-- +type: scaled_float -Healthcheck end date +Number of current reads per second +-- + +[float] +== write fields + +Accumulated writes during the life of the container + + + +*`docker.diskio.write.ops`*:: ++ +-- +type: long + +Number of writes during the life of the container + + +-- + +*`docker.diskio.write.bytes`*:: ++ +-- +type: long + +format: bytes + +Bytes written during the life of the container + + +-- + +*`docker.diskio.write.rate`*:: ++ +-- +type: long + +Number of current writes per second + + +-- + +*`docker.diskio.writes`*:: ++ +-- +type: scaled_float + +Number of current writes per second + + +-- + +[float] +== summary fields + +Accumulated reads and writes during the life of the container + + + +*`docker.diskio.summary.ops`*:: ++ +-- +type: long + +Number of I/O operations during the life of the container + + +-- + +*`docker.diskio.summary.bytes`*:: ++ +-- +type: long + +format: bytes + +Bytes read and written during the life of the container + + +-- + +*`docker.diskio.summary.rate`*:: ++ +-- +type: long + +Number of current operations per second + + +-- + +*`docker.diskio.total`*:: ++ +-- +type: scaled_float + +Number of reads and writes per second + + +-- + +[float] +== healthcheck fields + +Docker container metrics. + + + +*`docker.healthcheck.failingstreak`*:: ++ +-- +type: integer + +concurent failed check + + +-- + +*`docker.healthcheck.status`*:: ++ +-- +type: keyword + +Healthcheck status code + + +-- + [float] -=== `docker.healthcheck.event.start_date` +== event fields + +event fields. + + + +*`docker.healthcheck.event.end_date`*:: ++ +-- +type: date +Healthcheck end date + + +-- + +*`docker.healthcheck.event.start_date`*:: ++ +-- type: date Healthcheck start date -[float] -=== `docker.healthcheck.event.output` +-- +*`docker.healthcheck.event.output`*:: ++ +-- type: keyword Healthcheck output -[float] -=== `docker.healthcheck.event.exit_code` +-- +*`docker.healthcheck.event.exit_code`*:: ++ +-- type: integer Healthcheck status code +-- + [float] == image fields @@ -2310,30 +2914,36 @@ The image layers identifier. -[float] -=== `docker.image.id.current` - +*`docker.image.id.current`*:: ++ +-- type: keyword Unique image identifier given upon its creation. -[float] -=== `docker.image.id.parent` +-- +*`docker.image.id.parent`*:: ++ +-- type: keyword Identifier of the image, if it exists, from which the current image directly descends. -[float] -=== `docker.image.created` +-- +*`docker.image.created`*:: ++ +-- type: date Date and time when the image was created. +-- + [float] == size fields @@ -2341,38 +2951,46 @@ Image size layers. -[float] -=== `docker.image.size.virtual` - +*`docker.image.size.virtual`*:: ++ +-- type: long Size of the image. -[float] -=== `docker.image.size.regular` +-- +*`docker.image.size.regular`*:: ++ +-- type: long Total size of the all cached images associated to the current image. -[float] -=== `docker.image.labels` +-- +*`docker.image.labels`*:: ++ +-- type: object Image labels. -[float] -=== `docker.image.tags` +-- +*`docker.image.tags`*:: ++ +-- type: array Image tags. +-- + [float] == info fields @@ -2387,54 +3005,66 @@ Overall container stats. -[float] -=== `docker.info.containers.paused` - +*`docker.info.containers.paused`*:: ++ +-- type: long Total number of paused containers. 
-[float] -=== `docker.info.containers.running` +-- +*`docker.info.containers.running`*:: ++ +-- type: long Total number of running containers. -[float] -=== `docker.info.containers.stopped` +-- +*`docker.info.containers.stopped`*:: ++ +-- type: long Total number of stopped containers. -[float] -=== `docker.info.containers.total` +-- +*`docker.info.containers.total`*:: ++ +-- type: long Total number of existing containers. -[float] -=== `docker.info.id` +-- +*`docker.info.id`*:: ++ +-- type: keyword Unique Docker host identifier. -[float] -=== `docker.info.images` +-- +*`docker.info.images`*:: ++ +-- type: long Total number of existing images. +-- + [float] == memory fields @@ -2442,17 +3072,19 @@ Memory metrics. -[float] -=== `docker.memory.fail.count` - +*`docker.memory.fail.count`*:: ++ +-- type: scaled_float Fail counter. -[float] -=== `docker.memory.limit` +-- +*`docker.memory.limit`*:: ++ +-- type: long format: bytes @@ -2460,6 +3092,8 @@ format: bytes Memory limit. +-- + [float] == rss fields @@ -2467,9 +3101,9 @@ RSS memory stats. -[float] -=== `docker.memory.rss.total` - +*`docker.memory.rss.total`*:: ++ +-- type: long format: bytes @@ -2477,9 +3111,11 @@ format: bytes Total memory resident set size. -[float] -=== `docker.memory.rss.pct` +-- +*`docker.memory.rss.pct`*:: ++ +-- type: scaled_float format: percentage @@ -2487,6 +3123,8 @@ format: percentage Memory resident set size percentage. +-- + [float] == usage fields @@ -2494,9 +3132,9 @@ Usage memory stats. -[float] -=== `docker.memory.usage.max` - +*`docker.memory.usage.max`*:: ++ +-- type: long format: bytes @@ -2504,9 +3142,11 @@ format: bytes Max memory usage. -[float] -=== `docker.memory.usage.pct` +-- +*`docker.memory.usage.pct`*:: ++ +-- type: scaled_float format: percentage @@ -2514,9 +3154,11 @@ format: percentage Memory usage percentage. -[float] -=== `docker.memory.usage.total` +-- +*`docker.memory.usage.total`*:: ++ +-- type: long format: bytes @@ -2524,6 +3166,8 @@ format: bytes Total memory usage. +-- + [float] == network fields @@ -2531,24 +3175,26 @@ Network metrics. -[float] -=== `docker.network.interface` - +*`docker.network.interface`*:: ++ +-- type: keyword Network interface name. +-- + [float] == in fields -Incoming network stats. - +Incoming network stats per second. -[float] -=== `docker.network.in.bytes` +*`docker.network.in.bytes`*:: ++ +-- type: long format: bytes @@ -2556,40 +3202,48 @@ format: bytes Total number of incoming bytes. -[float] -=== `docker.network.in.dropped` +-- +*`docker.network.in.dropped`*:: ++ +-- type: scaled_float Total number of dropped incoming packets. -[float] -=== `docker.network.in.errors` +-- +*`docker.network.in.errors`*:: ++ +-- type: long Total errors on incoming packets. -[float] -=== `docker.network.in.packets` +-- +*`docker.network.in.packets`*:: ++ +-- type: long Total number of incoming packets. +-- + [float] == out fields -Outgoing network stats. - +Outgoing network stats per second. -[float] -=== `docker.network.out.bytes` +*`docker.network.out.bytes`*:: ++ +-- type: long format: bytes @@ -2597,87 +3251,199 @@ format: bytes Total number of outgoing bytes. -[float] -=== `docker.network.out.dropped` +-- +*`docker.network.out.dropped`*:: ++ +-- type: scaled_float Total number of dropped outgoing packets. -[float] -=== `docker.network.out.errors` +-- +*`docker.network.out.errors`*:: ++ +-- type: long Total errors on outgoing packets. -[float] -=== `docker.network.out.packets` +-- +*`docker.network.out.packets`*:: ++ +-- type: long Total number of outgoing packets. 
-[[exported-fields-dropwizard]] -== Dropwizard fields - -Stats collected from Dropwizard. - - +-- [float] -== dropwizard fields +== inbound fields +Incoming network stats since the container started. -[[exported-fields-elasticsearch]] -== Elasticsearch fields +*`docker.network.inbound.bytes`*:: ++ +-- +type: long -Elasticsearch module +format: bytes +Total number of incoming bytes. -[float] -== elasticsearch fields +-- +*`docker.network.inbound.dropped`*:: ++ +-- +type: long +Total number of dropped incoming packets. -[float] -=== `elasticsearch.cluster.name` +-- -type: keyword +*`docker.network.inbound.errors`*:: ++ +-- +type: long -Elasticsearch cluster name. +Total errors on incoming packets. -[float] -== node fields +-- -node +*`docker.network.inbound.packets`*:: ++ +-- +type: long +Total number of incoming packets. +-- + +[float] +== outbound fields + +Outgoing network stats since the container started. + + + +*`docker.network.outbound.bytes`*:: ++ +-- +type: long + +format: bytes + +Total number of outgoing bytes. + + +-- + +*`docker.network.outbound.dropped`*:: ++ +-- +type: long + +Total number of dropped outgoing packets. + + +-- + +*`docker.network.outbound.errors`*:: ++ +-- +type: long + +Total errors on outgoing packets. + + +-- + +*`docker.network.outbound.packets`*:: ++ +-- +type: long + +Total number of outgoing packets. + + +-- + +[[exported-fields-dropwizard]] +== Dropwizard fields + +Stats collected from Dropwizard. + + + +[float] +== dropwizard fields + + + + +[[exported-fields-elasticsearch]] +== Elasticsearch fields + +Elasticsearch module + + + +[float] +== elasticsearch fields + + + + +*`elasticsearch.cluster.name`*:: ++ +-- +type: keyword + +Elasticsearch cluster name. + + +-- + [float] -=== `elasticsearch.node.name` +== node fields + +node + + +*`elasticsearch.node.name`*:: ++ +-- type: keyword Node name. -[float] -=== `elasticsearch.node.version` +-- +*`elasticsearch.node.version`*:: ++ +-- type: keyword Node version. +-- + [float] == jvm fields @@ -2685,17 +3451,19 @@ JVM Info. -[float] -=== `elasticsearch.node.jvm.version` - +*`elasticsearch.node.jvm.version`*:: ++ +-- type: keyword JVM version. -[float] -=== `elasticsearch.node.jvm.memory.heap.init.bytes` +-- +*`elasticsearch.node.jvm.memory.heap.init.bytes`*:: ++ +-- type: long format: bytes @@ -2703,9 +3471,11 @@ format: bytes Heap init used by the JVM in bytes. -[float] -=== `elasticsearch.node.jvm.memory.heap.max.bytes` +-- +*`elasticsearch.node.jvm.memory.heap.max.bytes`*:: ++ +-- type: long format: bytes @@ -2713,9 +3483,11 @@ format: bytes Heap max used by the JVM in bytes. -[float] -=== `elasticsearch.node.jvm.memory.nonheap.init.bytes` +-- +*`elasticsearch.node.jvm.memory.nonheap.init.bytes`*:: ++ +-- type: long format: bytes @@ -2723,9 +3495,11 @@ format: bytes Non-Heap init used by the JVM in bytes. -[float] -=== `elasticsearch.node.jvm.memory.nonheap.max.bytes` +-- +*`elasticsearch.node.jvm.memory.nonheap.max.bytes`*:: ++ +-- type: long format: bytes @@ -2733,14 +3507,18 @@ format: bytes Non-Heap max used by the JVM in bytes. -[float] -=== `elasticsearch.node.process.mlockall` +-- +*`elasticsearch.node.process.mlockall`*:: ++ +-- type: boolean If process locked in memory. +-- + [float] == node.stats fields @@ -2755,33 +3533,39 @@ Node indices stats -[float] -=== `elasticsearch.node.stats.indices.docs.count` - +*`elasticsearch.node.stats.indices.docs.count`*:: ++ +-- type: long Total number of existing documents. 
-[float] -=== `elasticsearch.node.stats.indices.docs.deleted` +-- +*`elasticsearch.node.stats.indices.docs.deleted`*:: ++ +-- type: long Total number of deleted documents. -[float] -=== `elasticsearch.node.stats.indices.segments.count` +-- +*`elasticsearch.node.stats.indices.segments.count`*:: ++ +-- type: long Total number of segments. -[float] -=== `elasticsearch.node.stats.indices.segments.memory.bytes` +-- +*`elasticsearch.node.stats.indices.segments.memory.bytes`*:: ++ +-- type: long format: bytes @@ -2789,14 +3573,18 @@ format: bytes Total size of segments in bytes. -[float] -=== `elasticsearch.node.stats.indices.store.size.bytes` +-- +*`elasticsearch.node.stats.indices.store.size.bytes`*:: ++ +-- type: long Total size of the store in bytes. +-- + [float] == jvm.mem.pools fields @@ -2811,42 +3599,50 @@ Old memory pool stats. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.old.max.bytes` - +*`elasticsearch.node.stats.jvm.mem.pools.old.max.bytes`*:: ++ +-- type: long format: bytes Max bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.old.peak.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.old.peak.bytes`*:: ++ +-- type: long format: bytes Peak bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.old.peak_max.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.old.peak_max.bytes`*:: ++ +-- type: long format: bytes Peak max bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.old.used.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.old.used.bytes`*:: ++ +-- type: long format: bytes Used bytes. +-- + [float] == young fields @@ -2854,42 +3650,50 @@ Young memory pool stats. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.young.max.bytes` - +*`elasticsearch.node.stats.jvm.mem.pools.young.max.bytes`*:: ++ +-- type: long format: bytes Max bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.young.peak.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.young.peak.bytes`*:: ++ +-- type: long format: bytes Peak bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.young.peak_max.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.young.peak_max.bytes`*:: ++ +-- type: long format: bytes Peak max bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.young.used.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.young.used.bytes`*:: ++ +-- type: long format: bytes Used bytes. +-- + [float] == survivor fields @@ -2897,42 +3701,50 @@ Survivor memory pool stats. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.survivor.max.bytes` - +*`elasticsearch.node.stats.jvm.mem.pools.survivor.max.bytes`*:: ++ +-- type: long format: bytes Max bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.survivor.peak.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.survivor.peak.bytes`*:: ++ +-- type: long format: bytes Peak bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.survivor.peak_max.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.survivor.peak_max.bytes`*:: ++ +-- type: long format: bytes Peak max bytes. -[float] -=== `elasticsearch.node.stats.jvm.mem.pools.survivor.used.bytes` +-- +*`elasticsearch.node.stats.jvm.mem.pools.survivor.used.bytes`*:: ++ +-- type: long format: bytes Used bytes. +-- + [float] == jvm.gc.collectors fields @@ -2947,20 +3759,24 @@ Old collection gc. 
-[float] -=== `elasticsearch.node.stats.jvm.gc.collectors.old.collection.count` - +*`elasticsearch.node.stats.jvm.gc.collectors.old.collection.count`*:: ++ +-- type: long -[float] -=== `elasticsearch.node.stats.jvm.gc.collectors.old.collection.ms` +-- +*`elasticsearch.node.stats.jvm.gc.collectors.old.collection.ms`*:: ++ +-- type: long +-- + [float] == young.collection fields @@ -2968,20 +3784,24 @@ Young collection gc. -[float] -=== `elasticsearch.node.stats.jvm.gc.collectors.young.collection.count` - +*`elasticsearch.node.stats.jvm.gc.collectors.young.collection.count`*:: ++ +-- type: long -[float] -=== `elasticsearch.node.stats.jvm.gc.collectors.young.collection.ms` +-- +*`elasticsearch.node.stats.jvm.gc.collectors.young.collection.ms`*:: ++ +-- type: long +-- + [float] == fs.summary fields @@ -2989,33 +3809,39 @@ File system summary -[float] -=== `elasticsearch.node.stats.fs.summary.total.bytes` - +*`elasticsearch.node.stats.fs.summary.total.bytes`*:: ++ +-- type: long format: bytes -[float] -=== `elasticsearch.node.stats.fs.summary.free.bytes` +-- +*`elasticsearch.node.stats.fs.summary.free.bytes`*:: ++ +-- type: long format: bytes -[float] -=== `elasticsearch.node.stats.fs.summary.available.bytes` +-- +*`elasticsearch.node.stats.fs.summary.available.bytes`*:: ++ +-- type: long format: bytes +-- + [[exported-fields-etcd]] == Etcd fields @@ -3044,16 +3870,20 @@ The number of failed and successful Raft RPC requests. -[float] -=== `etcd.leader.followers.counts.followers.counts.success` - +*`etcd.leader.followers.counts.followers.counts.success`*:: ++ +-- type: integer -[float] -=== `etcd.leader.followers.counts.followers.counts.fail` +-- +*`etcd.leader.followers.counts.followers.counts.fail`*:: ++ +-- type: integer +-- + [float] == followers.latency fields @@ -3061,36 +3891,48 @@ latency to each peer in the cluster -[float] -=== `etcd.leader.followers.latency.followers.latency.average` - +*`etcd.leader.followers.latency.followers.latency.average`*:: ++ +-- type: scaled_float -[float] -=== `etcd.leader.followers.latency.followers.latency.current` +-- +*`etcd.leader.followers.latency.followers.latency.current`*:: ++ +-- type: scaled_float -[float] -=== `etcd.leader.followers.latency.followers.latency.maximum` +-- +*`etcd.leader.followers.latency.followers.latency.maximum`*:: ++ +-- type: scaled_float -[float] -=== `etcd.leader.followers.latency.followers.latency.minimum` +-- +*`etcd.leader.followers.latency.followers.latency.minimum`*:: ++ +-- type: integer -[float] -=== `etcd.leader.followers.latency.follower.latency.standardDeviation` +-- +*`etcd.leader.followers.latency.follower.latency.standardDeviation`*:: ++ +-- type: scaled_float -[float] -=== `etcd.leader.leader` +-- +*`etcd.leader.leader`*:: ++ +-- type: keyword +-- + [float] == self fields @@ -3098,107 +3940,133 @@ Contains etcd self statistics. 
-[float] -=== `etcd.self.id` - +*`etcd.self.id`*:: ++ +-- type: keyword the unique identifier for the member -[float] -=== `etcd.self.leaderinfo.leader` +-- +*`etcd.self.leaderinfo.leader`*:: ++ +-- type: keyword id of the current leader member -[float] -=== `etcd.self.leaderinfo.starttime` +-- +*`etcd.self.leaderinfo.starttime`*:: ++ +-- type: keyword -[float] -=== `etcd.self.leaderinfo.uptime` +-- +*`etcd.self.leaderinfo.uptime`*:: ++ +-- type: keyword id of the current leader member -[float] -=== `etcd.self.name` +-- +*`etcd.self.name`*:: ++ +-- type: keyword this member's name -[float] -=== `etcd.self.recv.appendrequest.count` +-- +*`etcd.self.recv.appendrequest.count`*:: ++ +-- type: integer number of append requests this node has processed -[float] -=== `etcd.self.recv.bandwithrate` +-- +*`etcd.self.recv.bandwithrate`*:: ++ +-- type: scaled_float number of bytes per second this node is receiving (follower only) -[float] -=== `etcd.self.recv.pkgrate` +-- +*`etcd.self.recv.pkgrate`*:: ++ +-- type: scaled_float number of requests per second this node is receiving (follower only) -[float] -=== `etcd.self.send.appendrequest.count` +-- +*`etcd.self.send.appendrequest.count`*:: ++ +-- type: integer number of requests that this node has sent -[float] -=== `etcd.self.send.bandwithrate` +-- +*`etcd.self.send.bandwithrate`*:: ++ +-- type: scaled_float number of bytes per second this node is sending (leader only). This value is undefined on single member clusters. -[float] -=== `etcd.self.send.pkgrate` +-- +*`etcd.self.send.pkgrate`*:: ++ +-- type: scaled_float number of requests per second this node is sending (leader only). This value is undefined on single member clusters. -[float] -=== `etcd.self.starttime` +-- +*`etcd.self.starttime`*:: ++ +-- type: keyword the time when this node was started -[float] -=== `etcd.self.state` +-- +*`etcd.self.state`*:: ++ +-- type: keyword either leader or follower +-- + [float] == store fields @@ -3206,86 +4074,118 @@ The store statistics include information about the operations that this node has -[float] -=== `etcd.store.gets.success` - +*`etcd.store.gets.success`*:: ++ +-- type: integer -[float] -=== `etcd.store.gets.fail` +-- +*`etcd.store.gets.fail`*:: ++ +-- type: integer -[float] -=== `etcd.store.sets.success` +-- +*`etcd.store.sets.success`*:: ++ +-- type: integer -[float] -=== `etcd.store.sets.fail` +-- +*`etcd.store.sets.fail`*:: ++ +-- type: integer -[float] -=== `etcd.store.delete.success` +-- +*`etcd.store.delete.success`*:: ++ +-- type: integer -[float] -=== `etcd.store.delete.fail` +-- +*`etcd.store.delete.fail`*:: ++ +-- type: integer -[float] -=== `etcd.store.update.success` +-- +*`etcd.store.update.success`*:: ++ +-- type: integer -[float] -=== `etcd.store.update.fail` +-- +*`etcd.store.update.fail`*:: ++ +-- type: integer -[float] -=== `etcd.store.create.success` +-- +*`etcd.store.create.success`*:: ++ +-- type: integer -[float] -=== `etcd.store.create.fail` +-- +*`etcd.store.create.fail`*:: ++ +-- type: integer -[float] -=== `etcd.store.compareandswap.success` +-- +*`etcd.store.compareandswap.success`*:: ++ +-- type: integer -[float] -=== `etcd.store.compareandswap.fail` +-- +*`etcd.store.compareandswap.fail`*:: ++ +-- type: integer -[float] -=== `etcd.store.compareanddelete.success` +-- +*`etcd.store.compareanddelete.success`*:: ++ +-- type: integer -[float] -=== `etcd.store.compareanddelete.fail` +-- +*`etcd.store.compareanddelete.fail`*:: ++ +-- type: integer -[float] -=== `etcd.store.expire.count` +-- +*`etcd.store.expire.count`*:: ++ 
+-- type: integer -[float] -=== `etcd.store.watchers` +-- +*`etcd.store.watchers`*:: ++ +-- type: integer +-- + [[exported-fields-golang]] == Golang fields @@ -3306,14 +4206,16 @@ expvar -[float] -=== `golang.expvar.cmdline` - +*`golang.expvar.cmdline`*:: ++ +-- type: keyword The cmdline this golang program was started with. +-- + [float] == heap fields @@ -3321,14 +4223,16 @@ The golang program heap information exposed by expvar. -[float] -=== `golang.heap.cmdline` - +*`golang.heap.cmdline`*:: ++ +-- type: keyword The cmdline this golang program was started with. +-- + [float] == gc fields @@ -3343,25 +4247,29 @@ Total GC pause duration over lifetime of process. -[float] -=== `golang.heap.gc.total_pause.ns` - +*`golang.heap.gc.total_pause.ns`*:: ++ +-- type: long Duration in Ns. -[float] -=== `golang.heap.gc.total_count` +-- +*`golang.heap.gc.total_count`*:: ++ +-- type: long Total number of GCs that have happened. -[float] -=== `golang.heap.gc.next_gc_limit` +-- +*`golang.heap.gc.next_gc_limit`*:: ++ +-- type: long format: bytes @@ -3369,14 +4277,18 @@ format: bytes Next collection will happen when HeapAlloc > this amount. -[float] -=== `golang.heap.gc.cpu_fraction` +-- +*`golang.heap.gc.cpu_fraction`*:: ++ +-- type: long Fraction of CPU time used by GC. +-- + [float] == pause fields @@ -3384,14 +4296,16 @@ Last GC pause durations during the monitoring period. -[float] -=== `golang.heap.gc.pause.count` - +*`golang.heap.gc.pause.count`*:: ++ +-- type: long Count of GC pauses during this collect period. +-- + [float] == sum fields @@ -3399,14 +4313,16 @@ Total GC pause duration during this collect period. -[float] -=== `golang.heap.gc.pause.sum.ns` - +*`golang.heap.gc.pause.sum.ns`*:: ++ +-- type: long Duration in Ns. +-- + [float] == max fields @@ -3414,14 +4330,16 @@ Max GC pause duration during this collect period. -[float] -=== `golang.heap.gc.pause.max.ns` - +*`golang.heap.gc.pause.max.ns`*:: ++ +-- type: long Duration in Ns. +-- + [float] == avg fields @@ -3429,14 +4347,16 @@ Average GC pause duration during this collect period. -[float] -=== `golang.heap.gc.pause.avg.ns` - +*`golang.heap.gc.pause.avg.ns`*:: ++ +-- type: long Duration in Ns. +-- + [float] == system fields @@ -3444,9 +4364,9 @@ Heap summary, showing bytes obtained from the system. -[float] -=== `golang.heap.system.total` - +*`golang.heap.system.total`*:: ++ +-- type: long format: bytes @@ -3454,9 +4374,11 @@ format: bytes Total bytes obtained from the system (sum of XxxSys below). -[float] -=== `golang.heap.system.obtained` +-- +*`golang.heap.system.obtained`*:: ++ +-- type: long format: bytes @@ -3464,9 +4386,11 @@ format: bytes Via HeapSys, bytes obtained from the system. heap_sys = heap_idle + heap_inuse. -[float] -=== `golang.heap.system.stack` +-- +*`golang.heap.system.stack`*:: ++ +-- type: long format: bytes @@ -3474,9 +4398,11 @@ format: bytes Bytes used by the stack allocator; these bytes were obtained from the system. -[float] -=== `golang.heap.system.released` +-- +*`golang.heap.system.released`*:: ++ +-- type: long format: bytes @@ -3484,6 +4410,8 @@ format: bytes Bytes released to the OS. +-- + [float] == allocations fields @@ -3491,33 +4419,39 @@ Heap allocations summary. -[float] -=== `golang.heap.allocations.mallocs` - +*`golang.heap.allocations.mallocs`*:: ++ +-- type: long Number of mallocs. -[float] -=== `golang.heap.allocations.frees` +-- +*`golang.heap.allocations.frees`*:: ++ +-- type: long Number of frees.
-[float] -=== `golang.heap.allocations.objects` +-- +*`golang.heap.allocations.objects`*:: ++ +-- type: long Total number of allocated objects. -[float] -=== `golang.heap.allocations.total` +-- +*`golang.heap.allocations.total`*:: ++ +-- type: long format: bytes @@ -3525,9 +4459,11 @@ format: bytes Bytes allocated (even if freed) throughout the lifetime. -[float] -=== `golang.heap.allocations.allocated` +-- +*`golang.heap.allocations.allocated`*:: ++ +-- type: long format: bytes @@ -3535,9 +4471,11 @@ format: bytes Bytes allocated and not yet freed (same as Alloc above). -[float] -=== `golang.heap.allocations.idle` +-- +*`golang.heap.allocations.idle`*:: ++ +-- type: long format: bytes @@ -3545,9 +4483,11 @@ format: bytes Bytes in idle spans. -[float] -=== `golang.heap.allocations.active` +-- +*`golang.heap.allocations.active`*:: ++ +-- type: long format: bytes @@ -3555,6 +4495,8 @@ format: bytes Bytes in non-idle span. +-- + [[exported-fields-graphite]] == Graphite fields @@ -3575,14 +4517,16 @@ server -[float] -=== `graphite.server.example` - +*`graphite.server.example`*:: ++ +-- type: keyword Example field +-- + [[exported-fields-haproxy]] == HAProxy fields @@ -3604,55 +4548,67 @@ General information about HAProxy processes. -[float] -=== `haproxy.info.processes` - +*`haproxy.info.processes`*:: ++ +-- type: long Number of processes. -[float] -=== `haproxy.info.process_num` +-- +*`haproxy.info.process_num`*:: ++ +-- type: long Process number. -[float] -=== `haproxy.info.pid` +-- +*`haproxy.info.pid`*:: ++ +-- type: long Process ID. -[float] -=== `haproxy.info.run_queue` +-- +*`haproxy.info.run_queue`*:: ++ +-- type: long -[float] -=== `haproxy.info.tasks` +-- +*`haproxy.info.tasks`*:: ++ +-- type: long -[float] -=== `haproxy.info.uptime.sec` +-- +*`haproxy.info.uptime.sec`*:: ++ +-- type: long Current uptime in seconds. -[float] -=== `haproxy.info.memory.max.bytes` +-- +*`haproxy.info.memory.max.bytes`*:: ++ +-- type: long format: bytes @@ -3660,14 +4616,18 @@ format: bytes Maximum amount of memory usage in bytes (the 'Memmax_MB' value converted to bytes). -[float] -=== `haproxy.info.ulimit_n` +-- +*`haproxy.info.ulimit_n`*:: ++ +-- type: long Maximum number of open files for the process. +-- + [float] == compress fields @@ -3680,27 +4640,33 @@ Maximum number of open files for the process. -[float] -=== `haproxy.info.compress.bps.in` - +*`haproxy.info.compress.bps.in`*:: ++ +-- type: long -[float] -=== `haproxy.info.compress.bps.out` +-- +*`haproxy.info.compress.bps.out`*:: ++ +-- type: long -[float] -=== `haproxy.info.compress.bps.rate_limit` +-- +*`haproxy.info.compress.bps.rate_limit`*:: ++ +-- type: long +-- + [float] == connection fields @@ -3713,276 +4679,340 @@ type: long -[float] -=== `haproxy.info.connection.rate.value` - +*`haproxy.info.connection.rate.value`*:: ++ +-- type: long -[float] -=== `haproxy.info.connection.rate.limit` +-- +*`haproxy.info.connection.rate.limit`*:: ++ +-- type: long -[float] -=== `haproxy.info.connection.rate.max` +-- +*`haproxy.info.connection.rate.max`*:: ++ +-- type: long -[float] -=== `haproxy.info.connection.current` +-- +*`haproxy.info.connection.current`*:: ++ +-- type: long Current connections. -[float] -=== `haproxy.info.connection.total` +-- +*`haproxy.info.connection.total`*:: ++ +-- type: long Total connections. -[float] -=== `haproxy.info.connection.ssl.current` +-- +*`haproxy.info.connection.ssl.current`*:: ++ +-- type: long Current SSL connections. 
-[float] -=== `haproxy.info.connection.ssl.total` +-- +*`haproxy.info.connection.ssl.total`*:: ++ +-- type: long Total SSL connections. -[float] -=== `haproxy.info.connection.ssl.max` +-- +*`haproxy.info.connection.ssl.max`*:: ++ +-- type: long Maximum SSL connections. -[float] -=== `haproxy.info.connection.max` +-- +*`haproxy.info.connection.max`*:: ++ +-- type: long Maximum connections. -[float] -=== `haproxy.info.connection.hard_max` +-- +*`haproxy.info.connection.hard_max`*:: ++ +-- type: long -[float] -=== `haproxy.info.requests.total` +-- +*`haproxy.info.requests.total`*:: ++ +-- type: long -[float] -=== `haproxy.info.sockets.max` +-- +*`haproxy.info.sockets.max`*:: ++ +-- type: long -[float] -=== `haproxy.info.requests.max` +-- +*`haproxy.info.requests.max`*:: ++ +-- type: long +-- + [float] == pipes fields -[float] -=== `haproxy.info.pipes.used` - +*`haproxy.info.pipes.used`*:: ++ +-- type: integer -[float] -=== `haproxy.info.pipes.free` +-- +*`haproxy.info.pipes.free`*:: ++ +-- type: integer -[float] -=== `haproxy.info.pipes.max` +-- +*`haproxy.info.pipes.max`*:: ++ +-- type: integer +-- + [float] == session fields None -[float] -=== `haproxy.info.session.rate.value` - +*`haproxy.info.session.rate.value`*:: ++ +-- type: integer -[float] -=== `haproxy.info.session.rate.limit` +-- +*`haproxy.info.session.rate.limit`*:: ++ +-- type: integer -[float] -=== `haproxy.info.session.rate.max` +-- +*`haproxy.info.session.rate.max`*:: ++ +-- type: integer +-- + [float] == ssl fields None -[float] -=== `haproxy.info.ssl.rate.value` - +*`haproxy.info.ssl.rate.value`*:: ++ +-- type: integer None -[float] -=== `haproxy.info.ssl.rate.limit` +-- +*`haproxy.info.ssl.rate.limit`*:: ++ +-- type: integer None -[float] -=== `haproxy.info.ssl.rate.max` +-- +*`haproxy.info.ssl.rate.max`*:: ++ +-- type: integer None +-- + [float] == frontend fields None -[float] -=== `haproxy.info.ssl.frontend.key_rate.value` - +*`haproxy.info.ssl.frontend.key_rate.value`*:: ++ +-- type: integer None -[float] -=== `haproxy.info.ssl.frontend.key_rate.max` +-- +*`haproxy.info.ssl.frontend.key_rate.max`*:: ++ +-- type: integer None -[float] -=== `haproxy.info.ssl.frontend.session_reuse.pct` +-- +*`haproxy.info.ssl.frontend.session_reuse.pct`*:: ++ +-- type: scaled_float format: percent None +-- + [float] == backend fields None -[float] -=== `haproxy.info.ssl.backend.key_rate.value` - +*`haproxy.info.ssl.backend.key_rate.value`*:: ++ +-- type: integer None -[float] -=== `haproxy.info.ssl.backend.key_rate.max` +-- +*`haproxy.info.ssl.backend.key_rate.max`*:: ++ +-- type: integer MaxConnRate -[float] -=== `haproxy.info.ssl.cached_lookups` +-- +*`haproxy.info.ssl.cached_lookups`*:: ++ +-- type: long None -[float] -=== `haproxy.info.ssl.cache_misses` +-- +*`haproxy.info.ssl.cache_misses`*:: ++ +-- type: long None +-- + [float] == zlib_mem_usage fields -[float] -=== `haproxy.info.zlib_mem_usage.value` - +*`haproxy.info.zlib_mem_usage.value`*:: ++ +-- type: integer -[float] -=== `haproxy.info.zlib_mem_usage.max` +-- +*`haproxy.info.zlib_mem_usage.max`*:: ++ +-- type: integer -[float] -=== `haproxy.info.idle.pct` +-- +*`haproxy.info.idle.pct`*:: ++ +-- type: scaled_float format: percent +-- + [float] == stat fields @@ -3990,57 +5020,69 @@ Stats collected from HAProxy processes. -[float] -=== `haproxy.stat.status` - +*`haproxy.stat.status`*:: ++ +-- type: keyword Status (UP, DOWN, NOLB, MAINT, or MAINT(via)...). 
-[float] -=== `haproxy.stat.weight` +-- +*`haproxy.stat.weight`*:: ++ +-- type: long Total weight (for backends), or server weight (for servers). -[float] -=== `haproxy.stat.downtime` +-- +*`haproxy.stat.downtime`*:: ++ +-- type: long Total downtime (in seconds). For backends, this value is the downtime for the whole backend, not the sum of the downtime for the servers. -[float] -=== `haproxy.stat.component_type` +-- +*`haproxy.stat.component_type`*:: ++ +-- type: integer Component type (0=frontend, 1=backend, 2=server, or 3=socket/listener). -[float] -=== `haproxy.stat.process_id` +-- +*`haproxy.stat.process_id`*:: ++ +-- type: integer Process ID (0 for first instance, 1 for second, and so on). -[float] -=== `haproxy.stat.service_name` +-- +*`haproxy.stat.service_name`*:: ++ +-- type: keyword Service name (FRONTEND for frontend, BACKEND for backend, or any name for server/listener). -[float] -=== `haproxy.stat.in.bytes` +-- +*`haproxy.stat.in.bytes`*:: ++ +-- type: long format: bytes @@ -4048,9 +5090,11 @@ format: bytes Bytes in. -[float] -=== `haproxy.stat.out.bytes` +-- +*`haproxy.stat.out.bytes`*:: ++ +-- type: long format: bytes @@ -4058,17 +5102,21 @@ format: bytes Bytes out. -[float] -=== `haproxy.stat.last_change` +-- +*`haproxy.stat.last_change`*:: ++ +-- type: integer Number of seconds since the last UP->DOWN or DOWN->UP transition. -[float] -=== `haproxy.stat.throttle.pct` +-- +*`haproxy.stat.throttle.pct`*:: ++ +-- type: scaled_float format: percentage @@ -4076,51 +5124,63 @@ format: percentage Current throttle percentage for the server when slowstart is active, or no value if slowstart is inactive. -[float] -=== `haproxy.stat.selected.total` +-- +*`haproxy.stat.selected.total`*:: ++ +-- type: long Total number of times a server was selected, either for new sessions, or when re-dispatching. For servers, this field reports the the number of times the server was selected. -[float] -=== `haproxy.stat.tracked.id` +-- +*`haproxy.stat.tracked.id`*:: ++ +-- type: long ID of the proxy/server if tracking is enabled. +-- -[float] -=== `haproxy.stat.connection.total` +*`haproxy.stat.connection.total`*:: ++ +-- type: long Cumulative number of connections. -[float] -=== `haproxy.stat.connection.retried` +-- +*`haproxy.stat.connection.retried`*:: ++ +-- type: long Number of times a connection to a server was retried. -[float] -=== `haproxy.stat.connection.time.avg` +-- +*`haproxy.stat.connection.time.avg`*:: ++ +-- type: long Average connect time in ms over the last 1024 requests. +-- -[float] -=== `haproxy.stat.request.denied` +*`haproxy.stat.request.denied`*:: ++ +-- type: long Requests denied because of security concerns. @@ -4129,25 +5189,31 @@ Requests denied because of security concerns. * For HTTP this is because of a matched http-request or tarpit rule. -[float] -=== `haproxy.stat.request.queued.current` +-- +*`haproxy.stat.request.queued.current`*:: ++ +-- type: long Current queued requests. For backends, this field reports the number of requests queued without a server assigned. -[float] -=== `haproxy.stat.request.queued.max` +-- +*`haproxy.stat.request.queued.max`*:: ++ +-- type: long Maximum value of queued.current. -[float] -=== `haproxy.stat.request.errors` +-- +*`haproxy.stat.request.errors`*:: ++ +-- type: long Request errors. Some of the possible causes are: @@ -4160,191 +5226,233 @@ Request errors. Some of the possible causes are: * request was tarpitted. 
-[float] -=== `haproxy.stat.request.redispatched` +-- +*`haproxy.stat.request.redispatched`*:: ++ +-- type: long Number of times a request was redispatched to another server. For servers, this field reports the number of times the server was switched away from. -[float] -=== `haproxy.stat.request.connection.errors` +-- +*`haproxy.stat.request.connection.errors`*:: ++ +-- type: long Number of requests that encountered an error trying to connect to a server. For backends, this field reports the sum of the stat for all backend servers, plus any connection errors not associated with a particular server (such as the backend having no active servers). +-- + [float] == rate fields -[float] -=== `haproxy.stat.request.rate.value` - +*`haproxy.stat.request.rate.value`*:: ++ +-- type: long Number of HTTP requests per second over the last elapsed second. -[float] -=== `haproxy.stat.request.rate.max` +-- +*`haproxy.stat.request.rate.max`*:: ++ +-- type: long Maximum number of HTTP requests per second. -[float] -=== `haproxy.stat.request.total` +-- +*`haproxy.stat.request.total`*:: ++ +-- type: long Total number of HTTP requests received. +-- -[float] -=== `haproxy.stat.response.errors` +*`haproxy.stat.response.errors`*:: ++ +-- type: long Number of response errors. This value includes the number of data transfers aborted by the server (haproxy.stat.server.aborted). Some other errors are: * write errors on the client socket (won't be counted for the server stat) * failure applying filters to the response -[float] -=== `haproxy.stat.response.time.avg` +-- +*`haproxy.stat.response.time.avg`*:: ++ +-- type: long Average response time in ms over the last 1024 requests (0 for TCP). -[float] -=== `haproxy.stat.response.denied` +-- +*`haproxy.stat.response.denied`*:: ++ +-- type: integer Responses denied because of security concerns. For HTTP this is because of a matched http-request rule, or "option checkcache". +-- + [float] == http fields -[float] -=== `haproxy.stat.response.http.1xx` - +*`haproxy.stat.response.http.1xx`*:: ++ +-- type: long HTTP responses with 1xx code. -[float] -=== `haproxy.stat.response.http.2xx` +-- +*`haproxy.stat.response.http.2xx`*:: ++ +-- type: long HTTP responses with 2xx code. -[float] -=== `haproxy.stat.response.http.3xx` +-- +*`haproxy.stat.response.http.3xx`*:: ++ +-- type: long HTTP responses with 3xx code. -[float] -=== `haproxy.stat.response.http.4xx` +-- +*`haproxy.stat.response.http.4xx`*:: ++ +-- type: long HTTP responses with 4xx code. -[float] -=== `haproxy.stat.response.http.5xx` +-- +*`haproxy.stat.response.http.5xx`*:: ++ +-- type: long HTTP responses with 5xx code. -[float] -=== `haproxy.stat.response.http.other` +-- +*`haproxy.stat.response.http.other`*:: ++ +-- type: long HTTP responses with other codes (protocol error). +-- -[float] -=== `haproxy.stat.session.current` +*`haproxy.stat.session.current`*:: ++ +-- type: long Number of current sessions. -[float] -=== `haproxy.stat.session.max` +-- +*`haproxy.stat.session.max`*:: ++ +-- type: long Maximum number of sessions. -[float] -=== `haproxy.stat.session.limit` +-- +*`haproxy.stat.session.limit`*:: ++ +-- type: long Configured session limit. +-- -[float] -=== `haproxy.stat.session.rate.value` +*`haproxy.stat.session.rate.value`*:: ++ +-- type: integer Number of sessions per second over the last elapsed second. -[float] -=== `haproxy.stat.session.rate.limit` +-- +*`haproxy.stat.session.rate.limit`*:: ++ +-- type: integer Configured limit on new sessions per second. 
-[float] -=== `haproxy.stat.session.rate.max` +-- +*`haproxy.stat.session.rate.max`*:: ++ +-- type: integer Maximum number of new sessions per second. +-- + [float] == check fields -[float] -=== `haproxy.stat.check.status` - +*`haproxy.stat.check.status`*:: ++ +-- type: keyword Status of the last health check. One of: @@ -4367,116 +5475,142 @@ Status of the last health check. One of: L7STS -> layer 7 response error, for example HTTP 5xx -[float] -=== `haproxy.stat.check.code` +-- +*`haproxy.stat.check.code`*:: ++ +-- type: long Layer 5-7 code, if available. -[float] -=== `haproxy.stat.check.duration` +-- +*`haproxy.stat.check.duration`*:: ++ +-- type: long Time in ms that it took to finish the last health check. -[float] -=== `haproxy.stat.check.health.last` +-- +*`haproxy.stat.check.health.last`*:: ++ +-- type: keyword The result of the last health check. -[float] -=== `haproxy.stat.check.health.fail` +-- +*`haproxy.stat.check.health.fail`*:: ++ +-- type: long Number of failed checks. -[float] -=== `haproxy.stat.check.agent.last` +-- +*`haproxy.stat.check.agent.last`*:: ++ +-- type: integer -[float] -=== `haproxy.stat.check.failed` +-- +*`haproxy.stat.check.failed`*:: ++ +-- type: long Number of checks that failed while the server was up. -[float] -=== `haproxy.stat.check.down` +-- +*`haproxy.stat.check.down`*:: ++ +-- type: long Number of UP->DOWN transitions. For backends, this value is the number of transitions to the whole backend being down, rather than the sum of the transitions for each server. -[float] -=== `haproxy.stat.client.aborted` +-- +*`haproxy.stat.client.aborted`*:: ++ +-- type: integer Number of data transfers aborted by the client. +-- + [float] == server fields -[float] -=== `haproxy.stat.server.id` - +*`haproxy.stat.server.id`*:: ++ +-- type: integer Server ID (unique inside a proxy). -[float] -=== `haproxy.stat.server.aborted` +-- +*`haproxy.stat.server.aborted`*:: ++ +-- type: integer Number of data transfers aborted by the server. This value is included in haproxy.stat.response.errors. -[float] -=== `haproxy.stat.server.active` +-- +*`haproxy.stat.server.active`*:: ++ +-- type: integer Number of backend servers that are active, meaning that they are healthy and can receive requests from the load balancer. -[float] -=== `haproxy.stat.server.backup` +-- +*`haproxy.stat.server.backup`*:: ++ +-- type: integer Number of backend servers that are backup servers. +-- + [float] == compressor fields -[float] -=== `haproxy.stat.compressor.in.bytes` - +*`haproxy.stat.compressor.in.bytes`*:: ++ +-- type: long format: bytes @@ -4484,9 +5618,11 @@ format: bytes Number of HTTP response bytes fed to the compressor. -[float] -=== `haproxy.stat.compressor.out.bytes` +-- +*`haproxy.stat.compressor.out.bytes`*:: ++ +-- type: integer format: bytes @@ -4494,9 +5630,11 @@ format: bytes Number of HTTP response bytes emitted by the compressor. -[float] -=== `haproxy.stat.compressor.bypassed.bytes` +-- +*`haproxy.stat.compressor.bypassed.bytes`*:: ++ +-- type: long format: bytes @@ -4504,9 +5642,11 @@ format: bytes Number of bytes that bypassed the HTTP compressor (CPU/BW limit). -[float] -=== `haproxy.stat.compressor.response.bytes` +-- +*`haproxy.stat.compressor.response.bytes`*:: ++ +-- type: long format: bytes @@ -4514,112 +5654,198 @@ format: bytes Number of HTTP responses that were compressed. +-- + [float] == proxy fields -[float] -=== `haproxy.stat.proxy.id` - +*`haproxy.stat.proxy.id`*:: ++ +-- type: integer Unique proxy ID. 
-[float] -=== `haproxy.stat.proxy.name` +-- +*`haproxy.stat.proxy.name`*:: ++ +-- type: keyword Proxy name. +-- + [float] == queue fields -[float] -=== `haproxy.stat.queue.limit` - +*`haproxy.stat.queue.limit`*:: ++ +-- type: integer Configured queue limit (maxqueue) for the server, or nothing if the value of maxqueue is 0 (meaning no limit). -[float] -=== `haproxy.stat.queue.time.avg` +-- +*`haproxy.stat.queue.time.avg`*:: ++ +-- type: integer The average queue time in ms over the last 1024 requests. -[[exported-fields-http]] -== HTTP fields +-- -HTTP module +[[exported-fields-host-processor]] +== Host fields +Info collected for the host machine. -[float] -== http fields +*`host.name`*:: ++ +-- +type: keyword +Hostname. -[float] -== request fields -HTTP request information +-- + +*`host.id`*:: ++ +-- +type: keyword +Unique host id. -[float] -=== `http.request.header` +-- -type: object +*`host.architecture`*:: ++ +-- +type: keyword -The HTTP headers sent +Host architecture (e.g. x86_64, arm, ppc, mips). -[float] -=== `http.request.method` +-- +*`host.os.platform`*:: ++ +-- type: keyword -The HTTP method used +OS platform (e.g. centos, ubuntu, windows). -[float] -=== `http.request.body` +-- +*`host.os.version`*:: ++ +-- type: keyword -The HTTP payload sent +OS version. -[float] -== response fields +-- + +*`host.os.family`*:: ++ +-- +type: keyword + +OS family (e.g. redhat, debian, freebsd, windows). + + +-- + +[[exported-fields-http]] +== HTTP fields + +HTTP module + + + +[float] +== http fields -HTTP response information [float] -=== `http.response.header` +== request fields + +HTTP request information + + +*`http.request.header`*:: ++ +-- type: object -The HTTP headers received +The HTTP headers sent + + +-- + +*`http.request.method`*:: ++ +-- +type: keyword + +The HTTP method used + + +-- + +*`http.request.body`*:: ++ +-- +type: keyword + +The HTTP payload sent +-- + [float] -=== `http.response.code` +== response fields + +HTTP response information + + + +*`http.response.header`*:: ++ +-- +type: object + +The HTTP headers received + +-- + +*`http.response.code`*:: ++ +-- type: keyword example: 404 @@ -4627,9 +5853,11 @@ example: 404 The HTTP status code -[float] -=== `http.response.phrase` +-- +*`http.response.phrase`*:: ++ +-- type: keyword example: Not found @@ -4637,14 +5865,18 @@ example: Not found The HTTP status phrase -[float] -=== `http.response.body` +-- +*`http.response.body`*:: ++ +-- type: keyword The HTTP payload received +-- + [float] == json fields @@ -4698,65 +5930,81 @@ Broker Consumer Group Information have been read from (Broker handling the consu -[float] -=== `kafka.consumergroup.broker.id` - +*`kafka.consumergroup.broker.id`*:: ++ +-- type: long Broker id -[float] -=== `kafka.consumergroup.broker.address` +-- +*`kafka.consumergroup.broker.address`*:: ++ +-- type: keyword Broker address -[float] -=== `kafka.consumergroup.id` +-- +*`kafka.consumergroup.id`*:: ++ +-- type: keyword Consumer Group ID -[float] -=== `kafka.consumergroup.topic` +-- +*`kafka.consumergroup.topic`*:: ++ +-- type: keyword Topic name -[float] -=== `kafka.consumergroup.partition` +-- +*`kafka.consumergroup.partition`*:: ++ +-- type: long Partition ID -[float] -=== `kafka.consumergroup.offset` +-- +*`kafka.consumergroup.offset`*:: ++ +-- type: long consumer offset into partition being read -[float] -=== `kafka.consumergroup.meta` +-- +*`kafka.consumergroup.meta`*:: ++ +-- type: text custom consumer meta data string -[float] -=== `kafka.consumergroup.error.code` +-- 
+*`kafka.consumergroup.error.code`*:: ++ +-- type: long kafka consumer/partition error code. +-- + [float] == client fields @@ -4764,27 +6012,33 @@ Assigned client reading events from partition -[float] -=== `kafka.consumergroup.client.id` - +*`kafka.consumergroup.client.id`*:: ++ +-- type: keyword Client ID (kafka setting client.id) -[float] -=== `kafka.consumergroup.client.host` +-- +*`kafka.consumergroup.client.host`*:: ++ +-- type: keyword Client host -[float] -=== `kafka.consumergroup.client.member_id` +-- +*`kafka.consumergroup.client.member_id`*:: ++ +-- type: keyword internal consumer group member ID +-- + [float] == partition fields @@ -4799,22 +6053,26 @@ Available offsets of the given partition. -[float] -=== `kafka.partition.offset.newest` - +*`kafka.partition.offset.newest`*:: ++ +-- type: long Newest offset of the partition. -[float] -=== `kafka.partition.offset.oldest` +-- +*`kafka.partition.offset.oldest`*:: ++ +-- type: long Oldest offset of the partition. +-- + [float] == partition fields @@ -4822,86 +6080,106 @@ Partition data. -[float] -=== `kafka.partition.partition.id` - +*`kafka.partition.partition.id`*:: ++ +-- type: long Partition id. -[float] -=== `kafka.partition.partition.leader` +-- +*`kafka.partition.partition.leader`*:: ++ +-- type: long Leader id (broker). -[float] -=== `kafka.partition.partition.isr` +-- +*`kafka.partition.partition.isr`*:: ++ +-- type: array List of isr ids. -[float] -=== `kafka.partition.partition.replica` +-- +*`kafka.partition.partition.replica`*:: ++ +-- type: long Replica id (broker). -[float] -=== `kafka.partition.partition.insync_replica` +-- +*`kafka.partition.partition.insync_replica`*:: ++ +-- type: boolean Indicates if replica is included in the in-sync replicate set (ISR). -[float] -=== `kafka.partition.partition.error.code` +-- +*`kafka.partition.partition.error.code`*:: ++ +-- type: long Error code from fetching partition. -[float] -=== `kafka.partition.topic.error.code` +-- +*`kafka.partition.topic.error.code`*:: ++ +-- type: long topic error code. -[float] -=== `kafka.partition.topic.name` +-- +*`kafka.partition.topic.name`*:: ++ +-- type: keyword Topic name -[float] -=== `kafka.partition.broker.id` +-- +*`kafka.partition.broker.id`*:: ++ +-- type: long Broker id -[float] -=== `kafka.partition.broker.address` +-- +*`kafka.partition.broker.address`*:: ++ +-- type: keyword Broker address +-- + [[exported-fields-kibana]] == Kibana fields @@ -4922,38 +6200,46 @@ Status fields -[float] -=== `kibana.status.name` - +*`kibana.status.name`*:: ++ +-- type: keyword Kibana instance name. -[float] -=== `kibana.status.uuid` +-- +*`kibana.status.uuid`*:: ++ +-- type: keyword Kibana instance uuid. -[float] -=== `kibana.status.version.number` +-- +*`kibana.status.version.number`*:: ++ +-- type: keyword Kibana version number. -[float] -=== `kibana.status.status.overall.state` +-- +*`kibana.status.status.overall.state`*:: ++ +-- type: keyword Kibana overall state. +-- + [float] == metrics fields @@ -4961,14 +6247,16 @@ Metrics fields -[float] -=== `kibana.status.metrics.concurrent_connections` - +*`kibana.status.metrics.concurrent_connections`*:: ++ +-- type: long Current concurrent connections. +-- + [float] == requests fields @@ -4976,22 +6264,26 @@ Request statistics. -[float] -=== `kibana.status.metrics.requests.disconnects` - +*`kibana.status.metrics.requests.disconnects`*:: ++ +-- type: long Total number of disconnected connections. 
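The `kafka.partition.offset.newest` and `kafka.consumergroup.offset` fields above are often combined into a consumer-lag figure. Lag itself is not listed among the fields above; the derivation below is an illustrative calculation over the two documented offsets:

[source,go]
----
package main

import "fmt"

// consumerLag derives how far a consumer group is behind the head of a
// partition from two documented fields:
// newest    = kafka.partition.offset.newest
// committed = kafka.consumergroup.offset
func consumerLag(newest, committed int64) int64 {
	lag := newest - committed
	if lag < 0 {
		// The two offsets are sampled independently, so a small negative
		// difference can occur; clamp it to zero.
		return 0
	}
	return lag
}

func main() {
	fmt.Println(consumerLag(1500, 1420)) // the group is 80 messages behind
}
----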
-[float] -=== `kibana.status.metrics.requests.total` +-- +*`kibana.status.metrics.requests.total`*:: ++ +-- type: long Total number of connections. +-- + [[exported-fields-kubernetes-processor]] == Kubernetes fields @@ -5000,62 +6292,76 @@ Kubernetes metadata added by the kubernetes processor -[float] -=== `kubernetes.pod.name` - +*`kubernetes.pod.name`*:: ++ +-- type: keyword Kubernetes pod name -[float] -=== `kubernetes.namespace` +-- +*`kubernetes.namespace`*:: ++ +-- type: keyword Kubernetes namespace -[float] -=== `kubernetes.node.name` +-- +*`kubernetes.node.name`*:: ++ +-- type: keyword Kubernetes node name -[float] -=== `kubernetes.labels` +-- +*`kubernetes.labels`*:: ++ +-- type: object Kubernetes labels map -[float] -=== `kubernetes.annotations` +-- +*`kubernetes.annotations`*:: ++ +-- type: object Kubernetes annotations map -[float] -=== `kubernetes.container.name` +-- +*`kubernetes.container.name`*:: ++ +-- type: keyword Kubernetes container name -[float] -=== `kubernetes.container.image` +-- +*`kubernetes.container.image`*:: ++ +-- type: keyword Kubernetes container image +-- + [[exported-fields-kubernetes]] == Kubernetes fields @@ -5077,14 +6383,16 @@ kubernetes container metrics -[float] -=== `kubernetes.container.start_time` - +*`kubernetes.container.start_time`*:: ++ +-- type: date Start time +-- + [float] == cpu fields @@ -5094,22 +6402,50 @@ CPU usage metrics -[float] -=== `kubernetes.container.cpu.usage.core.ns` - +*`kubernetes.container.cpu.usage.core.ns`*:: ++ +-- type: long Container CPU Core usage nanoseconds -[float] -=== `kubernetes.container.cpu.usage.nanocores` +-- +*`kubernetes.container.cpu.usage.nanocores`*:: ++ +-- type: long CPU used nanocores +-- + +*`kubernetes.container.cpu.usage.node.pct`*:: ++ +-- +type: scaled_float + +format: percentage + +CPU usage as a percentage of the total node allocatable CPU + + +-- + +*`kubernetes.container.cpu.usage.limit.pct`*:: ++ +-- +type: scaled_float + +format: percentage + +CPU usage as a percentage of the defined limit for the container (or total node allocatable CPU if unlimited) + + +-- + [float] == logs fields @@ -5118,9 +6454,9 @@ Logs info -[float] -=== `kubernetes.container.logs.available.bytes` - +*`kubernetes.container.logs.available.bytes`*:: ++ +-- type: long format: bytes @@ -5128,10 +6464,12 @@ format: bytes Logs available capacity in bytes +-- -[float] -=== `kubernetes.container.logs.capacity.bytes` +*`kubernetes.container.logs.capacity.bytes`*:: ++ +-- type: long format: bytes @@ -5139,10 +6477,12 @@ format: bytes Logs total capacity in bytes +-- -[float] -=== `kubernetes.container.logs.used.bytes` +*`kubernetes.container.logs.used.bytes`*:: ++ +-- type: long format: bytes @@ -5150,36 +6490,44 @@ format: bytes Logs used capacity in bytes +-- -[float] -=== `kubernetes.container.logs.inodes.count` +*`kubernetes.container.logs.inodes.count`*:: ++ +-- type: long Total available inodes -[float] -=== `kubernetes.container.logs.inodes.free` +-- +*`kubernetes.container.logs.inodes.free`*:: ++ +-- type: long Total free inodes -[float] -=== `kubernetes.container.logs.inodes.used` +-- +*`kubernetes.container.logs.inodes.used`*:: ++ +-- type: long Total used inodes +-- -[float] -=== `kubernetes.container.memory.available.bytes` +*`kubernetes.container.memory.available.bytes`*:: ++ +-- type: long format: bytes @@ -5187,10 +6535,12 @@ format: bytes Total available memory +-- -[float] -=== `kubernetes.container.memory.usage.bytes` +*`kubernetes.container.memory.usage.bytes`*:: ++ +-- type: long format: bytes @@ 
-5198,21 +6548,49 @@ format: bytes Total memory usage +-- -[float] -=== `kubernetes.container.memory.rss.bytes` +*`kubernetes.container.memory.usage.node.pct`*:: ++ +-- +type: scaled_float -type: long +format: percentage + +Memory usage as a percentage of the total node allocatable memory + + +-- + +*`kubernetes.container.memory.usage.limit.pct`*:: ++ +-- +type: scaled_float + +format: percentage + +Memory usage as a percentage of the defined limit for the container (or total node allocatable memory if unlimited) + + +-- + + +*`kubernetes.container.memory.rss.bytes`*:: ++ +-- +type: long format: bytes RSS memory usage +-- -[float] -=== `kubernetes.container.memory.workingset.bytes` +*`kubernetes.container.memory.workingset.bytes`*:: ++ +-- type: long format: bytes @@ -5220,27 +6598,33 @@ format: bytes Working set memory usage -[float] -=== `kubernetes.container.memory.pagefaults` +-- +*`kubernetes.container.memory.pagefaults`*:: ++ +-- type: long Number of page faults -[float] -=== `kubernetes.container.memory.majorpagefaults` +-- +*`kubernetes.container.memory.majorpagefaults`*:: ++ +-- type: long Number of major page faults +-- -[float] -=== `kubernetes.container.rootfs.capacity.bytes` +*`kubernetes.container.rootfs.capacity.bytes`*:: ++ +-- type: long format: bytes @@ -5248,10 +6632,12 @@ format: bytes Root filesystem total capacity in bytes +-- -[float] -=== `kubernetes.container.rootfs.available.bytes` +*`kubernetes.container.rootfs.available.bytes`*:: ++ +-- type: long format: bytes @@ -5259,10 +6645,12 @@ format: bytes Root filesystem total available in bytes +-- -[float] -=== `kubernetes.container.rootfs.used.bytes` +*`kubernetes.container.rootfs.used.bytes`*:: ++ +-- type: long format: bytes @@ -5270,15 +6658,19 @@ format: bytes Root filesystem total used in bytes +-- -[float] -=== `kubernetes.container.rootfs.inodes.used` +*`kubernetes.container.rootfs.inodes.used`*:: ++ +-- type: long Used inodes +-- + [float] == event fields @@ -5286,38 +6678,46 @@ The Kubernetes events metricset collects events that are generated by objects ru -[float] -=== `kubernetes.event.count` - +*`kubernetes.event.count`*:: ++ +-- type: long Count field records the number of times the particular event has occurred -[float] -=== `kubernetes.event.message` +-- +*`kubernetes.event.message`*:: ++ +-- type: keyword Message recorded for the given event -[float] -=== `kubernetes.event.reason` +-- +*`kubernetes.event.reason`*:: ++ +-- type: keyword Reason recorded for the given event -[float] -=== `kubernetes.event.type` +-- +*`kubernetes.event.type`*:: ++ +-- type: keyword Type of the given event +-- + [float] == metadata fields @@ -5326,54 +6726,66 @@ Metadata associated with the given event -[float] -=== `kubernetes.event.metadata.timestamp.created` - +*`kubernetes.event.metadata.timestamp.created`*:: ++ +-- type: date Timestamp of creation of the given event -[float] -=== `kubernetes.event.metadata.name` +-- +*`kubernetes.event.metadata.name`*:: ++ +-- type: keyword Name of the event -[float] -=== `kubernetes.event.metadata.namespace` +-- +*`kubernetes.event.metadata.namespace`*:: ++ +-- type: keyword Namespace in which event was generated -[float] -=== `kubernetes.event.metadata.resource_version` +-- +*`kubernetes.event.metadata.resource_version`*:: ++ +-- type: keyword Version of the event resource -[float] -=== `kubernetes.event.metadata.uid` +-- +*`kubernetes.event.metadata.uid`*:: ++ +-- type: keyword Unique identifier to the event object -[float] -=== `kubernetes.event.metadata.self_link` +-- 
+*`kubernetes.event.metadata.self_link`*:: ++ +-- type: keyword URL representing the event +-- + [float] == involved_object fields @@ -5381,46 +6793,56 @@ Metadata associated with the given involved object -[float] -=== `kubernetes.event.involved_object.api_version` - +*`kubernetes.event.involved_object.api_version`*:: ++ +-- type: keyword API version of the object -[float] -=== `kubernetes.event.involved_object.kind` +-- +*`kubernetes.event.involved_object.kind`*:: ++ +-- type: keyword API kind of the object -[float] -=== `kubernetes.event.involved_object.name` +-- +*`kubernetes.event.involved_object.name`*:: ++ +-- type: keyword name of the object -[float] -=== `kubernetes.event.involved_object.resource_version` +-- +*`kubernetes.event.involved_object.resource_version`*:: ++ +-- type: keyword resource version of the object -[float] -=== `kubernetes.event.involved_object.uid` +-- +*`kubernetes.event.involved_object.uid`*:: ++ +-- type: keyword UUID version of the object +-- + [float] == node fields @@ -5428,14 +6850,16 @@ kubernetes node metrics -[float] -=== `kubernetes.node.start_time` - +*`kubernetes.node.start_time`*:: ++ +-- type: date Start time +-- + [float] == cpu fields @@ -5445,27 +6869,31 @@ CPU usage metrics -[float] -=== `kubernetes.node.cpu.usage.core.ns` - +*`kubernetes.node.cpu.usage.core.ns`*:: ++ +-- type: long Node CPU Core usage nanoseconds -[float] -=== `kubernetes.node.cpu.usage.nanocores` +-- +*`kubernetes.node.cpu.usage.nanocores`*:: ++ +-- type: long CPU used nanocores +-- -[float] -=== `kubernetes.node.memory.available.bytes` +*`kubernetes.node.memory.available.bytes`*:: ++ +-- type: long format: bytes @@ -5473,10 +6901,12 @@ format: bytes Total available memory +-- -[float] -=== `kubernetes.node.memory.usage.bytes` +*`kubernetes.node.memory.usage.bytes`*:: ++ +-- type: long format: bytes @@ -5484,10 +6914,12 @@ format: bytes Total memory usage +-- -[float] -=== `kubernetes.node.memory.rss.bytes` +*`kubernetes.node.memory.rss.bytes`*:: ++ +-- type: long format: bytes @@ -5495,10 +6927,12 @@ format: bytes RSS memory usage +-- -[float] -=== `kubernetes.node.memory.workingset.bytes` +*`kubernetes.node.memory.workingset.bytes`*:: ++ +-- type: long format: bytes @@ -5506,27 +6940,33 @@ format: bytes Working set memory usage -[float] -=== `kubernetes.node.memory.pagefaults` +-- +*`kubernetes.node.memory.pagefaults`*:: ++ +-- type: long Number of page faults -[float] -=== `kubernetes.node.memory.majorpagefaults` +-- +*`kubernetes.node.memory.majorpagefaults`*:: ++ +-- type: long Number of major page faults +-- -[float] -=== `kubernetes.node.network.rx.bytes` +*`kubernetes.node.network.rx.bytes`*:: ++ +-- type: long format: bytes @@ -5534,18 +6974,22 @@ format: bytes Received bytes -[float] -=== `kubernetes.node.network.rx.errors` +-- +*`kubernetes.node.network.rx.errors`*:: ++ +-- type: long Rx errors +-- -[float] -=== `kubernetes.node.network.tx.bytes` +*`kubernetes.node.network.tx.bytes`*:: ++ +-- type: long format: bytes @@ -5553,19 +6997,23 @@ format: bytes Transmitted bytes -[float] -=== `kubernetes.node.network.tx.errors` +-- +*`kubernetes.node.network.tx.errors`*:: ++ +-- type: long Tx errors +-- -[float] -=== `kubernetes.node.fs.capacity.bytes` +*`kubernetes.node.fs.capacity.bytes`*:: ++ +-- type: long format: bytes @@ -5573,10 +7021,12 @@ format: bytes Filesystem total capacity in bytes +-- -[float] -=== `kubernetes.node.fs.available.bytes` +*`kubernetes.node.fs.available.bytes`*:: ++ +-- type: long format: bytes @@ -5584,10 +7034,12 @@ format: bytes Filesystem 
total available in bytes +-- -[float] -=== `kubernetes.node.fs.used.bytes` +*`kubernetes.node.fs.used.bytes`*:: ++ +-- type: long format: bytes @@ -5595,37 +7047,45 @@ format: bytes Filesystem total used in bytes +-- -[float] -=== `kubernetes.node.fs.inodes.used` +*`kubernetes.node.fs.inodes.used`*:: ++ +-- type: long Number of used inodes -[float] -=== `kubernetes.node.fs.inodes.count` +-- +*`kubernetes.node.fs.inodes.count`*:: ++ +-- type: long Number of inodes -[float] -=== `kubernetes.node.fs.inodes.free` +-- +*`kubernetes.node.fs.inodes.free`*:: ++ +-- type: long Number of free inodes +-- -[float] -=== `kubernetes.node.runtime.imagefs.capacity.bytes` +*`kubernetes.node.runtime.imagefs.capacity.bytes`*:: ++ +-- type: long format: bytes @@ -5633,10 +7093,12 @@ format: bytes Image filesystem total capacity in bytes +-- -[float] -=== `kubernetes.node.runtime.imagefs.available.bytes` +*`kubernetes.node.runtime.imagefs.available.bytes`*:: ++ +-- type: long format: bytes @@ -5644,10 +7106,12 @@ format: bytes Image filesystem total available in bytes +-- -[float] -=== `kubernetes.node.runtime.imagefs.used.bytes` +*`kubernetes.node.runtime.imagefs.used.bytes`*:: ++ +-- type: long format: bytes @@ -5655,6 +7119,8 @@ format: bytes Image filesystem total used in bytes +-- + [float] == pod fields @@ -5662,19 +7128,21 @@ kubernetes pod metrics -[float] -=== `kubernetes.pod.start_time` - +*`kubernetes.pod.start_time`*:: ++ +-- type: date Start time +-- -[float] -=== `kubernetes.pod.network.rx.bytes` +*`kubernetes.pod.network.rx.bytes`*:: ++ +-- type: long format: bytes @@ -5682,18 +7150,22 @@ format: bytes Received bytes -[float] -=== `kubernetes.pod.network.rx.errors` +-- +*`kubernetes.pod.network.rx.errors`*:: ++ +-- type: long Rx errors +-- -[float] -=== `kubernetes.pod.network.tx.bytes` +*`kubernetes.pod.network.tx.bytes`*:: ++ +-- type: long format: bytes @@ -5701,153 +7173,265 @@ format: bytes Transmitted bytes -[float] -=== `kubernetes.pod.network.tx.errors` +-- +*`kubernetes.pod.network.tx.errors`*:: ++ +-- type: long Tx errors -[float] -== container fields +-- -kubernetes container metrics +[float] +== cpu fields +CPU usage metrics -[float] -=== `kubernetes.container.id` -type: keyword -Container id +*`kubernetes.pod.cpu.usage.nanocores`*:: ++ +-- +type: long +CPU used nanocores -[float] -=== `kubernetes.container.status.phase` -type: keyword +-- -Container phase (running, waiting, terminated) +*`kubernetes.pod.cpu.usage.node.pct`*:: ++ +-- +type: scaled_float +format: percentage -[float] -=== `kubernetes.container.status.ready` +CPU usage as a percentage of the total node CPU -type: boolean -Container ready status +-- +*`kubernetes.pod.cpu.usage.limit.pct`*:: ++ +-- +type: scaled_float -[float] -=== `kubernetes.container.status.restarts` +format: percentage -type: integer +CPU usage as a percentage of the defined limit for the pod containers (or total node CPU if unlimited) -Container restarts count +-- -[float] -=== `kubernetes.container.cpu.limit.nanocores` +*`kubernetes.pod.memory.usage.bytes`*:: ++ +-- type: long -Container CPU nanocores limit +format: bytes +Total memory usage -[float] -=== `kubernetes.container.cpu.request.nanocores` -type: long +-- -Container CPU requested nanocores +*`kubernetes.pod.memory.usage.node.pct`*:: ++ +-- +type: scaled_float +format: percentage +Memory usage as a percentage of the total node allocatable memory -[float] -=== `kubernetes.container.memory.limit.bytes` -type: long +-- -format: bytes +*`kubernetes.pod.memory.usage.limit.pct`*:: ++ +-- +type: 
scaled_float -Container memory limit in bytes +format: percentage +Memory usage as a percentage of the defined limit for the pod containers (or total node allocatable memory if unlimited) -[float] -=== `kubernetes.container.memory.request.bytes` -type: long +-- -format: bytes +[float] +== container fields -Container requested memory in bytes +kubernetes container metrics -[float] -== deployment fields -kubernetes deployment metrics +*`kubernetes.container.id`*:: ++ +-- +type: keyword +Container id +-- -[float] -=== `kubernetes.deployment.name` +*`kubernetes.container.status.phase`*:: ++ +-- type: keyword -Kubernetes deployment name +Container phase (running, waiting, terminated) -[float] -=== `kubernetes.deployment.paused` +-- +*`kubernetes.container.status.ready`*:: ++ +-- type: boolean -Kubernetes deployment paused status - +Container ready status -[float] -== replicas fields -Kubernetes deployment replicas info +-- + +*`kubernetes.container.status.restarts`*:: ++ +-- +type: integer + +Container restarts count + + +-- + + +*`kubernetes.container.cpu.limit.nanocores`*:: ++ +-- +type: long + +Container CPU nanocores limit + + +-- + +*`kubernetes.container.cpu.request.nanocores`*:: ++ +-- +type: long + +Container CPU requested nanocores + + +-- + + +*`kubernetes.container.memory.limit.bytes`*:: ++ +-- +type: long + +format: bytes + +Container memory limit in bytes + + +-- + +*`kubernetes.container.memory.request.bytes`*:: ++ +-- +type: long + +format: bytes + +Container requested memory in bytes + + +-- + +[float] +== deployment fields + +kubernetes deployment metrics + + + +*`kubernetes.deployment.name`*:: ++ +-- +type: keyword + +Kubernetes deployment name + + +-- + +*`kubernetes.deployment.paused`*:: ++ +-- +type: boolean + +Kubernetes deployment paused status +-- [float] -=== `kubernetes.deployment.replicas.desired` +== replicas fields + +Kubernetes deployment replicas info + + +*`kubernetes.deployment.replicas.desired`*:: ++ +-- type: integer Deployment number of desired replicas (spec) -[float] -=== `kubernetes.deployment.replicas.available` +-- +*`kubernetes.deployment.replicas.available`*:: ++ +-- type: integer Deployment available replicas -[float] -=== `kubernetes.deployment.replicas.unavailable` +-- +*`kubernetes.deployment.replicas.unavailable`*:: ++ +-- type: integer Deployment unavailable replicas -[float] -=== `kubernetes.deployment.replicas.updated` +-- +*`kubernetes.deployment.replicas.updated`*:: ++ +-- type: integer Deployment updated replicas +-- + [float] == node fields @@ -5856,43 +7440,51 @@ kubernetes node metrics -[float] -=== `kubernetes.node.status.ready` - +*`kubernetes.node.status.ready`*:: ++ +-- type: keyword Node ready status (true, false or unknown) -[float] -=== `kubernetes.node.status.unschedulable` +-- +*`kubernetes.node.status.unschedulable`*:: ++ +-- type: boolean Node unschedulable status +-- -[float] -=== `kubernetes.node.cpu.allocatable.cores` +*`kubernetes.node.cpu.allocatable.cores`*:: ++ +-- type: float Node CPU allocatable cores -[float] -=== `kubernetes.node.cpu.capacity.cores` +-- +*`kubernetes.node.cpu.capacity.cores`*:: ++ +-- type: long Node CPU capacity cores +-- -[float] -=== `kubernetes.node.memory.allocatable.bytes` +*`kubernetes.node.memory.allocatable.bytes`*:: ++ +-- type: long format: bytes @@ -5900,9 +7492,11 @@ format: bytes Node allocatable memory in bytes -[float] -=== `kubernetes.node.memory.capacity.bytes` +-- +*`kubernetes.node.memory.capacity.bytes`*:: ++ +-- type: long format: bytes @@ -5910,23 +7504,29 @@ format: bytes Node 
memory capacity in bytes +-- -[float] -=== `kubernetes.node.pod.allocatable.total` +*`kubernetes.node.pod.allocatable.total`*:: ++ +-- type: long Node allocatable pods -[float] -=== `kubernetes.node.pod.capacity.total` +-- +*`kubernetes.node.pod.capacity.total`*:: ++ +-- type: long Node pod capacity +-- + [float] == pod fields @@ -5934,22 +7534,26 @@ kubernetes pod metrics -[float] -=== `kubernetes.pod.ip` - +*`kubernetes.pod.ip`*:: ++ +-- type: ip Kubernetes pod IP -[float] -=== `kubernetes.pod.host_ip` +-- +*`kubernetes.pod.host_ip`*:: ++ +-- type: ip Kubernetes pod host IP +-- + [float] == status fields @@ -5957,30 +7561,36 @@ Kubernetes pod status metrics -[float] -=== `kubernetes.pod.status.phase` - +*`kubernetes.pod.status.phase`*:: ++ +-- type: keyword Kubernetes pod phase (Running, Pending...) -[float] -=== `kubernetes.pod.status.ready` +-- +*`kubernetes.pod.status.ready`*:: ++ +-- type: keyword Kubernetes pod ready status (true, false or unknown) -[float] -=== `kubernetes.pod.status.scheduled` +-- +*`kubernetes.pod.status.scheduled`*:: ++ +-- type: keyword Kubernetes pod scheduled status (true, false, unknown) +-- + [float] == replicaset fields @@ -5988,14 +7598,16 @@ kubernetes replica set metrics -[float] -=== `kubernetes.replicaset.name` - +*`kubernetes.replicaset.name`*:: ++ +-- type: keyword Kubernetes replica set name +-- + [float] == replicas fields @@ -6003,69 +7615,164 @@ Kubernetes replica set paused status -[float] -=== `kubernetes.replicaset.replicas.available` - +*`kubernetes.replicaset.replicas.available`*:: ++ +-- type: long The number of replicas per ReplicaSet -[float] -=== `kubernetes.replicaset.replicas.desired` +-- +*`kubernetes.replicaset.replicas.desired`*:: ++ +-- type: long The number of replicas per ReplicaSet -[float] -=== `kubernetes.replicaset.replicas.ready` +-- +*`kubernetes.replicaset.replicas.ready`*:: ++ +-- type: long The number of ready replicas per ReplicaSet -[float] -=== `kubernetes.replicaset.replicas.observed` +-- +*`kubernetes.replicaset.replicas.observed`*:: ++ +-- type: long The generation observed by the ReplicaSet controller -[float] -=== `kubernetes.replicaset.replicas.labeled` +-- +*`kubernetes.replicaset.replicas.labeled`*:: ++ +-- type: long The number of fully labeled replicas per ReplicaSet +-- + [float] -== system fields +== statefulset fields -kubernetes system containers metrics +kubernetes stateful set metrics + + + +*`kubernetes.statefulset.name`*:: ++ +-- +type: keyword + +Kubernetes stateful set name + + +-- + +*`kubernetes.statefulset.created`*:: ++ +-- +type: long + +The creation timestamp (epoch) for StatefulSet + + +-- + +[float] +== replicas fields + +Kubernetes stateful set replicas status + + + +*`kubernetes.statefulset.replicas.observed`*:: ++ +-- +type: long + +The number of observed replicas per StatefulSet + + +-- + +*`kubernetes.statefulset.replicas.desired`*:: ++ +-- +type: long + +The number of desired replicas per StatefulSet + + +-- + +[float] +== generation fields + +Kubernetes stateful set generation information +*`kubernetes.statefulset.generation.observed`*:: ++ +-- +type: long + +The observed generation per StatefulSet + + +-- + +*`kubernetes.statefulset.generation.desired`*:: ++ +-- +type: long + +The desired generation per StatefulSet + + +-- + [float] -=== `kubernetes.system.container` +== system fields + +kubernetes system containers metrics + + +*`kubernetes.system.container`*:: ++ +-- type: keyword Container name -[float] -=== `kubernetes.system.start_time` +-- +*`kubernetes.system.start_time`*:: 
++ +-- type: date Start time +-- + [float] == cpu fields @@ -6075,27 +7782,31 @@ CPU usage metrics -[float] -=== `kubernetes.system.cpu.usage.core.ns` - +*`kubernetes.system.cpu.usage.core.ns`*:: ++ +-- type: long CPU Core usage nanoseconds -[float] -=== `kubernetes.system.cpu.usage.nanocores` +-- +*`kubernetes.system.cpu.usage.nanocores`*:: ++ +-- type: long CPU used nanocores +-- -[float] -=== `kubernetes.system.memory.usage.bytes` +*`kubernetes.system.memory.usage.bytes`*:: ++ +-- type: long format: bytes @@ -6103,10 +7814,12 @@ format: bytes Total memory usage +-- -[float] -=== `kubernetes.system.memory.rss.bytes` +*`kubernetes.system.memory.rss.bytes`*:: ++ +-- type: long format: bytes @@ -6114,10 +7827,12 @@ format: bytes RSS memory usage +-- -[float] -=== `kubernetes.system.memory.workingset.bytes` +*`kubernetes.system.memory.workingset.bytes`*:: ++ +-- type: long format: bytes @@ -6125,22 +7840,28 @@ format: bytes Working set memory usage -[float] -=== `kubernetes.system.memory.pagefaults` +-- +*`kubernetes.system.memory.pagefaults`*:: ++ +-- type: long Number of page faults -[float] -=== `kubernetes.system.memory.majorpagefaults` +-- +*`kubernetes.system.memory.majorpagefaults`*:: ++ +-- type: long Number of major page faults +-- + [float] == volume fields @@ -6148,19 +7869,21 @@ kubernetes volume metrics -[float] -=== `kubernetes.volume.name` - +*`kubernetes.volume.name`*:: ++ +-- type: keyword Volume name +-- -[float] -=== `kubernetes.volume.fs.capacity.bytes` +*`kubernetes.volume.fs.capacity.bytes`*:: ++ +-- type: long format: bytes @@ -6168,10 +7891,12 @@ format: bytes Filesystem total capacity in bytes +-- -[float] -=== `kubernetes.volume.fs.available.bytes` +*`kubernetes.volume.fs.available.bytes`*:: ++ +-- type: long format: bytes @@ -6179,10 +7904,12 @@ format: bytes Filesystem total available in bytes +-- -[float] -=== `kubernetes.volume.fs.used.bytes` +*`kubernetes.volume.fs.used.bytes`*:: ++ +-- type: long format: bytes @@ -6190,31 +7917,107 @@ format: bytes Filesystem total used in bytes +-- -[float] -=== `kubernetes.volume.fs.inodes.used` +*`kubernetes.volume.fs.inodes.used`*:: ++ +-- type: long Used inodes -[float] -=== `kubernetes.volume.fs.inodes.free` +-- +*`kubernetes.volume.fs.inodes.free`*:: ++ +-- type: long Free inodes -[float] -=== `kubernetes.volume.fs.inodes.count` +-- +*`kubernetes.volume.fs.inodes.count`*:: ++ +-- type: long Total inodes +-- + +[[exported-fields-kvm]] +== kvm fields + +experimental[] +kvm module + + + +[float] +== kvm fields + + + + +[float] +== dommemstat fields + +dommemstat + + + +[float] +== stat fields + +Memory stat + + + +*`kvm.dommemstat.stat.name`*:: ++ +-- +type: keyword + +Memory stat name + + +-- + +*`kvm.dommemstat.stat.value`*:: ++ +-- +type: long + +Memory stat value + + +-- + +*`kvm.dommemstat.id`*:: ++ +-- +type: long + +Domain id + + +-- + +*`kvm.dommemstat.name`*:: ++ +-- +type: keyword + +Domain name + + +-- + [[exported-fields-logstash]] == Logstash fields @@ -6235,22 +8038,26 @@ node -[float] -=== `logstash.node.host` - +*`logstash.node.host`*:: ++ +-- type: keyword Host name -[float] -=== `logstash.node.version` +-- +*`logstash.node.version`*:: ++ +-- type: keyword Logstash Version +-- + [float] == jvm fields @@ -6258,24 +8065,28 @@ JVM Info -[float] -=== `logstash.node.jvm.version` - +*`logstash.node.jvm.version`*:: ++ +-- type: keyword Version -[float] -=== `logstash.node.jvm.pid` +-- +*`logstash.node.jvm.pid`*:: ++ +-- type: long Pid +-- + [float] -== node_stats fields +== node.stats fields node_stats metrics. 
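Looking back at the Kubernetes CPU fields above, the new `*.pct` metrics (for example `kubernetes.pod.cpu.usage.node.pct`) relate nanocore usage to the node's allocatable cores from `kubernetes.node.cpu.allocatable.cores`. The arithmetic below is a worked illustration of that relationship under the standard Kubernetes convention that one core equals 10^9 nanocores; it is not the module's code:

[source,go]
----
package main

import "fmt"

// cpuNodePct relates kubernetes.pod.cpu.usage.nanocores to
// kubernetes.node.cpu.allocatable.cores, as described for
// kubernetes.pod.cpu.usage.node.pct above.
// One CPU core is 1e9 nanocores (standard Kubernetes resource units).
func cpuNodePct(usageNanocores int64, allocatableCores float64) float64 {
	if allocatableCores <= 0 {
		return 0
	}
	return float64(usageNanocores) / (allocatableCores * 1e9)
}

func main() {
	// A pod using 250m of CPU on a node with 4 allocatable cores.
	fmt.Printf("%.4f\n", cpuNodePct(250000000, 4)) // prints 0.0625, i.e. 6.25%
}
----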
@@ -6288,30 +8099,36 @@ Events stats -[float] -=== `logstash.node_stats.events.in` - +*`logstash.node.stats.events.in`*:: ++ +-- type: long Incoming events counter. -[float] -=== `logstash.node_stats.events.out` +-- +*`logstash.node.stats.events.out`*:: ++ +-- type: long Outgoing events counter. -[float] -=== `logstash.node_stats.events.filtered` +-- +*`logstash.node.stats.events.filtered`*:: ++ +-- type: long Filtered events counter. +-- + [[exported-fields-memcached]] == Memcached fields @@ -6332,118 +8149,146 @@ stats -[float] -=== `memcached.stats.pid` - +*`memcached.stats.pid`*:: ++ +-- type: long Current process ID of the Memcached task. -[float] -=== `memcached.stats.uptime.sec` +-- +*`memcached.stats.uptime.sec`*:: ++ +-- type: long Memcached server uptime. -[float] -=== `memcached.stats.threads` +-- +*`memcached.stats.threads`*:: ++ +-- type: long Number of threads used by the current Memcached server process. -[float] -=== `memcached.stats.connections.current` +-- +*`memcached.stats.connections.current`*:: ++ +-- type: long Number of open connections to this Memcached server, should be the same value on all servers during normal operation. -[float] -=== `memcached.stats.connections.total` +-- +*`memcached.stats.connections.total`*:: ++ +-- type: long Numer of successful connect attempts to this server since it has been started. -[float] -=== `memcached.stats.get.hits` +-- +*`memcached.stats.get.hits`*:: ++ +-- type: long Number of successful "get" commands (cache hits) since startup, divide them by the "cmd_get" value to get the cache hitrate. -[float] -=== `memcached.stats.get.misses` +-- +*`memcached.stats.get.misses`*:: ++ +-- type: long Number of failed "get" requests because nothing was cached for this key or the cached value was too old. -[float] -=== `memcached.stats.cmd.get` +-- +*`memcached.stats.cmd.get`*:: ++ +-- type: long Number of "get" commands received since server startup not counting if they were successful or not. -[float] -=== `memcached.stats.cmd.set` +-- +*`memcached.stats.cmd.set`*:: ++ +-- type: long Number of "set" commands serviced since startup. -[float] -=== `memcached.stats.read.bytes` +-- +*`memcached.stats.read.bytes`*:: ++ +-- type: long Total number of bytes received from the network by this server. -[float] -=== `memcached.stats.written.bytes` +-- +*`memcached.stats.written.bytes`*:: ++ +-- type: long Total number of bytes send to the network by this server. -[float] -=== `memcached.stats.items.current` +-- +*`memcached.stats.items.current`*:: ++ +-- type: long Number of items currently in this server's cache. -[float] -=== `memcached.stats.items.total` +-- +*`memcached.stats.items.total`*:: ++ +-- type: long Number of items stored ever stored on this server. This is no "maximum item count" value but a counted increased by every new item stored in the cache. -[float] -=== `memcached.stats.evictions` +-- +*`memcached.stats.evictions`*:: ++ +-- type: long Number of objects removed from the cache to free up memory for new items because Memcached reached it's maximum memory setting (limit_maxbytes). +-- + [[exported-fields-mongodb]] == MongoDB fields @@ -6465,175 +8310,217 @@ MongoDB collection statistics metrics. -[float] -=== `mongodb.collstats.db` - +*`mongodb.collstats.db`*:: ++ +-- type: keyword Database name. -[float] -=== `mongodb.collstats.collection` +-- +*`mongodb.collstats.collection`*:: ++ +-- type: keyword Collection name. 
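The `memcached.stats.get.hits` description above already hints at the useful derived number: divide the hits by `memcached.stats.cmd.get` to obtain the cache hit rate. Spelled out as a minimal sketch:

[source,go]
----
package main

import "fmt"

// hitRate is the calculation suggested in the memcached.stats.get.hits
// description: successful gets (memcached.stats.get.hits) divided by all
// get commands received (memcached.stats.cmd.get).
func hitRate(getHits, cmdGet int64) float64 {
	if cmdGet == 0 {
		return 0
	}
	return float64(getHits) / float64(cmdGet)
}

func main() {
	fmt.Printf("%.2f\n", hitRate(900, 1000)) // 0.90 hit rate
}
----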
-[float] -=== `mongodb.collstats.name` +-- +*`mongodb.collstats.name`*:: ++ +-- type: keyword Combination of database and collection name. -[float] -=== `mongodb.collstats.total.time.us` +-- +*`mongodb.collstats.total.time.us`*:: ++ +-- type: long Total waiting time for locks in microseconds. -[float] -=== `mongodb.collstats.total.count` +-- +*`mongodb.collstats.total.count`*:: ++ +-- type: long Total number of lock wait events. +-- -[float] -=== `mongodb.collstats.lock.read.time.us` +*`mongodb.collstats.lock.read.time.us`*:: ++ +-- type: long Time waiting for read locks in microseconds. -[float] -=== `mongodb.collstats.lock.read.count` +-- +*`mongodb.collstats.lock.read.count`*:: ++ +-- type: long Number of read lock wait events. -[float] -=== `mongodb.collstats.lock.write.time.us` +-- +*`mongodb.collstats.lock.write.time.us`*:: ++ +-- type: long Time waiting for write locks in microseconds. -[float] -=== `mongodb.collstats.lock.write.count` +-- +*`mongodb.collstats.lock.write.count`*:: ++ +-- type: long Number of write lock wait events. -[float] -=== `mongodb.collstats.queries.time.us` +-- +*`mongodb.collstats.queries.time.us`*:: ++ +-- type: long Time running queries in microseconds. -[float] -=== `mongodb.collstats.queries.count` +-- +*`mongodb.collstats.queries.count`*:: ++ +-- type: long Number of queries executed. -[float] -=== `mongodb.collstats.getmore.time.us` +-- +*`mongodb.collstats.getmore.time.us`*:: ++ +-- type: long Time asking for more cursor rows in microseconds. -[float] -=== `mongodb.collstats.getmore.count` +-- +*`mongodb.collstats.getmore.count`*:: ++ +-- type: long Number of times a cursor asked for more data. -[float] -=== `mongodb.collstats.insert.time.us` +-- +*`mongodb.collstats.insert.time.us`*:: ++ +-- type: long Time inserting new documents in microseconds. -[float] -=== `mongodb.collstats.insert.count` +-- +*`mongodb.collstats.insert.count`*:: ++ +-- type: long Number of document insert events. -[float] -=== `mongodb.collstats.update.time.us` +-- +*`mongodb.collstats.update.time.us`*:: ++ +-- type: long Time updating documents in microseconds. -[float] -=== `mongodb.collstats.update.count` +-- +*`mongodb.collstats.update.count`*:: ++ +-- type: long Number of document update events. -[float] -=== `mongodb.collstats.remove.time.us` +-- +*`mongodb.collstats.remove.time.us`*:: ++ +-- type: long Time deleting documents in microseconds. -[float] -=== `mongodb.collstats.remove.count` +-- +*`mongodb.collstats.remove.count`*:: ++ +-- type: long Number of document delete events. -[float] -=== `mongodb.collstats.commands.time.us` +-- +*`mongodb.collstats.commands.time.us`*:: ++ +-- type: long Time executing database commands in microseconds. -[float] -=== `mongodb.collstats.commands.count` +-- +*`mongodb.collstats.commands.count`*:: ++ +-- type: long Number of database commands executed. +-- + [float] == dbstats fields @@ -6641,95 +8528,125 @@ dbstats provides an overview of a particular mongo database. 
This document is mo -[float] -=== `mongodb.dbstats.avg_obj_size.bytes` - +*`mongodb.dbstats.avg_obj_size.bytes`*:: ++ +-- type: long format: bytes -[float] -=== `mongodb.dbstats.collections` +-- +*`mongodb.dbstats.collections`*:: ++ +-- type: integer -[float] -=== `mongodb.dbstats.data_size.bytes` +-- +*`mongodb.dbstats.data_size.bytes`*:: ++ +-- type: long format: bytes -[float] -=== `mongodb.dbstats.db` +-- +*`mongodb.dbstats.db`*:: ++ +-- type: keyword -[float] -=== `mongodb.dbstats.file_size.bytes` +-- +*`mongodb.dbstats.file_size.bytes`*:: ++ +-- type: long format: bytes -[float] -=== `mongodb.dbstats.index_size.bytes` +-- +*`mongodb.dbstats.index_size.bytes`*:: ++ +-- type: long format: bytes -[float] -=== `mongodb.dbstats.indexes` +-- +*`mongodb.dbstats.indexes`*:: ++ +-- type: long -[float] -=== `mongodb.dbstats.num_extents` +-- +*`mongodb.dbstats.num_extents`*:: ++ +-- type: long -[float] -=== `mongodb.dbstats.objects` +-- +*`mongodb.dbstats.objects`*:: ++ +-- type: long -[float] -=== `mongodb.dbstats.storage_size.bytes` +-- +*`mongodb.dbstats.storage_size.bytes`*:: ++ +-- type: long format: bytes -[float] -=== `mongodb.dbstats.ns_size_mb.mb` +-- +*`mongodb.dbstats.ns_size_mb.mb`*:: ++ +-- type: long +-- -[float] -=== `mongodb.dbstats.data_file_version.major` +*`mongodb.dbstats.data_file_version.major`*:: ++ +-- type: long -[float] -=== `mongodb.dbstats.data_file_version.minor` +-- +*`mongodb.dbstats.data_file_version.minor`*:: ++ +-- type: long +-- -[float] -=== `mongodb.dbstats.extent_free_list.num` +*`mongodb.dbstats.extent_free_list.num`*:: ++ +-- type: long -[float] -=== `mongodb.dbstats.extent_free_list.size.bytes` +-- +*`mongodb.dbstats.extent_free_list.size.bytes`*:: ++ +-- type: long format: bytes +-- + [float] == status fields @@ -6737,70 +8654,86 @@ MongoDB server status metrics. -[float] -=== `mongodb.status.version` - +*`mongodb.status.version`*:: ++ +-- type: keyword Instance version. -[float] -=== `mongodb.status.uptime.ms` +-- +*`mongodb.status.uptime.ms`*:: ++ +-- type: long Instance uptime in milliseconds. -[float] -=== `mongodb.status.local_time` +-- +*`mongodb.status.local_time`*:: ++ +-- type: date Local time as reported by the MongoDB instance. -[float] -=== `mongodb.status.asserts.regular` +-- +*`mongodb.status.asserts.regular`*:: ++ +-- type: long Number of regular assertions produced by the server. -[float] -=== `mongodb.status.asserts.warning` +-- +*`mongodb.status.asserts.warning`*:: ++ +-- type: long Number of warning assertions produced by the server. -[float] -=== `mongodb.status.asserts.msg` +-- +*`mongodb.status.asserts.msg`*:: ++ +-- type: long Number of msg assertions produced by the server. -[float] -=== `mongodb.status.asserts.user` +-- +*`mongodb.status.asserts.user`*:: ++ +-- type: long Number of user assertions produced by the server. -[float] -=== `mongodb.status.asserts.rollovers` +-- +*`mongodb.status.asserts.rollovers`*:: ++ +-- type: long Number of rollovers assertions produced by the server. +-- + [float] == background_flushing fields @@ -6808,46 +8741,56 @@ Data about the process MongoDB uses to write data to disk. This data is only ava -[float] -=== `mongodb.status.background_flushing.flushes` - +*`mongodb.status.background_flushing.flushes`*:: ++ +-- type: long A counter that collects the number of times the database has flushed all writes to disk. 
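The `mongodb.status.*` fields in this group mirror the sections of MongoDB's `serverStatus` command output (asserts, connections, background flushing, journaling, opcounters, memory, and so on). Purely to illustrate that underlying data source, the sketch below runs `serverStatus` with the official Go driver; the driver choice, connection string, and printed keys are assumptions for the example and say nothing about how the Metricbeat module itself is implemented:

[source,go]
----
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// Illustration only: run serverStatus and print a few of the sections that
// the mongodb.status.* fields above are drawn from.
func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	var status bson.M
	cmd := bson.D{{Key: "serverStatus", Value: 1}}
	if err := client.Database("admin").RunCommand(ctx, cmd).Decode(&status); err != nil {
		log.Fatal(err)
	}

	fmt.Println("version:    ", status["version"])
	fmt.Println("connections:", status["connections"])
	fmt.Println("opcounters: ", status["opcounters"])
	fmt.Println("asserts:    ", status["asserts"])
}
----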
-[float] -=== `mongodb.status.background_flushing.total.ms` +-- +*`mongodb.status.background_flushing.total.ms`*:: ++ +-- type: long The total number of milliseconds (ms) that the mongod processes have spent writing (i.e. flushing) data to disk. Because this is an absolute value, consider the value of `flushes` and `average_ms` to provide better context for this datum. -[float] -=== `mongodb.status.background_flushing.average.ms` +-- +*`mongodb.status.background_flushing.average.ms`*:: ++ +-- type: long The average time spent flushing to disk per flush event. -[float] -=== `mongodb.status.background_flushing.last.ms` +-- +*`mongodb.status.background_flushing.last.ms`*:: ++ +-- type: long The amount of time, in milliseconds, that the last flush operation took to complete. -[float] -=== `mongodb.status.background_flushing.last_finished` +-- +*`mongodb.status.background_flushing.last_finished`*:: ++ +-- type: date A timestamp of the last completed flush operation. +-- + [float] == connections fields @@ -6855,30 +8798,36 @@ Data regarding the current status of incoming connections and availability of th -[float] -=== `mongodb.status.connections.current` - +*`mongodb.status.connections.current`*:: ++ +-- type: long The number of connections to the database server from clients. This number includes the current shell session. Consider the value of `available` to add more context to this datum. -[float] -=== `mongodb.status.connections.available` +-- +*`mongodb.status.connections.available`*:: ++ +-- type: long The number of unused available incoming connections the database can provide. -[float] -=== `mongodb.status.connections.total_created` +-- +*`mongodb.status.connections.total_created`*:: ++ +-- type: long A count of all incoming connections created to the server. This number includes connections that have since closed. +-- + [float] == journaling fields @@ -6886,54 +8835,66 @@ Data about the journaling-related operations and performance. Journaling informa -[float] -=== `mongodb.status.journaling.commits` - +*`mongodb.status.journaling.commits`*:: ++ +-- type: long The number of transactions written to the journal during the last journal group commit interval. -[float] -=== `mongodb.status.journaling.journaled.mb` +-- +*`mongodb.status.journaling.journaled.mb`*:: ++ +-- type: long The amount of data in megabytes (MB) written to journal during the last journal group commit interval. -[float] -=== `mongodb.status.journaling.write_to_data_files.mb` +-- +*`mongodb.status.journaling.write_to_data_files.mb`*:: ++ +-- type: long The amount of data in megabytes (MB) written from journal to the data files during the last journal group commit interval. -[float] -=== `mongodb.status.journaling.compression` +-- +*`mongodb.status.journaling.compression`*:: ++ +-- type: long The compression ratio of the data written to the journal. -[float] -=== `mongodb.status.journaling.commits_in_write_lock` +-- +*`mongodb.status.journaling.commits_in_write_lock`*:: ++ +-- type: long Count of the commits that occurred while a write lock was held. Commits in a write lock indicate a MongoDB node under a heavy write load and call for further diagnosis. -[float] -=== `mongodb.status.journaling.early_commits` +-- +*`mongodb.status.journaling.early_commits`*:: ++ +-- type: long The number of times MongoDB requested a commit before the scheduled journal group commit interval. 
+-- + [float] == times fields @@ -6941,62 +8902,76 @@ Information about the performance of the mongod instance during the various phas -[float] -=== `mongodb.status.journaling.times.dt.ms` - +*`mongodb.status.journaling.times.dt.ms`*:: ++ +-- type: long The amount of time over which MongoDB collected the times data. Use this field to provide context to the other times field values. -[float] -=== `mongodb.status.journaling.times.prep_log_buffer.ms` +-- +*`mongodb.status.journaling.times.prep_log_buffer.ms`*:: ++ +-- type: long The amount of time spent preparing to write to the journal. Smaller values indicate better journal performance. -[float] -=== `mongodb.status.journaling.times.write_to_journal.ms` +-- +*`mongodb.status.journaling.times.write_to_journal.ms`*:: ++ +-- type: long The amount of time spent actually writing to the journal. File system speeds and device interfaces can affect performance. -[float] -=== `mongodb.status.journaling.times.write_to_data_files.ms` +-- +*`mongodb.status.journaling.times.write_to_data_files.ms`*:: ++ +-- type: long The amount of time spent writing to data files after journaling. File system speeds and device interfaces can affect performance. -[float] -=== `mongodb.status.journaling.times.remap_private_view.ms` +-- +*`mongodb.status.journaling.times.remap_private_view.ms`*:: ++ +-- type: long The amount of time spent remapping copy-on-write memory mapped views. Smaller values indicate better journal performance. -[float] -=== `mongodb.status.journaling.times.commits.ms` +-- +*`mongodb.status.journaling.times.commits.ms`*:: ++ +-- type: long The amount of time spent for commits. -[float] -=== `mongodb.status.journaling.times.commits_in_write_lock.ms` +-- +*`mongodb.status.journaling.times.commits_in_write_lock.ms`*:: ++ +-- type: long The amount of time spent for commits that occurred while a write lock was held. +-- + [float] == extra_info fields @@ -7004,9 +8979,9 @@ Platform specific data. -[float] -=== `mongodb.status.extra_info.heap_usage.bytes` - +*`mongodb.status.extra_info.heap_usage.bytes`*:: ++ +-- type: long format: bytes @@ -7014,14 +8989,18 @@ format: bytes The total size in bytes of heap space used by the database process. Only available on Unix/Linux. -[float] -=== `mongodb.status.extra_info.page_faults` +-- +*`mongodb.status.extra_info.page_faults`*:: ++ +-- type: long The total number of page faults that require disk operations. Page faults refer to operations that require the database server to access data that isn't available in active memory. +-- + [float] == network fields @@ -7029,9 +9008,9 @@ Platform specific data. -[float] -=== `mongodb.status.network.in.bytes` - +*`mongodb.status.network.in.bytes`*:: ++ +-- type: long format: bytes @@ -7039,9 +9018,11 @@ format: bytes The amount of network traffic, in bytes, received by this database. -[float] -=== `mongodb.status.network.out.bytes` +-- +*`mongodb.status.network.out.bytes`*:: ++ +-- type: long format: bytes @@ -7049,14 +9030,18 @@ format: bytes The amount of network traffic, in bytes, sent from this database. -[float] -=== `mongodb.status.network.requests` +-- +*`mongodb.status.network.requests`*:: ++ +-- type: long The total number of requests received by the server. +-- + [float] == opcounters fields @@ -7064,54 +9049,66 @@ An overview of database operations by type. -[float] -=== `mongodb.status.opcounters.insert` - +*`mongodb.status.opcounters.insert`*:: ++ +-- type: long The total number of insert operations received since the mongod instance last started. 
-[float] -=== `mongodb.status.opcounters.query` +-- +*`mongodb.status.opcounters.query`*:: ++ +-- type: long The total number of queries received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters.update` +-- +*`mongodb.status.opcounters.update`*:: ++ +-- type: long The total number of update operations received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters.delete` +-- +*`mongodb.status.opcounters.delete`*:: ++ +-- type: long The total number of delete operations received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters.getmore` +-- +*`mongodb.status.opcounters.getmore`*:: ++ +-- type: long The total number of getmore operations received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters.command` +-- +*`mongodb.status.opcounters.command`*:: ++ +-- type: long The total number of commands issued to the database since the mongod instance last started. +-- + [float] == opcounters_replicated fields @@ -7119,54 +9116,66 @@ An overview of database replication operations by type. -[float] -=== `mongodb.status.opcounters_replicated.insert` - +*`mongodb.status.opcounters_replicated.insert`*:: ++ +-- type: long The total number of replicated insert operations received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters_replicated.query` +-- +*`mongodb.status.opcounters_replicated.query`*:: ++ +-- type: long The total number of replicated queries received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters_replicated.update` +-- +*`mongodb.status.opcounters_replicated.update`*:: ++ +-- type: long The total number of replicated update operations received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters_replicated.delete` +-- +*`mongodb.status.opcounters_replicated.delete`*:: ++ +-- type: long The total number of replicated delete operations received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters_replicated.getmore` +-- +*`mongodb.status.opcounters_replicated.getmore`*:: ++ +-- type: long The total number of replicated getmore operations received since the mongod instance last started. -[float] -=== `mongodb.status.opcounters_replicated.command` +-- +*`mongodb.status.opcounters_replicated.command`*:: ++ +-- type: long The total number of replicated commands issued to the database since the mongod instance last started. +-- + [float] == memory fields @@ -7174,62 +9183,76 @@ Data about the current memory usage of the mongod server. -[float] -=== `mongodb.status.memory.bits` - +*`mongodb.status.memory.bits`*:: ++ +-- type: long Either 64 or 32, depending on which target architecture was specified during the mongod compilation process. -[float] -=== `mongodb.status.memory.resident.mb` +-- +*`mongodb.status.memory.resident.mb`*:: ++ +-- type: long The amount of RAM, in megabytes (MB), currently used by the database process. -[float] -=== `mongodb.status.memory.virtual.mb` +-- +*`mongodb.status.memory.virtual.mb`*:: ++ +-- type: long The amount, in megabytes (MB), of virtual memory used by the mongod process. -[float] -=== `mongodb.status.memory.mapped.mb` +-- +*`mongodb.status.memory.mapped.mb`*:: ++ +-- type: long The amount of mapped memory, in megabytes (MB), used by the database. Because MongoDB uses memory-mapped files, this value is likely to be to be roughly equivalent to the total size of your database or databases. 
-[float] -=== `mongodb.status.memory.mapped_with_journal.mb` +-- +*`mongodb.status.memory.mapped_with_journal.mb`*:: ++ +-- type: long The amount of mapped memory, in megabytes (MB), including the memory used for journaling. -[float] -=== `mongodb.status.write_backs_queued` +-- +*`mongodb.status.write_backs_queued`*:: ++ +-- type: boolean True when there are operations from a mongos instance queued for retrying. -[float] -=== `mongodb.status.storage_engine.name` +-- +*`mongodb.status.storage_engine.name`*:: ++ +-- type: keyword A string that represents the name of the current storage engine. +-- + [float] == wired_tiger fields @@ -7244,54 +9267,66 @@ Statistics about the transactions currently in progress. -[float] -=== `mongodb.status.wired_tiger.concurrent_transactions.write.out` - +*`mongodb.status.wired_tiger.concurrent_transactions.write.out`*:: ++ +-- type: long Number of concurrent write transaction in progress. -[float] -=== `mongodb.status.wired_tiger.concurrent_transactions.write.available` +-- +*`mongodb.status.wired_tiger.concurrent_transactions.write.available`*:: ++ +-- type: long Number of concurrent write tickets available. -[float] -=== `mongodb.status.wired_tiger.concurrent_transactions.write.total_tickets` +-- +*`mongodb.status.wired_tiger.concurrent_transactions.write.total_tickets`*:: ++ +-- type: long Number of total write tickets. -[float] -=== `mongodb.status.wired_tiger.concurrent_transactions.read.out` +-- +*`mongodb.status.wired_tiger.concurrent_transactions.read.out`*:: ++ +-- type: long Number of concurrent read transaction in progress. -[float] -=== `mongodb.status.wired_tiger.concurrent_transactions.read.available` +-- +*`mongodb.status.wired_tiger.concurrent_transactions.read.available`*:: ++ +-- type: long Number of concurrent read tickets available. -[float] -=== `mongodb.status.wired_tiger.concurrent_transactions.read.total_tickets` +-- +*`mongodb.status.wired_tiger.concurrent_transactions.read.total_tickets`*:: ++ +-- type: long Number of total read tickets. +-- + [float] == cache fields @@ -7299,9 +9334,9 @@ Statistics about the cache and page evictions from the cache. -[float] -=== `mongodb.status.wired_tiger.cache.maximum.bytes` - +*`mongodb.status.wired_tiger.cache.maximum.bytes`*:: ++ +-- type: long format: bytes @@ -7309,9 +9344,11 @@ format: bytes Maximum cache size. -[float] -=== `mongodb.status.wired_tiger.cache.used.bytes` +-- +*`mongodb.status.wired_tiger.cache.used.bytes`*:: ++ +-- type: long format: bytes @@ -7319,9 +9356,11 @@ format: bytes Size in byte of the data currently in cache. -[float] -=== `mongodb.status.wired_tiger.cache.dirty.bytes` +-- +*`mongodb.status.wired_tiger.cache.dirty.bytes`*:: ++ +-- type: long format: bytes @@ -7329,30 +9368,38 @@ format: bytes Size in bytes of the dirty data in the cache. -[float] -=== `mongodb.status.wired_tiger.cache.pages.read` +-- +*`mongodb.status.wired_tiger.cache.pages.read`*:: ++ +-- type: long Number of pages read into the cache. -[float] -=== `mongodb.status.wired_tiger.cache.pages.write` +-- +*`mongodb.status.wired_tiger.cache.pages.write`*:: ++ +-- type: long Number of pages written from the cache. -[float] -=== `mongodb.status.wired_tiger.cache.pages.evicted` +-- +*`mongodb.status.wired_tiger.cache.pages.evicted`*:: ++ +-- type: long Number of pages evicted from the cache. +-- + [float] == log fields @@ -7360,9 +9407,9 @@ Statistics about the write ahead log used by WiredTiger. 
-[float] -=== `mongodb.status.wired_tiger.log.size.bytes` - +*`mongodb.status.wired_tiger.log.size.bytes`*:: ++ +-- type: long format: bytes @@ -7370,9 +9417,11 @@ format: bytes Total log size in bytes. -[float] -=== `mongodb.status.wired_tiger.log.write.bytes` +-- +*`mongodb.status.wired_tiger.log.write.bytes`*:: ++ +-- type: long format: bytes @@ -7380,9 +9429,11 @@ format: bytes Number of bytes written into the log. -[float] -=== `mongodb.status.wired_tiger.log.max_file_size.bytes` +-- +*`mongodb.status.wired_tiger.log.max_file_size.bytes`*:: ++ +-- type: long format: bytes @@ -7390,38 +9441,63 @@ format: bytes Maximum file size. -[float] -=== `mongodb.status.wired_tiger.log.flushes` +-- +*`mongodb.status.wired_tiger.log.flushes`*:: ++ +-- type: long Number of flush operations. -[float] -=== `mongodb.status.wired_tiger.log.writes` +-- +*`mongodb.status.wired_tiger.log.writes`*:: ++ +-- type: long Number of write operations. -[float] -=== `mongodb.status.wired_tiger.log.scans` +-- +*`mongodb.status.wired_tiger.log.scans`*:: ++ +-- type: long Number of scan operations. -[float] -=== `mongodb.status.wired_tiger.log.syncs` +-- +*`mongodb.status.wired_tiger.log.syncs`*:: ++ +-- type: long Number of sync operations. +-- + +[[exported-fields-munin]] +== Munin fields + +experimental[] +Munin node metrics exporter + + + +[float] +== munin fields + +munin contains metrics exposed by a munin node agent + + + [[exported-fields-mysql]] == MySQL fields @@ -7450,42 +9526,50 @@ Aborted status fields. -[float] -=== `mysql.status.aborted.clients` - +*`mysql.status.aborted.clients`*:: ++ +-- type: long The number of connections that were aborted because the client died without closing the connection properly. -[float] -=== `mysql.status.aborted.connects` +-- +*`mysql.status.aborted.connects`*:: ++ +-- type: long The number of failed attempts to connect to the MySQL server. +-- + [float] == binlog fields -[float] -=== `mysql.status.binlog.cache.disk_use` - +*`mysql.status.binlog.cache.disk_use`*:: ++ +-- type: long -[float] -=== `mysql.status.binlog.cache.use` +-- +*`mysql.status.binlog.cache.use`*:: ++ +-- type: long +-- + [float] == bytes fields @@ -7493,9 +9577,9 @@ Bytes stats. -[float] -=== `mysql.status.bytes.received` - +*`mysql.status.bytes.received`*:: ++ +-- type: long format: bytes @@ -7503,9 +9587,11 @@ format: bytes The number of bytes received from all clients. -[float] -=== `mysql.status.bytes.sent` +-- +*`mysql.status.bytes.sent`*:: ++ +-- type: long format: bytes @@ -7513,6 +9599,8 @@ format: bytes The number of bytes sent to all clients. +-- + [float] == threads fields @@ -7520,185 +9608,227 @@ Threads stats. -[float] -=== `mysql.status.threads.cached` - +*`mysql.status.threads.cached`*:: ++ +-- type: long The number of cached threads. -[float] -=== `mysql.status.threads.created` +-- +*`mysql.status.threads.created`*:: ++ +-- type: long The number of created threads. -[float] -=== `mysql.status.threads.connected` +-- +*`mysql.status.threads.connected`*:: ++ +-- type: long The number of connected threads. -[float] -=== `mysql.status.threads.running` +-- +*`mysql.status.threads.running`*:: ++ +-- type: long The number of running threads. 
-[float] -=== `mysql.status.connections` +-- +*`mysql.status.connections`*:: ++ +-- type: long +-- + [float] == created fields -[float] -=== `mysql.status.created.tmp.disk_tables` - +*`mysql.status.created.tmp.disk_tables`*:: ++ +-- type: long -[float] -=== `mysql.status.created.tmp.files` +-- +*`mysql.status.created.tmp.files`*:: ++ +-- type: long -[float] -=== `mysql.status.created.tmp.tables` +-- +*`mysql.status.created.tmp.tables`*:: ++ +-- type: long +-- + [float] == delayed fields -[float] -=== `mysql.status.delayed.errors` - +*`mysql.status.delayed.errors`*:: ++ +-- type: long -[float] -=== `mysql.status.delayed.insert_threads` +-- +*`mysql.status.delayed.insert_threads`*:: ++ +-- type: long -[float] -=== `mysql.status.delayed.writes` +-- +*`mysql.status.delayed.writes`*:: ++ +-- type: long -[float] -=== `mysql.status.flush_commands` +-- +*`mysql.status.flush_commands`*:: ++ +-- type: long -[float] -=== `mysql.status.max_used_connections` +-- +*`mysql.status.max_used_connections`*:: ++ +-- type: long +-- + [float] == open fields -[float] -=== `mysql.status.open.files` - +*`mysql.status.open.files`*:: ++ +-- type: long -[float] -=== `mysql.status.open.streams` +-- +*`mysql.status.open.streams`*:: ++ +-- type: long -[float] -=== `mysql.status.open.tables` +-- +*`mysql.status.open.tables`*:: ++ +-- type: long -[float] -=== `mysql.status.opened_tables` +-- +*`mysql.status.opened_tables`*:: ++ +-- type: long +-- + [float] == command fields -[float] -=== `mysql.status.command.delete` - +*`mysql.status.command.delete`*:: ++ +-- type: long The number of DELETE queries since startup. -[float] -=== `mysql.status.command.insert` +-- +*`mysql.status.command.insert`*:: ++ +-- type: long The number of INSERT queries since startup. -[float] -=== `mysql.status.command.select` +-- +*`mysql.status.command.select`*:: ++ +-- type: long The number of SELECT queries since startup. -[float] -=== `mysql.status.command.update` +-- +*`mysql.status.command.update`*:: ++ +-- type: long The number of UPDATE queries since startup. +-- + [[exported-fields-nginx]] == Nginx fields @@ -7720,86 +9850,106 @@ Nginx server status metrics collected from various modules. -[float] -=== `nginx.stubstatus.hostname` - +*`nginx.stubstatus.hostname`*:: ++ +-- type: keyword Nginx hostname. -[float] -=== `nginx.stubstatus.active` +-- +*`nginx.stubstatus.active`*:: ++ +-- type: long The current number of active client connections including Waiting connections. -[float] -=== `nginx.stubstatus.accepts` +-- +*`nginx.stubstatus.accepts`*:: ++ +-- type: long The total number of accepted client connections. -[float] -=== `nginx.stubstatus.handled` +-- +*`nginx.stubstatus.handled`*:: ++ +-- type: long The total number of handled client connections. -[float] -=== `nginx.stubstatus.dropped` +-- +*`nginx.stubstatus.dropped`*:: ++ +-- type: long The total number of dropped client connections. -[float] -=== `nginx.stubstatus.requests` +-- +*`nginx.stubstatus.requests`*:: ++ +-- type: long The total number of client requests. -[float] -=== `nginx.stubstatus.current` +-- +*`nginx.stubstatus.current`*:: ++ +-- type: long The current number of client requests. -[float] -=== `nginx.stubstatus.reading` +-- +*`nginx.stubstatus.reading`*:: ++ +-- type: long The current number of connections where Nginx is reading the request header. -[float] -=== `nginx.stubstatus.writing` +-- +*`nginx.stubstatus.writing`*:: ++ +-- type: long The current number of connections where Nginx is writing the response back to the client. 
-[float] -=== `nginx.stubstatus.waiting` +-- +*`nginx.stubstatus.waiting`*:: ++ +-- type: long The current number of idle client connections waiting for a request. +-- + [[exported-fields-php_fpm]] == PHP_FPM fields @@ -7822,22 +9972,26 @@ PHP-FPM server status metrics collected from PHP-FPM. -[float] -=== `php_fpm.pool.name` - +*`php_fpm.pool.name`*:: ++ +-- type: keyword The name of the pool. -[float] -=== `php_fpm.pool.process_manager` +-- +*`php_fpm.pool.process_manager`*:: ++ +-- type: keyword Static, dynamic or ondemand. +-- + [float] == connections fields @@ -7845,38 +9999,46 @@ Connection state specific statistics. -[float] -=== `php_fpm.pool.connections.accepted` - +*`php_fpm.pool.connections.accepted`*:: ++ +-- type: long The number of incoming requests that the PHP-FPM server has accepted; when a connection is accepted it is removed from the listen queue. -[float] -=== `php_fpm.pool.connections.queued` +-- +*`php_fpm.pool.connections.queued`*:: ++ +-- type: long The current number of connections that have been initiated, but not yet accepted. If this value is non-zero it typically means that all the available server processes are currently busy, and there are no processes available to serve the next request. Raising `pm.max_children` (provided the server can handle it) should help keep this number low. This property follows from the fact that PHP-FPM listens via a socket (TCP or file based), and thus inherits some of the characteristics of sockets. -[float] -=== `php_fpm.pool.connections.max_listen_queue` +-- +*`php_fpm.pool.connections.max_listen_queue`*:: ++ +-- type: long The maximum number of requests in the queue of pending connections since FPM has started. -[float] -=== `php_fpm.pool.connections.listen_queue_len` +-- +*`php_fpm.pool.connections.listen_queue_len`*:: ++ +-- type: long The size of the socket queue of pending connections. +-- + [float] == processes fields @@ -7884,65 +10046,79 @@ Process state specific statistics. -[float] -=== `php_fpm.pool.processes.idle` - +*`php_fpm.pool.processes.idle`*:: ++ +-- type: long The number of servers in the `waiting to process` state (i.e. not currently serving a page). This value should fall between the `pm.min_spare_servers` and `pm.max_spare_servers` values when the process manager is `dynamic`. -[float] -=== `php_fpm.pool.processes.active` +-- +*`php_fpm.pool.processes.active`*:: ++ +-- type: long The number of servers current processing a page - the minimum is `1` (so even on a fully idle server, the result will be not read `0`). -[float] -=== `php_fpm.pool.processes.total` +-- +*`php_fpm.pool.processes.total`*:: ++ +-- type: long The number of idle + active processes. -[float] -=== `php_fpm.pool.processes.max_active` +-- +*`php_fpm.pool.processes.max_active`*:: ++ +-- type: long The maximum number of active processes since FPM has started. -[float] -=== `php_fpm.pool.processes.max_children_reached` +-- +*`php_fpm.pool.processes.max_children_reached`*:: ++ +-- type: long Number of times, the process limit has been reached, when pm tries to start more children (works only for pm 'dynamic' and 'ondemand'). -[float] -=== `php_fpm.pool.slow_requests` +-- +*`php_fpm.pool.slow_requests`*:: ++ +-- type: long The number of times a request execution time has exceeded `request_slowlog_timeout`. -[float] -=== `php_fpm.pool.start_since` +-- +*`php_fpm.pool.start_since`*:: ++ +-- type: long Number of seconds since FPM has started. 
-[float] -=== `php_fpm.pool.start_time` +-- +*`php_fpm.pool.start_time`*:: ++ +-- type: date format: epoch_second @@ -7950,6 +10126,8 @@ format: epoch_second The date and time FPM has started. +-- + [[exported-fields-postgresql]] == PostgreSQL fields @@ -7971,113 +10149,141 @@ One document per server process, showing information related to the current acti -[float] -=== `postgresql.activity.database.oid` - +*`postgresql.activity.database.oid`*:: ++ +-- type: long OID of the database this backend is connected to. -[float] -=== `postgresql.activity.database.name` +-- +*`postgresql.activity.database.name`*:: ++ +-- type: keyword Name of the database this backend is connected to. -[float] -=== `postgresql.activity.pid` +-- +*`postgresql.activity.pid`*:: ++ +-- type: long Process ID of this backend. -[float] -=== `postgresql.activity.user.id` +-- +*`postgresql.activity.user.id`*:: ++ +-- type: long OID of the user logged into this backend. -[float] -=== `postgresql.activity.user.name` +-- +*`postgresql.activity.user.name`*:: ++ +-- Name of the user logged into this backend. -[float] -=== `postgresql.activity.application_name` +-- +*`postgresql.activity.application_name`*:: ++ +-- Name of the application that is connected to this backend. -[float] -=== `postgresql.activity.client.address` +-- +*`postgresql.activity.client.address`*:: ++ +-- IP address of the client connected to this backend. -[float] -=== `postgresql.activity.client.hostname` +-- +*`postgresql.activity.client.hostname`*:: ++ +-- Host name of the connected client, as reported by a reverse DNS lookup of client_addr. -[float] -=== `postgresql.activity.client.port` +-- +*`postgresql.activity.client.port`*:: ++ +-- type: long TCP port number that the client is using for communication with this backend, or -1 if a Unix socket is used. -[float] -=== `postgresql.activity.backend_start` +-- +*`postgresql.activity.backend_start`*:: ++ +-- type: date Time when this process was started, i.e., when the client connected to the server. -[float] -=== `postgresql.activity.transaction_start` +-- +*`postgresql.activity.transaction_start`*:: ++ +-- type: date Time when this process' current transaction was started. -[float] -=== `postgresql.activity.query_start` +-- +*`postgresql.activity.query_start`*:: ++ +-- type: date Time when the currently active query was started, or if state is not active, when the last query was started. -[float] -=== `postgresql.activity.state_change` +-- +*`postgresql.activity.state_change`*:: ++ +-- type: date Time when the state was last changed. -[float] -=== `postgresql.activity.waiting` +-- +*`postgresql.activity.waiting`*:: ++ +-- type: boolean True if this backend is currently waiting on a lock. -[float] -=== `postgresql.activity.state` +-- +*`postgresql.activity.state`*:: ++ +-- Current overall state of this backend. Possible values are: * active: The backend is executing a query. @@ -8091,12 +10297,16 @@ Current overall state of this backend. Possible values are: * disabled: This state is reported if track_activities is disabled in this backend. -[float] -=== `postgresql.activity.query` +-- +*`postgresql.activity.query`*:: ++ +-- Text of this backend's most recent query. If state is active this field shows the currently executing query. In all other states, it shows the last query that was executed. +-- + [float] == bgwriter fields @@ -8104,94 +10314,116 @@ Statistics about the background writer process's activity. 
Collected using the p -[float] -=== `postgresql.bgwriter.checkpoints.scheduled` - +*`postgresql.bgwriter.checkpoints.scheduled`*:: ++ +-- type: long Number of scheduled checkpoints that have been performed. -[float] -=== `postgresql.bgwriter.checkpoints.requested` +-- +*`postgresql.bgwriter.checkpoints.requested`*:: ++ +-- type: long Number of requested checkpoints that have been performed. -[float] -=== `postgresql.bgwriter.checkpoints.times.write.ms` +-- +*`postgresql.bgwriter.checkpoints.times.write.ms`*:: ++ +-- type: float Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds. -[float] -=== `postgresql.bgwriter.checkpoints.times.sync.ms` +-- +*`postgresql.bgwriter.checkpoints.times.sync.ms`*:: ++ +-- type: float Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds. -[float] -=== `postgresql.bgwriter.buffers.checkpoints` +-- +*`postgresql.bgwriter.buffers.checkpoints`*:: ++ +-- type: long Number of buffers written during checkpoints. -[float] -=== `postgresql.bgwriter.buffers.clean` +-- +*`postgresql.bgwriter.buffers.clean`*:: ++ +-- type: long Number of buffers written by the background writer. -[float] -=== `postgresql.bgwriter.buffers.clean_full` +-- +*`postgresql.bgwriter.buffers.clean_full`*:: ++ +-- type: long Number of times the background writer stopped a cleaning scan because it had written too many buffers. -[float] -=== `postgresql.bgwriter.buffers.backend` +-- +*`postgresql.bgwriter.buffers.backend`*:: ++ +-- type: long Number of buffers written directly by a backend. -[float] -=== `postgresql.bgwriter.buffers.backend_fsync` +-- +*`postgresql.bgwriter.buffers.backend_fsync`*:: ++ +-- type: long Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write) -[float] -=== `postgresql.bgwriter.buffers.allocated` +-- +*`postgresql.bgwriter.buffers.allocated`*:: ++ +-- type: long Number of buffers allocated. -[float] -=== `postgresql.bgwriter.stats_reset` +-- +*`postgresql.bgwriter.stats_reset`*:: ++ +-- type: date Time at which these statistics were last reset. +-- + [float] == database fields @@ -8199,229 +10431,454 @@ One row per database, showing database-wide statistics. Collected by querying pg -[float] -=== `postgresql.database.oid` - +*`postgresql.database.oid`*:: ++ +-- type: long OID of the database this backend is connected to. -[float] -=== `postgresql.database.name` +-- +*`postgresql.database.name`*:: ++ +-- type: keyword Name of the database this backend is connected to. -[float] -=== `postgresql.database.number_of_backends` +-- +*`postgresql.database.number_of_backends`*:: ++ +-- type: long Number of backends currently connected to this database. -[float] -=== `postgresql.database.transactions.commit` +-- +*`postgresql.database.transactions.commit`*:: ++ +-- type: long Number of transactions in this database that have been committed. -[float] -=== `postgresql.database.transactions.rollback` +-- +*`postgresql.database.transactions.rollback`*:: ++ +-- type: long Number of transactions in this database that have been rolled back. -[float] -=== `postgresql.database.blocks.read` +-- +*`postgresql.database.blocks.read`*:: ++ +-- type: long Number of disk blocks read in this database. 
+-- + +*`postgresql.database.blocks.hit`*:: ++ +-- +type: long + +Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache). + + +-- + +*`postgresql.database.blocks.time.read.ms`*:: ++ +-- +type: long + +Time spent reading data file blocks by backends in this database, in milliseconds. + + +-- + +*`postgresql.database.blocks.time.write.ms`*:: ++ +-- +type: long + +Time spent writing data file blocks by backends in this database, in milliseconds. + + +-- + +*`postgresql.database.rows.returned`*:: ++ +-- +type: long + +Number of rows returned by queries in this database. + + +-- + +*`postgresql.database.rows.fetched`*:: ++ +-- +type: long + +Number of rows fetched by queries in this database. + + +-- + +*`postgresql.database.rows.inserted`*:: ++ +-- +type: long + +Number of rows inserted by queries in this database. + + +-- + +*`postgresql.database.rows.updated`*:: ++ +-- +type: long + +Number of rows updated by queries in this database. + + +-- + +*`postgresql.database.rows.deleted`*:: ++ +-- +type: long + +Number of rows deleted by queries in this database. + + +-- + +*`postgresql.database.conflicts`*:: ++ +-- +type: long + +Number of queries canceled due to conflicts with recovery in this database. + + +-- + +*`postgresql.database.temporary.files`*:: ++ +-- +type: long + +Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting. + + +-- + +*`postgresql.database.temporary.bytes`*:: ++ +-- +type: long + +Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting. + + +-- + +*`postgresql.database.deadlocks`*:: ++ +-- +type: long + +Number of deadlocks detected in this database. + + +-- + +*`postgresql.database.stats_reset`*:: ++ +-- +type: date + +Time at which these statistics were last reset. + + +-- + +[[exported-fields-prometheus]] +== Prometheus fields + +Stats collected from Prometheus. + + + [float] -=== `postgresql.database.blocks.hit` +== prometheus fields + + + + +[float] +== stats fields + +Stats about the Prometheus server. + + + +[float] +== notifications fields + +Notification stats. + + + +*`prometheus.stats.notifications.queue_length`*:: ++ +-- +type: long + +Current queue length. + + +-- +*`prometheus.stats.notifications.dropped`*:: ++ +-- type: long -Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache). +Number of dropped queue events. -[float] -=== `postgresql.database.blocks.time.read.ms` +-- +*`prometheus.stats.processes.open_fds`*:: ++ +-- type: long -Time spent reading data file blocks by backends in this database, in milliseconds. +Number of open file descriptors. -[float] -=== `postgresql.database.blocks.time.write.ms` +-- +*`prometheus.stats.storage.chunks_to_persist`*:: ++ +-- type: long -Time spent writing data file blocks by backends in this database, in milliseconds. +Number of memory chunks that are not yet persisted to disk. 
-[float] -=== `postgresql.database.rows.returned` +-- -type: long +[[exported-fields-rabbitmq]] +== RabbitMQ fields + +RabbitMQ module -Number of rows returned by queries in this database. [float] -=== `postgresql.database.rows.fetched` +== rabbitmq fields -type: long -Number of rows fetched by queries in this database. [float] -=== `postgresql.database.rows.inserted` - -type: long +== connection fields -Number of rows inserted by queries in this database. +connection -[float] -=== `postgresql.database.rows.updated` -type: long +*`rabbitmq.connection.name`*:: ++ +-- +type: keyword -Number of rows updated by queries in this database. +The name of the connection with non-ASCII characters escaped as in C. -[float] -=== `postgresql.database.rows.deleted` +-- -type: long +*`rabbitmq.connection.vhost`*:: ++ +-- +type: keyword -Number of rows deleted by queries in this database. +Virtual host name with non-ASCII characters escaped as in C. -[float] -=== `postgresql.database.conflicts` +-- -type: long +*`rabbitmq.connection.user`*:: ++ +-- +type: keyword -Number of queries canceled due to conflicts with recovery in this database. +User name. -[float] -=== `postgresql.database.temporary.files` +-- -type: long +*`rabbitmq.connection.node`*:: ++ +-- +type: keyword -Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting. +Node name. -[float] -=== `postgresql.database.temporary.bytes` +-- +*`rabbitmq.connection.channels`*:: ++ +-- type: long -Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting. +The number of channels on the connection. -[float] -=== `postgresql.database.deadlocks` +-- +*`rabbitmq.connection.channel_max`*:: ++ +-- type: long -Number of deadlocks detected in this database. +The maximum number of channels allowed on the connection. -[float] -=== `postgresql.database.stats_reset` +-- -type: date +*`rabbitmq.connection.frame_max`*:: ++ +-- +type: long -Time at which these statistics were last reset. +format: bytes +Maximum permissible size of a frame (in bytes) to negotiate with clients. -[[exported-fields-prometheus]] -== Prometheus fields -Stats collected from Prometheus. +-- +*`rabbitmq.connection.type`*:: ++ +-- +type: keyword +Type of the connection. -[float] -== prometheus fields +-- +*`rabbitmq.connection.host`*:: ++ +-- +type: keyword +Server hostname obtained via reverse DNS, or its IP address if reverse DNS failed or was disabled. -[float] -== stats fields -Stats about the Prometheus server. +-- +*`rabbitmq.connection.peer.host`*:: ++ +-- +type: keyword +Peer hostname obtained via reverse DNS, or its IP address if reverse DNS failed or was not enabled. -[float] -== notifications fields -Notification stats. +-- +*`rabbitmq.connection.port`*:: ++ +-- +type: long +Server port. -[float] -=== `prometheus.stats.notifications.queue_length` +-- + +*`rabbitmq.connection.peer.port`*:: ++ +-- type: long -Current queue length. +Peer port. -[float] -=== `prometheus.stats.notifications.dropped` +-- +*`rabbitmq.connection.packet_count.sent`*:: ++ +-- type: long -Number of dropped queue events. +Number of packets sent on the connection. 
-[float] -=== `prometheus.stats.processes.open_fds` +-- +*`rabbitmq.connection.packet_count.received`*:: ++ +-- type: long -Number of open file descriptors. +Number of packets received on the connection. -[float] -=== `prometheus.stats.storage.chunks_to_persist` +-- +*`rabbitmq.connection.packet_count.pending`*:: ++ +-- type: long -Number of memory chunks that are not yet persisted to disk. +Number of packets pending on the connection. -[[exported-fields-rabbitmq]] -== RabbitMQ fields +-- -RabbitMQ module +*`rabbitmq.connection.octet_count.sent`*:: ++ +-- +type: long +Number of octets sent on the connection. -[float] -== rabbitmq fields +-- + +*`rabbitmq.connection.octet_count.received`*:: ++ +-- +type: long +Number of octets received on the connection. +-- [float] == node fields @@ -8430,9 +10887,9 @@ node -[float] -=== `rabbitmq.node.disk.free.bytes` - +*`rabbitmq.node.disk.free.bytes`*:: ++ +-- type: long format: bytes @@ -8440,9 +10897,11 @@ format: bytes Disk free space in bytes. -[float] -=== `rabbitmq.node.disk.free.limit.bytes` +-- +*`rabbitmq.node.disk.free.limit.bytes`*:: ++ +-- type: long format: bytes @@ -8450,33 +10909,41 @@ format: bytes Point at which the disk alarm will go off. -[float] -=== `rabbitmq.node.fd.total` +-- +*`rabbitmq.node.fd.total`*:: ++ +-- type: long File descriptors available. -[float] -=== `rabbitmq.node.fd.used` +-- +*`rabbitmq.node.fd.used`*:: ++ +-- type: long Used file descriptors. -[float] -=== `rabbitmq.node.gc.num.count` +-- +*`rabbitmq.node.gc.num.count`*:: ++ +-- type: long Number of GC operations. -[float] -=== `rabbitmq.node.gc.reclaimed.bytes` +-- +*`rabbitmq.node.gc.reclaimed.bytes`*:: ++ +-- type: long format: bytes @@ -8484,33 +10951,41 @@ format: bytes GC bytes reclaimed. -[float] -=== `rabbitmq.node.io.file_handle.open_attempt.avg.ms` +-- +*`rabbitmq.node.io.file_handle.open_attempt.avg.ms`*:: ++ +-- type: long File handle open avg time -[float] -=== `rabbitmq.node.io.file_handle.open_attempt.count` +-- +*`rabbitmq.node.io.file_handle.open_attempt.count`*:: ++ +-- type: long File handle open attempts -[float] -=== `rabbitmq.node.io.read.avg.ms` +-- +*`rabbitmq.node.io.read.avg.ms`*:: ++ +-- type: long File handle read avg time -[float] -=== `rabbitmq.node.io.read.bytes` +-- +*`rabbitmq.node.io.read.bytes`*:: ++ +-- type: long format: bytes @@ -8518,65 +10993,81 @@ format: bytes Data read in bytes -[float] -=== `rabbitmq.node.io.read.count` +-- +*`rabbitmq.node.io.read.count`*:: ++ +-- type: long Data read operations -[float] -=== `rabbitmq.node.io.reopen.count` +-- +*`rabbitmq.node.io.reopen.count`*:: ++ +-- type: long Data reopen operations -[float] -=== `rabbitmq.node.io.seek.avg.ms` +-- +*`rabbitmq.node.io.seek.avg.ms`*:: ++ +-- type: long Data seek avg time -[float] -=== `rabbitmq.node.io.seek.count` +-- +*`rabbitmq.node.io.seek.count`*:: ++ +-- type: long Data seek operations -[float] -=== `rabbitmq.node.io.sync.avg.ms` +-- +*`rabbitmq.node.io.sync.avg.ms`*:: ++ +-- type: long Data sync avg time -[float] -=== `rabbitmq.node.io.sync.count` +-- +*`rabbitmq.node.io.sync.count`*:: ++ +-- type: long Data sync operations -[float] -=== `rabbitmq.node.io.write.avg.ms` +-- +*`rabbitmq.node.io.write.avg.ms`*:: ++ +-- type: long Data write avg time -[float] -=== `rabbitmq.node.io.write.bytes` +-- +*`rabbitmq.node.io.write.bytes`*:: ++ +-- type: long format: bytes @@ -8584,17 +11075,21 @@ format: bytes Data write in bytes -[float] -=== `rabbitmq.node.io.write.count` +-- +*`rabbitmq.node.io.write.count`*:: ++ +-- type: long Data write operations 
-[float] -=== `rabbitmq.node.mem.limit.bytes` +-- +*`rabbitmq.node.mem.limit.bytes`*:: ++ +-- type: long format: bytes @@ -8602,142 +11097,178 @@ format: bytes Point at which the memory alarm will go off. -[float] -=== `rabbitmq.node.mem.used.bytes` +-- +*`rabbitmq.node.mem.used.bytes`*:: ++ +-- type: long Memory used in bytes. -[float] -=== `rabbitmq.node.mnesia.disk.tx.count` +-- +*`rabbitmq.node.mnesia.disk.tx.count`*:: ++ +-- type: long Number of Mnesia transactions which have been performed that required writes to disk. -[float] -=== `rabbitmq.node.mnesia.ram.tx.count` +-- +*`rabbitmq.node.mnesia.ram.tx.count`*:: ++ +-- type: long Number of Mnesia transactions which have been performed that did not require writes to disk. -[float] -=== `rabbitmq.node.msg.store_read.count` +-- +*`rabbitmq.node.msg.store_read.count`*:: ++ +-- type: long Number of messages which have been read from the message store. -[float] -=== `rabbitmq.node.msg.store_write.count` +-- +*`rabbitmq.node.msg.store_write.count`*:: ++ +-- type: long Number of messages which have been written to the message store. -[float] -=== `rabbitmq.node.name` +-- +*`rabbitmq.node.name`*:: ++ +-- type: keyword Node name -[float] -=== `rabbitmq.node.proc.total` +-- +*`rabbitmq.node.proc.total`*:: ++ +-- type: long Maximum number of Erlang processes. -[float] -=== `rabbitmq.node.proc.used` +-- +*`rabbitmq.node.proc.used`*:: ++ +-- type: long Number of Erlang processes in use. -[float] -=== `rabbitmq.node.processors` +-- +*`rabbitmq.node.processors`*:: ++ +-- type: long Number of cores detected and usable by Erlang. -[float] -=== `rabbitmq.node.queue.index.journal_write.count` +-- +*`rabbitmq.node.queue.index.journal_write.count`*:: ++ +-- type: long Number of records written to the queue index journal. -[float] -=== `rabbitmq.node.queue.index.read.count` +-- +*`rabbitmq.node.queue.index.read.count`*:: ++ +-- type: long Number of records read from the queue index. -[float] -=== `rabbitmq.node.queue.index.write.count` +-- +*`rabbitmq.node.queue.index.write.count`*:: ++ +-- type: long Number of records written to the queue index. -[float] -=== `rabbitmq.node.run.queue` +-- +*`rabbitmq.node.run.queue`*:: ++ +-- type: long Average number of Erlang processes waiting to run. -[float] -=== `rabbitmq.node.socket.total` +-- +*`rabbitmq.node.socket.total`*:: ++ +-- type: long File descriptors available for use as sockets. -[float] -=== `rabbitmq.node.socket.used` +-- +*`rabbitmq.node.socket.used`*:: ++ +-- type: long File descriptors used as sockets. -[float] -=== `rabbitmq.node.type` +-- +*`rabbitmq.node.type`*:: ++ +-- type: keyword Node type. -[float] -=== `rabbitmq.node.uptime` +-- +*`rabbitmq.node.uptime`*:: ++ +-- type: long Node uptime. +-- + [float] == queue fields @@ -8745,81 +11276,99 @@ queue -[float] -=== `rabbitmq.queue.name` - +*`rabbitmq.queue.name`*:: ++ +-- type: keyword The name of the queue with non-ASCII characters escaped as in C. -[float] -=== `rabbitmq.queue.vhost` +-- +*`rabbitmq.queue.vhost`*:: ++ +-- type: keyword Virtual host name with non-ASCII characters escaped as in C. -[float] -=== `rabbitmq.queue.durable` +-- +*`rabbitmq.queue.durable`*:: ++ +-- type: boolean Whether or not the queue survives server restarts. -[float] -=== `rabbitmq.queue.auto_delete` +-- +*`rabbitmq.queue.auto_delete`*:: ++ +-- type: boolean Whether the queue will be deleted automatically when no longer used. -[float] -=== `rabbitmq.queue.exclusive` +-- +*`rabbitmq.queue.exclusive`*:: ++ +-- type: boolean Whether the queue is exclusive (i.e. 
has owner_pid). -[float] -=== `rabbitmq.queue.node` +-- +*`rabbitmq.queue.node`*:: ++ +-- type: keyword Node name. -[float] -=== `rabbitmq.queue.state` +-- +*`rabbitmq.queue.state`*:: ++ +-- type: keyword The state of the queue. Normally 'running', but may be "{syncing, MsgCount}" if the queue is synchronising. Queues which are located on cluster nodes that are currently down will be shown with a status of 'down'. -[float] -=== `rabbitmq.queue.arguments.max_priority` +-- +*`rabbitmq.queue.arguments.max_priority`*:: ++ +-- type: long Maximum number of priority levels for the queue to support. -[float] -=== `rabbitmq.queue.consumers.count` +-- +*`rabbitmq.queue.consumers.count`*:: ++ +-- type: long Number of consumers. -[float] -=== `rabbitmq.queue.consumers.utilisation.pct` +-- +*`rabbitmq.queue.consumers.utilisation.pct`*:: ++ +-- type: long format: percentage @@ -8827,41 +11376,51 @@ format: percentage Fraction of the time (between 0.0 and 1.0) that the queue is able to immediately deliver messages to consumers. This can be less than 1.0 if consumers are limited by network congestion or prefetch count. -[float] -=== `rabbitmq.queue.messages.total.count` +-- +*`rabbitmq.queue.messages.total.count`*:: ++ +-- type: long Sum of ready and unacknowledged messages (queue depth). -[float] -=== `rabbitmq.queue.messages.ready.count` +-- +*`rabbitmq.queue.messages.ready.count`*:: ++ +-- type: long Number of messages ready to be delivered to clients. -[float] -=== `rabbitmq.queue.messages.unacknowledged.count` +-- +*`rabbitmq.queue.messages.unacknowledged.count`*:: ++ +-- type: long Number of messages delivered to clients but not yet acknowledged. -[float] -=== `rabbitmq.queue.messages.persistent.count` +-- +*`rabbitmq.queue.messages.persistent.count`*:: ++ +-- type: long Total number of persistent messages in the queue (will always be 0 for transient queues). -[float] -=== `rabbitmq.queue.memory.bytes` +-- +*`rabbitmq.queue.memory.bytes`*:: ++ +-- type: long format: bytes @@ -8869,22 +11428,28 @@ format: bytes Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures. -[float] -=== `rabbitmq.queue.disk.reads.count` +-- +*`rabbitmq.queue.disk.reads.count`*:: ++ +-- type: long Total number of times messages have been read from disk by this queue since it started. -[float] -=== `rabbitmq.queue.disk.writes.count` +-- +*`rabbitmq.queue.disk.writes.count`*:: ++ +-- type: long Total number of times messages have been written to disk by this queue since it started. +-- + [[exported-fields-redis]] == Redis fields @@ -8913,38 +11478,46 @@ Redis client stats. -[float] -=== `redis.info.clients.connected` - +*`redis.info.clients.connected`*:: ++ +-- type: long Number of client connections (excluding connections from slaves). -[float] -=== `redis.info.clients.longest_output_list` +-- +*`redis.info.clients.longest_output_list`*:: ++ +-- type: long Longest output list among current client connections. -[float] -=== `redis.info.clients.biggest_input_buf` +-- +*`redis.info.clients.biggest_input_buf`*:: ++ +-- type: long Biggest input buffer among current client connections. -[float] -=== `redis.info.clients.blocked` +-- +*`redis.info.clients.blocked`*:: ++ +-- type: long Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH). +-- + [float] == cluster fields @@ -8952,14 +11525,16 @@ Redis cluster information. 
-[float] -=== `redis.info.cluster.enabled` - +*`redis.info.cluster.enabled`*:: ++ +-- type: boolean Indicates that the Redis cluster is enabled. +-- + [float] == cpu fields @@ -8967,38 +11542,46 @@ Redis CPU stats -[float] -=== `redis.info.cpu.used.sys` - +*`redis.info.cpu.used.sys`*:: ++ +-- type: scaled_float System CPU consumed by the Redis server. -[float] -=== `redis.info.cpu.used.sys_children` +-- +*`redis.info.cpu.used.sys_children`*:: ++ +-- type: scaled_float User CPU consumed by the Redis server. -[float] -=== `redis.info.cpu.used.user` +-- +*`redis.info.cpu.used.user`*:: ++ +-- type: scaled_float System CPU consumed by the background processes. -[float] -=== `redis.info.cpu.used.user_children` +-- +*`redis.info.cpu.used.user_children`*:: ++ +-- type: scaled_float User CPU consumed by the background processes. +-- + [float] == memory fields @@ -9006,18 +11589,20 @@ Redis memory stats. -[float] -=== `redis.info.memory.used.value` - +*`redis.info.memory.used.value`*:: ++ +-- type: long format: bytes Used memory. -[float] -=== `redis.info.memory.used.rss` +-- +*`redis.info.memory.used.rss`*:: ++ +-- type: long format: bytes @@ -9025,9 +11610,11 @@ format: bytes Used memory rss. -[float] -=== `redis.info.memory.used.peak` +-- +*`redis.info.memory.used.peak`*:: ++ +-- type: long format: bytes @@ -9035,9 +11622,11 @@ format: bytes Used memory peak. -[float] -=== `redis.info.memory.used.lua` +-- +*`redis.info.memory.used.lua`*:: ++ +-- type: long format: bytes @@ -9045,14 +11634,18 @@ format: bytes Used memory lua. -[float] -=== `redis.info.memory.allocator` +-- +*`redis.info.memory.allocator`*:: ++ +-- type: keyword Memory allocator. +-- + [float] == persistence fields @@ -9060,116 +11653,144 @@ Redis CPU stats. -[float] -=== `redis.info.persistence.loading` - +*`redis.info.persistence.loading`*:: ++ +-- type: boolean None +-- + [float] == rdb fields None -[float] -=== `redis.info.persistence.rdb.last_save.changes_since` - +*`redis.info.persistence.rdb.last_save.changes_since`*:: ++ +-- type: long None -[float] -=== `redis.info.persistence.rdb.bgsave.in_progress` +-- +*`redis.info.persistence.rdb.bgsave.in_progress`*:: ++ +-- type: boolean None -[float] -=== `redis.info.persistence.rdb.last_save.time` +-- +*`redis.info.persistence.rdb.last_save.time`*:: ++ +-- type: long None -[float] -=== `redis.info.persistence.rdb.bgsave.last_status` +-- +*`redis.info.persistence.rdb.bgsave.last_status`*:: ++ +-- type: keyword None -[float] -=== `redis.info.persistence.rdb.bgsave.last_time.sec` +-- +*`redis.info.persistence.rdb.bgsave.last_time.sec`*:: ++ +-- type: long None -[float] -=== `redis.info.persistence.rdb.bgsave.current_time.sec` +-- +*`redis.info.persistence.rdb.bgsave.current_time.sec`*:: ++ +-- type: long None +-- + [float] == aof fields None -[float] -=== `redis.info.persistence.aof.enabled` - +*`redis.info.persistence.aof.enabled`*:: ++ +-- type: boolean None -[float] -=== `redis.info.persistence.aof.rewrite.in_progress` +-- +*`redis.info.persistence.aof.rewrite.in_progress`*:: ++ +-- type: boolean None -[float] -=== `redis.info.persistence.aof.rewrite.scheduled` +-- +*`redis.info.persistence.aof.rewrite.scheduled`*:: ++ +-- type: boolean None -[float] -=== `redis.info.persistence.aof.rewrite.last_time.sec` +-- +*`redis.info.persistence.aof.rewrite.last_time.sec`*:: ++ +-- type: long None -[float] -=== `redis.info.persistence.aof.rewrite.current_time.sec` +-- +*`redis.info.persistence.aof.rewrite.current_time.sec`*:: ++ +-- type: long None -[float] -=== 
`redis.info.persistence.aof.bgrewrite.last_status` +-- +*`redis.info.persistence.aof.bgrewrite.last_status`*:: ++ +-- type: keyword None -[float] -=== `redis.info.persistence.aof.write.last_status` +-- +*`redis.info.persistence.aof.write.last_status`*:: ++ +-- type: keyword None +-- + [float] == replication fields @@ -9177,55 +11798,69 @@ Replication -[float] -=== `redis.info.replication.role` - +*`redis.info.replication.role`*:: ++ +-- type: keyword None -[float] -=== `redis.info.replication.connected_slaves` +-- +*`redis.info.replication.connected_slaves`*:: ++ +-- type: long None -[float] -=== `redis.info.replication.master_offset` +-- +*`redis.info.replication.master_offset`*:: ++ +-- type: long None -[float] -=== `redis.info.replication.backlog.active` +-- +*`redis.info.replication.backlog.active`*:: ++ +-- type: long None -[float] -=== `redis.info.replication.backlog.size` +-- +*`redis.info.replication.backlog.size`*:: ++ +-- type: long None -[float] -=== `redis.info.replication.backlog.first_byte_offset` +-- +*`redis.info.replication.backlog.first_byte_offset`*:: ++ +-- type: long None -[float] -=== `redis.info.replication.backlog.histlen` +-- +*`redis.info.replication.backlog.histlen`*:: ++ +-- type: long None +-- + [float] == server fields @@ -9233,118 +11868,150 @@ Server info -[float] -=== `redis.info.server.version` - +*`redis.info.server.version`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.git_sha1` +-- +*`redis.info.server.git_sha1`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.git_dirty` +-- +*`redis.info.server.git_dirty`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.build_id` +-- +*`redis.info.server.build_id`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.mode` +-- +*`redis.info.server.mode`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.os` +-- +*`redis.info.server.os`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.arch_bits` +-- +*`redis.info.server.arch_bits`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.multiplexing_api` +-- +*`redis.info.server.multiplexing_api`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.gcc_version` +-- +*`redis.info.server.gcc_version`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.process_id` +-- +*`redis.info.server.process_id`*:: ++ +-- type: long None -[float] -=== `redis.info.server.run_id` +-- +*`redis.info.server.run_id`*:: ++ +-- type: keyword None -[float] -=== `redis.info.server.tcp_port` +-- +*`redis.info.server.tcp_port`*:: ++ +-- type: long None -[float] -=== `redis.info.server.uptime` +-- +*`redis.info.server.uptime`*:: ++ +-- type: long None -[float] -=== `redis.info.server.hz` +-- +*`redis.info.server.hz`*:: ++ +-- type: long None -[float] -=== `redis.info.server.lru_clock` +-- +*`redis.info.server.lru_clock`*:: ++ +-- type: long None -[float] -=== `redis.info.server.config_file` +-- +*`redis.info.server.config_file`*:: ++ +-- type: keyword None +-- + [float] == stats fields @@ -9352,139 +12019,177 @@ Redis stats. -[float] -=== `redis.info.stats.connections.received` - +*`redis.info.stats.connections.received`*:: ++ +-- type: long Total number of connections received. -[float] -=== `redis.info.stats.connections.rejected` +-- +*`redis.info.stats.connections.rejected`*:: ++ +-- type: long Total number of connections rejected. -[float] -=== `redis.info.stats.commands_processed` +-- +*`redis.info.stats.commands_processed`*:: ++ +-- type: long Total number of commands preocessed. 
-[float] -=== `redis.info.stats.net.input.bytes` +-- +*`redis.info.stats.net.input.bytes`*:: ++ +-- type: long Total network input in bytes. -[float] -=== `redis.info.stats.net.output.bytes` +-- +*`redis.info.stats.net.output.bytes`*:: ++ +-- type: long Total network output in bytes. -[float] -=== `redis.info.stats.instantaneous.ops_per_sec` +-- +*`redis.info.stats.instantaneous.ops_per_sec`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.instantaneous.input_kbps` +-- +*`redis.info.stats.instantaneous.input_kbps`*:: ++ +-- type: scaled_float None -[float] -=== `redis.info.stats.instantaneous.output_kbps` +-- +*`redis.info.stats.instantaneous.output_kbps`*:: ++ +-- type: scaled_float None -[float] -=== `redis.info.stats.sync.full` +-- +*`redis.info.stats.sync.full`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.sync.partial.ok` +-- +*`redis.info.stats.sync.partial.ok`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.sync.partial.err` +-- +*`redis.info.stats.sync.partial.err`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.keys.expired` +-- +*`redis.info.stats.keys.expired`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.keys.evicted` +-- +*`redis.info.stats.keys.evicted`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.keyspace.hits` +-- +*`redis.info.stats.keyspace.hits`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.keyspace.misses` +-- +*`redis.info.stats.keyspace.misses`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.pubsub.channels` +-- +*`redis.info.stats.pubsub.channels`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.pubsub.patterns` +-- +*`redis.info.stats.pubsub.patterns`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.latest_fork_usec` +-- +*`redis.info.stats.latest_fork_usec`*:: ++ +-- type: long None -[float] -=== `redis.info.stats.migrate_cached_sockets` +-- +*`redis.info.stats.migrate_cached_sockets`*:: ++ +-- type: long None +-- + [float] == keyspace fields @@ -9492,37 +12197,45 @@ None -[float] -=== `redis.keyspace.id` - +*`redis.keyspace.id`*:: ++ +-- type: keyword Keyspace identifier. -[float] -=== `redis.keyspace.avg_ttl` +-- +*`redis.keyspace.avg_ttl`*:: ++ +-- type: long Average ttl. -[float] -=== `redis.keyspace.keys` +-- +*`redis.keyspace.keys`*:: ++ +-- type: long Number of keys in the keyspace. -[float] -=== `redis.keyspace.expires` +-- +*`redis.keyspace.expires`*:: ++ +-- type: long +-- + [[exported-fields-system]] == System fields @@ -9544,17 +12257,19 @@ System status metrics, like CPU and memory usage, that are collected from the op -[float] -=== `system.core.id` - +*`system.core.id`*:: ++ +-- type: long CPU Core number. -[float] -=== `system.core.user.pct` +-- +*`system.core.user.pct`*:: ++ +-- type: scaled_float format: percent @@ -9562,17 +12277,21 @@ format: percent The percentage of CPU time spent in user space. -[float] -=== `system.core.user.ticks` +-- +*`system.core.user.ticks`*:: ++ +-- type: long The amount of CPU time spent in user space. -[float] -=== `system.core.system.pct` +-- +*`system.core.system.pct`*:: ++ +-- type: scaled_float format: percent @@ -9580,17 +12299,21 @@ format: percent The percentage of CPU time spent in kernel space. -[float] -=== `system.core.system.ticks` +-- +*`system.core.system.ticks`*:: ++ +-- type: long The amount of CPU time spent in kernel space. 
-[float] -=== `system.core.nice.pct` +-- +*`system.core.nice.pct`*:: ++ +-- type: scaled_float format: percent @@ -9598,17 +12321,21 @@ format: percent The percentage of CPU time spent on low-priority processes. -[float] -=== `system.core.nice.ticks` +-- +*`system.core.nice.ticks`*:: ++ +-- type: long The amount of CPU time spent on low-priority processes. -[float] -=== `system.core.idle.pct` +-- +*`system.core.idle.pct`*:: ++ +-- type: scaled_float format: percent @@ -9616,17 +12343,21 @@ format: percent The percentage of CPU time spent idle. -[float] -=== `system.core.idle.ticks` +-- +*`system.core.idle.ticks`*:: ++ +-- type: long The amount of CPU time spent idle. -[float] -=== `system.core.iowait.pct` +-- +*`system.core.iowait.pct`*:: ++ +-- type: scaled_float format: percent @@ -9634,17 +12365,21 @@ format: percent The percentage of CPU time spent in wait (on disk). -[float] -=== `system.core.iowait.ticks` +-- +*`system.core.iowait.ticks`*:: ++ +-- type: long The amount of CPU time spent in wait (on disk). -[float] -=== `system.core.irq.pct` +-- +*`system.core.irq.pct`*:: ++ +-- type: scaled_float format: percent @@ -9652,17 +12387,21 @@ format: percent The percentage of CPU time spent servicing and handling hardware interrupts. -[float] -=== `system.core.irq.ticks` +-- +*`system.core.irq.ticks`*:: ++ +-- type: long The amount of CPU time spent servicing and handling hardware interrupts. -[float] -=== `system.core.softirq.pct` +-- +*`system.core.softirq.pct`*:: ++ +-- type: scaled_float format: percent @@ -9670,17 +12409,21 @@ format: percent The percentage of CPU time spent servicing and handling software interrupts. -[float] -=== `system.core.softirq.ticks` +-- +*`system.core.softirq.ticks`*:: ++ +-- type: long The amount of CPU time spent servicing and handling software interrupts. -[float] -=== `system.core.steal.pct` +-- +*`system.core.steal.pct`*:: ++ +-- type: scaled_float format: percent @@ -9688,14 +12431,18 @@ format: percent The percentage of CPU time spent in involuntary wait by the virtual CPU while the hypervisor was servicing another processor. Available only on Unix. -[float] -=== `system.core.steal.ticks` +-- +*`system.core.steal.ticks`*:: ++ +-- type: long The amount of CPU time spent in involuntary wait by the virtual CPU while the hypervisor was servicing another processor. Available only on Unix. +-- + [float] == cpu fields @@ -9703,17 +12450,19 @@ The amount of CPU time spent in involuntary wait by the virtual CPU while the hy -[float] -=== `system.cpu.cores` - +*`system.cpu.cores`*:: ++ +-- type: long The number of CPU cores present on the host. The non-normalized percentages will have a maximum value of `100% * cores`. The normalized percentages already take this value into account and have a maximum value of 100%. -[float] -=== `system.cpu.user.pct` +-- +*`system.cpu.user.pct`*:: ++ +-- type: scaled_float format: percent @@ -9721,9 +12470,11 @@ format: percent The percentage of CPU time spent in user space. On multi-core systems, you can have percentages that are greater than 100%. For example, if 3 cores are at 60% use, then the `system.cpu.user.pct` will be 180%. -[float] -=== `system.cpu.system.pct` +-- +*`system.cpu.system.pct`*:: ++ +-- type: scaled_float format: percent @@ -9731,9 +12482,11 @@ format: percent The percentage of CPU time spent in kernel space. 
-[float] -=== `system.cpu.nice.pct` +-- +*`system.cpu.nice.pct`*:: ++ +-- type: scaled_float format: percent @@ -9741,9 +12494,11 @@ format: percent The percentage of CPU time spent on low-priority processes. -[float] -=== `system.cpu.idle.pct` +-- +*`system.cpu.idle.pct`*:: ++ +-- type: scaled_float format: percent @@ -9751,9 +12506,11 @@ format: percent The percentage of CPU time spent idle. -[float] -=== `system.cpu.iowait.pct` +-- +*`system.cpu.iowait.pct`*:: ++ +-- type: scaled_float format: percent @@ -9761,9 +12518,11 @@ format: percent The percentage of CPU time spent in wait (on disk). -[float] -=== `system.cpu.irq.pct` +-- +*`system.cpu.irq.pct`*:: ++ +-- type: scaled_float format: percent @@ -9771,9 +12530,11 @@ format: percent The percentage of CPU time spent servicing and handling hardware interrupts. -[float] -=== `system.cpu.softirq.pct` +-- +*`system.cpu.softirq.pct`*:: ++ +-- type: scaled_float format: percent @@ -9781,9 +12542,11 @@ format: percent The percentage of CPU time spent servicing and handling software interrupts. -[float] -=== `system.cpu.steal.pct` +-- +*`system.cpu.steal.pct`*:: ++ +-- type: scaled_float format: percent @@ -9791,9 +12554,11 @@ format: percent The percentage of CPU time spent in involuntary wait by the virtual CPU while the hypervisor was servicing another processor. Available only on Unix. -[float] -=== `system.cpu.total.pct` +-- +*`system.cpu.total.pct`*:: ++ +-- type: scaled_float format: percent @@ -9801,9 +12566,11 @@ format: percent The percentage of CPU time spent in non-idle state. -[float] -=== `system.cpu.user.norm.pct` +-- +*`system.cpu.user.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9811,9 +12578,11 @@ format: percent The percentage of CPU time spent in user space. -[float] -=== `system.cpu.system.norm.pct` +-- +*`system.cpu.system.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9821,9 +12590,11 @@ format: percent The percentage of CPU time spent in kernel space. -[float] -=== `system.cpu.nice.norm.pct` +-- +*`system.cpu.nice.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9831,9 +12602,11 @@ format: percent The percentage of CPU time spent on low-priority processes. -[float] -=== `system.cpu.idle.norm.pct` +-- +*`system.cpu.idle.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9841,9 +12614,11 @@ format: percent The percentage of CPU time spent idle. -[float] -=== `system.cpu.iowait.norm.pct` +-- +*`system.cpu.iowait.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9851,9 +12626,11 @@ format: percent The percentage of CPU time spent in wait (on disk). -[float] -=== `system.cpu.irq.norm.pct` +-- +*`system.cpu.irq.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9861,9 +12638,11 @@ format: percent The percentage of CPU time spent servicing and handling hardware interrupts. -[float] -=== `system.cpu.softirq.norm.pct` +-- +*`system.cpu.softirq.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9871,9 +12650,11 @@ format: percent The percentage of CPU time spent servicing and handling software interrupts. -[float] -=== `system.cpu.steal.norm.pct` +-- +*`system.cpu.steal.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9881,9 +12662,11 @@ format: percent The percentage of CPU time spent in involuntary wait by the virtual CPU while the hypervisor was servicing another processor. Available only on Unix. 
-[float] -=== `system.cpu.total.norm.pct` +-- +*`system.cpu.total.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -9891,78 +12674,98 @@ format: percent The percentage of CPU time spent in non-idle state. -[float] -=== `system.cpu.total.value` +-- +*`system.cpu.total.value`*:: ++ +-- type: long The value of CPU usage since starting the process. -[float] -=== `system.cpu.user.ticks` +-- +*`system.cpu.user.ticks`*:: ++ +-- type: long The amount of CPU time spent in user space. -[float] -=== `system.cpu.system.ticks` +-- +*`system.cpu.system.ticks`*:: ++ +-- type: long The amount of CPU time spent in kernel space. -[float] -=== `system.cpu.nice.ticks` +-- +*`system.cpu.nice.ticks`*:: ++ +-- type: long The amount of CPU time spent on low-priority processes. -[float] -=== `system.cpu.idle.ticks` +-- +*`system.cpu.idle.ticks`*:: ++ +-- type: long The amount of CPU time spent idle. -[float] -=== `system.cpu.iowait.ticks` +-- +*`system.cpu.iowait.ticks`*:: ++ +-- type: long The amount of CPU time spent in wait (on disk). -[float] -=== `system.cpu.irq.ticks` +-- +*`system.cpu.irq.ticks`*:: ++ +-- type: long The amount of CPU time spent servicing and handling hardware interrupts. -[float] -=== `system.cpu.softirq.ticks` +-- +*`system.cpu.softirq.ticks`*:: ++ +-- type: long The amount of CPU time spent servicing and handling software interrupts. -[float] -=== `system.cpu.steal.ticks` +-- +*`system.cpu.steal.ticks`*:: ++ +-- type: long The amount of CPU time spent in involuntary wait by the virtual CPU while the hypervisor was servicing another processor. Available only on Unix. +-- + [float] == diskio fields @@ -9970,9 +12773,9 @@ The amount of CPU time spent in involuntary wait by the virtual CPU while the hy -[float] -=== `system.diskio.name` - +*`system.diskio.name`*:: ++ +-- type: keyword example: sda1 @@ -9980,33 +12783,41 @@ example: sda1 The disk name. -[float] -=== `system.diskio.serial_number` +-- +*`system.diskio.serial_number`*:: ++ +-- type: keyword The disk's serial number. This may not be provided by all operating systems. -[float] -=== `system.diskio.read.count` +-- +*`system.diskio.read.count`*:: ++ +-- type: long The total number of reads completed successfully. -[float] -=== `system.diskio.write.count` +-- +*`system.diskio.write.count`*:: ++ +-- type: long The total number of writes completed successfully. -[float] -=== `system.diskio.read.bytes` +-- +*`system.diskio.read.bytes`*:: ++ +-- type: long format: bytes @@ -10014,9 +12825,11 @@ format: bytes The total number of bytes read successfully. On Linux this is the number of sectors read multiplied by an assumed sector size of 512. -[float] -=== `system.diskio.write.bytes` +-- +*`system.diskio.write.bytes`*:: ++ +-- type: long format: bytes @@ -10024,65 +12837,81 @@ format: bytes The total number of bytes written successfully. On Linux this is the number of sectors written multiplied by an assumed sector size of 512. -[float] -=== `system.diskio.read.time` +-- +*`system.diskio.read.time`*:: ++ +-- type: long The total number of milliseconds spent by all reads. -[float] -=== `system.diskio.write.time` +-- +*`system.diskio.write.time`*:: ++ +-- type: long The total number of milliseconds spent by all writes. -[float] -=== `system.diskio.io.time` +-- +*`system.diskio.io.time`*:: ++ +-- type: long The total number of of milliseconds spent doing I/Os. 
-[float] -=== `system.diskio.iostat.read.request.merges_per_sec` +-- +*`system.diskio.iostat.read.request.merges_per_sec`*:: ++ +-- type: float The number of read requests merged per second that were queued to the device. -[float] -=== `system.diskio.iostat.write.request.merges_per_sec` +-- +*`system.diskio.iostat.write.request.merges_per_sec`*:: ++ +-- type: float The number of write requests merged per second that were queued to the device. -[float] -=== `system.diskio.iostat.read.request.per_sec` +-- +*`system.diskio.iostat.read.request.per_sec`*:: ++ +-- type: float The number of read requests that were issued to the device per second -[float] -=== `system.diskio.iostat.write.request.per_sec` +-- +*`system.diskio.iostat.write.request.per_sec`*:: ++ +-- type: float The number of write requests that were issued to the device per second -[float] -=== `system.diskio.iostat.read.per_sec.bytes` +-- +*`system.diskio.iostat.read.per_sec.bytes`*:: ++ +-- type: float format: bytes @@ -10090,9 +12919,11 @@ format: bytes The number of Bytes read from the device per second. -[float] -=== `system.diskio.iostat.write.per_sec.bytes` +-- +*`system.diskio.iostat.write.per_sec.bytes`*:: ++ +-- type: float format: bytes @@ -10100,46 +12931,58 @@ format: bytes The number of Bytes write from the device per second. -[float] -=== `system.diskio.iostat.request.avg_size` +-- +*`system.diskio.iostat.request.avg_size`*:: ++ +-- type: float The average size (in sectors) of the requests that were issued to the device. -[float] -=== `system.diskio.iostat.queue.avg_size` +-- +*`system.diskio.iostat.queue.avg_size`*:: ++ +-- type: float The average queue length of the requests that were issued to the device. -[float] -=== `system.diskio.iostat.await` +-- +*`system.diskio.iostat.await`*:: ++ +-- type: float The average time spent for requests issued to the device to be served. -[float] -=== `system.diskio.iostat.service_time` +-- +*`system.diskio.iostat.service_time`*:: ++ +-- type: float The average service time (in milliseconds) for I/O requests that were issued to the device. -[float] -=== `system.diskio.iostat.busy` +-- +*`system.diskio.iostat.busy`*:: ++ +-- type: float Percentage of CPU time during which I/O requests were issued to the device (bandwidth utilization for the device). Device saturation occurs when this value is close to 100%. +-- + [float] == filesystem fields @@ -10147,9 +12990,9 @@ Percentage of CPU time during which I/O requests were issued to the device (band -[float] -=== `system.filesystem.available` - +*`system.filesystem.available`*:: ++ +-- type: long format: bytes @@ -10157,41 +13000,51 @@ format: bytes The disk space available to an unprivileged user in bytes. -[float] -=== `system.filesystem.device_name` +-- +*`system.filesystem.device_name`*:: ++ +-- type: keyword The disk name. For example: `/dev/disk1` -[float] -=== `system.filesystem.type` +-- +*`system.filesystem.type`*:: ++ +-- type: keyword The disk type. For example: `ext4` -[float] -=== `system.filesystem.mount_point` +-- +*`system.filesystem.mount_point`*:: ++ +-- type: keyword The mounting point. For example: `/` -[float] -=== `system.filesystem.files` +-- +*`system.filesystem.files`*:: ++ +-- type: long The total number of file nodes in the file system. -[float] -=== `system.filesystem.free` +-- +*`system.filesystem.free`*:: ++ +-- type: long format: bytes @@ -10199,17 +13052,21 @@ format: bytes The disk space available in bytes. 
-[float] -=== `system.filesystem.free_files` +-- +*`system.filesystem.free_files`*:: ++ +-- type: long The number of free file nodes in the file system. -[float] -=== `system.filesystem.total` +-- +*`system.filesystem.total`*:: ++ +-- type: long format: bytes @@ -10217,9 +13074,11 @@ format: bytes The total disk space in bytes. -[float] -=== `system.filesystem.used.bytes` +-- +*`system.filesystem.used.bytes`*:: ++ +-- type: long format: bytes @@ -10227,9 +13086,11 @@ format: bytes The used disk space in bytes. -[float] -=== `system.filesystem.used.pct` +-- +*`system.filesystem.used.pct`*:: ++ +-- type: scaled_float format: percent @@ -10237,36 +13098,42 @@ format: percent The percentage of used disk space. +-- + [float] == fsstat fields -`system.fsstat` contains filesystem metrics aggregated from all mounted filesystems, similar with what `df -a` prints out. +`system.fsstat` contains filesystem metrics aggregated from all mounted filesystems. -[float] -=== `system.fsstat.count` - +*`system.fsstat.count`*:: ++ +-- type: long Number of file systems found. -[float] -=== `system.fsstat.total_files` +-- +*`system.fsstat.total_files`*:: ++ +-- type: long Total number of files. +-- + [float] == total_size fields Nested file system docs. -[float] -=== `system.fsstat.total_size.free` - +*`system.fsstat.total_size.free`*:: ++ +-- type: long format: bytes @@ -10274,9 +13141,11 @@ format: bytes Total free space. -[float] -=== `system.fsstat.total_size.used` +-- +*`system.fsstat.total_size.used`*:: ++ +-- type: long format: bytes @@ -10284,9 +13153,11 @@ format: bytes Total used space. -[float] -=== `system.fsstat.total_size.total` +-- +*`system.fsstat.total_size.total`*:: ++ +-- type: long format: bytes @@ -10294,6 +13165,8 @@ format: bytes Total space (used plus free). +-- + [float] == load fields @@ -10301,62 +13174,76 @@ CPU load averages. -[float] -=== `system.load.1` - +*`system.load.1`*:: ++ +-- type: scaled_float Load average for the last minute. -[float] -=== `system.load.5` +-- +*`system.load.5`*:: ++ +-- type: scaled_float Load average for the last 5 minutes. -[float] -=== `system.load.15` +-- +*`system.load.15`*:: ++ +-- type: scaled_float Load average for the last 15 minutes. -[float] -=== `system.load.norm.1` +-- +*`system.load.norm.1`*:: ++ +-- type: scaled_float Load for the last minute divided by the number of cores. -[float] -=== `system.load.norm.5` +-- +*`system.load.norm.5`*:: ++ +-- type: scaled_float Load for the last 5 minutes divided by the number of cores. -[float] -=== `system.load.norm.15` +-- +*`system.load.norm.15`*:: ++ +-- type: scaled_float Load for the last 15 minutes divided by the number of cores. -[float] -=== `system.load.cores` +-- +*`system.load.cores`*:: ++ +-- type: long The number of CPU cores present on the host. +-- + [float] == memory fields @@ -10364,9 +13251,9 @@ The number of CPU cores present on the host. -[float] -=== `system.memory.total` - +*`system.memory.total`*:: ++ +-- type: long format: bytes @@ -10374,9 +13261,11 @@ format: bytes Total memory. -[float] -=== `system.memory.used.bytes` +-- +*`system.memory.used.bytes`*:: ++ +-- type: long format: bytes @@ -10384,9 +13273,11 @@ format: bytes Used memory. -[float] -=== `system.memory.free` +-- +*`system.memory.free`*:: ++ +-- type: long format: bytes @@ -10394,9 +13285,11 @@ format: bytes The total amount of free memory in bytes. This value does not include memory consumed by system caches and buffers (see system.memory.actual.free). 
-[float] -=== `system.memory.used.pct` +-- +*`system.memory.used.pct`*:: ++ +-- type: scaled_float format: percent @@ -10404,6 +13297,8 @@ format: percent The percentage of used memory. +-- + [float] == actual fields @@ -10411,9 +13306,9 @@ Actual memory used and free. -[float] -=== `system.memory.actual.used.bytes` - +*`system.memory.actual.used.bytes`*:: ++ +-- type: long format: bytes @@ -10421,9 +13316,11 @@ format: bytes Actual used memory in bytes. It represents the difference between the total and the available memory. The available memory depends on the OS. For more details, please check `system.actual.free`. -[float] -=== `system.memory.actual.free` +-- +*`system.memory.actual.free`*:: ++ +-- type: long format: bytes @@ -10431,9 +13328,11 @@ format: bytes Actual free memory in bytes. It is calculated based on the OS. On Linux it consists of the free memory plus caches and buffers. On OSX it is a sum of free memory and the inactive memory. On Windows, it is equal to `system.memory.free`. -[float] -=== `system.memory.actual.used.pct` +-- +*`system.memory.actual.used.pct`*:: ++ +-- type: scaled_float format: percent @@ -10441,15 +13340,17 @@ format: percent The percentage of actual used memory. +-- + [float] == swap fields This group contains statistics related to the swap memory usage on the system. -[float] -=== `system.memory.swap.total` - +*`system.memory.swap.total`*:: ++ +-- type: long format: bytes @@ -10457,9 +13358,11 @@ format: bytes Total swap memory. -[float] -=== `system.memory.swap.used.bytes` +-- +*`system.memory.swap.used.bytes`*:: ++ +-- type: long format: bytes @@ -10467,9 +13370,11 @@ format: bytes Used swap memory. -[float] -=== `system.memory.swap.free` +-- +*`system.memory.swap.free`*:: ++ +-- type: long format: bytes @@ -10477,9 +13382,11 @@ format: bytes Available swap memory. -[float] -=== `system.memory.swap.used.pct` +-- +*`system.memory.swap.used.pct`*:: ++ +-- type: scaled_float format: percent @@ -10487,16 +13394,108 @@ format: percent The percentage of used swap memory. +-- + [float] -== network fields +== hugepages fields + +This group contains statistics related to huge pages usage on the system. + + +*`system.memory.hugepages.total`*:: ++ +-- +type: long + +format: number + +Number of huge pages in the pool. + + +-- + +*`system.memory.hugepages.used.bytes`*:: ++ +-- +type: long + +format: bytes + +Memory used in allocated huge pages. + + +-- + +*`system.memory.hugepages.used.pct`*:: ++ +-- +type: long + +format: percent + +Percentage of huge pages used. + + +-- + +*`system.memory.hugepages.free`*:: ++ +-- +type: long + +format: number + +Number of available huge pages in the pool. + + +-- + +*`system.memory.hugepages.reserved`*:: ++ +-- +type: long + +format: number + +Number of reserved but not allocated huge pages in the pool. + + +-- + +*`system.memory.hugepages.surplus`*:: ++ +-- +type: long + +format: number + +Number of overcommited huge pages. -`network` contains network IO metrics for a single network interface. + +-- + +*`system.memory.hugepages.default_size`*:: ++ +-- +type: long + +format: bytes + +Default size for huge pages. +-- [float] -=== `system.network.name` +== network fields + +`network` contains network IO metrics for a single network interface. + + +*`system.network.name`*:: ++ +-- type: keyword example: eth0 @@ -10504,9 +13503,11 @@ example: eth0 The network interface name. 
-[float] -=== `system.network.out.bytes` +-- +*`system.network.out.bytes`*:: ++ +-- type: long format: bytes @@ -10514,9 +13515,11 @@ format: bytes The number of bytes sent. -[float] -=== `system.network.in.bytes` +-- +*`system.network.in.bytes`*:: ++ +-- type: long format: bytes @@ -10524,54 +13527,68 @@ format: bytes The number of bytes received. -[float] -=== `system.network.out.packets` +-- +*`system.network.out.packets`*:: ++ +-- type: long The number of packets sent. -[float] -=== `system.network.in.packets` +-- +*`system.network.in.packets`*:: ++ +-- type: long The number or packets received. -[float] -=== `system.network.in.errors` +-- +*`system.network.in.errors`*:: ++ +-- type: long The number of errors while receiving. -[float] -=== `system.network.out.errors` +-- +*`system.network.out.errors`*:: ++ +-- type: long The number of errors while sending. -[float] -=== `system.network.in.dropped` +-- +*`system.network.in.dropped`*:: ++ +-- type: long The number of incoming packets that were dropped. -[float] -=== `system.network.out.dropped` +-- +*`system.network.out.dropped`*:: ++ +-- type: long The number of outgoing packets that were dropped. This value is always 0 on Darwin and BSD because it is not reported by the operating system. +-- + [float] == process fields @@ -10579,103 +13596,125 @@ The number of outgoing packets that were dropped. This value is always 0 on Darw -[float] -=== `system.process.name` - +*`system.process.name`*:: ++ +-- type: keyword The process name. -[float] -=== `system.process.state` +-- +*`system.process.state`*:: ++ +-- type: keyword The process state. For example: "running". -[float] -=== `system.process.pid` +-- +*`system.process.pid`*:: ++ +-- type: long The process pid. -[float] -=== `system.process.ppid` +-- +*`system.process.ppid`*:: ++ +-- type: long The process parent pid. -[float] -=== `system.process.pgid` +-- +*`system.process.pgid`*:: ++ +-- type: long The process group id. -[float] -=== `system.process.cmdline` +-- +*`system.process.cmdline`*:: ++ +-- type: keyword The full command-line used to start the process, including the arguments separated by space. -[float] -=== `system.process.username` +-- +*`system.process.username`*:: ++ +-- type: keyword The username of the user that created the process. If the username cannot be determined, the field will contain the user's numeric identifier (UID). On Windows, this field includes the user's domain and is formatted as `domain\username`. -[float] -=== `system.process.cwd` +-- +*`system.process.cwd`*:: ++ +-- type: keyword The current working directory of the process. This field is only available on Linux. -[float] -=== `system.process.env` +-- +*`system.process.env`*:: ++ +-- type: object The environment variables used to start the process. The data is available on FreeBSD, Linux, and OS X. +-- + [float] == cpu fields CPU-specific statistics per process. -[float] -=== `system.process.cpu.user` - +*`system.process.cpu.user.ticks`*:: ++ +-- type: long The amount of CPU time the process spent in user space. -[float] -=== `system.process.cpu.total.value` +-- +*`system.process.cpu.total.value`*:: ++ +-- type: long The value of CPU usage since starting the process. -[float] -=== `system.process.cpu.total.pct` +-- +*`system.process.cpu.total.pct`*:: ++ +-- type: scaled_float format: percent @@ -10683,9 +13722,11 @@ format: percent The percentage of CPU time spent by the process since the last update. Its value is similar to the %CPU value of the process displayed by the top command on Unix systems. 
-[float] -=== `system.process.cpu.total.norm.pct` +-- +*`system.process.cpu.total.norm.pct`*:: ++ +-- type: scaled_float format: percent @@ -10693,39 +13734,47 @@ format: percent The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%. -[float] -=== `system.process.cpu.system` +-- +*`system.process.cpu.system.ticks`*:: ++ +-- type: long The amount of CPU time the process spent in kernel space. -[float] -=== `system.process.cpu.total.ticks` +-- +*`system.process.cpu.total.ticks`*:: ++ +-- type: long The total CPU time spent by the process. -[float] -=== `system.process.cpu.start_time` +-- +*`system.process.cpu.start_time`*:: ++ +-- type: date The time when the process was started. +-- + [float] == memory fields Memory-specific statistics per process. -[float] -=== `system.process.memory.size` - +*`system.process.memory.size`*:: ++ +-- type: long format: bytes @@ -10733,9 +13782,11 @@ format: bytes The total virtual memory the process has. -[float] -=== `system.process.memory.rss.bytes` +-- +*`system.process.memory.rss.bytes`*:: ++ +-- type: long format: bytes @@ -10743,9 +13794,11 @@ format: bytes The Resident Set Size. The amount of memory the process occupied in main memory (RAM). -[float] -=== `system.process.memory.rss.pct` +-- +*`system.process.memory.rss.pct`*:: ++ +-- type: scaled_float format: percent @@ -10753,9 +13806,11 @@ format: percent The percentage of memory the process occupied in main memory (RAM). -[float] -=== `system.process.memory.share` +-- +*`system.process.memory.share`*:: ++ +-- type: long format: bytes @@ -10763,6 +13818,8 @@ format: bytes The shared memory the process uses. +-- + [float] == fd fields @@ -10770,29 +13827,35 @@ File descriptor usage metrics. This set of metrics is available for Linux and Fr -[float] -=== `system.process.fd.open` - +*`system.process.fd.open`*:: ++ +-- type: long The number of file descriptors open by the process. -[float] -=== `system.process.fd.limit.soft` +-- +*`system.process.fd.limit.soft`*:: ++ +-- type: long The soft limit on the number of file descriptors opened by the process. The soft limit can be changed by the process at any time. -[float] -=== `system.process.fd.limit.hard` +-- +*`system.process.fd.limit.hard`*:: ++ +-- type: long The hard limit on the number of file descriptors opened by the process. The hard limit can only be raised by root. +-- + [float] == cgroup fields @@ -10800,22 +13863,26 @@ Metrics and limits from the cgroup of which the task is a member. cgroup metrics -[float] -=== `system.process.cgroup.id` - +*`system.process.cgroup.id`*:: ++ +-- type: keyword The ID common to all cgroups associated with this task. If there isn't a common ID used by all cgroups this field will be absent. -[float] -=== `system.process.cgroup.path` +-- +*`system.process.cgroup.path`*:: ++ +-- type: keyword The path to the cgroup relative to the cgroup subsystem's mountpoint. If there isn't a common path used by all cgroups this field will be absent. +-- + [float] == cpu fields @@ -10823,160 +13890,196 @@ The cpu subsystem schedules CPU access for tasks in the cgroup. Access can be co -[float] -=== `system.process.cgroup.cpu.id` - +*`system.process.cgroup.cpu.id`*:: ++ +-- type: keyword ID of the cgroup. -[float] -=== `system.process.cgroup.cpu.path` +-- +*`system.process.cgroup.cpu.path`*:: ++ +-- type: keyword Path to the cgroup relative to the cgroup subsystem's mountpoint. 
-[float] -=== `system.process.cgroup.cpu.cfs.period.us` +-- +*`system.process.cgroup.cpu.cfs.period.us`*:: ++ +-- type: long Period of time in microseconds for how regularly a cgroup's access to CPU resources should be reallocated. -[float] -=== `system.process.cgroup.cpu.cfs.quota.us` +-- +*`system.process.cgroup.cpu.cfs.quota.us`*:: ++ +-- type: long Total amount of time in microseconds for which all tasks in a cgroup can run during one period (as defined by cfs.period.us). -[float] -=== `system.process.cgroup.cpu.cfs.shares` +-- +*`system.process.cgroup.cpu.cfs.shares`*:: ++ +-- type: long An integer value that specifies a relative share of CPU time available to the tasks in a cgroup. The value specified in the cpu.shares file must be 2 or higher. -[float] -=== `system.process.cgroup.cpu.rt.period.us` +-- +*`system.process.cgroup.cpu.rt.period.us`*:: ++ +-- type: long Period of time in microseconds for how regularly a cgroup's access to CPU resources is reallocated. -[float] -=== `system.process.cgroup.cpu.rt.runtime.us` +-- +*`system.process.cgroup.cpu.rt.runtime.us`*:: ++ +-- type: long Period of time in microseconds for the longest continuous period in which the tasks in a cgroup have access to CPU resources. -[float] -=== `system.process.cgroup.cpu.stats.periods` +-- +*`system.process.cgroup.cpu.stats.periods`*:: ++ +-- type: long Number of period intervals (as specified in cpu.cfs.period.us) that have elapsed. -[float] -=== `system.process.cgroup.cpu.stats.throttled.periods` +-- +*`system.process.cgroup.cpu.stats.throttled.periods`*:: ++ +-- type: long Number of times tasks in a cgroup have been throttled (that is, not allowed to run because they have exhausted all of the available time as specified by their quota). -[float] -=== `system.process.cgroup.cpu.stats.throttled.ns` +-- +*`system.process.cgroup.cpu.stats.throttled.ns`*:: ++ +-- type: long The total time duration (in nanoseconds) for which tasks in a cgroup have been throttled. +-- + [float] == cpuacct fields CPU accounting metrics. -[float] -=== `system.process.cgroup.cpuacct.id` - +*`system.process.cgroup.cpuacct.id`*:: ++ +-- type: keyword ID of the cgroup. -[float] -=== `system.process.cgroup.cpuacct.path` +-- +*`system.process.cgroup.cpuacct.path`*:: ++ +-- type: keyword Path to the cgroup relative to the cgroup subsystem's mountpoint. -[float] -=== `system.process.cgroup.cpuacct.total.ns` +-- +*`system.process.cgroup.cpuacct.total.ns`*:: ++ +-- type: long Total CPU time in nanoseconds consumed by all tasks in the cgroup. -[float] -=== `system.process.cgroup.cpuacct.stats.user.ns` +-- +*`system.process.cgroup.cpuacct.stats.user.ns`*:: ++ +-- type: long CPU time consumed by tasks in user mode. -[float] -=== `system.process.cgroup.cpuacct.stats.system.ns` +-- +*`system.process.cgroup.cpuacct.stats.system.ns`*:: ++ +-- type: long CPU time consumed by tasks in user (kernel) mode. -[float] -=== `system.process.cgroup.cpuacct.percpu` +-- +*`system.process.cgroup.cpuacct.percpu`*:: ++ +-- type: object CPU time (in nanoseconds) consumed on each CPU by all tasks in this cgroup. +-- + [float] == memory fields Memory limits and metrics. -[float] -=== `system.process.cgroup.memory.id` - +*`system.process.cgroup.memory.id`*:: ++ +-- type: keyword ID of the cgroup. -[float] -=== `system.process.cgroup.memory.path` +-- +*`system.process.cgroup.memory.path`*:: ++ +-- type: keyword Path to the cgroup relative to the cgroup subsystem's mountpoint. 
-[float] -=== `system.process.cgroup.memory.mem.usage.bytes` +-- +*`system.process.cgroup.memory.mem.usage.bytes`*:: ++ +-- type: long format: bytes @@ -10984,9 +14087,11 @@ format: bytes Total memory usage by processes in the cgroup (in bytes). -[float] -=== `system.process.cgroup.memory.mem.usage.max.bytes` +-- +*`system.process.cgroup.memory.mem.usage.max.bytes`*:: ++ +-- type: long format: bytes @@ -10994,9 +14099,11 @@ format: bytes The maximum memory used by processes in the cgroup (in bytes). -[float] -=== `system.process.cgroup.memory.mem.limit.bytes` +-- +*`system.process.cgroup.memory.mem.limit.bytes`*:: ++ +-- type: long format: bytes @@ -11004,17 +14111,21 @@ format: bytes The maximum amount of user memory in bytes (including file cache) that tasks in the cgroup are allowed to use. -[float] -=== `system.process.cgroup.memory.mem.failures` +-- +*`system.process.cgroup.memory.mem.failures`*:: ++ +-- type: long The number of times that the memory limit (mem.limit.bytes) was reached. -[float] -=== `system.process.cgroup.memory.memsw.usage.bytes` +-- +*`system.process.cgroup.memory.memsw.usage.bytes`*:: ++ +-- type: long format: bytes @@ -11022,9 +14133,11 @@ format: bytes The sum of current memory usage plus swap space used by processes in the cgroup (in bytes). -[float] -=== `system.process.cgroup.memory.memsw.usage.max.bytes` +-- +*`system.process.cgroup.memory.memsw.usage.max.bytes`*:: ++ +-- type: long format: bytes @@ -11032,9 +14145,11 @@ format: bytes The maximum amount of memory and swap space used by processes in the cgroup (in bytes). -[float] -=== `system.process.cgroup.memory.memsw.limit.bytes` +-- +*`system.process.cgroup.memory.memsw.limit.bytes`*:: ++ +-- type: long format: bytes @@ -11042,17 +14157,21 @@ format: bytes The maximum amount for the sum of memory and swap usage that tasks in the cgroup are allowed to use. -[float] -=== `system.process.cgroup.memory.memsw.failures` +-- +*`system.process.cgroup.memory.memsw.failures`*:: ++ +-- type: long The number of times that the memory plus swap space limit (memsw.limit.bytes) was reached. -[float] -=== `system.process.cgroup.memory.kmem.usage.bytes` +-- +*`system.process.cgroup.memory.kmem.usage.bytes`*:: ++ +-- type: long format: bytes @@ -11060,9 +14179,11 @@ format: bytes Total kernel memory usage by processes in the cgroup (in bytes). -[float] -=== `system.process.cgroup.memory.kmem.usage.max.bytes` +-- +*`system.process.cgroup.memory.kmem.usage.max.bytes`*:: ++ +-- type: long format: bytes @@ -11070,9 +14191,11 @@ format: bytes The maximum kernel memory used by processes in the cgroup (in bytes). -[float] -=== `system.process.cgroup.memory.kmem.limit.bytes` +-- +*`system.process.cgroup.memory.kmem.limit.bytes`*:: ++ +-- type: long format: bytes @@ -11080,17 +14203,21 @@ format: bytes The maximum amount of kernel memory that tasks in the cgroup are allowed to use. -[float] -=== `system.process.cgroup.memory.kmem.failures` +-- +*`system.process.cgroup.memory.kmem.failures`*:: ++ +-- type: long The number of times that the memory limit (kmem.limit.bytes) was reached. -[float] -=== `system.process.cgroup.memory.kmem_tcp.usage.bytes` +-- +*`system.process.cgroup.memory.kmem_tcp.usage.bytes`*:: ++ +-- type: long format: bytes @@ -11098,9 +14225,11 @@ format: bytes Total memory usage for TCP buffers in bytes. 
-[float] -=== `system.process.cgroup.memory.kmem_tcp.usage.max.bytes` +-- +*`system.process.cgroup.memory.kmem_tcp.usage.max.bytes`*:: ++ +-- type: long format: bytes @@ -11108,9 +14237,11 @@ format: bytes The maximum memory used for TCP buffers by processes in the cgroup (in bytes). -[float] -=== `system.process.cgroup.memory.kmem_tcp.limit.bytes` +-- +*`system.process.cgroup.memory.kmem_tcp.limit.bytes`*:: ++ +-- type: long format: bytes @@ -11118,17 +14249,21 @@ format: bytes The maximum amount of memory for TCP buffers that tasks in the cgroup are allowed to use. -[float] -=== `system.process.cgroup.memory.kmem_tcp.failures` +-- +*`system.process.cgroup.memory.kmem_tcp.failures`*:: ++ +-- type: long The number of times that the memory limit (kmem_tcp.limit.bytes) was reached. -[float] -=== `system.process.cgroup.memory.stats.active_anon.bytes` +-- +*`system.process.cgroup.memory.stats.active_anon.bytes`*:: ++ +-- type: long format: bytes @@ -11136,27 +14271,33 @@ format: bytes Anonymous and swap cache on active least-recently-used (LRU) list, including tmpfs (shmem), in bytes. -[float] -=== `system.process.cgroup.memory.stats.active_file.bytes` +-- +*`system.process.cgroup.memory.stats.active_file.bytes`*:: ++ +-- type: long format: bytes File-backed memory on active LRU list, in bytes. -[float] -=== `system.process.cgroup.memory.stats.cache.bytes` +-- +*`system.process.cgroup.memory.stats.cache.bytes`*:: ++ +-- type: long format: bytes Page cache, including tmpfs (shmem), in bytes. -[float] -=== `system.process.cgroup.memory.stats.hierarchical_memory_limit.bytes` +-- +*`system.process.cgroup.memory.stats.hierarchical_memory_limit.bytes`*:: ++ +-- type: long format: bytes @@ -11164,9 +14305,11 @@ format: bytes Memory limit for the hierarchy that contains the memory cgroup, in bytes. -[float] -=== `system.process.cgroup.memory.stats.hierarchical_memsw_limit.bytes` +-- +*`system.process.cgroup.memory.stats.hierarchical_memsw_limit.bytes`*:: ++ +-- type: long format: bytes @@ -11174,9 +14317,11 @@ format: bytes Memory plus swap limit for the hierarchy that contains the memory cgroup, in bytes. -[float] -=== `system.process.cgroup.memory.stats.inactive_anon.bytes` +-- +*`system.process.cgroup.memory.stats.inactive_anon.bytes`*:: ++ +-- type: long format: bytes @@ -11184,9 +14329,11 @@ format: bytes Anonymous and swap cache on inactive LRU list, including tmpfs (shmem), in bytes -[float] -=== `system.process.cgroup.memory.stats.inactive_file.bytes` +-- +*`system.process.cgroup.memory.stats.inactive_file.bytes`*:: ++ +-- type: long format: bytes @@ -11194,9 +14341,11 @@ format: bytes File-backed memory on inactive LRU list, in bytes. -[float] -=== `system.process.cgroup.memory.stats.mapped_file.bytes` +-- +*`system.process.cgroup.memory.stats.mapped_file.bytes`*:: ++ +-- type: long format: bytes @@ -11204,41 +14353,51 @@ format: bytes Size of memory-mapped mapped files, including tmpfs (shmem), in bytes. -[float] -=== `system.process.cgroup.memory.stats.page_faults` +-- +*`system.process.cgroup.memory.stats.page_faults`*:: ++ +-- type: long Number of times that a process in the cgroup triggered a page fault. -[float] -=== `system.process.cgroup.memory.stats.major_page_faults` +-- +*`system.process.cgroup.memory.stats.major_page_faults`*:: ++ +-- type: long Number of times that a process in the cgroup triggered a major fault. "Major" faults happen when the kernel actually has to read the data from disk. 
-[float] -=== `system.process.cgroup.memory.stats.pages_in` +-- +*`system.process.cgroup.memory.stats.pages_in`*:: ++ +-- type: long Number of pages paged into memory. This is a counter. -[float] -=== `system.process.cgroup.memory.stats.pages_out` +-- +*`system.process.cgroup.memory.stats.pages_out`*:: ++ +-- type: long Number of pages paged out of memory. This is a counter. -[float] -=== `system.process.cgroup.memory.stats.rss.bytes` +-- +*`system.process.cgroup.memory.stats.rss.bytes`*:: ++ +-- type: long format: bytes @@ -11246,9 +14405,11 @@ format: bytes Anonymous and swap cache (includes transparent hugepages), not including tmpfs (shmem), in bytes. -[float] -=== `system.process.cgroup.memory.stats.rss_huge.bytes` +-- +*`system.process.cgroup.memory.stats.rss_huge.bytes`*:: ++ +-- type: long format: bytes @@ -11256,9 +14417,11 @@ format: bytes Number of bytes of anonymous transparent hugepages. -[float] -=== `system.process.cgroup.memory.stats.swap.bytes` +-- +*`system.process.cgroup.memory.stats.swap.bytes`*:: ++ +-- type: long format: bytes @@ -11266,9 +14429,11 @@ format: bytes Swap usage, in bytes. -[float] -=== `system.process.cgroup.memory.stats.unevictable.bytes` +-- +*`system.process.cgroup.memory.stats.unevictable.bytes`*:: ++ +-- type: long format: bytes @@ -11276,30 +14441,36 @@ format: bytes Memory that cannot be reclaimed, in bytes. +-- + [float] == blkio fields Block IO metrics. -[float] -=== `system.process.cgroup.blkio.id` - +*`system.process.cgroup.blkio.id`*:: ++ +-- type: keyword ID of the cgroup. -[float] -=== `system.process.cgroup.blkio.path` +-- +*`system.process.cgroup.blkio.path`*:: ++ +-- type: keyword Path to the cgroup relative to the cgroup subsystems mountpoint. -[float] -=== `system.process.cgroup.blkio.total.bytes` +-- +*`system.process.cgroup.blkio.total.bytes`*:: ++ +-- type: long format: bytes @@ -11307,14 +14478,18 @@ format: bytes Total number of bytes transferred to and from all block devices by processes in the cgroup. -[float] -=== `system.process.cgroup.blkio.total.ios` +-- +*`system.process.cgroup.blkio.total.ios`*:: ++ +-- type: long Total number of I/O operations performed on all devices by processes in the cgroup as seen by the throttling policy. +-- + [float] == process.summary fields @@ -11322,62 +14497,76 @@ Summary metrics for the processes running on the host. -[float] -=== `system.process.summary.total` - +*`system.process.summary.total`*:: ++ +-- type: long Total number of processes on this host. -[float] -=== `system.process.summary.running` +-- +*`system.process.summary.running`*:: ++ +-- type: long Number of running processes on this host. -[float] -=== `system.process.summary.idle` +-- +*`system.process.summary.idle`*:: ++ +-- type: long Number of idle processes on this host. -[float] -=== `system.process.summary.sleeping` +-- +*`system.process.summary.sleeping`*:: ++ +-- type: long Number of sleeping processes on this host. -[float] -=== `system.process.summary.stopped` +-- +*`system.process.summary.stopped`*:: ++ +-- type: long Number of stopped processes on this host. -[float] -=== `system.process.summary.zombie` +-- +*`system.process.summary.zombie`*:: ++ +-- type: long Number of zombie processes on this host. -[float] -=== `system.process.summary.unknown` +-- +*`system.process.summary.unknown`*:: ++ +-- type: long Number of processes for which the state couldn't be retrieved or is unknown. 
+-- + [float] == raid fields @@ -11385,54 +14574,66 @@ raid -[float] -=== `system.raid.name` - +*`system.raid.name`*:: ++ +-- type: keyword Name of the device. -[float] -=== `system.raid.activity_state` +-- +*`system.raid.activity_state`*:: ++ +-- type: keyword activity-state of the device. -[float] -=== `system.raid.disks.active` +-- +*`system.raid.disks.active`*:: ++ +-- type: long Number of active disks. -[float] -=== `system.raid.disks.total` +-- +*`system.raid.disks.total`*:: ++ +-- type: long Total number of disks the device consists of. -[float] -=== `system.raid.blocks.total` +-- +*`system.raid.blocks.total`*:: ++ +-- type: long Number of blocks the device holds. -[float] -=== `system.raid.blocks.synced` +-- +*`system.raid.blocks.synced`*:: ++ +-- type: long Number of blocks on the device that are in sync. +-- + [float] == socket fields @@ -11440,9 +14641,9 @@ TCP sockets that are active. -[float] -=== `system.socket.direction` - +*`system.socket.direction`*:: ++ +-- type: keyword example: incoming @@ -11450,9 +14651,11 @@ example: incoming How the socket was initiated. Possible values are incoming, outgoing, or listening. -[float] -=== `system.socket.family` +-- +*`system.socket.family`*:: ++ +-- type: keyword example: ipv4 @@ -11460,9 +14663,11 @@ example: ipv4 Address family. -[float] -=== `system.socket.local.ip` +-- +*`system.socket.local.ip`*:: ++ +-- type: ip example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 @@ -11470,9 +14675,11 @@ example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 Local IP address. This can be an IPv4 or IPv6 address. -[float] -=== `system.socket.local.port` +-- +*`system.socket.local.port`*:: ++ +-- type: long example: 22 @@ -11480,9 +14687,11 @@ example: 22 Local port. -[float] -=== `system.socket.remote.ip` +-- +*`system.socket.remote.ip`*:: ++ +-- type: ip example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 @@ -11490,9 +14699,11 @@ example: 192.0.2.1 or 2001:0DB8:ABED:8536::1 Remote IP address. This can be an IPv4 or IPv6 address. -[float] -=== `system.socket.remote.port` +-- +*`system.socket.remote.port`*:: ++ +-- type: long example: 22 @@ -11500,9 +14711,11 @@ example: 22 Remote port. -[float] -=== `system.socket.remote.host` +-- +*`system.socket.remote.host`*:: ++ +-- type: keyword example: 76-211-117-36.nw.example.com. @@ -11510,9 +14723,11 @@ example: 76-211-117-36.nw.example.com. PTR record associated with the remote IP. It is obtained via reverse IP lookup. -[float] -=== `system.socket.remote.etld_plus_one` +-- +*`system.socket.remote.etld_plus_one`*:: ++ +-- type: keyword example: example.com. @@ -11520,61 +14735,77 @@ example: example.com. The effective top-level domain (eTLD) of the remote host plus one more label. For example, the eTLD+1 for "foo.bar.golang.org." is "golang.org.". The data for determining the eTLD comes from an embedded copy of the data from http://publicsuffix.org. -[float] -=== `system.socket.remote.host_error` +-- +*`system.socket.remote.host_error`*:: ++ +-- type: keyword Error describing the cause of the reverse lookup failure. -[float] -=== `system.socket.process.pid` +-- +*`system.socket.process.pid`*:: ++ +-- type: long ID of the process that opened the socket. -[float] -=== `system.socket.process.command` +-- +*`system.socket.process.command`*:: ++ +-- type: keyword Name of the command (limited to 20 chars by the OS). 
-[float] -=== `system.socket.process.cmdline` +-- +*`system.socket.process.cmdline`*:: ++ +-- type: keyword -[float] -=== `system.socket.process.exe` +-- +*`system.socket.process.exe`*:: ++ +-- type: keyword Absolute path to the executable. -[float] -=== `system.socket.user.id` +-- +*`system.socket.user.id`*:: ++ +-- type: long UID of the user running the process. -[float] -=== `system.socket.user.name` +-- +*`system.socket.user.name`*:: ++ +-- type: keyword Name of the user running the process. +-- + [float] == uptime fields @@ -11582,9 +14813,9 @@ Name of the user running the process. -[float] -=== `system.uptime.duration.ms` - +*`system.uptime.duration.ms`*:: ++ +-- type: long format: duration @@ -11592,6 +14823,8 @@ format: duration The OS uptime in milliseconds. +-- + [[exported-fields-uwsgi]] == uwsgi fields @@ -11612,238 +14845,296 @@ uwsgi.status metricset fields -[float] -=== `uwsgi.status.total.requests` - +*`uwsgi.status.total.requests`*:: ++ +-- type: long Total requests handled -[float] -=== `uwsgi.status.total.exceptions` +-- +*`uwsgi.status.total.exceptions`*:: ++ +-- type: long Total exceptions -[float] -=== `uwsgi.status.total.write_errors` +-- +*`uwsgi.status.total.write_errors`*:: ++ +-- type: long Total requests write errors -[float] -=== `uwsgi.status.total.read_errors` +-- +*`uwsgi.status.total.read_errors`*:: ++ +-- type: long Total read errors -[float] -=== `uwsgi.status.total.pid` +-- +*`uwsgi.status.total.pid`*:: ++ +-- type: long Process id -[float] -=== `uwsgi.status.worker.id` +-- +*`uwsgi.status.worker.id`*:: ++ +-- type: long Worker id -[float] -=== `uwsgi.status.worker.pid` +-- +*`uwsgi.status.worker.pid`*:: ++ +-- type: long Worker process id -[float] -=== `uwsgi.status.worker.accepting` +-- +*`uwsgi.status.worker.accepting`*:: ++ +-- type: long State of worker, 1 if still accepting new requests otherwise 0 -[float] -=== `uwsgi.status.worker.requests` +-- +*`uwsgi.status.worker.requests`*:: ++ +-- type: long Number of requests served by this worker -[float] -=== `uwsgi.status.worker.delta_requests` +-- +*`uwsgi.status.worker.delta_requests`*:: ++ +-- type: long Number of requests served by this worker after worker is reloaded when reached MAX_REQUESTS -[float] -=== `uwsgi.status.worker.exceptions` +-- +*`uwsgi.status.worker.exceptions`*:: ++ +-- type: long Exceptions raised -[float] -=== `uwsgi.status.worker.harakiri_count` +-- +*`uwsgi.status.worker.harakiri_count`*:: ++ +-- type: long Dropped requests by timeout -[float] -=== `uwsgi.status.worker.signals` +-- +*`uwsgi.status.worker.signals`*:: ++ +-- type: long Emitted signals count -[float] -=== `uwsgi.status.worker.signal_queue` +-- +*`uwsgi.status.worker.signal_queue`*:: ++ +-- type: long Number of signals waiting to be handled -[float] -=== `uwsgi.status.worker.status` +-- +*`uwsgi.status.worker.status`*:: ++ +-- type: keyword Worker status (cheap, pause, sig, busy, idle) -[float] -=== `uwsgi.status.worker.rss` +-- +*`uwsgi.status.worker.rss`*:: ++ +-- type: keyword Resident Set Size. memory currently used by a process. if always zero try `--memory-report` option of uwsgi -[float] -=== `uwsgi.status.worker.vsz` +-- +*`uwsgi.status.worker.vsz`*:: ++ +-- type: long Virtual Set Size. memory size assigned to a process. 
if always zero try `--memory-report` option of uwsgi -[float] -=== `uwsgi.status.worker.running_time` +-- +*`uwsgi.status.worker.running_time`*:: ++ +-- type: long Process running time -[float] -=== `uwsgi.status.worker.respawn_count` +-- +*`uwsgi.status.worker.respawn_count`*:: ++ +-- type: long Respawn count -[float] -=== `uwsgi.status.worker.tx` +-- +*`uwsgi.status.worker.tx`*:: ++ +-- type: long Transmitted size -[float] -=== `uwsgi.status.worker.avg_rt` +-- +*`uwsgi.status.worker.avg_rt`*:: ++ +-- type: long Average response time -[float] -=== `uwsgi.status.core.id` +-- +*`uwsgi.status.core.id`*:: ++ +-- type: long worker ID -[float] -=== `uwsgi.status.core.worker_pid` +-- +*`uwsgi.status.core.worker_pid`*:: ++ +-- type: long Parent worker PID -[float] -=== `uwsgi.status.core.requests.total` +-- +*`uwsgi.status.core.requests.total`*:: ++ +-- type: long Number of total requests served -[float] -=== `uwsgi.status.core.requests.static` +-- +*`uwsgi.status.core.requests.static`*:: ++ +-- type: long Number of static file serves -[float] -=== `uwsgi.status.core.requests.routed` +-- +*`uwsgi.status.core.requests.routed`*:: ++ +-- type: long Routed requests -[float] -=== `uwsgi.status.core.requests.offloaded` +-- +*`uwsgi.status.core.requests.offloaded`*:: ++ +-- type: long Offloaded requests -[float] -=== `uwsgi.status.core.write_errors` +-- +*`uwsgi.status.core.write_errors`*:: ++ +-- type: long Number of failed writes -[float] -=== `uwsgi.status.core.read_errors` +-- +*`uwsgi.status.core.read_errors`*:: ++ +-- type: long Number of failed reads +-- + [[exported-fields-vsphere]] == vSphere fields @@ -11864,25 +15155,29 @@ datastore -[float] -=== `vsphere.datastore.name` - +*`vsphere.datastore.name`*:: ++ +-- type: keyword Datastore name -[float] -=== `vsphere.datastore.fstype` +-- +*`vsphere.datastore.fstype`*:: ++ +-- type: keyword Filesystem type -[float] -=== `vsphere.datastore.capacity.total.bytes` +-- +*`vsphere.datastore.capacity.total.bytes`*:: ++ +-- type: long format: bytes @@ -11890,9 +15185,11 @@ format: bytes Total bytes of the datastore -[float] -=== `vsphere.datastore.capacity.free.bytes` +-- +*`vsphere.datastore.capacity.free.bytes`*:: ++ +-- type: long format: bytes @@ -11900,9 +15197,11 @@ format: bytes Free bytes of the datastore -[float] -=== `vsphere.datastore.capacity.used.bytes` +-- +*`vsphere.datastore.capacity.used.bytes`*:: ++ +-- type: long format: bytes @@ -11910,9 +15209,11 @@ format: bytes Used bytes of the datastore -[float] -=== `vsphere.datastore.capacity.used.pct` +-- +*`vsphere.datastore.capacity.used.pct`*:: ++ +-- type: long format: percent @@ -11920,6 +15221,8 @@ format: percent Used percent of the datastore +-- + [float] == host fields @@ -11927,41 +15230,49 @@ host -[float] -=== `vsphere.host.name` - +*`vsphere.host.name`*:: ++ +-- type: keyword Host name -[float] -=== `vsphere.host.cpu.used.mhz` +-- +*`vsphere.host.cpu.used.mhz`*:: ++ +-- type: long Used CPU in Mhz -[float] -=== `vsphere.host.cpu.total.mhz` +-- +*`vsphere.host.cpu.total.mhz`*:: ++ +-- type: long Total CPU in Mhz -[float] -=== `vsphere.host.cpu.free.mhz` +-- +*`vsphere.host.cpu.free.mhz`*:: ++ +-- type: long Free CPU in Mhz -[float] -=== `vsphere.host.memory.used.bytes` +-- +*`vsphere.host.memory.used.bytes`*:: ++ +-- type: long format: bytes @@ -11969,9 +15280,11 @@ format: bytes Used Memory in bytes -[float] -=== `vsphere.host.memory.total.bytes` +-- +*`vsphere.host.memory.total.bytes`*:: ++ +-- type: long format: bytes @@ -11979,9 +15292,11 @@ format: bytes Total Memory in bytes 
-[float] -=== `vsphere.host.memory.free.bytes` +-- +*`vsphere.host.memory.free.bytes`*:: ++ +-- type: long format: bytes @@ -11989,14 +15304,18 @@ format: bytes Free Memory in bytes -[float] -=== `vsphere.host.network_names` +-- +*`vsphere.host.network_names`*:: ++ +-- type: keyword Network names +-- + [float] == virtualmachine fields @@ -12004,33 +15323,39 @@ virtualmachine -[float] -=== `vsphere.virtualmachine.host` - +*`vsphere.virtualmachine.host`*:: ++ +-- type: keyword Host name -[float] -=== `vsphere.virtualmachine.name` +-- +*`vsphere.virtualmachine.name`*:: ++ +-- type: keyword Virtual Machine name -[float] -=== `vsphere.virtualmachine.cpu.used.mhz` +-- +*`vsphere.virtualmachine.cpu.used.mhz`*:: ++ +-- type: long Used CPU in Mhz -[float] -=== `vsphere.virtualmachine.memory.used.guest.bytes` +-- +*`vsphere.virtualmachine.memory.used.guest.bytes`*:: ++ +-- type: long format: bytes @@ -12038,9 +15363,11 @@ format: bytes Used Memory of Guest in bytes -[float] -=== `vsphere.virtualmachine.memory.used.host.bytes` +-- +*`vsphere.virtualmachine.memory.used.host.bytes`*:: ++ +-- type: long format: bytes @@ -12048,9 +15375,11 @@ format: bytes Used Memory of Host in bytes -[float] -=== `vsphere.virtualmachine.memory.total.guest.bytes` +-- +*`vsphere.virtualmachine.memory.total.guest.bytes`*:: ++ +-- type: long format: bytes @@ -12058,9 +15387,11 @@ format: bytes Total Memory of Guest in bytes -[float] -=== `vsphere.virtualmachine.memory.free.guest.bytes` +-- +*`vsphere.virtualmachine.memory.free.guest.bytes`*:: ++ +-- type: long format: bytes @@ -12068,22 +15399,28 @@ format: bytes Free Memory of Guest in bytes -[float] -=== `vsphere.virtualmachine.custom_fields` +-- +*`vsphere.virtualmachine.custom_fields`*:: ++ +-- type: object Custom fields -[float] -=== `vsphere.virtualmachine.network_names` +-- +*`vsphere.virtualmachine.network_names`*:: ++ +-- type: keyword Network names +-- + [[exported-fields-windows]] == Windows fields @@ -12104,9 +15441,9 @@ beta[] Module for Windows -[float] -=== `windows.service.id` - +*`windows.service.id`*:: ++ +-- type: keyword example: hW3NJFc1Ap @@ -12114,9 +15451,11 @@ example: hW3NJFc1Ap A unique ID for the service. It is a hash of the machine's GUID and the service name. -[float] -=== `windows.service.name` +-- +*`windows.service.name`*:: ++ +-- type: keyword example: Wecsvc @@ -12124,9 +15463,11 @@ example: Wecsvc The service name. -[float] -=== `windows.service.display_name` +-- +*`windows.service.display_name`*:: ++ +-- type: keyword example: Windows Event Collector @@ -12134,33 +15475,41 @@ example: Windows Event Collector The display name of the service. -[float] -=== `windows.service.start_type` +-- +*`windows.service.start_type`*:: ++ +-- type: keyword The startup type of the service. The possible values are `Automatic`, `Boot`, `Disabled`, `Manual`, and `System`. -[float] -=== `windows.service.state` +-- +*`windows.service.state`*:: ++ +-- type: keyword The actual state of the service. The possible values are `Continuing`, `Pausing`, `Paused`, `Running`, `Starting`, `Stopping`, and `Stopped`. -[float] -=== `windows.service.exit_code` +-- +*`windows.service.exit_code`*:: ++ +-- type: keyword For `Stopped` services this is the error code that service reports when starting to stopping. This will be the generic Windows service error code unless the service provides a service-specific error code. 
-[float] -=== `windows.service.pid` +-- +*`windows.service.pid`*:: ++ +-- type: long example: 1092 @@ -12168,9 +15517,11 @@ example: 1092 For `Running` services this is the associated process PID. -[float] -=== `windows.service.uptime.ms` +-- +*`windows.service.uptime.ms`*:: ++ +-- type: long format: duration @@ -12178,6 +15529,8 @@ format: duration The service's uptime specified in milliseconds. +-- + [[exported-fields-zookeeper]] == ZooKeeper fields @@ -12199,155 +15552,193 @@ ZooKeeper metrics collected by the four-letter monitoring commands. -[float] -=== `zookeeper.mntr.hostname` - +*`zookeeper.mntr.hostname`*:: ++ +-- type: keyword ZooKeeper hostname. -[float] -=== `zookeeper.mntr.approximate_data_size` +-- +*`zookeeper.mntr.approximate_data_size`*:: ++ +-- type: long Approximate size of ZooKeeper data. -[float] -=== `zookeeper.mntr.latency.avg` +-- +*`zookeeper.mntr.latency.avg`*:: ++ +-- type: long Average latency between ensemble hosts in milliseconds. -[float] -=== `zookeeper.mntr.ephemerals_count` +-- +*`zookeeper.mntr.ephemerals_count`*:: ++ +-- type: long Number of ephemeral znodes. -[float] -=== `zookeeper.mntr.followers` +-- +*`zookeeper.mntr.followers`*:: ++ +-- type: long Number of followers seen by the current host. -[float] -=== `zookeeper.mntr.max_file_descriptor_count` +-- +*`zookeeper.mntr.max_file_descriptor_count`*:: ++ +-- type: long Maximum number of file descriptors allowed for the ZooKeeper process. -[float] -=== `zookeeper.mntr.latency.max` +-- +*`zookeeper.mntr.latency.max`*:: ++ +-- type: long Maximum latency in milliseconds. -[float] -=== `zookeeper.mntr.latency.min` +-- +*`zookeeper.mntr.latency.min`*:: ++ +-- type: long Minimum latency in milliseconds. -[float] -=== `zookeeper.mntr.num_alive_connections` +-- +*`zookeeper.mntr.num_alive_connections`*:: ++ +-- type: long Number of connections to ZooKeeper that are currently alive. -[float] -=== `zookeeper.mntr.open_file_descriptor_count` +-- +*`zookeeper.mntr.open_file_descriptor_count`*:: ++ +-- type: long Number of file descriptors open by the ZooKeeper process. -[float] -=== `zookeeper.mntr.outstanding_requests` +-- +*`zookeeper.mntr.outstanding_requests`*:: ++ +-- type: long Number of outstanding requests that need to be processed by the cluster. -[float] -=== `zookeeper.mntr.packets.received` +-- +*`zookeeper.mntr.packets.received`*:: ++ +-- type: long Number of ZooKeeper network packets received. -[float] -=== `zookeeper.mntr.packets.sent` +-- +*`zookeeper.mntr.packets.sent`*:: ++ +-- type: long Number of ZooKeeper network packets sent. -[float] -=== `zookeeper.mntr.pending_syncs` +-- +*`zookeeper.mntr.pending_syncs`*:: ++ +-- type: long Number of pending syncs to carry out to ZooKeeper ensemble followers. -[float] -=== `zookeeper.mntr.server_state` +-- +*`zookeeper.mntr.server_state`*:: ++ +-- type: keyword Role in the ZooKeeper ensemble. -[float] -=== `zookeeper.mntr.synced_followers` +-- +*`zookeeper.mntr.synced_followers`*:: ++ +-- type: long Number of synced followers reported when a node server_state is leader. -[float] -=== `zookeeper.mntr.version` +-- +*`zookeeper.mntr.version`*:: ++ +-- type: keyword ZooKeeper version and build string reported. -[float] -=== `zookeeper.mntr.watch_count` +-- +*`zookeeper.mntr.watch_count`*:: ++ +-- type: long Number of watches currently set on the local ZooKeeper process. -[float] -=== `zookeeper.mntr.znode_count` +-- +*`zookeeper.mntr.znode_count`*:: ++ +-- type: long Number of znodes reported by the local ZooKeeper process. 
+-- + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc index f1cb3801..3dcdd589 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc @@ -1,37 +1,28 @@ -[[metricbeat-getting-started]] -== Getting started with Metricbeat +[id="{beatname_lc}-getting-started"] +== Getting started with {beatname_uc} -Metricbeat helps you monitor your servers and the services they host by +{beatname_uc} helps you monitor your servers and the services they host by collecting metrics from the operating system and services. -To get started with your own Metricbeat setup, install and configure these -related products: +include::../../libbeat/docs/shared-getting-started-intro.asciidoc[] - * Elasticsearch for storage and indexing the data. - * Kibana for the UI. - * Logstash (optional) for inserting data into Elasticsearch. - -See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack] for more information. - -After installing the Elastic Stack, read the following topics to learn how to install, configure, and run Metricbeat: - -* <> -* <> -* <> +* <<{beatname_lc}-installation>> +* <<{beatname_lc}-configuration>> +* <<{beatname_lc}-template>> * <> -* <> +* <<{beatname_lc}-starting>> * <> * <> -[[metricbeat-installation]] -=== Step 1: Install Metricbeat +[id="{beatname_lc}-installation"] +=== Step 1: Install {beatname_uc} -You should install Metricbeat as close as possible to the service you want to +You should install {beatname_uc} as close as possible to the service you want to monitor. For example, if you have four servers with MySQL running, it's -recommended that you run Metricbeat on each server. This allows Metricbeat to +recommended that you run {beatname_uc} on each server. This allows {beatname_uc} to access your service from localhost and does not cause any additional network -traffic or prevent Metricbeat from collecting metrics when there are network -problems. Metrics from multiple Metricbeat instances will be combined on the +traffic or prevent {beatname_uc} from collecting metrics when there are network +problems. Metrics from multiple {beatname_uc} instances will be combined on the Elasticsearch server. 
include::../../libbeat/docs/shared-download-and-install.asciidoc[] @@ -47,10 +38,10 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -["source","sh",subs="attributes,callouts"] +["source","sh",subs="attributes"] ------------------------------------------------ -curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-{version}-amd64.deb -sudo dpkg -i metricbeat-{version}-amd64.deb +curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-amd64.deb +sudo dpkg -i {beatname_lc}-{version}-amd64.deb ------------------------------------------------ endif::[] @@ -66,10 +57,10 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -["source","sh",subs="attributes,callouts"] +["source","sh",subs="attributes"] ------------------------------------------------ -curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-{version}-x86_64.rpm -sudo rpm -vi metricbeat-{version}-x86_64.rpm +curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-x86_64.rpm +sudo rpm -vi {beatname_lc}-{version}-x86_64.rpm ------------------------------------------------ endif::[] @@ -85,10 +76,10 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -["source","sh",subs="attributes,callouts"] +["source","sh",subs="attributes"] ------------------------------------------------ -curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-{version}-darwin-x86_64.tar.gz -tar xzvf metricbeat-{version}-darwin-x86_64.tar.gz +curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-darwin-x86_64.tar.gz +tar xzvf {beatname_lc}-{version}-darwin-x86_64.tar.gz ------------------------------------------------ endif::[] @@ -122,100 +113,97 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -. Download the Metricbeat Windows zip file from the -https://www.elastic.co/downloads/beats/metricbeat[downloads page]. +. Download the {beatname_uc} Windows zip file from the +https://www.elastic.co/downloads/beats/{beatname_lc}[downloads page]. . Extract the contents of the zip file into `C:\Program Files`. -. Rename the `metricbeat--windows` directory to `Metricbeat`. +. Rename the +{beatname_lc}--windows+` directory to +{beatname_uc}+. . Open a PowerShell prompt as an Administrator (right-click the PowerShell icon -and select *Run As Administrator*). If you are running Windows XP, you may need -to download and install PowerShell. +and select *Run As Administrator*). -. From the PowerShell prompt, run the following commands to install Metricbeat +. From the PowerShell prompt, run the following commands to install {beatname_uc} as a Windows service: + -[source,shell] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -PS > cd 'C:\Program Files\Metricbeat' -PS C:\Program Files\Metricbeat> .\install-service-metricbeat.ps1 +PS > cd 'C:{backslash}Program Files{backslash}{beatname_uc}' +PS C:{backslash}Program Files{backslash}{beatname_uc}> .{backslash}install-service-{beatname_lc}.ps1 ---------------------------------------------------------------------- NOTE: If script execution is disabled on your system, you need to set the execution policy for the current session to allow the script to run. For -example: `PowerShell.exe -ExecutionPolicy UnRestricted -File -.\install-service-metricbeat.ps1`. +example: +PowerShell.exe -ExecutionPolicy UnRestricted -File +.{backslash}install-service-{beatname_lc}.ps1+. 
endif::[] -Before starting Metricbeat, you should look at the configuration options in the -configuration file, for example `C:\Program Files\Metricbeat\metricbeat.yml`. +Before starting {beatname_uc}, you should look at the configuration options in the +configuration file, for example +C:{backslash}Program Files{backslash}{beatname_uc}{backslash}{beatname_lc}.yml+. For more information about these options, see -<>. +<>. -[[metricbeat-configuration]] -=== Step 2: Configure Metricbeat +[id="{beatname_lc}-configuration"] +=== Step 2: Configure {beatname_uc} include::../../libbeat/docs/shared-configuring.asciidoc[] -Metricbeat uses <> to collect metrics. You configure -each module individually. The following example shows the default configuration -in the `metricbeat.yml` file. The system status module is enabled by default to -collect metrics about your server, such as CPU usage, memory usage, network IO -metrics, and process statistics: - -[source, shell] -------------------------------------- -metricbeat.modules: -- module: system - metricsets: - - cpu - - filesystem - - memory - - network - - process - enabled: true - period: 10s - processes: ['.*'] - cpu_ticks: false -------------------------------------- - -The following example shows how to configure two modules: the system module -and the Apache HTTPD module: - -[source, shell] -------------------------------------- -metricbeat.modules: -- module: system - metricsets: - - cpu - - filesystem - - memory - - network - - process - enabled: true - period: 10s - processes: ['.*'] - cpu_ticks: false -- module: apache - metricsets: ["status"] - enabled: true - period: 1s - hosts: ["http://127.0.0.1"] -------------------------------------- - -To configure Metricbeat: - -. Define the Metricbeat modules that you want to enable. For each module, specify -the metricsets that you want to collect. See <> for -more details about configuring modules. +When you configure {beatname_uc}, you need to specify which +<<{beatname_lc}-modules,modules>> to run. {beatname_uc} uses modules to collect +metrics. Each module defines the basic logic for collecting data from a specific +service, such as Redis or MySQL. A module consists of metricsets that fetch and +structure the data. Read <> to learn more. + +To configure {beatname_uc}: + +. Enable the modules that you want to run. If you accept the default +configuration without enabling additional modules, {beatname_uc} collects system +metrics only. ++ +You can either enable the default module configurations defined in the +`modules.d` directory (recommended), or add the module configs to the ++{beatname_lc}.yml+ file. The `modules.d` directory contains default +configurations for all available {beatname_uc} modules. ++ +If you are using a Docker image, see <>. ++ +The following examples enable the `apache` and `mysql` configs in the +`modules.d` directory : ++ +*deb and rpm:* ++ +["source","sh",subs="attributes"] +---- +{beatname_lc} modules enable apache mysql +---- ++ +*mac:* ++ +["source","sh",subs="attributes"] +---- +./{beatname_lc} modules enable apache mysql +---- ++ +*win:* ++ +["source","sh",subs="attributes"] +---- +PS > .{backslash}{beatname_lc}.exe modules enable apache mysql +---- + -If you accept the default configuration without specifying additional modules, -Metricbeat will collect system metrics only. +See the <> to learn more about this command. ++ +To change the default module configurations, modify the `.yml` files in the +`modules.d` directory. See <> for more about available +settings. 
++ +See <> if you want to add the module configs to the ++{beatname_lc}.yml+ file rather than using the `modules.d` directory. . If you are sending output directly to Elasticsearch (and not using Logstash), -set the IP address and port where Metricbeat can find the Elasticsearch installation: +set the IP address and port where {beatname_uc} can find the Elasticsearch +installation. For example: + [source,yaml] ---------------------------------------------------------------------- @@ -234,7 +222,7 @@ include::../../libbeat/docs/step-test-config.asciidoc[] include::../../libbeat/docs/step-look-at-config.asciidoc[] -[[metricbeat-template]] +[id="{beatname_lc}-template"] === Step 3: Load the index template in Elasticsearch :allplatforms: @@ -246,16 +234,16 @@ include::../../libbeat/docs/shared-template-load.asciidoc[] :allplatforms: include::../../libbeat/docs/dashboards.asciidoc[] -[[metricbeat-starting]] -=== Step 5: Start Metricbeat +[id="{beatname_lc}-starting"] +=== Step 5: Start {beatname_uc} -Run Metricbeat by issuing the appropriate command for your platform. If you +Run {beatname_uc} by issuing the appropriate command for your platform. If you are accessing a secured Elasticsearch cluster, make sure you've configured credentials as described in <<{beatname_lc}-configuration>>. -NOTE: If you use an init.d script to start Metricbeat on deb or rpm, you can't +NOTE: If you use an init.d script to start {beatname_uc} on deb or rpm, you can't specify command line flags (see <>). To specify flags, -start Metricbeat in the foreground. +start {beatname_uc} in the foreground. *deb:* @@ -273,45 +261,42 @@ sudo service {beatname_lc} start *docker:* -["source", "shell", subs="attributes"] ----------------------------------------------------------------------- -docker run {dockerimage} ----------------------------------------------------------------------- +See <>. *mac:* -[source,shell] +["source","sh",subs="attributes,callouts"] ---------------------------------------------------------------------- -sudo chown root metricbeat.yml <1> +sudo chown root {beatname_lc}.yml <1> sudo chown root modules.d/system.yml <1> -sudo ./metricbeat -e -c metricbeat.yml -d "publish" +sudo ./{beatname_lc} -e -c {beatname_lc}.yml -d "publish" ---------------------------------------------------------------------- -<1> You'll be running Metricbeat as root, so you need to change ownership of the +<1> You'll be running {beatname_uc} as root, so you need to change ownership of the configuration file and any configurations enabled in the `modules.d` directory, -or run Metricbeat with `-strict.perms=false` specified. See +or run {beatname_uc} with `-strict.perms=false` specified. See {libbeat}/config-file-permissions.html[Config File Ownership and Permissions] in the _Beats Platform Reference_. *win:* -[source,shell] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -PS C:\Program Files\Metricbeat> Start-Service metricbeat +PS C:{backslash}Program Files{backslash}{beatname_uc}> Start-Service {beatname_lc} ---------------------------------------------------------------------- -By default the log files are stored in `C:\ProgramData\metricbeat\Logs`. +By default the log files are stored in +C:{backslash}ProgramData{backslash}{beatname_lc}{backslash}Logs+. NOTE: On Windows, statistics about system load and swap usage are currently not captured. 
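As an optional sanity check (a suggested step, assuming the install script registered the service under the default +{beatname_lc}+ name), you can confirm that the Windows service is running before querying Elasticsearch:

["source","sh",subs="attributes"]
----
PS C:{backslash}Program Files{backslash}{beatname_uc}> Get-Service {beatname_lc}
----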
-==== Test the Metricbeat installation +==== Test the {beatname_uc} installation To verify that your server's statistics are present in Elasticsearch, issue the following command: -[source,shell] +["source","sh",subs="attributes"] ---------------------------------------------------------------------- -curl -XGET 'http://localhost:9200/metricbeat-*/_search?pretty' +curl -XGET 'http://localhost:9200/{beatname_lc}-*/_search?pretty' ---------------------------------------------------------------------- Make sure that you replace `localhost:9200` with the address of your @@ -332,4 +317,5 @@ include::../../libbeat/docs/opendashboards.asciidoc[] The dashboards are provided as examples. We recommend that you {kibana-ref}/dashboard.html[customize] them to meet your needs. -image:./images/metricbeat_system_dashboard.png[Metricbeat Dashboard] +[role="screenshot"] +image:./images/{beatname_lc}_system_dashboard.png[{beatname_uc} Dashboard] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/images/icon-no.png b/vendor/github.com/elastic/beats/metricbeat/docs/images/icon-no.png new file mode 100644 index 00000000..fb866805 Binary files /dev/null and b/vendor/github.com/elastic/beats/metricbeat/docs/images/icon-no.png differ diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/images/icon-yes.png b/vendor/github.com/elastic/beats/metricbeat/docs/images/icon-yes.png new file mode 100644 index 00000000..7837b283 Binary files /dev/null and b/vendor/github.com/elastic/beats/metricbeat/docs/images/icon-yes.png differ diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc index ec3fec59..cff0c452 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc @@ -2,19 +2,21 @@ include::../../libbeat/docs/version.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes62.asciidoc[] +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :version: {stack-version} :beatname_lc: metricbeat :beatname_uc: Metricbeat :beatname_pkg: {beatname_lc} +:github_repo_name: beats +:discuss_forum: beats/{beatname_lc} +:beat_default_index_prefix: {beatname_lc} +:has_ml_jobs: yes include::../../libbeat/docs/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] -include::../../libbeat/docs/contributing-to-beats.asciidoc[] - include::./gettingstarted.asciidoc[] include::../../libbeat/docs/repositories.asciidoc[] @@ -40,3 +42,5 @@ include::../../libbeat/docs/security/securing-beats.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] + +include::../../libbeat/docs/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc index ff2a373b..e929eccf 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc @@ -37,7 +37,7 @@ To see a list of enabled and disabled modules, run: ---- You can change the default module configurations by modifying the `.yml` files -in the `modules.d` directory. +in the `modules.d` directory. The following example shows a basic configuration for the Apache module: @@ -73,7 +73,7 @@ requires the `modules.d` layout. 
To enable specific modules and metricsets in the +{beatname_lc}.yml+ config file, you can add entries to the +{beatname_lc}.modules+ list. Each entry in the -list begins with a dash (-) and is followed by settings for that module. +list begins with a dash (-) and is followed by settings for that module. The following example shows a configuration where the apache and mysql modules are enabled: @@ -118,7 +118,7 @@ metricbeat.modules: == Configuration combinations You can specify a module configuration that uses different combinations of -metricsets, periods, and hosts. +metricsets, periods, and hosts. For a module with multiple metricsets defined, it's possible to define the module twice and specify a different period to use for each metricset. For the @@ -139,6 +139,7 @@ the `set2` metricset will be fetched every 2 minutes: [float] +[[module-config-options]] === Standard config options You can specify the following options for any Metricbeat module. Some modules @@ -156,8 +157,7 @@ The name of the module to run. For documentation about each module, see the A list of metricsets to execute. Make sure that you only list metricsets that are available in the module. It is not possible to reference metricsets from -other modules. For a list of available metricsets, see the documentation for the -module. +other modules. For a list of available metricsets, see <>. [float] ==== `enabled` @@ -200,3 +200,37 @@ A list of processors to apply to the data generated by the metricset. See <> for information about specifying processors in your config. +[float] +[[module-http-config-options]] +=== Standard HTTP config options + +The modules and metricsets for which the host is defined as a HTTP URL, also +support the following options: + +[float] +==== `username` + +The username to use for basic authentication. + +[float] +==== `password` + +The password to use for basic authentication. + +[float] +==== `headers` + +A list of headers to use with the HTTP request. For example: + +[source,yaml] +---- +headers: + Cookie: abcdef=123456 + My-Custom-Header: my-custom-value +---- + +[float] +==== `bearer_token_file` + +If defined, Metricbeat will read the contents of the file once at initialization +and then use the value in an HTTP Authorization header. \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aerospike.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aerospike.asciidoc index a1fde698..638ab236 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aerospike.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aerospike.asciidoc @@ -7,7 +7,7 @@ This file is generated! See scripts/docs_collector.py beta[] -The Aerospike module uses the http://www.aerospike.com/docs/reference/info[Info command] to collect metrics. +The Aerospike module uses the http://www.aerospike.com/docs/reference/info[Info command] to collect metrics. The default metricset is `namespace`. [float] === Compatibility @@ -26,7 +26,7 @@ in <>. 
Here is an example configuration: metricbeat.modules: - module: aerospike metricsets: ["namespace"] - enabled: false + enabled: true period: 10s hosts: ["localhost:3000"] ---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/apache.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/apache.asciidoc index e2d21540..882cd271 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/apache.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/apache.asciidoc @@ -6,13 +6,13 @@ This file is generated! See scripts/docs_collector.py == Apache module This module periodically fetches metrics from https://httpd.apache.org/[Apache -HTTPD] servers. +HTTPD] servers. The default metricset is `status`. [float] === Compatibility -The Apache metricsets were tested with Apache 2.4.20 and are expected to work with all versions ->= 2.2.31 and >= 2.4.16. +The Apache metricsets were tested with Apache 2.4.12 and 2.4.20 and are expected to work with +all versions >= 2.2.31 and >= 2.4.16. [float] @@ -35,12 +35,22 @@ metricbeat.modules: - module: apache metricsets: ["status"] period: 10s + enabled: true # Apache hosts hosts: ["http://127.0.0.1"] + + # Path to server status. Default server-status + #server_status_path: "server-status" + + # Username of hosts. Empty by default + #username: username + + # Password of hosts. Empty by default + #password: password ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/ceph.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/ceph.asciidoc index 24c258da..b1d2f758 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/ceph.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/ceph.asciidoc @@ -8,7 +8,7 @@ This file is generated! See scripts/docs_collector.py beta[] The Ceph module collects metrics by submitting HTTP GET requests to -the http://docs.ceph.com/docs/master/man/8/ceph-rest-api/[ceph-rest-api]. +the http://docs.ceph.com/docs/master/man/8/ceph-rest-api/[ceph-rest-api]. The default metricsets are `cluster_disk`, `cluster_health`, `monitor_health`, `pool_disk`, `osd_tree`. [float] @@ -24,9 +24,10 @@ metricbeat.modules: metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] period: 10s hosts: ["localhost:5000"] + enabled: true ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase.asciidoc index db50d32b..c8c47367 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/couchbase.asciidoc @@ -8,7 +8,7 @@ This file is generated! See scripts/docs_collector.py beta[] This module periodically fetches metrics from https://www.couchbase.com/[Couchbase] -servers. +servers. The default metricsets are `bucket`, `cluster`, `node`. 
[float] @@ -24,9 +24,10 @@ metricbeat.modules: metricsets: ["bucket", "cluster", "node"] period: 10s hosts: ["localhost:8091"] + enabled: true ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc index 99f14420..e050e5c8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/docker.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-docker]] == Docker module -This module fetches metrics from https://www.docker.com/[Docker] containers. +This module fetches metrics from https://www.docker.com/[Docker] containers. The default metricsets are: `container`, `cpu`, `diskio`, `healthcheck`, `info`, `memory` and `network`. The `image` metricset is not enabled by default. The Docker module is currently not tested on Windows. @@ -29,9 +29,21 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: docker - metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] + metricsets: + - "container" + - "cpu" + - "diskio" + - "healthcheck" + - "info" + #- "image" + - "memory" + - "network" hosts: ["unix:///var/run/docker.sock"] period: 10s + enabled: true + + # Replace dots in labels with `_`. Set to false to keep dots + labels.dedot: true # To connect to Docker over TLS you must specify a client and CA certificate. #ssl: diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/dropwizard.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/dropwizard.asciidoc index dc59eadb..7a110f09 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/dropwizard.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/dropwizard.asciidoc @@ -7,7 +7,7 @@ This file is generated! See scripts/docs_collector.py beta[] -This is the http://dropwizard.io[Dropwizard] module. +This is the http://dropwizard.io[Dropwizard] module. The default metricset is `collector`. @@ -26,9 +26,10 @@ metricbeat.modules: hosts: ["localhost:8080"] metrics_path: /metrics/metrics namespace: example + enabled: true ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc index 25fa6a50..a2d84448 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc @@ -7,7 +7,9 @@ This file is generated! See scripts/docs_collector.py beta[] -The Elasticsearch module contains a minimal set of metrics to enable monitoring of Elasticsearch across multiple versions. To monitor more Elasticsearch metrics, use our {monitoringdoc}/xpack-monitoring.html[X-Pack monitoring] which is available under a free basic license. 
+The Elasticsearch module contains a minimal set of metrics to enable monitoring of Elasticsearch across multiple versions. To monitor more Elasticsearch metrics, use our {monitoringdoc}/xpack-monitoring.html[monitoring] feature. + +The default metricsets are `node` and `node_stats`. [float] @@ -25,7 +27,7 @@ metricbeat.modules: hosts: ["localhost:9200"] ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/etcd.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/etcd.asciidoc index 7396e8e6..3b718442 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/etcd.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/etcd.asciidoc @@ -9,7 +9,7 @@ beta[] This is the Etcd Module. The Etcd module uses https://coreos.com/etcd/docs/latest/v2/api.html [Etcd v2 API] to collect metrics. - +The default metricsets are `leader`, `self` and `store`. [float] @@ -25,10 +25,9 @@ metricbeat.modules: metricsets: ["leader", "self", "store"] period: 10s hosts: ["localhost:2379"] - ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang.asciidoc index bd078c01..1ab5d994 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-golang]] == Golang module -experimental[] +beta[] The golang module collects metrics by submitting HTTP GET requests to https://golang.org/pkg/expvar/[golang-expvar-api]. @@ -29,7 +29,7 @@ metricbeat.modules: path: "/debug/vars" ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/expvar.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/expvar.asciidoc index 9029744f..7a6b8165 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/expvar.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/expvar.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-golang-expvar]] === Golang expvar metricset -experimental[] +beta[] include::../../../module/golang/expvar/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/heap.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/heap.asciidoc index f40f20d2..136041dd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/heap.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/golang/heap.asciidoc @@ -5,7 +5,7 @@ This file is generated! 
See scripts/docs_collector.py [[metricbeat-metricset-golang-heap]] === Golang heap metricset -experimental[] +beta[] include::../../../module/golang/heap/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/graphite.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/graphite.asciidoc index bec73f5c..b00a5386 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/graphite.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/graphite.asciidoc @@ -9,6 +9,7 @@ beta[] This is the graphite Module. +The default metricset is `server`. [float] @@ -23,12 +24,24 @@ metricbeat.modules: - module: graphite metricsets: ["server"] enabled: true -# protocol: "udp" -# templates: -# - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats -# namespace: "test" -# template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash -# delimiter: "_" + + # Host address to listen on. Default localhost. + #host: localhost + + # Listening port. Default 2003. + #port: 2003 + + # Protocol to listen on. This can be udp or tcp. Default udp. + #protocol: "udp" + + # Receive buffer size in bytes + #receive_buffer_size: 1024 + + #templates: + # - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats + # namespace: "test" + # template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash + # delimiter: "_" ---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc index 8ba4995f..b6d393b8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/haproxy.asciidoc @@ -32,16 +32,18 @@ required authentication add this to the haproxy config: [source,haproxy] ---- listen stats - bind 0.0.0.0:14569 + bind 0.0.0.0:14567 stats enable stats uri /stats stats auth admin:admin ---- +The default metricsets are `info`and `stat`. + [float] === Compatibility -The HAProxy metricsets were tested with HAProxy 1.6 and are expected to work with all 1.6 versions. +The HAProxy metricsets are tested with HAProxy versions from 1.6, 1.7 to 1.8. [float] @@ -57,6 +59,7 @@ metricbeat.modules: metricsets: ["info", "stat"] period: 10s hosts: ["tcp://127.0.0.1:14567"] + enabled: true ---- [float] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/http.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/http.asciidoc index bc106639..ce990187 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/http.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/http.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-http]] == HTTP module -beta[] - The HTTP module is a Metricbeat module used to call arbitrary HTTP endpoints for which a dedicated Metricbeat module is not available. Multiple endpoints can be configured which are polled in a regular interval and the result is shipped to the configured output channel. It is recommended to install a Metricbeat instance on each host from which data should be fetched. 
@@ -35,8 +33,11 @@ metricbeat.modules: path: "/" #body: "" #method: "GET" + #username: "user" + #password: "secret" #request.enabled: false #response.enabled: false + #json.is_array: false #dedot.enabled: false - module: http @@ -51,7 +52,7 @@ metricbeat.modules: # key: "value" ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/json.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/json.asciidoc index af10d962..f56e2945 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/json.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/json.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-http-json]] === HTTP json metricset -beta[] - include::../../../module/http/json/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/server.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/server.asciidoc index 29ba4fc8..d4acb5a7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/server.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/http/server.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-http-server]] === HTTP server metricset -experimental[] +beta[] include::../../../module/http/server/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia.asciidoc index b9f1cd57..bd84b185 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-jolokia]] == Jolokia module -beta[] - This is the Jolokia module. @@ -25,13 +23,29 @@ metricbeat.modules: period: 10s hosts: ["localhost"] namespace: "metrics" - path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" - jmx.mapping: - jmx.application: - jmx.instance: + #username: "user" + #password: "secret" + jmx.mappings: + - mbean: 'java.lang:type=Runtime' + attributes: + - attr: Uptime + field: uptime + - mbean: 'java.lang:type=Memory' + attributes: + - attr: HeapMemoryUsage + field: memory.heap_usage + - attr: NonHeapMemoryUsage + field: memory.non_heap_usage + # GC Metrics - this depends on what is available on your JVM + # - mbean: 'java.lang:type=GarbageCollector,name=ConcurrentMarkSweep' + # attributes: + # - attr: CollectionTime + # field: gc.cms_collection_time + # - attr: CollectionCount + # field: gc.cms_collection_count ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. 
[float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia/jmx.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia/jmx.asciidoc index 978e5d9c..aa9bb948 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia/jmx.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/jolokia/jmx.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-jolokia-jmx]] === Jolokia jmx metricset -beta[] - include::../../../module/jolokia/jmx/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc index 926feffa..5d097b5a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kafka.asciidoc @@ -9,6 +9,9 @@ beta[] This is the Kafka module. +The default metricsets are `consumergroup` and `partition`. + +This module is tested with 0.10.2. [float] @@ -21,9 +24,10 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: kafka - metricsets: ["partition"] + metricsets: ["consumergroup", "partition"] period: 10s hosts: ["localhost:9092"] + enabled: true #client_id: metricbeat #retries: 3 diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kibana.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kibana.asciidoc index 5a24430e..2819c935 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kibana.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kibana.asciidoc @@ -7,7 +7,9 @@ This file is generated! See scripts/docs_collector.py beta[] -The Kibana module only tracks the high-level metrics. To monitor more Kibana metrics, use our {monitoringdoc}/xpack-monitoring.html[X-Pack monitoring] which is available under a free basic license. +The Kibana module only tracks the high-level metrics. To monitor more Kibana metrics, use our {monitoringdoc}/xpack-monitoring.html[monitoring] feature. + +The default metricset is `status`. [float] @@ -23,9 +25,10 @@ metricbeat.modules: metricsets: ["status"] period: 10s hosts: ["localhost:5601"] + enabled: true ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc index 043adbd7..de0c5508 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc @@ -12,6 +12,8 @@ All metricsets with the `state_` prefix require `hosts` field pointing to kube-s service within the cluster, while the rest should be pointed to kubelet service. Check the example configuration on how to do it. +The default metricsets are `container`, `node`, `pod`, `system` and `volume`. 
+ [float] === Example configuration @@ -25,21 +27,28 @@ metricbeat.modules: # Node metrics, from kubelet: - module: kubernetes metricsets: + - container - node - - system - pod - - container + - system - volume period: 10s hosts: ["localhost:10255"] + enabled: true + #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + #ssl.certificate: "/etc/pki/client/cert.pem" + #ssl.key: "/etc/pki/client/cert.key" # State metrics from kube-state-metrics service: - module: kubernetes - enabled: false + enabled: true metricsets: - state_node - state_deployment - state_replicaset + - state_statefulset - state_pod - state_container period: 10s @@ -47,12 +56,12 @@ metricbeat.modules: # Kubernetes events - module: kubernetes - enabled: false + enabled: true metricsets: - event ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets @@ -77,6 +86,8 @@ The following metricsets are available: * <> +* <> + * <> * <> @@ -99,6 +110,8 @@ include::kubernetes/state_pod.asciidoc[] include::kubernetes/state_replicaset.asciidoc[] +include::kubernetes/state_statefulset.asciidoc[] + include::kubernetes/system.asciidoc[] include::kubernetes/volume.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/event.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/event.asciidoc index f7fe81b6..55bff425 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/event.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/event.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-kubernetes-event]] === Kubernetes event metricset -experimental[] +beta[] include::../../../module/kubernetes/event/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/state_statefulset.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/state_statefulset.asciidoc new file mode 100644 index 00000000..b53d50de --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/state_statefulset.asciidoc @@ -0,0 +1,21 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-kubernetes-state_statefulset]] +=== Kubernetes state_statefulset metricset + +include::../../../module/kubernetes/state_statefulset/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/kubernetes/state_statefulset/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kvm.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kvm.asciidoc new file mode 100644 index 00000000..9230de76 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kvm.asciidoc @@ -0,0 +1,43 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-module-kvm]] +== kvm module + +experimental[] + +== kvm module + +This is the kvm module. 
+ + + +[float] +=== Example configuration + +The kvm module supports the standard configuration options that are described +in <>. Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: kvm + metricsets: ["dommemstat"] + enabled: false + period: 10s + hosts: ["localhost"] + + # Timeout to connect to Libvirt server + #timeout: 1s +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +include::kvm/dommemstat.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kvm/dommemstat.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kvm/dommemstat.asciidoc new file mode 100644 index 00000000..825d2d89 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kvm/dommemstat.asciidoc @@ -0,0 +1,23 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-kvm-dommemstat]] +=== kvm dommemstat metricset + +experimental[] + +include::../../../module/kvm/dommemstat/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/kvm/dommemstat/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash.asciidoc index 95699132..999fee81 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash.asciidoc @@ -5,10 +5,11 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-logstash]] == Logstash module -experimental[] +beta[] This is the Logstash module. +The default metricsets are `node` and `node_stats`. [float] @@ -22,13 +23,12 @@ in <>. Here is an example configuration: metricbeat.modules: - module: logstash metricsets: ["node", "node_stats"] - enabled: false + enabled: true period: 10s hosts: ["localhost:9600"] - ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node.asciidoc index e93d6ef3..14703324 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-logstash-node]] === Logstash node metricset -experimental[] +beta[] include::../../../module/logstash/node/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node_stats.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node_stats.asciidoc index 76329778..bcec5c35 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node_stats.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/logstash/node_stats.asciidoc @@ -5,7 +5,7 @@ This file is generated! 
See scripts/docs_collector.py [[metricbeat-metricset-logstash-node_stats]] === Logstash node_stats metricset -experimental[] +beta[] include::../../../module/logstash/node_stats/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/memcached.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/memcached.asciidoc index 654a2893..5b0a425e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/memcached.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/memcached.asciidoc @@ -9,6 +9,7 @@ beta[] This is the Memcached module. These metricsets were tested with Memcached version 1.4.35. +The default metricset is `stats`. [float] @@ -24,6 +25,7 @@ metricbeat.modules: metricsets: ["stats"] period: 10s hosts: ["localhost:11211"] + enabled: true ---- [float] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb.asciidoc index 85cfa75b..6ecd0517 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-mongodb]] == MongoDB module -beta[] - This module periodically fetches metrics from https://www.mongodb.com[MongoDB] servers. @@ -49,6 +47,8 @@ over the username and password configuration options. password: test ---- +The default metricsets are `collstats`, `dbstats` and `status`. + [float] === Compatibility @@ -68,6 +68,7 @@ metricbeat.modules: - module: mongodb metricsets: ["dbstats", "status"] period: 10s + enabled: true # The hosts must be passed as MongoDB URLs in the format: # [mongodb://][user:pass@]host[:port]. diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/collstats.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/collstats.asciidoc index a8a60a03..33bf5b06 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/collstats.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/collstats.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-mongodb-collstats]] === MongoDB collstats metricset -experimental[] - include::../../../module/mongodb/collstats/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/dbstats.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/dbstats.asciidoc index fde85e24..52f124ef 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/dbstats.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/dbstats.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-mongodb-dbstats]] === MongoDB dbstats metricset -beta[] - include::../../../module/mongodb/dbstats/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/status.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/status.asciidoc index 77854578..a640b2c2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/status.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mongodb/status.asciidoc @@ -5,8 +5,6 @@ This file is generated! 
See scripts/docs_collector.py [[metricbeat-metricset-mongodb-status]] === MongoDB status metricset -beta[] - include::../../../module/mongodb/status/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/munin.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/munin.asciidoc new file mode 100644 index 00000000..517211e0 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/munin.asciidoc @@ -0,0 +1,42 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-module-munin]] +== Munin module + +experimental[] + +== munin module + +This is the munin module. + +The default metricset is `node`. + + +[float] +=== Example configuration + +The Munin module supports the standard configuration options that are described +in <>. Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: munin + metricsets: ["node"] + enabled: true + period: 10s + hosts: ["localhost:4949"] + node.namespace: node +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +include::munin/node.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/munin/node.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/munin/node.asciidoc new file mode 100644 index 00000000..9e40635b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/munin/node.asciidoc @@ -0,0 +1,17 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-munin-node]] +=== Munin node metricset + +experimental[] + +include::../../../module/munin/node/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mysql.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mysql.asciidoc index de695e35..3330ab76 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mysql.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mysql.asciidoc @@ -8,6 +8,8 @@ This file is generated! See scripts/docs_collector.py This module periodically fetches metrics from https://www.mysql.com/[MySQL] servers. +The default metricset is `status`. + [float] === Module-specific configuration notes diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/nginx.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/nginx.asciidoc index cc16d01f..b0d1022a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/nginx.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/nginx.asciidoc @@ -7,6 +7,8 @@ This file is generated! See scripts/docs_collector.py This module periodically fetches metrics from https://nginx.org/[Nginx] servers. +The default metricset is `stubstatus`. + [float] === Compatibility @@ -33,16 +35,17 @@ in <>. Here is an example configuration: metricbeat.modules: - module: nginx metricsets: ["stubstatus"] + enabled: true period: 10s # Nginx hosts hosts: ["http://127.0.0.1"] # Path to server status. Default server-status - #server_status_path: "server-status" + server_status_path: "server-status" ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. 
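For context, the `stubstatus` metricset scrapes the status page exposed by Nginx's `ngx_http_stub_status_module`, and `server_status_path` must match the location configured on the server. A minimal illustrative `nginx.conf` excerpt (an assumption about the server setup, not taken from these docs):

[source,nginx]
----
server {
    listen 127.0.0.1:80;

    # Expose the page that the stubstatus metricset scrapes; the path must
    # match the module's server_status_path setting.
    location /server-status {
        stub_status;        # on nginx versions before 1.7.5 use: stub_status on;
        allow 127.0.0.1;    # limit access to the local Metricbeat instance
        deny all;
    }
}
----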
[float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/php_fpm.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/php_fpm.asciidoc index 08791d30..7c1fde14 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/php_fpm.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/php_fpm.asciidoc @@ -10,6 +10,8 @@ beta[] This module periodically fetches metrics from https://php-fpm.org[PHP-FPM] servers. +The default metricset is `pool`. + [float] === Module-specific configuration notes @@ -49,12 +51,13 @@ in <>. Here is an example configuration: metricbeat.modules: - module: php_fpm metricsets: ["pool"] + enabled: true period: 10s status_path: "/status" hosts: ["localhost:8080"] ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/postgresql.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/postgresql.asciidoc index 9d03d9f1..c5d2c87e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/postgresql.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/postgresql.asciidoc @@ -8,6 +8,8 @@ This file is generated! See scripts/docs_collector.py This module periodically fetches metrics from https://www.postgresql.org/[PostgreSQL] servers. +Default metricsets are `activity`, `bgwriter` and `database`. + [float] === Module-specific configuration notes @@ -64,6 +66,7 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: postgresql + enabled: true metricsets: # Stats about every PostgreSQL database - database diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc index 2130d7d3..cada7468 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc @@ -10,6 +10,8 @@ beta[] This module periodically fetches metrics from https://prometheus.io/docs/[Prometheus]. +The default metricset is `collector`. + [float] === Example configuration @@ -22,13 +24,27 @@ in <>. Here is an example configuration: metricbeat.modules: - module: prometheus metricsets: ["stats"] + enabled: true period: 10s hosts: ["localhost:9090"] - metrics_path: /metrics + #metrics_path: /metrics #namespace: example + +- module: prometheus + metricsets: ["collector"] + enabled: true + period: 10s + hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + + # This can be used for service account based authorization: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. 
[float] === Metricsets diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/rabbitmq.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/rabbitmq.asciidoc index 59268582..7a6d30c2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/rabbitmq.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/rabbitmq.asciidoc @@ -9,6 +9,7 @@ beta[] The RabbitMQ module uses http://www.rabbitmq.com/management.html[HTTP API] created by the management plugin to collect metrics. +The default metricsets are `connection`, `node` and `queue`. [float] @@ -21,25 +22,30 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: rabbitmq - metricsets: ["node", "queue"] + metricsets: ["node", "queue", "connection"] + enabled: true period: 10s hosts: ["localhost:15672"] - username: guest - password: guest + #username: guest + #password: guest ---- -This module supports TLS connection when using `ssl` config field, as described in <>. +This module supports TLS connection when using `ssl` config field, as described in <>. It also supports the options described in <>. [float] === Metricsets The following metricsets are available: +* <> + * <> * <> +include::rabbitmq/connection.asciidoc[] + include::rabbitmq/node.asciidoc[] include::rabbitmq/queue.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/rabbitmq/connection.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/rabbitmq/connection.asciidoc new file mode 100644 index 00000000..ee879ba0 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/rabbitmq/connection.asciidoc @@ -0,0 +1,23 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-rabbitmq-connection]] +=== RabbitMQ connection metricset + +beta[] + +include::../../../module/rabbitmq/connection/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/rabbitmq/connection/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/redis.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/redis.asciidoc index c5607051..0fe58f3b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/redis.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/redis.asciidoc @@ -7,6 +7,8 @@ This file is generated! See scripts/docs_collector.py This module periodically fetches metrics from http://redis.io/[Redis] servers. +The defaut metricsets are `info` and `keyspace`. + [float] === Module-specific configuration notes @@ -42,6 +44,7 @@ in <>. Here is an example configuration: metricbeat.modules: - module: redis metricsets: ["info", "keyspace"] + enabled: true period: 10s # Redis hosts diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc index 56f36d6c..be414698 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system.asciidoc @@ -8,6 +8,9 @@ This file is generated! See scripts/docs_collector.py The System module allows you to monitor your servers. Because the System module always applies to the local server, the `hosts` config option is not needed. 
+The default metricsets are `cpu`, `load`, `memory`, `network`, `process` and +`process_summary`. + [float] === Dashboard @@ -26,35 +29,77 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: system - period: 10s metricsets: - - cpu - - load - - memory - - network - - process - - process_summary - #- core - #- diskio - #- socket + - cpu # CPU usage + - filesystem # File system usage for each mountpoint + - fsstat # File system summary metrics + - load # CPU load averages + - memory # Memory usage + - network # Network IO + - process # Per process metrics + - process_summary # Process summary + - uptime # System Uptime + #- core # Per CPU core usage + #- diskio # Disk IO + #- raid # Raid + #- socket # Sockets and connection info (linux only) + enabled: true + period: 10s processes: ['.*'] - process.include_top_n: - by_cpu: 5 # include top 5 processes by CPU - by_memory: 5 # include top 5 processes by memory -- module: system - period: 1m - metricsets: - - filesystem - - fsstat - processors: - - drop_event.when.regexp: - system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)' + # Configure the metric types that are included by these metricsets. + cpu.metrics: ["percentages"] # The other available options are normalized_percentages and ticks. + core.metrics: ["percentages"] # The other available option is ticks. -- module: system - period: 15m - metricsets: - - uptime + # A list of filesystem types to ignore. The filesystem metricset will not + # collect data from filesystems matching any of the specified types, and + # fsstats will not include data from these filesystems in its summary stats. + # If not set, types associated to virtual filesystems are automatically + # added when this information is available in the system (e.g. the list of + # `nodev` types in `/proc/filesystem`). + #filesystem.ignore_types: [] + + # These options allow you to filter out all processes that are not + # in the top N by CPU or memory, in order to reduce the number of documents created. + # If both the `by_cpu` and `by_memory` options are used, the union of the two sets + # is included. + #process.include_top_n: + + # Set to false to disable this feature and include all processes + #enabled: true + + # How many processes to include from the top by CPU. The processes are sorted + # by the `system.process.cpu.total.pct` field. + #by_cpu: 0 + + # How many processes to include from the top by memory. The processes are sorted + # by the `system.process.memory.rss.bytes` field. + #by_memory: 0 + + # If false, cmdline of a process is not cached. + #process.cmdline.cache.enabled: true + + # Enable collection of cgroup metrics from processes on Linux. + #process.cgroups.enabled: true + + # A list of regular expressions used to whitelist environment variables + # reported with the process metricset's events. Defaults to empty. + #process.env.whitelist: [] + + # Include the cumulative CPU tick values with the process metrics. Defaults + # to false. + #process.include_cpu_ticks: false + + # Raid mount point to monitor + #raid.mount_point: '/' + + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. 
+ #socket.reverse_lookup.enabled: false + #socket.reverse_lookup.success_ttl: 60s + #socket.reverse_lookup.failure_ttl: 60s + + # Diskio configurations + #diskio.include_devices: [] ---- [float] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/system/raid.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system/raid.asciidoc index f48c0b2a..f172194e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/system/raid.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/system/raid.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-system-raid]] === System raid metricset -experimental[] +beta[] include::../../../module/system/raid/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi.asciidoc index b52a7626..bb8df6c8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi.asciidoc @@ -5,25 +5,26 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-uwsgi]] == uwsgi module -experimental[] +beta[] == uwsgi module -This is the uwsgi module. Uses http://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html[StatsServer]. +This is the uwsgi module. By default collects the `stats` metricset, using +http://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html[StatsServer]. [float] === Module-specific configuration notes The uWSGI module has these additional config options: -*`hosts`*:: host URLs to get data from. by default `tcp://127.0.0.1:9191`. +*`hosts`*:: host URLs to get data from (e.g: `tcp://127.0.0.1:9191`). Can obtain data from 3 types of schemes: tcp (tcp://ip:port), unix socket (unix:///tmp/uwsgi.sock) and http/https server (http://ip:port) [float] === Dashboard -The nginx module comes with a predefined dashboard. For example: +The uwsgi module comes with a predefined dashboard. For example: image::./images/uwsgi_dashboard.png[] @@ -39,6 +40,7 @@ in <>. Here is an example configuration: metricbeat.modules: - module: uwsgi metricsets: ["status"] + enable: true period: 10s hosts: ["tcp://127.0.0.1:9191"] ---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi/status.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi/status.asciidoc index 710e9369..3b86eef0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi/status.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/uwsgi/status.asciidoc @@ -5,7 +5,7 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-metricset-uwsgi-status]] === uwsgi status metricset -experimental[] +beta[] include::../../../module/uwsgi/status/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/vsphere.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/vsphere.asciidoc index 689a1966..76c3c91b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/vsphere.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/vsphere.asciidoc @@ -9,6 +9,7 @@ beta[] The vSphere module uses the https://github.com/vmware/govmomi[Govmomi] library to collect metrics from any Vmware SDK URL (ESXi/VCenter). This library is built for and tested against ESXi and vCenter 5.5, 6.0 and 6.5. 
+By default it enables the metricsets `datastore`, `host` and `virtualmachine`. [float] @@ -21,6 +22,7 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: vsphere + enabled: true metricsets: ["datastore", "host", "virtualmachine"] period: 10s hosts: ["https://localhost/sdk"] @@ -31,7 +33,6 @@ metricbeat.modules: insecure: false # Get custom fields when using virtualmachine metric set. Default false. # get_custom_fields: false - ---- [float] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/windows.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/windows.asciidoc index 2943f320..863abf40 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/windows.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/windows.asciidoc @@ -7,7 +7,8 @@ This file is generated! See scripts/docs_collector.py beta[] -This is the Windows module. +This is the Windows module. It collects metrics from Windows systems, +by default metricset `service` is enabled. [float] @@ -21,11 +22,18 @@ in <>. Here is an example configuration: metricbeat.modules: - module: windows metricsets: ["perfmon"] + enabled: true period: 10s + perfmon.ignore_non_existent_counters: true perfmon.counters: + # - instance_label: processor.name + # instance_name: total + # measurement_label: processor.time.total.pct + # query: '\Processor Information(_Total)\% Processor Time' - module: windows metricsets: ["service"] + enabled: true period: 60s ---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/zookeeper.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/zookeeper.asciidoc index 3f0e6317..30a7fa3c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/zookeeper.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/zookeeper.asciidoc @@ -5,7 +5,8 @@ This file is generated! See scripts/docs_collector.py [[metricbeat-module-zookeeper]] == ZooKeeper module -The ZooKeeper module fetches statistics from the ZooKeeper service. +The ZooKeeper module fetches statistics from the ZooKeeper service. The default +metricset is `mntr`. [float] === Compatibility @@ -24,6 +25,7 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: zookeeper + enabled: true metricsets: ["mntr"] period: 10s hosts: ["localhost:2181"] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc index cb5ade37..49982046 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc @@ -3,26 +3,26 @@ This file is generated! 
See scripts/docs_collector.py //// [options="header"] -|======================== -|Modules |Metricsets -|<> beta[] | -.1+| |<> beta[] -|<> | -.1+| |<> -|<> beta[] | -.7+| |<> beta[] +|=================================== +|Modules |Dashboards |Metricsets +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> beta[] +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.1+| .1+| |<> +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.7+| .7+| |<> beta[] |<> beta[] |<> beta[] |<> beta[] |<> experimental[] |<> beta[] |<> beta[] -|<> beta[] | -.3+| |<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.3+| .3+| |<> beta[] |<> beta[] |<> beta[] -|<> | -.8+| |<> +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.8+| .8+| |<> |<> |<> |<> @@ -30,36 +30,36 @@ This file is generated! See scripts/docs_collector.py |<> |<> |<> -|<> beta[] | -.1+| |<> beta[] -|<> beta[] | -.2+| |<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.2+| .2+| |<> beta[] |<> beta[] -|<> beta[] | -.3+| |<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.3+| .3+| |<> beta[] |<> beta[] |<> beta[] -|<> experimental[] | -.2+| |<> experimental[] -|<> experimental[] -|<> beta[] | -.1+| |<> beta[] -|<> | -.2+| |<> +|<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.2+| .2+| |<> beta[] +|<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> beta[] +|<> |image:./images/icon-no.png[No prebuilt dashboards] | +.2+| .2+| |<> |<> -|<> beta[] | -.2+| |<> beta[] -|<> experimental[] -|<> beta[] | -.1+| |<> beta[] -|<> beta[] | -.2+| |<> beta[] +|<> |image:./images/icon-no.png[No prebuilt dashboards] | +.2+| .2+| |<> +|<> beta[] +|<> |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.2+| .2+| |<> beta[] |<> beta[] -|<> beta[] | -.1+| |<> beta[] -|<> | -.11+| |<> -|<> experimental[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> beta[] +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.12+| .12+| |<> +|<> beta[] |<> |<> |<> @@ -67,38 +67,44 @@ This file is generated! 
See scripts/docs_collector.py |<> |<> |<> +|<> |<> |<> -|<> experimental[] | -.2+| |<> experimental[] -|<> experimental[] -|<> beta[] | -.1+| |<> beta[] -|<> beta[] | -.3+| |<> experimental[] -|<> beta[] -|<> beta[] -|<> | -.1+| |<> -|<> | -.1+| |<> -|<> beta[] | -.1+| |<> beta[] -|<> | -.3+| |<> +|<> experimental[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> experimental[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.2+| .2+| |<> beta[] +|<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> beta[] +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.3+| .3+| |<> +|<> +|<> +|<> experimental[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> experimental[] +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.1+| .1+| |<> +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.1+| .1+| |<> +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> beta[] +|<> |image:./images/icon-no.png[No prebuilt dashboards] | +.3+| .3+| |<> |<> |<> -|<> beta[] | -.2+| |<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.2+| .2+| |<> beta[] |<> beta[] -|<> beta[] | -.2+| |<> beta[] +|<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.3+| .3+| |<> beta[] +|<> beta[] |<> beta[] -|<> | -.2+| |<> +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.2+| .2+| |<> |<> -|<> | -.13+| |<> +|<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.13+| .13+| |<> |<> |<> |<> @@ -108,20 +114,20 @@ This file is generated! See scripts/docs_collector.py |<> |<> |<> -|<> experimental[] +|<> beta[] |<> beta[] |<> -|<> experimental[] | -.1+| |<> experimental[] -|<> beta[] | -.3+| |<> beta[] +|<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.1+| .1+| |<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.3+| .3+| |<> beta[] |<> beta[] |<> beta[] -|<> beta[] | -.2+| |<> beta[] +|<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.2+| .2+| |<> beta[] |<> beta[] -|<> | -.1+| |<> +|<> |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> |================================ -- @@ -142,9 +148,11 @@ include::modules/jolokia.asciidoc[] include::modules/kafka.asciidoc[] include::modules/kibana.asciidoc[] include::modules/kubernetes.asciidoc[] +include::modules/kvm.asciidoc[] include::modules/logstash.asciidoc[] include::modules/memcached.asciidoc[] include::modules/mongodb.asciidoc[] +include::modules/munin.asciidoc[] include::modules/mysql.asciidoc[] include::modules/nginx.asciidoc[] include::modules/php_fpm.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc index 8509a560..bef686c7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc @@ -15,9 +15,9 @@ host machine from within the container. 
["source","sh",subs="attributes"] ---- docker run \ - --volume=/proc:/hostfs/proc:ro \ <1> - --volume=/sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro \ <2> - --volume=/:/hostfs:ro \ <3> + --mount type=bind,source=/proc,target=/hostfs/proc,readonly \ <1> + --mount type=bind,source=/sys/fs/cgroup,target=/hostfs/sys/fs/cgroup,readonly \ <2> + --mount type=bind,source=/,target=/hostfs,readonly \ <3> --net=host <4> {dockerimage} -system.hostfs=/hostfs ---- @@ -75,7 +75,7 @@ The mysql module configuration would look like this: metricbeat.modules: - module: mysql metricsets: ["status"] - hosts: ["mysql:3306"] <1> + hosts: ["tcp(mysql:3306)/"] <1> username: root password: ${MYSQL_PASSWORD} <2> ---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc index 03c5198b..06430c5c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/securing-metricbeat.asciidoc @@ -1,4 +1,4 @@ -[[securing-meticbeat]] +[[securing-metricbeat]] = Securing {beatname_uc} [partintro] @@ -9,6 +9,7 @@ The following topics describe how to secure communication between * <> * <> +* <> //sets block macro for https.asciidoc included in next section diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc index 13f28655..36d74a7c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc @@ -4,7 +4,7 @@ // that is unique to each beat. ///// -[[seting-up-and-running]] +[[setting-up-and-running]] == Setting up and running {beatname_uc} Before reading this section, see the @@ -33,3 +33,5 @@ include::../../libbeat/docs/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] include::./running-on-kubernetes.asciidoc[] + +include::../../libbeat/docs/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/http.go b/vendor/github.com/elastic/beats/metricbeat/helper/http.go index 0fd319a1..70c066f5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/helper/http.go +++ b/vendor/github.com/elastic/beats/metricbeat/helper/http.go @@ -11,6 +11,9 @@ import ( "time" "github.com/elastic/beats/libbeat/outputs" + + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/outputs/transport" "github.com/elastic/beats/metricbeat/mb" ) @@ -25,23 +28,32 @@ type HTTP struct { } // NewHTTP creates new http helper -func NewHTTP(base mb.BaseMetricSet) *HTTP { +func NewHTTP(base mb.BaseMetricSet) (*HTTP, error) { config := struct { - TLS *outputs.TLSConfig `config:"ssl"` - Timeout time.Duration `config:"timeout"` - Headers map[string]string `config:"headers"` + TLS *outputs.TLSConfig `config:"ssl"` + Timeout time.Duration `config:"timeout"` + Headers map[string]string `config:"headers"` + BearerTokenFile string `config:"bearer_token_file"` }{} if err := base.Module().UnpackConfig(&config); err != nil { - return nil + return nil, err } if config.Headers == nil { config.Headers = map[string]string{} } + if config.BearerTokenFile != "" { + header, err := getAuthHeaderFromToken(config.BearerTokenFile) + if err != nil { + return nil, err + } + config.Headers["Authorization"] = header + } + tlsConfig, err := outputs.LoadTLSConfig(config.TLS) if err != nil { - return nil + return nil, err } var dialer, 
tlsDialer transport.Dialer @@ -49,7 +61,7 @@ func NewHTTP(base mb.BaseMetricSet) *HTTP { dialer = transport.NetDialer(config.Timeout) tlsDialer, err = transport.TLSDialer(dialer, tlsConfig, config.Timeout) if err != nil { - return nil + return nil, err } return &HTTP{ @@ -65,7 +77,7 @@ func NewHTTP(base mb.BaseMetricSet) *HTTP { method: "GET", uri: base.HostData().SanitizedURI, body: nil, - } + }, nil } // FetchResponse fetches a response for the http metricset. @@ -153,3 +165,22 @@ func (h *HTTP) FetchJSON() (map[string]interface{}, error) { return data, nil } + +// getAuthHeaderFromToken reads a bearer authorizaiton token from the given file +func getAuthHeaderFromToken(path string) (string, error) { + var token string + + b, err := ioutil.ReadFile(path) + if err != nil { + return "", errors.Wrap(err, "reading bearer token file") + } + + if len(b) != 0 { + if b[len(b)-1] == '\n' { + b = b[0 : len(b)-1] + } + token = fmt.Sprintf("Bearer %s", string(b)) + } + + return token, nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/http_test.go b/vendor/github.com/elastic/beats/metricbeat/helper/http_test.go new file mode 100644 index 00000000..a3e5d615 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/helper/http_test.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
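Note on the `helper.NewHTTP` change above: the constructor now returns `(*HTTP, error)` instead of silently returning `nil` on bad config, and it can pre-set an `Authorization: Bearer <token>` header read from `bearer_token_file`. A minimal sketch of a metricset constructor consuming the new signature; the `somemetricset` package and its config are placeholders, not part of this diff:

[source,go]
----
package somemetricset

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/helper"
	"github.com/elastic/beats/metricbeat/mb"
)

// MetricSet wraps the shared HTTP helper.
type MetricSet struct {
	mb.BaseMetricSet
	http *helper.HTTP
}

// New propagates helper errors instead of returning a nil helper, matching
// the two-value NewHTTP signature introduced above.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	http, err := helper.NewHTTP(base) // reads ssl, timeout, headers and bearer_token_file
	if err != nil {
		return nil, err
	}
	return &MetricSet{BaseMetricSet: base, http: http}, nil
}

// Fetch queries the endpoint; when bearer_token_file is configured the helper
// has already attached the Authorization header.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	data, err := m.http.FetchJSON()
	if err != nil {
		return nil, err
	}
	return common.MapStr(data), nil
}
----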
+ +package helper + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetAuthHeaderFromToken(t *testing.T) { + tests := []struct { + Name, Content, Expected string + }{ + { + "Test a token is read", + "testtoken", + "Bearer testtoken", + }, + { + "Test a token is trimmed", + "testtoken\n", + "Bearer testtoken", + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + content := []byte(test.Content) + tmpfile, err := ioutil.TempFile("", "token") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpfile.Name()) + + if _, err := tmpfile.Write(content); err != nil { + t.Fatal(err) + } + if err := tmpfile.Close(); err != nil { + t.Fatal(err) + } + + header, err := getAuthHeaderFromToken(tmpfile.Name()) + assert.NoError(t, err) + assert.Equal(t, test.Expected, header) + }) + } +} + +func TestGetAuthHeaderFromTokenNoFile(t *testing.T) { + header, err := getAuthHeaderFromToken("nonexistingfile") + assert.Equal(t, "", header) + assert.Error(t, err) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus.go b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus.go index 958b9e7f..3338cfbe 100644 --- a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus.go +++ b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus.go @@ -2,11 +2,12 @@ package helper import ( "fmt" - - "github.com/elastic/beats/metricbeat/mb" + "io" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + + "github.com/elastic/beats/metricbeat/mb" ) // Prometheus helper retrieves prometheus formatted metrics @@ -15,9 +16,12 @@ type Prometheus struct { } // NewPrometheusClient creates new prometheus helper -func NewPrometheusClient(base mb.BaseMetricSet) *Prometheus { - http := NewHTTP(base) - return &Prometheus{*http} +func NewPrometheusClient(base mb.BaseMetricSet) (*Prometheus, error) { + http, err := NewHTTP(base) + if err != nil { + return nil, err + } + return &Prometheus{*http}, nil } // GetFamilies requests metric families from prometheus endpoint and returns them @@ -39,10 +43,14 @@ func (p *Prometheus) GetFamilies() ([]*dto.MetricFamily, error) { } families := []*dto.MetricFamily{} - for err == nil { + for { mf := &dto.MetricFamily{} err = decoder.Decode(mf) - if err == nil { + if err != nil { + if err == io.EOF { + break + } + } else { families = append(families, mf) } } diff --git a/vendor/github.com/elastic/beats/metricbeat/include/list.go b/vendor/github.com/elastic/beats/metricbeat/include/list.go index 094e2a4e..f9626bfc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/include/list.go +++ b/vendor/github.com/elastic/beats/metricbeat/include/list.go @@ -70,9 +70,12 @@ import ( _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_node" _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_pod" _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset" _ "github.com/elastic/beats/metricbeat/module/kubernetes/system" _ "github.com/elastic/beats/metricbeat/module/kubernetes/util" _ "github.com/elastic/beats/metricbeat/module/kubernetes/volume" + _ "github.com/elastic/beats/metricbeat/module/kvm" + _ "github.com/elastic/beats/metricbeat/module/kvm/dommemstat" _ "github.com/elastic/beats/metricbeat/module/logstash" _ "github.com/elastic/beats/metricbeat/module/logstash/node" _ "github.com/elastic/beats/metricbeat/module/logstash/node_stats" @@ -82,6 +85,8 @@ 
import ( _ "github.com/elastic/beats/metricbeat/module/mongodb/collstats" _ "github.com/elastic/beats/metricbeat/module/mongodb/dbstats" _ "github.com/elastic/beats/metricbeat/module/mongodb/status" + _ "github.com/elastic/beats/metricbeat/module/munin" + _ "github.com/elastic/beats/metricbeat/module/munin/node" _ "github.com/elastic/beats/metricbeat/module/mysql" _ "github.com/elastic/beats/metricbeat/module/mysql/status" _ "github.com/elastic/beats/metricbeat/module/nginx" @@ -96,6 +101,7 @@ import ( _ "github.com/elastic/beats/metricbeat/module/prometheus/collector" _ "github.com/elastic/beats/metricbeat/module/prometheus/stats" _ "github.com/elastic/beats/metricbeat/module/rabbitmq" + _ "github.com/elastic/beats/metricbeat/module/rabbitmq/connection" _ "github.com/elastic/beats/metricbeat/module/rabbitmq/node" _ "github.com/elastic/beats/metricbeat/module/rabbitmq/queue" _ "github.com/elastic/beats/metricbeat/module/redis" diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/builders.go b/vendor/github.com/elastic/beats/metricbeat/mb/builders.go index 34455de6..03d290c2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/builders.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/builders.go @@ -146,7 +146,7 @@ func newBaseMetricSets(r *Register, m Module) ([]BaseMetricSet, error) { metricSetNames := m.Config().MetricSets if len(metricSetNames) == 0 { var err error - metricSetNames, err = r.defaultMetricSets(m.Name()) + metricSetNames, err = r.DefaultMetricSets(m.Name()) if err != nil { return nil, errors.Errorf("no metricsets configured for module '%s'", m.Name()) } diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/registry.go b/vendor/github.com/elastic/beats/metricbeat/mb/registry.go index 1d8b1ed8..db58d6e0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/registry.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/registry.go @@ -217,10 +217,10 @@ func (r *Register) metricSetRegistration(module, name string) (MetricSetRegistra return registration, nil } -// defaultMetricSets returns the names of the default MetricSets for a module. +// DefaultMetricSets returns the names of the default MetricSets for a module. // An error is returned if no default MetricSet is declared or the module does // not exist. -func (r *Register) defaultMetricSets(module string) ([]string, error) { +func (r *Register) DefaultMetricSets(module string) ([]string, error) { r.lock.RLock() defer r.lock.RUnlock() diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/registry_test.go b/vendor/github.com/elastic/beats/metricbeat/mb/registry_test.go index 6821eae5..8585a1bb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/registry_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/registry_test.go @@ -195,7 +195,7 @@ func TestDefaultMetricSet(t *testing.T) { t.Fatal(err) } - names, err := registry.defaultMetricSets(moduleName) + names, err := registry.DefaultMetricSets(moduleName) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go b/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go index 2db38e65..8c38da39 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/testing/data_generator.go @@ -57,6 +57,28 @@ func WriteEvents(f mb.EventsFetcher, t testing.TB) error { return nil } +// WriteEventsReporterV2 fetches events and writes the first event to a ./_meta/data.json +// file. 
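`Register.defaultMetricSets` becomes exported as `DefaultMetricSets`, and `newBaseMetricSets` uses it when a module config lists no metricsets. The modules touched later in this diff pair that with `mb.DefaultMetricSet()` at registration time; a sketch of the pattern, with placeholder module and metricset names:

[source,go]
----
package somemetricset

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/mb"
)

// init registers the metricset and flags it as a module default, so
// Register.DefaultMetricSets("somemodule") returns it when a user's config
// omits the metricsets list.
func init() {
	mb.Registry.MustAddMetricSet("somemodule", "somemetricset", New,
		mb.DefaultMetricSet(),
	)
}

// MetricSet is a stub; a real metricset carries its clients and config here.
type MetricSet struct {
	mb.BaseMetricSet
}

// New is the factory the registry calls for each configured host.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{BaseMetricSet: base}, nil
}

// Fetch returns an empty event; only the registration options matter here.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	return common.MapStr{}, nil
}
----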
+func WriteEventsReporterV2(f mb.ReportingMetricSetV2, t testing.TB) error { + if !*dataFlag { + t.Skip("skip data generation tests") + } + + events, errs := ReportingFetchV2(f) + if len(errs) > 0 { + return errs[0] + } + + if len(events) == 0 { + return fmt.Errorf("no events were generated") + } + + e := StandardizeEvent(f, events[0]) + + WriteEventToDataJSON(t, e) + return nil +} + // CreateFullEvent builds a full event given the data generated by a MetricSet. // This simulates the output of Metricbeat as if it were // 2016-05-23T08:05:34.853Z and the hostname is host.example.com. @@ -78,9 +100,11 @@ func StandardizeEvent(ms mb.MetricSet, e mb.Event, modifiers ...mb.EventModifier } e.Timestamp = startTime - e.Namespace = ms.Registration().Namespace e.Took = 115 * time.Microsecond e.Host = ms.Host() + if e.Namespace == "" { + e.Namespace = ms.Registration().Namespace + } fullEvent := e.BeatEvent(ms.Module().Name(), ms.Name(), modifiers...) diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/testing/modules.go b/vendor/github.com/elastic/beats/metricbeat/mb/testing/modules.go index c638a09a..d590da00 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/testing/modules.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/testing/modules.go @@ -138,6 +138,42 @@ func ReportingFetch(metricSet mb.ReportingMetricSet) ([]common.MapStr, []error) return r.events, r.errs } +// NewReportingMetricSetV2 returns a new ReportingMetricSetV2 instance. Then +// you can use ReportingFetchV2 to perform a Fetch operation with the MetricSet. +func NewReportingMetricSetV2(t testing.TB, config interface{}) mb.ReportingMetricSetV2 { + metricSet := newMetricSet(t, config) + + reportingMetricSetV2, ok := metricSet.(mb.ReportingMetricSetV2) + if !ok { + t.Fatal("MetricSet does not implement ReportingMetricSetV2") + } + + return reportingMetricSetV2 +} + +type capturingReporterV2 struct { + events []mb.Event + errs []error +} + +func (r *capturingReporterV2) Event(event mb.Event) bool { + r.events = append(r.events, event) + return true +} + +func (r *capturingReporterV2) Error(err error) bool { + r.errs = append(r.errs, err) + return true +} + +// ReportingFetchV2 runs the given reporting metricset and returns all of the +// events and errors that occur during that period. +func ReportingFetchV2(metricSet mb.ReportingMetricSetV2) ([]mb.Event, []error) { + r := &capturingReporterV2{} + metricSet.Fetch(r) + return r.events, r.errs +} + // NewPushMetricSet instantiates a new PushMetricSet using the given // configuration. The ModuleFactory and MetricSetFactory are obtained from the // global Registry. @@ -217,16 +253,16 @@ func NewPushMetricSetV2(t testing.TB, config interface{}) mb.PushMetricSetV2 { return pushMetricSet } -// capturingReporterV2 stores all the events and errors from a metricset's +// capturingPushReporterV2 stores all the events and errors from a metricset's // Run method. -type capturingReporterV2 struct { +type capturingPushReporterV2 struct { doneC chan struct{} eventsC chan mb.Event } // report writes an event to the output channel and returns true. If the output // is closed it returns false. -func (r *capturingReporterV2) report(event mb.Event) bool { +func (r *capturingPushReporterV2) report(event mb.Event) bool { select { case <-r.doneC: // Publisher is stopped. 
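The new `NewReportingMetricSetV2`, `ReportingFetchV2`, and `WriteEventsReporterV2` helpers give `ReportingMetricSetV2` implementations the same test workflow the older fetcher interfaces already had. A sketch of a module test built on them; the module name, metricset name, and host are placeholders:

[source,go]
----
package somemetricset_test

import (
	"testing"

	mbtest "github.com/elastic/beats/metricbeat/mb/testing"
)

func TestFetchAndData(t *testing.T) {
	// Placeholder config; a real test points at a fixture or test container.
	config := map[string]interface{}{
		"module":     "somemodule",
		"metricsets": []string{"somemetricset"},
		"hosts":      []string{"localhost:9090"},
	}

	ms := mbtest.NewReportingMetricSetV2(t, config)

	// ReportingFetchV2 runs a single Fetch and captures events and errors.
	events, errs := mbtest.ReportingFetchV2(ms)
	if len(errs) > 0 {
		t.Fatal(errs)
	}
	if len(events) == 0 {
		t.Fatal("expected at least one event")
	}

	// WriteEventsReporterV2 standardizes the first event and writes it to
	// ./_meta/data.json; it skips itself unless data generation is enabled.
	if err := mbtest.WriteEventsReporterV2(ms, t); err != nil {
		t.Fatal("writing data.json", err)
	}
}
----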
@@ -237,17 +273,17 @@ func (r *capturingReporterV2) report(event mb.Event) bool { } // Event stores the passed-in event into the events array -func (r *capturingReporterV2) Event(event mb.Event) bool { +func (r *capturingPushReporterV2) Event(event mb.Event) bool { return r.report(event) } // Error stores the given error into the errors array. -func (r *capturingReporterV2) Error(err error) bool { +func (r *capturingPushReporterV2) Error(err error) bool { return r.report(mb.Event{Error: err}) } // Done returns the Done channel for this reporter. -func (r *capturingReporterV2) Done() <-chan struct{} { +func (r *capturingPushReporterV2) Done() <-chan struct{} { return r.doneC } @@ -255,7 +291,7 @@ func (r *capturingReporterV2) Done() <-chan struct{} { // time and returns all of the events and errors that occur during that period. func RunPushMetricSetV2(timeout time.Duration, waitEvents int, metricSet mb.PushMetricSetV2) []mb.Event { var ( - r = &capturingReporterV2{doneC: make(chan struct{}), eventsC: make(chan mb.Event)} + r = &capturingPushReporterV2{doneC: make(chan struct{}), eventsC: make(chan mb.Event)} wg sync.WaitGroup events []mb.Event ) diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml index eb062654..8de1f7f1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml @@ -61,6 +61,7 @@ metricbeat.modules: - uptime # System Uptime #- core # Per CPU core usage #- diskio # Disk IO + #- raid # Raid #- socket # Sockets and connection info (linux only) enabled: true period: 10s @@ -73,6 +74,9 @@ metricbeat.modules: # A list of filesystem types to ignore. The filesystem metricset will not # collect data from filesystems matching any of the specified types, and # fsstats will not include data from these filesystems in its summary stats. + # If not set, types associated to virtual filesystems are automatically + # added when this information is available in the system (e.g. the list of + # `nodev` types in `/proc/filesystem`). #filesystem.ignore_types: [] # These options allow you to filter out all processes that are not @@ -80,7 +84,7 @@ metricbeat.modules: # If both the `by_cpu` and `by_memory` options are used, the union of the two sets # is included. #process.include_top_n: - # + # Set to false to disable this feature and include all processes #enabled: true @@ -106,15 +110,21 @@ metricbeat.modules: # to false. #process.include_cpu_ticks: false + # Raid mount point to monitor + #raid.mount_point: '/' + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. 
#socket.reverse_lookup.enabled: false #socket.reverse_lookup.success_ttl: 60s #socket.reverse_lookup.failure_ttl: 60s + # Diskio configurations + #diskio.include_devices: [] + #------------------------------ Aerospike Module ----------------------------- - module: aerospike metricsets: ["namespace"] - enabled: false + enabled: true period: 10s hosts: ["localhost:3000"] @@ -122,6 +132,7 @@ metricbeat.modules: - module: apache metricsets: ["status"] period: 10s + enabled: true # Apache hosts hosts: ["http://127.0.0.1"] @@ -140,18 +151,32 @@ metricbeat.modules: metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] period: 10s hosts: ["localhost:5000"] + enabled: true #------------------------------ Couchbase Module ----------------------------- - module: couchbase metricsets: ["bucket", "cluster", "node"] period: 10s hosts: ["localhost:8091"] + enabled: true #------------------------------- Docker Module ------------------------------- - module: docker - metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] + metricsets: + - "container" + - "cpu" + - "diskio" + - "healthcheck" + - "info" + #- "image" + - "memory" + - "network" hosts: ["unix:///var/run/docker.sock"] period: 10s + enabled: true + + # Replace dots in labels with `_`. Set to false to keep dots + labels.dedot: true # To connect to Docker over TLS you must specify a client and CA certificate. #ssl: @@ -166,6 +191,7 @@ metricbeat.modules: hosts: ["localhost:8080"] metrics_path: /metrics/metrics namespace: example + enabled: true #---------------------------- Elasticsearch Module --------------------------- - module: elasticsearch @@ -179,7 +205,6 @@ metricbeat.modules: period: 10s hosts: ["localhost:2379"] - #------------------------------- Golang Module ------------------------------- - module: golang metricsets: ["expvar","heap"] @@ -194,12 +219,24 @@ metricbeat.modules: - module: graphite metricsets: ["server"] enabled: true -# protocol: "udp" -# templates: -# - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats -# namespace: "test" -# template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash -# delimiter: "_" + + # Host address to listen on. Default localhost. + #host: localhost + + # Listening port. Default 2003. + #port: 2003 + + # Protocol to listen on. This can be udp or tcp. Default udp. 
+ #protocol: "udp" + + # Receive buffer size in bytes + #receive_buffer_size: 1024 + + #templates: + # - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats + # namespace: "test" + # template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash + # delimiter: "_" #------------------------------- HAProxy Module ------------------------------ @@ -207,6 +244,7 @@ metricbeat.modules: metricsets: ["info", "stat"] period: 10s hosts: ["tcp://127.0.0.1:14567"] + enabled: true #-------------------------------- HTTP Module -------------------------------- - module: http @@ -217,8 +255,11 @@ metricbeat.modules: path: "/" #body: "" #method: "GET" + #username: "user" + #password: "secret" #request.enabled: false #response.enabled: false + #json.is_array: false #dedot.enabled: false - module: http @@ -238,16 +279,33 @@ metricbeat.modules: period: 10s hosts: ["localhost"] namespace: "metrics" - path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" - jmx.mapping: - jmx.application: - jmx.instance: + #username: "user" + #password: "secret" + jmx.mappings: + - mbean: 'java.lang:type=Runtime' + attributes: + - attr: Uptime + field: uptime + - mbean: 'java.lang:type=Memory' + attributes: + - attr: HeapMemoryUsage + field: memory.heap_usage + - attr: NonHeapMemoryUsage + field: memory.non_heap_usage + # GC Metrics - this depends on what is available on your JVM + # - mbean: 'java.lang:type=GarbageCollector,name=ConcurrentMarkSweep' + # attributes: + # - attr: CollectionTime + # field: gc.cms_collection_time + # - attr: CollectionCount + # field: gc.cms_collection_count #-------------------------------- Kafka Module ------------------------------- - module: kafka - metricsets: ["partition"] + metricsets: ["consumergroup", "partition"] period: 10s hosts: ["localhost:9092"] + enabled: true #client_id: metricbeat #retries: 3 @@ -275,26 +333,34 @@ metricbeat.modules: metricsets: ["status"] period: 10s hosts: ["localhost:5601"] + enabled: true #----------------------------- Kubernetes Module ----------------------------- # Node metrics, from kubelet: - module: kubernetes metricsets: + - container - node - - system - pod - - container + - system - volume period: 10s hosts: ["localhost:10255"] + enabled: true + #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + #ssl.certificate: "/etc/pki/client/cert.pem" + #ssl.key: "/etc/pki/client/cert.key" # State metrics from kube-state-metrics service: - module: kubernetes - enabled: false + enabled: true metricsets: - state_node - state_deployment - state_replicaset + - state_statefulset - state_pod - state_container period: 10s @@ -302,28 +368,39 @@ metricbeat.modules: # Kubernetes events - module: kubernetes - enabled: false + enabled: true metricsets: - event +#--------------------------------- kvm Module -------------------------------- +- module: kvm + metricsets: ["dommemstat"] + enabled: false + period: 10s + hosts: ["localhost"] + + # Timeout to connect to Libvirt server + #timeout: 1s + #------------------------------ Logstash Module ------------------------------ - module: logstash metricsets: ["node", "node_stats"] - enabled: false + enabled: true period: 10s hosts: ["localhost:9600"] - #------------------------------ Memcached Module ----------------------------- - module: memcached metricsets: ["stats"] period: 10s hosts: ["localhost:11211"] + enabled: 
true #------------------------------- MongoDB Module ------------------------------ - module: mongodb metricsets: ["dbstats", "status"] period: 10s + enabled: true # The hosts must be passed as MongoDB URLs in the format: # [mongodb://][user:pass@]host[:port]. @@ -338,6 +415,14 @@ metricbeat.modules: # Password to use when connecting to MongoDB. Empty by default. #password: pass +#-------------------------------- Munin Module ------------------------------- +- module: munin + metricsets: ["node"] + enabled: true + period: 10s + hosts: ["localhost:4949"] + node.namespace: node + #-------------------------------- MySQL Module ------------------------------- - module: mysql metricsets: ["status"] @@ -358,26 +443,28 @@ metricbeat.modules: #raw: false #-------------------------------- Nginx Module ------------------------------- -#- module: nginx - #metricsets: ["stubstatus"] - #enabled: true - #period: 10s +- module: nginx + metricsets: ["stubstatus"] + enabled: true + period: 10s # Nginx hosts - #hosts: ["http://127.0.0.1"] + hosts: ["http://127.0.0.1"] # Path to server status. Default server-status - #server_status_path: "server-status" + server_status_path: "server-status" #------------------------------- PHP_FPM Module ------------------------------ - module: php_fpm metricsets: ["pool"] + enabled: true period: 10s status_path: "/status" hosts: ["localhost:8080"] #----------------------------- PostgreSQL Module ----------------------------- - module: postgresql + enabled: true metricsets: # Stats about every PostgreSQL database - database @@ -405,23 +492,39 @@ metricbeat.modules: #----------------------------- Prometheus Module ----------------------------- - module: prometheus metricsets: ["stats"] + enabled: true + period: 10s + hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + +- module: prometheus + metricsets: ["collector"] + enabled: true period: 10s hosts: ["localhost:9090"] - metrics_path: /metrics + #metrics_path: /metrics #namespace: example + # This can be used for service account based authorization: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + #------------------------------ RabbitMQ Module ------------------------------ - module: rabbitmq - metricsets: ["node", "queue"] + metricsets: ["node", "queue", "connection"] + enabled: true period: 10s hosts: ["localhost:15672"] - username: guest - password: guest + #username: guest + #password: guest #-------------------------------- Redis Module ------------------------------- - module: redis metricsets: ["info", "keyspace"] + enabled: true period: 10s # Redis hosts @@ -453,11 +556,13 @@ metricbeat.modules: #-------------------------------- uwsgi Module ------------------------------- - module: uwsgi metricsets: ["status"] + enable: true period: 10s hosts: ["tcp://127.0.0.1:9191"] #------------------------------- vSphere Module ------------------------------ - module: vsphere + enabled: true metricsets: ["datastore", "host", "virtualmachine"] period: 10s hosts: ["https://localhost/sdk"] @@ -468,20 +573,27 @@ metricbeat.modules: insecure: false # Get custom fields when using virtualmachine metric set. Default false. 
# get_custom_fields: false - #------------------------------- Windows Module ------------------------------ - module: windows metricsets: ["perfmon"] + enabled: true period: 10s + perfmon.ignore_non_existent_counters: true perfmon.counters: + # - instance_label: processor.name + # instance_name: total + # measurement_label: processor.time.total.pct + # query: '\Processor Information(_Total)\% Processor Time' - module: windows metricsets: ["service"] + enabled: true period: 60s #------------------------------ ZooKeeper Module ----------------------------- - module: zookeeper + enabled: true metricsets: ["mntr"] period: 10s hosts: ["localhost:2181"] @@ -523,7 +635,8 @@ metricbeat.modules: # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. - # A value of 0 (the default) ensures events are immediately available + # The default value is set to 2048. + # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 @@ -531,6 +644,66 @@ metricbeat.modules: # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + # Sets the maximum number of CPUs that can be executing simultaneously. 
The # default is the number of logical CPUs available in the system. #max_procs: @@ -565,6 +738,14 @@ metricbeat.modules: # equals: # http.code: 200 # +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. @@ -589,6 +770,7 @@ metricbeat.modules: # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 +# match_short_id: false # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: @@ -602,6 +784,7 @@ metricbeat.modules: # #processors: #- add_docker_metadata: ~ +#- add_host_metadata: ~ #============================= Elastic Cloud ================================== @@ -674,7 +857,18 @@ output.elasticsearch: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -738,7 +932,7 @@ output.elasticsearch: # Optional load balance the events between the Logstash hosts. Default is false. #loadbalance: false - # Number of batches to be sent asynchronously to logstash while processing + # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 @@ -747,6 +941,17 @@ output.elasticsearch: # if no error is encountered. #slow_start: false + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + # Optional index name. The default index name is set to metricbeat # in all lowercase. #index: 'metricbeat' @@ -1091,6 +1296,10 @@ output.elasticsearch: # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + #============================== Dashboards ===================================== # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the @@ -1125,6 +1334,17 @@ output.elasticsearch: # how to install the dashboards by first querying Elasticsearch. 
#setup.dashboards.always_kibana: false +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + + #============================== Template ===================================== # A template is used to set the mapping in Elasticsearch diff --git a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/config.reference.yml new file mode 100644 index 00000000..38aca68f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: aerospike + metricsets: ["namespace"] + enabled: true + period: 10s + hosts: ["localhost:3000"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/config.yml index 787e314d..0a4a883f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/config.yml @@ -1,5 +1,2 @@ - module: aerospike - metricsets: ["namespace"] - enabled: false - period: 10s hosts: ["localhost:3000"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/docs.asciidoc index 9d1f5923..3cbf6660 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/docs.asciidoc @@ -1,4 +1,4 @@ -The Aerospike module uses the http://www.aerospike.com/docs/reference/info[Info command] to collect metrics. +The Aerospike module uses the http://www.aerospike.com/docs/reference/info[Info command] to collect metrics. The default metricset is `namespace`. [float] === Compatibility diff --git a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/namespace/namespace.go b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/namespace/namespace.go index 1dafef5e..c7e0afcb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/namespace/namespace.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/namespace/namespace.go @@ -16,9 +16,9 @@ import ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("aerospike", "namespace", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("aerospike", "namespace", New, + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -27,6 +27,7 @@ func init() { // multiple fetch calls. 
type MetricSet struct { mb.BaseMetricSet + host *as.Host client *as.Client } @@ -47,14 +48,9 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, errors.Wrap(err, "Invalid host format, expected hostname:port") } - client, err := as.NewClientWithPolicyAndHost(as.NewClientPolicy(), host) - if err != nil { - return nil, err - } - return &MetricSet{ BaseMetricSet: base, - client: client, + host: host, }, nil } @@ -64,6 +60,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch() ([]common.MapStr, error) { var events []common.MapStr + if err := m.connect(); err != nil { + return nil, err + } + for _, node := range m.client.GetNodes() { info, err := as.RequestNodeInfo(node, "namespaces") if err != nil { @@ -91,3 +91,15 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return events, nil } + +// create an aerospike client if it doesn't exist yet +func (m *MetricSet) connect() error { + if m.client == nil { + client, err := as.NewClientWithPolicyAndHost(as.NewClientPolicy(), m.host) + if err != nil { + return err + } + m.client = client + } + return nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/Dockerfile.2.4.12 b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/Dockerfile.2.4.12 new file mode 100644 index 00000000..f35ea2d9 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/Dockerfile.2.4.12 @@ -0,0 +1,4 @@ +FROM httpd:2.4.12 +RUN apt-get update && apt-get install -y curl +HEALTHCHECK --interval=1s --retries=90 CMD curl -f http://localhost +COPY ./httpd.conf /usr/local/apache2/conf/httpd.conf diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.reference.yml index 74f42663..2ddee294 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.reference.yml @@ -1,6 +1,7 @@ - module: apache metricsets: ["status"] period: 10s + enabled: true # Apache hosts hosts: ["http://127.0.0.1"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.yml index 08d1efaf..04e689e0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/config.yml @@ -1,6 +1,4 @@ - module: apache - metricsets: ["status"] - period: 10s - - # Apache hosts hosts: ["http://127.0.0.1"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/docs.asciidoc index 7e86891a..c684a103 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/docs.asciidoc @@ -1,11 +1,11 @@ This module periodically fetches metrics from https://httpd.apache.org/[Apache -HTTPD] servers. +HTTPD] servers. The default metricset is `status`. [float] === Compatibility -The Apache metricsets were tested with Apache 2.4.20 and are expected to work with all versions ->= 2.2.31 and >= 2.4.16. +The Apache metricsets were tested with Apache 2.4.12 and 2.4.20 and are expected to work with +all versions >= 2.2.31 and >= 2.4.16. 
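The aerospike `namespace` metricset above now keeps the parsed host and defers client creation to the first `Fetch` through a `connect` helper, so an unreachable server fails the fetch instead of metricset construction. The same guard, sketched with placeholder client types (`conn` and `dial` are illustrative, not beats APIs):

[source,go]
----
package somemetricset

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/mb"
)

// conn stands in for the real client type (*as.Client in the aerospike change).
type conn struct{}

// dial stands in for the real client constructor.
func dial(host string) (*conn, error) { return &conn{}, nil }

// MetricSet keeps the connection so it can be reused across fetches.
type MetricSet struct {
	mb.BaseMetricSet
	client *conn
}

// connect creates the client on first use and reuses it afterwards.
func (m *MetricSet) connect() error {
	if m.client != nil {
		return nil
	}
	c, err := dial(m.Host())
	if err != nil {
		return err
	}
	m.client = c
	return nil
}

// Fetch connects lazily and would then collect metrics with m.client.
func (m *MetricSet) Fetch() ([]common.MapStr, error) {
	if err := m.connect(); err != nil {
		return nil, err
	}
	return []common.MapStr{}, nil
}
----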
[float] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/env b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/env index ffc83c44..5339c340 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/env +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/env @@ -1,2 +1,3 @@ APACHE_HOST=apache +APACHE_OLD_HOST=apache_2_4_12 APACHE_PORT=80 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/status/data.go b/vendor/github.com/elastic/beats/metricbeat/module/apache/status/data.go index baa0d715..aa124aaa 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/status/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/status/data.go @@ -51,8 +51,40 @@ var ( "15": c.Float("Load15", s.Optional), }, } + + // Schema used till apache 2.4.12 + schemaOld = s.Schema{ + "total_accesses": c.Int("Total Accesses"), + "total_kbytes": c.Int("Total kBytes"), + "requests_per_sec": c.Float("ReqPerSec", s.Optional), + "bytes_per_sec": c.Float("BytesPerSec", s.Optional), + "workers": s.Object{ + "busy": c.Int("BusyWorkers"), + "idle": c.Int("IdleWorkers"), + }, + "uptime": s.Object{ + "uptime": c.Int("Uptime"), + }, + "connections": s.Object{ + "total": c.Int("ConnsTotal", s.Optional), + "async": s.Object{ + "writing": c.Int("ConnsAsyncWriting", s.Optional), + "keep_alive": c.Int("ConnsAsyncKeepAlive", s.Optional), + "closing": c.Int("ConnsAsyncClosing", s.Optional), + }, + }, + } ) +func applySchema(event common.MapStr, fullEvent map[string]interface{}) *s.Errors { + applicableSchema := schema + if _, found := fullEvent["ServerUptimeSeconds"]; !found { + applicableSchema = schemaOld + } + _, err := applicableSchema.ApplyTo(event, fullEvent) + return err +} + // Map body to MapStr func eventMapping(scanner *bufio.Scanner, hostname string) (common.MapStr, *s.Errors) { var ( @@ -140,9 +172,8 @@ func eventMapping(scanner *bufio.Scanner, hostname string) (common.MapStr, *s.Er "total": totalAll, }, } - _, err := schema.ApplyTo(event, fullEvent) - return event, err + return event, applySchema(event, fullEvent) } /* diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status.go b/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status.go index 00258f15..88a70d1f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status.go @@ -35,9 +35,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("apache", "status", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("apache", "status", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching Apache HTTPD server status. @@ -48,9 +49,13 @@ type MetricSet struct { // New creates new instance of MetricSet. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status_test.go b/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status_test.go index e419a5d2..77a62823 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/status/status_test.go @@ -123,8 +123,8 @@ func TestFetchEventContents(t *testing.T) { assert.EqualValues(t, 63, event["total_kbytes"]) uptime := event["uptime"].(common.MapStr) - assert.EqualValues(t, 1026782, uptime["server_uptime"]) assert.EqualValues(t, 1026782, uptime["uptime"]) + assert.EqualValues(t, 1026782, uptime["server_uptime"]) } // TestFetchTimeout verifies that the HTTP request times out and an error is diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/config.reference.yml new file mode 100644 index 00000000..4f2f01d2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: ceph + metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] + period: 10s + hosts: ["localhost:5000"] + enabled: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/config.yml index a069c8e6..d77bcf36 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/config.yml @@ -1,4 +1,11 @@ - module: ceph - metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] + metricsets: ["cluster_health", "cluster_status", "monitor_health"] period: 10s hosts: ["localhost:5000"] + #username: "user" + #password: "secret" + +- module: ceph + metricsets: ["cluster_disk", "osd_tree", "pool_disk"] + period: 1m + hosts: ["localhost:5000"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/docs.asciidoc index a9189cc4..ec2e4442 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/docs.asciidoc @@ -1,2 +1,2 @@ The Ceph module collects metrics by submitting HTTP GET requests to -the http://docs.ceph.com/docs/master/man/8/ceph-rest-api/[ceph-rest-api]. +the http://docs.ceph.com/docs/master/man/8/ceph-rest-api/[ceph-rest-api]. The default metricsets are `cluster_disk`, `cluster_health`, `monitor_health`, `pool_disk`, `osd_tree`. 
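On the Apache side, `schemaOld` in `status/data.go` handles servers up to 2.4.12, which report the older status keys and never emit `ServerUptimeSeconds`; `applySchema` keys the fallback on exactly that field. A condensed sketch of the detection, with a hypothetical helper name:

[source,go]
----
package status

// usesLegacySchema reports whether a parsed server-status payload comes from
// Apache <= 2.4.12. Those versions do not emit ServerUptimeSeconds, which is
// the signal applySchema uses to fall back to schemaOld.
func usesLegacySchema(fullEvent map[string]interface{}) bool {
	_, found := fullEvent["ServerUptimeSeconds"]
	return !found
}
----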
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_disk/cluster_disk.go b/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_disk/cluster_disk.go index c9b59cfe..d6cde915 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_disk/cluster_disk.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_disk/cluster_disk.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("ceph", "cluster_disk", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("ceph", "cluster_disk", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -34,7 +35,10 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The ceph cluster_disk metricset is beta") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_health/cluster_health.go b/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_health/cluster_health.go index 64cf3ebd..0fe98a60 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_health/cluster_health.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_health/cluster_health.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("ceph", "cluster_health", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("ceph", "cluster_health", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -34,7 +35,10 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The ceph cluster_health metricset is beta") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_status/cluster_status.go b/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_status/cluster_status.go index 2ebf64e2..f04631a2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_status/cluster_status.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/cluster_status/cluster_status.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("ceph", "cluster_status", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("ceph", "cluster_status", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -34,7 +35,10 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The ceph cluster_status metricset is beta") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/monitor_health/monitor_health.go b/vendor/github.com/elastic/beats/metricbeat/module/ceph/monitor_health/monitor_health.go index 8499d797..b37e7040 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/monitor_health/monitor_health.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/monitor_health/monitor_health.go @@ -21,9 +21,10 @@ var ( ) func init() { - 
if err := mb.Registry.AddMetricSet("ceph", "monitor_health", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("ceph", "monitor_health", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -34,7 +35,10 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The ceph monitor_health metricset is beta") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_df/osd_df.go b/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_df/osd_df.go index 7f9dbb7f..af4efc1a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_df/osd_df.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_df/osd_df.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("ceph", "osd_df", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("ceph", "osd_df", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -34,7 +35,10 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The ceph osd_df metricset is experimental") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_tree/osd_tree.go b/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_tree/osd_tree.go index c54cf5f9..2e232aa1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_tree/osd_tree.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/osd_tree/osd_tree.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("ceph", "osd_tree", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("ceph", "osd_tree", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -34,7 +35,10 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The ceph osd_tree metricset is beta") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/pool_disk/pool_disk.go b/vendor/github.com/elastic/beats/metricbeat/module/ceph/pool_disk/pool_disk.go index f430fc17..61db26b8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/pool_disk/pool_disk.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/pool_disk/pool_disk.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("ceph", "pool_disk", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("ceph", "pool_disk", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -34,7 +35,10 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Experimental("The ceph pool_disk metricset is experimental") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git 
a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.reference.yml new file mode 100644 index 00000000..1d74f195 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: couchbase + metricsets: ["bucket", "cluster", "node"] + period: 10s + hosts: ["localhost:8091"] + enabled: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.yml index 2691d77a..760115c7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/config.yml @@ -1,4 +1,4 @@ - module: couchbase - metricsets: ["bucket", "cluster", "node"] - period: 10s hosts: ["localhost:8091"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/docs.asciidoc index bd76133c..08f8895a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/docs.asciidoc @@ -1,2 +1,2 @@ This module periodically fetches metrics from https://www.couchbase.com/[Couchbase] -servers. +servers. The default metricsets are `bucket`, `cluster`, `node`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket.go index 41c21179..32783ae3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/bucket/bucket.go @@ -23,9 +23,10 @@ var ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("couchbase", "bucket", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("couchbase", "bucket", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -38,9 +39,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The couchbase bucket metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster.go index 0e3444a9..7190fea5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/cluster/cluster.go @@ -23,9 +23,10 @@ var ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("couchbase", "cluster", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("couchbase", "cluster", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -38,9 +39,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The couchbase cluster metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node.go index c99d6963..3f40985f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/node/node.go @@ -23,9 +23,10 @@ var ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("couchbase", "node", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("couchbase", "node", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -38,9 +39,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The couchbase node metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/config.reference.yml new file mode 100644 index 00000000..22ee422c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/config.reference.yml @@ -0,0 +1,22 @@ +- module: docker + metricsets: + - "container" + - "cpu" + - "diskio" + - "healthcheck" + - "info" + #- "image" + - "memory" + - "network" + hosts: ["unix:///var/run/docker.sock"] + period: 10s + enabled: true + + # Replace dots in labels with `_`. Set to false to keep dots + labels.dedot: true + + # To connect to Docker over TLS you must specify a client and CA certificate. + #ssl: + #certificate_authority: "/etc/pki/root/ca.pem" + #certificate: "/etc/pki/client/cert.pem" + #key: "/etc/pki/client/cert.key" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/config.yml index 87049258..5b25e17f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/config.yml @@ -1,10 +1,5 @@ - module: docker - metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] hosts: ["unix:///var/run/docker.sock"] - period: 10s - # To connect to Docker over TLS you must specify a client and CA certificate. - #ssl: - #certificate_authority: "/etc/pki/root/ca.pem" - #certificate: "/etc/pki/client/cert.pem" - #key: "/etc/pki/client/cert.key" + # Replace dots in labels with `_`. 
Set to false to keep dots + labels.dedot: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc index 537fc59c..eaed9458 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/_meta/docs.asciidoc @@ -1,4 +1,4 @@ -This module fetches metrics from https://www.docker.com/[Docker] containers. +This module fetches metrics from https://www.docker.com/[Docker] containers. The default metricsets are: `container`, `cpu`, `diskio`, `healthcheck`, `info`, `memory` and `network`. The `image` metricset is not enabled by default. The Docker module is currently not tested on Windows. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/config.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/config.go index cd82d2c2..aa1ccc18 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/config.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/config.go @@ -1,7 +1,15 @@ package docker type Config struct { - TLS *TLSConfig `config:"ssl"` + TLS *TLSConfig `config:"ssl"` + DeDot bool `config:"labels.dedot"` +} + +// DefaultConfig returns default module config +func DefaultConfig() Config { + return Config{ + DeDot: true, + } } type TLSConfig struct { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go index 22ea3fc1..eb6ca0b0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go @@ -1,7 +1,10 @@ package container import ( - dc "github.com/fsouza/go-dockerclient" + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" @@ -9,19 +12,21 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("docker", "container", New, docker.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "container", New, + mb.WithHostParser(docker.HostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { mb.BaseMetricSet - dockerClient *dc.Client + dockerClient *client.Client + dedot bool } // New creates a new instance of the docker container MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - config := docker.Config{} + config := docker.DefaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -34,6 +39,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, dockerClient: client, + dedot: config.DeDot, }, nil } @@ -41,9 +47,9 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // This is based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/list-containers. func (m *MetricSet) Fetch() ([]common.MapStr, error) { // Fetch a list of all containers. 
- containers, err := m.dockerClient.ListContainers(dc.ListContainersOptions{}) + containers, err := m.dockerClient.ContainerList(context.Background(), types.ContainerListOptions{}) if err != nil { return nil, err } - return eventsMapping(containers), nil + return eventsMapping(containers, m.dedot), nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/data.go index 492c7b39..75b9363f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/data.go @@ -3,28 +3,28 @@ package container import ( "time" + "github.com/docker/docker/api/types" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) -func eventsMapping(containersList []dc.APIContainers) []common.MapStr { +func eventsMapping(containersList []types.Container, dedot bool) []common.MapStr { myEvents := []common.MapStr{} for _, container := range containersList { - myEvents = append(myEvents, eventMapping(&container)) + myEvents = append(myEvents, eventMapping(&container, dedot)) } return myEvents } -func eventMapping(cont *dc.APIContainers) common.MapStr { +func eventMapping(cont *types.Container, dedot bool) common.MapStr { event := common.MapStr{ "created": common.Time(time.Unix(cont.Created, 0)), "id": cont.ID, "name": docker.ExtractContainerName(cont.Names), "command": cont.Command, "image": cont.Image, - "ip_addresses": extractIPAddresses(cont.Networks), + "ip_addresses": extractIPAddresses(cont.NetworkSettings), "size": common.MapStr{ "root_fs": cont.SizeRootFs, "rw": cont.SizeRw, @@ -32,7 +32,7 @@ func eventMapping(cont *dc.APIContainers) common.MapStr { "status": cont.Status, } - labels := docker.DeDotLabels(cont.Labels) + labels := docker.DeDotLabels(cont.Labels, dedot) if len(labels) > 0 { event["labels"] = labels @@ -41,7 +41,7 @@ func eventMapping(cont *dc.APIContainers) common.MapStr { return event } -func extractIPAddresses(networks dc.NetworkList) []string { +func extractIPAddresses(networks *types.SummaryNetworkSettings) []string { ipAddresses := make([]string, 0, len(networks.Networks)) for _, network := range networks.Networks { ipAddresses = append(ipAddresses, network.IPAddress) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json index c2afbc08..5589d985 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json @@ -6,33 +6,55 @@ }, "docker": { "container": { - "id": "0b8f7cd09c17df586a8e56f5b2d45dd00735d44fcc2665776d07d33b027b4f09", + "id": "bbdcbc751e7eda7a50e773b4a5d8c2800af664f835ef9c0ad6bbb9c160c50d83", "labels": { - "com_docker_compose_config-hash": "1c0f606c0e592b110060ad4e159cf25d361fb8fa", + "build-date": "20170911", + "com_docker_compose_config-hash": "371e477ae73fd44b19bcbcf0d4feaf4de9adfb69137a8f16d09cff749724ca99", "com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", - "com_docker_compose_service": "phpfpm", - "com_docker_compose_version": "1.5.0" + "com_docker_compose_service": "elasticsearch", + "com_docker_compose_version": "1.21.0", + "license": "GPLv2", + "maintainer": "Elastic Docker Team 
\u003cdocker@elastic.co\u003e", + "name": "CentOS Base Image", + "vendor": "CentOS" }, - "name": "metricbeat_phpfpm_1" + "name": "metricbeat_elasticsearch_1" }, "cpu": { - "core": {}, + "core": { + "0": { + "pct": 0.01012583677581864, + "ticks": 9528454911 + }, + "1": { + "pct": 0.0069975889168765746, + "ticks": 11916812270 + }, + "2": { + "pct": 0.001329603022670025, + "ticks": 10894346015 + }, + "3": { + "pct": 0.0018390015113350126, + "ticks": 10847487614 + } + }, "kernel": { - "pct": 0, - "ticks": 0 + "pct": 0.010075566750629723, + "ticks": 1960000000 }, "system": { - "pct": 0, - "ticks": 0 + "pct": 4, + "ticks": 1092479570000000 }, "total": { - "pct": 0 + "pct": 0.02029203022670025 }, "user": { - "pct": 0, - "ticks": 0 + "pct": 0.010075566750629723, + "ticks": 40960000000 } } }, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml index 14e2190d..2189fcd9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml @@ -8,15 +8,16 @@ type: scaled_float format: percentage description: > - The system kernel consumed by the Docker server. + Percentage of time in kernel space. - name: kernel.ticks type: long description: > - CPU kernel ticks. + CPU ticks in kernel space. - name: system.pct type: scaled_float format: percentage description: > + Percentage of total CPU time in the system. - name: system.ticks type: long description: > @@ -25,17 +26,24 @@ type: scaled_float format: percentage description: > + Percentage of time in user space. - name: user.ticks type: long description: > - CPU user ticks + CPU ticks in user space. - name: total.pct type: scaled_float format: percentage description: > Total CPU usage. - # TODO: how to document cpu list? - #- name: core - # type: array - # description: > - # Dictionary with list of cpu and usage inside. + - name: core.*.pct + type: object + object_type: scaled_float + format: percentage + description: > + Percentage of CPU time in this core. + - name: core.*.ticks + type: object + object_type: long + description: > + Number of CPU ticks in this core. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go index 697d4fca..ab7c8d19 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go @@ -1,28 +1,30 @@ package cpu import ( + "github.com/docker/docker/client" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) func init() { - if err := mb.Registry.AddMetricSet("docker", "cpu", New, docker.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "cpu", New, + mb.WithHostParser(docker.HostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { mb.BaseMetricSet cpuService *CPUService - dockerClient *dc.Client + dockerClient *client.Client + dedot bool } // New creates a new instance of the docker cpu MetricSet. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - config := docker.Config{} + config := docker.DefaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -36,6 +38,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { BaseMetricSet: base, dockerClient: client, cpuService: &CPUService{}, + dedot: config.DeDot, }, nil } @@ -46,6 +49,6 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return nil, err } - formattedStats := m.cpuService.getCPUStatsList(stats) + formattedStats := m.cpuService.getCPUStatsList(stats, m.dedot) return eventsMapping(formattedStats), nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go index 92399231..db09f1a9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go @@ -4,36 +4,48 @@ import ( "reflect" "testing" - "github.com/elastic/beats/libbeat/common" + "github.com/docker/docker/api/types" - dc "github.com/fsouza/go-dockerclient" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/module/docker" ) var cpuService CPUService -var statsList = make([]dc.Stats, 3) + +func cpuUsageFor(stats types.StatsJSON) *cpuUsage { + u := cpuUsage{ + Stat: &docker.Stat{Stats: stats}, + systemDelta: 1000000000, // Nanoseconds in a second + } + if len(stats.CPUStats.CPUUsage.PercpuUsage) == 0 { + u.cpus = 1 + } + return &u +} func TestCPUService_PerCpuUsage(t *testing.T) { oldPerCpuValuesTest := [][]uint64{{1, 9, 9, 5}, {1, 2, 3, 4}, {0, 0, 0, 0}} newPerCpuValuesTest := [][]uint64{{100000001, 900000009, 900000009, 500000005}, {101, 202, 303, 404}, {0, 0, 0, 0}} + var statsList = make([]types.StatsJSON, 3) for index := range statsList { statsList[index].PreCPUStats.CPUUsage.PercpuUsage = oldPerCpuValuesTest[index] statsList[index].CPUStats.CPUUsage.PercpuUsage = newPerCpuValuesTest[index] } testCase := []struct { - given dc.Stats + given types.StatsJSON expected common.MapStr }{ {statsList[0], common.MapStr{ - "0": common.MapStr{"pct": float64(0.10)}, - "1": common.MapStr{"pct": float64(0.90)}, - "2": common.MapStr{"pct": float64(0.90)}, - "3": common.MapStr{"pct": float64(0.50)}, + "0": common.MapStr{"pct": float64(0.40)}, + "1": common.MapStr{"pct": float64(3.60)}, + "2": common.MapStr{"pct": float64(3.60)}, + "3": common.MapStr{"pct": float64(2.00)}, }}, {statsList[1], common.MapStr{ - "0": common.MapStr{"pct": float64(0.0000001)}, - "1": common.MapStr{"pct": float64(0.0000002)}, - "2": common.MapStr{"pct": float64(0.0000003)}, - "3": common.MapStr{"pct": float64(0.0000004)}, + "0": common.MapStr{"pct": float64(0.0000004)}, + "1": common.MapStr{"pct": float64(0.0000008)}, + "2": common.MapStr{"pct": float64(0.0000012)}, + "3": common.MapStr{"pct": float64(0.0000016)}, }}, {statsList[2], common.MapStr{ "0": common.MapStr{"pct": float64(0)}, @@ -43,7 +55,8 @@ func TestCPUService_PerCpuUsage(t *testing.T) { }}, } for _, tt := range testCase { - out := perCpuUsage(&tt.given) + usage := cpuUsageFor(tt.given) + out := usage.PerCPU() // Remove ticks for test for _, s := range out { s.(common.MapStr).Delete("ticks") @@ -57,12 +70,13 @@ func TestCPUService_PerCpuUsage(t *testing.T) { func TestCPUService_TotalUsage(t *testing.T) { oldTotalValuesTest := []uint64{100, 50, 10} totalValuesTest := []uint64{2, 500000050, 10} + var statsList = make([]types.StatsJSON, 3) for index := range 
statsList { statsList[index].PreCPUStats.CPUUsage.TotalUsage = oldTotalValuesTest[index] statsList[index].CPUStats.CPUUsage.TotalUsage = totalValuesTest[index] } testCase := []struct { - given dc.Stats + given types.StatsJSON expected float64 }{ {statsList[0], -1}, @@ -70,7 +84,8 @@ func TestCPUService_TotalUsage(t *testing.T) { {statsList[2], 0}, } for _, tt := range testCase { - out := totalUsage(&tt.given) + usage := cpuUsageFor(tt.given) + out := usage.Total() if tt.expected != out { t.Errorf("totalUsage(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.TotalUsage, out, tt.expected) } @@ -80,12 +95,13 @@ func TestCPUService_TotalUsage(t *testing.T) { func TestCPUService_UsageInKernelmode(t *testing.T) { usageOldValuesTest := []uint64{100, 10, 500000050} usageValuesTest := []uint64{3, 500000010, 500000050} + var statsList = make([]types.StatsJSON, 3) for index := range statsList { statsList[index].PreCPUStats.CPUUsage.UsageInKernelmode = usageOldValuesTest[index] statsList[index].CPUStats.CPUUsage.UsageInKernelmode = usageValuesTest[index] } testCase := []struct { - given dc.Stats + given types.StatsJSON expected float64 }{ {statsList[0], -1}, @@ -93,7 +109,8 @@ func TestCPUService_UsageInKernelmode(t *testing.T) { {statsList[2], 0}, } for _, tt := range testCase { - out := usageInKernelmode(&tt.given) + usage := cpuUsageFor(tt.given) + out := usage.InKernelMode() if out != tt.expected { t.Errorf("usageInKernelmode(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.UsageInKernelmode, out, tt.expected) } @@ -103,12 +120,13 @@ func TestCPUService_UsageInKernelmode(t *testing.T) { func TestCPUService_UsageInUsermode(t *testing.T) { usageOldValuesTest := []uint64{0, 1965, 500} usageValuesTest := []uint64{500000000, 325, 1000000500} + var statsList = make([]types.StatsJSON, 3) for index := range statsList { statsList[index].PreCPUStats.CPUUsage.UsageInUsermode = usageOldValuesTest[index] statsList[index].CPUStats.CPUUsage.UsageInUsermode = usageValuesTest[index] } testCase := []struct { - given dc.Stats + given types.StatsJSON expected float64 }{ {statsList[0], 0.50}, @@ -116,7 +134,8 @@ func TestCPUService_UsageInUsermode(t *testing.T) { {statsList[2], 1}, } for _, tt := range testCase { - out := usageInUsermode(&tt.given) + usage := cpuUsageFor(tt.given) + out := usage.InUserMode() if out != tt.expected { t.Errorf("usageInUsermode(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.UsageInUsermode, out, tt.expected) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go index 4fc77483..0d61b78d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go @@ -6,17 +6,8 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) -type CPUCalculator interface { - perCpuUsage(stats *dc.Stats) common.MapStr - totalUsage(stats *dc.Stats) float64 - usageInKernelmode(stats *dc.Stats) float64 - usageInUsermode(stats *dc.Stats) float64 -} - type CPUStats struct { Time common.Time Container *docker.Container @@ -36,72 +27,101 @@ func NewCpuService() *CPUService { return &CPUService{} } -func (c *CPUService) getCPUStatsList(rawStats []docker.Stat) []CPUStats { +func (c *CPUService) getCPUStatsList(rawStats []docker.Stat, dedot bool) []CPUStats { formattedStats := []CPUStats{} for 
_, stats := range rawStats { - formattedStats = append(formattedStats, c.getCpuStats(&stats)) + formattedStats = append(formattedStats, c.getCPUStats(&stats, dedot)) } return formattedStats } -func (c *CPUService) getCpuStats(myRawStat *docker.Stat) CPUStats { +func (c *CPUService) getCPUStats(myRawStat *docker.Stat, dedot bool) CPUStats { + usage := cpuUsage{Stat: myRawStat} + return CPUStats{ Time: common.Time(myRawStat.Stats.Read), - Container: docker.NewContainer(&myRawStat.Container), - PerCpuUsage: perCpuUsage(&myRawStat.Stats), - TotalUsage: totalUsage(&myRawStat.Stats), + Container: docker.NewContainer(myRawStat.Container, dedot), + PerCpuUsage: usage.PerCPU(), + TotalUsage: usage.Total(), UsageInKernelmode: myRawStat.Stats.CPUStats.CPUUsage.UsageInKernelmode, - UsageInKernelmodePercentage: usageInKernelmode(&myRawStat.Stats), + UsageInKernelmodePercentage: usage.InKernelMode(), UsageInUsermode: myRawStat.Stats.CPUStats.CPUUsage.UsageInUsermode, - UsageInUsermodePercentage: usageInUsermode(&myRawStat.Stats), - SystemUsage: myRawStat.Stats.CPUStats.SystemCPUUsage, - SystemUsagePercentage: systemUsage(&myRawStat.Stats), + UsageInUsermodePercentage: usage.InUserMode(), + SystemUsage: myRawStat.Stats.CPUStats.SystemUsage, + SystemUsagePercentage: usage.System(), } } -func perCpuUsage(stats *dc.Stats) common.MapStr { +// TODO: These helper should be merged with the cpu helper in system/cpu + +type cpuUsage struct { + *docker.Stat + + cpus int + systemDelta uint64 +} + +func (u *cpuUsage) CPUs() int { + if u.cpus == 0 { + u.cpus = len(u.Stats.CPUStats.CPUUsage.PercpuUsage) + } + return u.cpus +} + +func (u *cpuUsage) SystemDelta() uint64 { + if u.systemDelta == 0 { + u.systemDelta = u.Stats.CPUStats.SystemUsage - u.Stats.PreCPUStats.SystemUsage + } + return u.systemDelta +} + +func (u *cpuUsage) PerCPU() common.MapStr { var output common.MapStr - if len(stats.CPUStats.CPUUsage.PercpuUsage) == len(stats.PreCPUStats.CPUUsage.PercpuUsage) { + if len(u.Stats.CPUStats.CPUUsage.PercpuUsage) == len(u.Stats.PreCPUStats.CPUUsage.PercpuUsage) { output = common.MapStr{} - for index := range stats.CPUStats.CPUUsage.PercpuUsage { + for index := range u.Stats.CPUStats.CPUUsage.PercpuUsage { cpu := common.MapStr{} - cpu["pct"] = calculateLoad(stats.CPUStats.CPUUsage.PercpuUsage[index], stats.PreCPUStats.CPUUsage.PercpuUsage[index]) - cpu["ticks"] = stats.CPUStats.CPUUsage.PercpuUsage[index] + cpu["pct"] = u.calculatePercentage( + u.Stats.CPUStats.CPUUsage.PercpuUsage[index], + u.Stats.PreCPUStats.CPUUsage.PercpuUsage[index]) + cpu["ticks"] = u.Stats.CPUStats.CPUUsage.PercpuUsage[index] output[strconv.Itoa(index)] = cpu } } return output } -// TODO: These helper should be merged with the cpu helper in system/cpu - -func totalUsage(stats *dc.Stats) float64 { - return calculateLoad(stats.CPUStats.CPUUsage.TotalUsage, stats.PreCPUStats.CPUUsage.TotalUsage) +func (u *cpuUsage) Total() float64 { + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.TotalUsage, u.Stats.PreCPUStats.CPUUsage.TotalUsage) } -func usageInKernelmode(stats *dc.Stats) float64 { - return calculateLoad(stats.CPUStats.CPUUsage.UsageInKernelmode, stats.PreCPUStats.CPUUsage.UsageInKernelmode) +func (u *cpuUsage) InKernelMode() float64 { + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInKernelmode, u.Stats.PreCPUStats.CPUUsage.UsageInKernelmode) } -func usageInUsermode(stats *dc.Stats) float64 { - return calculateLoad(stats.CPUStats.CPUUsage.UsageInUsermode, stats.PreCPUStats.CPUUsage.UsageInUsermode) +func (u *cpuUsage) 
InUserMode() float64 { + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInUsermode, u.Stats.PreCPUStats.CPUUsage.UsageInUsermode) } -func systemUsage(stats *dc.Stats) float64 { - return calculateLoad(stats.CPUStats.SystemCPUUsage, stats.PreCPUStats.SystemCPUUsage) +func (u *cpuUsage) System() float64 { + return u.calculatePercentage(u.Stats.CPUStats.SystemUsage, u.Stats.PreCPUStats.SystemUsage) } // This function is meant to calculate the % CPU time change between two successive readings. // The "oldValue" refers to the CPU statistics of the last read. // Time here is expressed by second and not by nanoseconde. // The main goal is to expose the %, in the same way, it's displayed by docker Client. -func calculateLoad(newValue uint64, oldValue uint64) float64 { - value := float64(newValue) - float64(oldValue) - if value < 0 { +func (u *cpuUsage) calculatePercentage(newValue uint64, oldValue uint64) float64 { + if newValue < oldValue { logp.Err("Error calculating CPU time change for docker module: new stats value (%v) is lower than the old one(%v)", newValue, oldValue) return -1 } - return value / float64(1000000000) + value := newValue - oldValue + if value == 0 || u.SystemDelta() == 0 { + return 0 + } + + return float64(uint64(u.CPUs())*value) / float64(u.SystemDelta()) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/mock_cpu.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/mock_cpu.go deleted file mode 100644 index 9a2e2337..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/mock_cpu.go +++ /dev/null @@ -1,36 +0,0 @@ -package cpu - -import ( - "github.com/elastic/beats/libbeat/common" - - dc "github.com/fsouza/go-dockerclient" - "github.com/stretchr/testify/mock" -) - -type MockCPUCalculator struct { - mock.Mock -} - -func (_m *MockCPUCalculator) PerCpuUsage(stats *dc.Stats) common.MapStr { - ret := _m.Called() - ret0, _ := ret[0].(common.MapStr) - return ret0 -} - -func (_m *MockCPUCalculator) TotalUsage(stats *dc.Stats) float64 { - ret := _m.Called() - ret0, _ := ret[0].(float64) - return ret0 -} - -func (_m *MockCPUCalculator) UsageInKernelmode(stats *dc.Stats) float64 { - ret := _m.Called() - ret0, _ := ret[0].(float64) - return ret0 -} - -func (_m *MockCPUCalculator) UsageInUsermode(stats *dc.Stats) float64 { - ret := _m.Called() - ret0, _ := ret[0].(float64) - return ret0 -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/data.json index a5201a46..3adb0cc1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/data.json @@ -6,20 +6,40 @@ }, "docker": { "container": { - "id": "af5b4d9b5a792bf883e3f0cb55413aec8148d75a2bbd5723680f7ad8dc5545f6", + "id": "59c5d4838454f38c7d67fdacec7a32ca4476a062ef00edf69ba6be9117cf2e7b", "labels": { - "com_docker_compose_config-hash": "b41e43e99efa9215f20761ad78899d65e4119b55", + "build-date": "20170911", + "com_docker_compose_config-hash": "a2bcfc1f8c99a4be6920deda8da8d4d06fe0d10d51623b8e1dbcc8228e96926c", "com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", - "com_docker_compose_service": "nginx", - "com_docker_compose_version": "1.5.0" + "com_docker_compose_service": "elasticsearch", + "com_docker_compose_version": "1.20.1", + "license": "GPLv2", + "maintainer": "Elastic 
Docker Team \u003cdocker@elastic.co\u003e", + "name": "CentOS Base Image", + "vendor": "CentOS" }, - "name": "metricbeat_nginx_1" + "name": "metricbeat_elasticsearch_1" }, "diskio": { + "read": { + "bytes": 61964288, + "ops": 3284, + "rate": 0 + }, "reads": 0, + "summary": { + "bytes": 63479808, + "ops": 3500, + "rate": 0 + }, "total": 0, + "write": { + "bytes": 1515520, + "ops": 216, + "rate": 0 + }, "writes": 0 } }, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml index 5e68b616..c4f5d990 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/_meta/fields.yml @@ -4,15 +4,72 @@ Disk I/O metrics. release: ga fields: + - name: read + type: group + description: > + Accumulated reads during the life of the container + fields: + - name: ops + type: long + description: > + Number of reads during the life of the container + - name: bytes + type: long + format: bytes + description: > + Bytes read during the life of the container + - name: rate + type: long + description: > + Number of current reads per second - name: reads type: scaled_float + deprecated: true + description: > + Number of current reads per second + - name: write + type: group description: > - Number of reads. + Accumulated writes during the life of the container + fields: + - name: ops + type: long + description: > + Number of writes during the life of the container + - name: bytes + type: long + format: bytes + description: > + Bytes written during the life of the container + - name: rate + type: long + description: > + Number of current writes per second - name: writes type: scaled_float + deprecated: true + description: > + Number of current writes per second + - name: summary + type: group description: > - Number of writes. + Accumulated reads and writes during the life of the container + fields: + - name: ops + type: long + description: > + Number of I/O operations during the life of the container + - name: bytes + type: long + format: bytes + description: > + Bytes read and written during the life of the container + - name: rate + type: long + description: > + Number of current operations per second - name: total type: scaled_float + deprecated: true description: > - Number of reads and writes combined. 
+ Number of reads and writes per second diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/data.go index e73691b2..b87313ef 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/data.go @@ -21,6 +21,21 @@ func eventMapping(stats *BlkioStats) common.MapStr { "reads": stats.reads, "writes": stats.writes, "total": stats.totals, + "read": common.MapStr{ + "ops": stats.serviced.reads, + "bytes": stats.servicedBytes.reads, + "rate": stats.reads, + }, + "write": common.MapStr{ + "ops": stats.serviced.writes, + "bytes": stats.servicedBytes.writes, + "rate": stats.writes, + }, + "summary": common.MapStr{ + "ops": stats.serviced.totals, + "bytes": stats.servicedBytes.totals, + "rate": stats.totals, + }, } return event diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go index c24a15a0..61f148af 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go @@ -1,28 +1,30 @@ package diskio import ( + "github.com/docker/docker/client" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) func init() { - if err := mb.Registry.AddMetricSet("docker", "diskio", New, docker.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "diskio", New, + mb.WithHostParser(docker.HostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { mb.BaseMetricSet - blkioService *BLkioService - dockerClient *dc.Client + blkioService *BlkioService + dockerClient *client.Client + dedot bool } // New create a new instance of the docker diskio MetricSet. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - config := docker.Config{} + config := docker.DefaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -35,9 +37,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, dockerClient: client, - blkioService: &BLkioService{ - BlkioSTatsPerContainer: make(map[string]BlkioRaw), - }, + blkioService: NewBlkioService(), + dedot: config.DeDot, }, nil } @@ -48,6 +49,6 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return nil, err } - formattedStats := m.blkioService.getBlkioStatsList(stats) + formattedStats := m.blkioService.getBlkioStatsList(stats, m.dedot) return eventsMapping(formattedStats), nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio_test.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio_test.go index dca77799..9cd4108d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio_test.go @@ -5,19 +5,20 @@ import ( "testing" "time" - "github.com/elastic/beats/metricbeat/module/docker" + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" - dc "github.com/fsouza/go-dockerclient" + "github.com/elastic/beats/metricbeat/module/docker" ) -var blkioService BLkioService +var blkioService BlkioService var oldBlkioRaw = make([]BlkioRaw, 3) var newBlkioRaw = make([]BlkioRaw, 3) func TestDeltaMultipleContainers(t *testing.T) { var apiContainer1 docker.Stat var apiContainer2 docker.Stat - metrics := dc.BlkioStatsEntry{ + metrics := types.BlkioStatEntry{ Major: 123, Minor: 123, Op: "Total", @@ -31,30 +32,30 @@ func TestDeltaMultipleContainers(t *testing.T) { "Id": "8dfafdbc3a41", "Names": ["container1"] }]` - var containers []dc.APIContainers + var containers []types.Container err := json.Unmarshal([]byte(jsonContainers), &containers) if err != nil { t.Fatal(err) } apiContainer1.Stats.Read = time.Now() - apiContainer1.Container = containers[0] - apiContainer1.Stats.BlkioStats.IOServicedRecursive = append(apiContainer1.Stats.BlkioStats.IOServicedRecursive, metrics) + apiContainer1.Container = &containers[0] + apiContainer1.Stats.BlkioStats.IoServicedRecursive = append(apiContainer1.Stats.BlkioStats.IoServicedRecursive, metrics) apiContainer2.Stats.Read = time.Now() - apiContainer2.Container = containers[1] - apiContainer2.Stats.BlkioStats.IOServicedRecursive = append(apiContainer2.Stats.BlkioStats.IOServicedRecursive, metrics) + apiContainer2.Container = &containers[1] + apiContainer2.Stats.BlkioStats.IoServicedRecursive = append(apiContainer2.Stats.BlkioStats.IoServicedRecursive, metrics) dockerStats := []docker.Stat{apiContainer1, apiContainer2} - stats := blkioService.getBlkioStatsList(dockerStats) + stats := blkioService.getBlkioStatsList(dockerStats, true) totals := make([]float64, 2) for _, stat := range stats { totals[0] = stat.totals } - dockerStats[0].Stats.BlkioStats.IOServicedRecursive[0].Value = 1000 + dockerStats[0].Stats.BlkioStats.IoServicedRecursive[0].Value = 1000 dockerStats[0].Stats.Read = dockerStats[0].Stats.Read.Add(time.Second * 10) - dockerStats[1].Stats.BlkioStats.IOServicedRecursive[0].Value = 1000 + dockerStats[1].Stats.BlkioStats.IoServicedRecursive[0].Value = 1000 dockerStats[1].Stats.Read = dockerStats[0].Stats.Read.Add(time.Second * 10) - stats = blkioService.getBlkioStatsList(dockerStats) + stats = 
blkioService.getBlkioStatsList(dockerStats, true) for _, stat := range stats { totals[1] = stat.totals if stat.totals < totals[0] { @@ -63,10 +64,10 @@ func TestDeltaMultipleContainers(t *testing.T) { } dockerStats[0].Stats.Read = dockerStats[0].Stats.Read.Add(time.Second * 15) - dockerStats[0].Stats.BlkioStats.IOServicedRecursive[0].Value = 2000 - dockerStats[1].Stats.BlkioStats.IOServicedRecursive[0].Value = 2000 + dockerStats[0].Stats.BlkioStats.IoServicedRecursive[0].Value = 2000 + dockerStats[1].Stats.BlkioStats.IoServicedRecursive[0].Value = 2000 dockerStats[1].Stats.Read = dockerStats[0].Stats.Read.Add(time.Second * 15) - stats = blkioService.getBlkioStatsList(dockerStats) + stats = blkioService.getBlkioStatsList(dockerStats, true) for _, stat := range stats { if stat.totals < totals[1] || stat.totals < totals[0] { t.Errorf("getBlkioStatsList(%v) => %v, want value bigger than %v", dockerStats, stat.totals, totals[1]) @@ -77,7 +78,7 @@ func TestDeltaMultipleContainers(t *testing.T) { func TestDeltaOneContainer(t *testing.T) { var apiContainer docker.Stat - metrics := dc.BlkioStatsEntry{ + metrics := types.BlkioStatEntry{ Major: 123, Minor: 123, Op: "Total", @@ -88,34 +89,34 @@ func TestDeltaOneContainer(t *testing.T) { "Id": "8dfafdbc3a40", "Names": ["container"] }` - var containers dc.APIContainers + var containers types.Container err := json.Unmarshal([]byte(jsonContainers), &containers) if err != nil { t.Fatal(err) } apiContainer.Stats.Read = time.Now() - apiContainer.Container = containers - apiContainer.Stats.BlkioStats.IOServicedRecursive = append(apiContainer.Stats.BlkioStats.IOServicedRecursive, metrics) + apiContainer.Container = &containers + apiContainer.Stats.BlkioStats.IoServicedRecursive = append(apiContainer.Stats.BlkioStats.IoServicedRecursive, metrics) dockerStats := []docker.Stat{apiContainer} - stats := blkioService.getBlkioStatsList(dockerStats) + stats := blkioService.getBlkioStatsList(dockerStats, true) totals := make([]float64, 2) for _, stat := range stats { totals[0] = stat.totals } - dockerStats[0].Stats.BlkioStats.IOServicedRecursive[0].Value = 1000 + dockerStats[0].Stats.BlkioStats.IoServicedRecursive[0].Value = 1000 dockerStats[0].Stats.Read = dockerStats[0].Stats.Read.Add(time.Second * 10) - stats = blkioService.getBlkioStatsList(dockerStats) + stats = blkioService.getBlkioStatsList(dockerStats, true) for _, stat := range stats { if stat.totals < totals[0] { t.Errorf("getBlkioStatsList(%v) => %v, want value bigger than %v", dockerStats, stat.totals, totals[0]) } } - dockerStats[0].Stats.BlkioStats.IOServicedRecursive[0].Value = 2000 + dockerStats[0].Stats.BlkioStats.IoServicedRecursive[0].Value = 2000 dockerStats[0].Stats.Read = dockerStats[0].Stats.Read.Add(time.Second * 15) - stats = blkioService.getBlkioStatsList(dockerStats) + stats = blkioService.getBlkioStatsList(dockerStats, true) for _, stat := range stats { if stat.totals < totals[1] || stat.totals < totals[0] { t.Errorf("getBlkioStatsList(%v) => %v, want value bigger than %v", dockerStats, stat.totals, totals[1]) @@ -203,3 +204,53 @@ func setTime(index int) { oldBlkioRaw[index].Time = time.Now() newBlkioRaw[index].Time = oldBlkioRaw[index].Time.Add(time.Duration(2000000000)) } + +func TestGetBlkioStats(t *testing.T) { + start := time.Now() + later := start.Add(10 * time.Second) + + blkioService := BlkioService{ + map[string]BlkioRaw{ + "cebada": {Time: start, reads: 100, writes: 200, totals: 300}, + }, + } + + dockerStats := &docker.Stat{ + Container: &types.Container{ + ID: "cebada", + Names: 
[]string{"test"}, + }, + Stats: types.StatsJSON{Stats: types.Stats{ + Read: later, + BlkioStats: types.BlkioStats{ + IoServicedRecursive: []types.BlkioStatEntry{ + {Major: 1, Minor: 1, Op: "Read", Value: 100}, + {Major: 1, Minor: 1, Op: "Write", Value: 200}, + {Major: 1, Minor: 1, Op: "Total", Value: 300}, + {Major: 1, Minor: 2, Op: "Read", Value: 50}, + {Major: 1, Minor: 2, Op: "Write", Value: 100}, + {Major: 1, Minor: 2, Op: "Total", Value: 150}, + }, + IoServiceBytesRecursive: []types.BlkioStatEntry{ + {Major: 1, Minor: 1, Op: "Read", Value: 1000}, + {Major: 1, Minor: 1, Op: "Write", Value: 2000}, + {Major: 1, Minor: 1, Op: "Total", Value: 3000}, + {Major: 1, Minor: 2, Op: "Read", Value: 500}, + {Major: 1, Minor: 2, Op: "Write", Value: 1000}, + {Major: 1, Minor: 2, Op: "Total", Value: 1500}, + }, + }, + }}, + } + + stats := blkioService.getBlkioStats(dockerStats, true) + assert.Equal(t, float64(5), stats.reads) + assert.Equal(t, float64(10), stats.writes) + assert.Equal(t, float64(15), stats.totals) + assert.Equal(t, + BlkioRaw{Time: later, reads: 150, writes: 300, totals: 450}, + stats.serviced) + assert.Equal(t, + BlkioRaw{Time: later, reads: 1500, writes: 3000, totals: 4500}, + stats.servicedBytes) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/helper.go index 6f71c6fd..4aa1ab37 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/helper.go @@ -3,9 +3,9 @@ package diskio import ( "time" - "github.com/elastic/beats/metricbeat/module/docker" + "github.com/docker/docker/api/types" - dc "github.com/fsouza/go-dockerclient" + "github.com/elastic/beats/metricbeat/module/docker" ) type BlkioStats struct { @@ -14,12 +14,9 @@ type BlkioStats struct { reads float64 writes float64 totals float64 -} -type BlkioCalculator interface { - getReadPs(old *BlkioRaw, new *BlkioRaw) float64 - getWritePs(old *BlkioRaw, new *BlkioRaw) float64 - getTotalPs(old *BlkioRaw, new *BlkioRaw) float64 + serviced BlkioRaw + servicedBytes BlkioRaw } type BlkioRaw struct { @@ -29,72 +26,86 @@ type BlkioRaw struct { totals uint64 } -type BLkioService struct { - BlkioSTatsPerContainer map[string]BlkioRaw +// BlkioService is a helper to collect and calculate disk I/O metrics +type BlkioService struct { + lastStatsPerContainer map[string]BlkioRaw } -func (io *BLkioService) getBlkioStatsList(rawStats []docker.Stat) []BlkioStats { - formattedStats := []BlkioStats{} - if io.BlkioSTatsPerContainer == nil { - io.BlkioSTatsPerContainer = make(map[string]BlkioRaw) +// NewBlkioService builds a new initialized BlkioService +func NewBlkioService() *BlkioService { + return &BlkioService{ + lastStatsPerContainer: make(map[string]BlkioRaw), } +} + +func (io *BlkioService) getBlkioStatsList(rawStats []docker.Stat, dedot bool) []BlkioStats { + formattedStats := []BlkioStats{} + + statsPerContainer := make(map[string]BlkioRaw) for _, myRawStats := range rawStats { - formattedStats = append(formattedStats, io.getBlkioStats(&myRawStats)) + stats := io.getBlkioStats(&myRawStats, dedot) + statsPerContainer[myRawStats.Container.ID] = stats.serviced + formattedStats = append(formattedStats, stats) } + io.lastStatsPerContainer = statsPerContainer return formattedStats } -func (io *BLkioService) getBlkioStats(myRawStat *docker.Stat) BlkioStats { - newBlkioStats := io.getNewStats(myRawStat.Stats.Read, 
myRawStat.Stats.BlkioStats.IOServicedRecursive) - oldBlkioStats, exist := io.BlkioSTatsPerContainer[myRawStat.Container.ID] +func (io *BlkioService) getBlkioStats(myRawStat *docker.Stat, dedot bool) BlkioStats { + newBlkioStats := io.getNewStats(myRawStat.Stats.Read, myRawStat.Stats.BlkioStats.IoServicedRecursive) + bytesBlkioStats := io.getNewStats(myRawStat.Stats.Read, myRawStat.Stats.BlkioStats.IoServiceBytesRecursive) myBlkioStats := BlkioStats{ Time: myRawStat.Stats.Read, - Container: docker.NewContainer(&myRawStat.Container), + Container: docker.NewContainer(myRawStat.Container, dedot), + + serviced: newBlkioStats, + servicedBytes: bytesBlkioStats, } + oldBlkioStats, exist := io.lastStatsPerContainer[myRawStat.Container.ID] if exist { myBlkioStats.reads = io.getReadPs(&oldBlkioStats, &newBlkioStats) myBlkioStats.writes = io.getWritePs(&oldBlkioStats, &newBlkioStats) myBlkioStats.totals = io.getTotalPs(&oldBlkioStats, &newBlkioStats) } - io.BlkioSTatsPerContainer[myRawStat.Container.ID] = newBlkioStats - return myBlkioStats } -func (io *BLkioService) getNewStats(time time.Time, blkioEntry []dc.BlkioStatsEntry) BlkioRaw { +func (io *BlkioService) getNewStats(time time.Time, blkioEntry []types.BlkioStatEntry) BlkioRaw { stats := BlkioRaw{ Time: time, reads: 0, writes: 0, totals: 0, } + for _, myEntry := range blkioEntry { - if myEntry.Op == "Write" { - stats.writes = myEntry.Value - } else if myEntry.Op == "Read" { - stats.reads = myEntry.Value - } else if myEntry.Op == "Total" { - stats.totals = myEntry.Value + switch myEntry.Op { + case "Write": + stats.writes += myEntry.Value + case "Read": + stats.reads += myEntry.Value + case "Total": + stats.totals += myEntry.Value } } return stats } -func (io *BLkioService) getReadPs(old *BlkioRaw, new *BlkioRaw) float64 { +func (io *BlkioService) getReadPs(old *BlkioRaw, new *BlkioRaw) float64 { duration := new.Time.Sub(old.Time) return calculatePerSecond(duration, old.reads, new.reads) } -func (io *BLkioService) getWritePs(old *BlkioRaw, new *BlkioRaw) float64 { +func (io *BlkioService) getWritePs(old *BlkioRaw, new *BlkioRaw) float64 { duration := new.Time.Sub(old.Time) return calculatePerSecond(duration, old.writes, new.writes) } -func (io *BLkioService) getTotalPs(old *BlkioRaw, new *BlkioRaw) float64 { +func (io *BlkioService) getTotalPs(old *BlkioRaw, new *BlkioRaw) float64 { duration := new.Time.Sub(old.Time) return calculatePerSecond(duration, old.totals, new.totals) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go index eeadf1d7..02408c36 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go @@ -1,16 +1,24 @@ package docker import ( + "context" + "encoding/json" + "net/http" "sync" "time" - "github.com/elastic/beats/libbeat/logp" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" + + "github.com/elastic/beats/libbeat/common/docker" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" - - "github.com/fsouza/go-dockerclient" ) +// Select Docker API version +const dockerAPIVersion = "1.22" + var HostParser = parse.URLHostParserBuilder{DefaultScheme: "tcp"}.Build() func init() { @@ -33,24 +41,34 @@ func NewModule(base mb.BaseModule) (mb.Module, error) { } type Stat struct { - Container docker.APIContainers - Stats docker.Stats + Container 
*types.Container + Stats types.StatsJSON } -func NewDockerClient(endpoint string, config Config) (*docker.Client, error) { - var err error - var client *docker.Client - - if !config.TLS.IsEnabled() { - client, err = docker.NewClient(endpoint) - } else { - client, err = docker.NewTLSClient( - endpoint, - config.TLS.Certificate, - config.TLS.Key, - config.TLS.CA, - ) +// NewDockerClient initializes and returns a new Docker client +func NewDockerClient(endpoint string, config Config) (*client.Client, error) { + var httpClient *http.Client + + if config.TLS.IsEnabled() { + options := tlsconfig.Options{ + CAFile: config.TLS.CA, + CertFile: config.TLS.Certificate, + KeyFile: config.TLS.Key, + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + httpClient = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + } } + + client, err := docker.NewClient(endpoint, httpClient, nil) if err != nil { return nil, err } @@ -59,8 +77,10 @@ func NewDockerClient(endpoint string, config Config) (*docker.Client, error) { } // FetchStats returns a list of running containers with all related stats inside -func FetchStats(client *docker.Client, timeout time.Duration) ([]Stat, error) { - containers, err := client.ListContainers(docker.ListContainersOptions{}) +func FetchStats(client *client.Client, timeout time.Duration) ([]Stat, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + containers, err := client.ContainerList(ctx, types.ContainerListOptions{}) if err != nil { return nil, err } @@ -72,9 +92,9 @@ func FetchStats(client *docker.Client, timeout time.Duration) ([]Stat, error) { wg.Add(len(containers)) for _, container := range containers { - go func(container docker.APIContainers) { + go func(container types.Container) { defer wg.Done() - statsQueue <- exportContainerStats(client, &container, timeout) + statsQueue <- exportContainerStats(ctx, client, &container) }(container) } @@ -99,38 +119,18 @@ func FetchStats(client *docker.Client, timeout time.Duration) ([]Stat, error) { // This is currently very inefficient as docker calculates the average for each request, // means each request will take at least 2s: https://github.com/docker/docker/blob/master/cli/command/container/stats_helpers.go#L148 // Getting all stats at once is implemented here: https://github.com/docker/docker/pull/25361 -func exportContainerStats(client *docker.Client, container *docker.APIContainers, timeout time.Duration) Stat { - var wg sync.WaitGroup +func exportContainerStats(ctx context.Context, client *client.Client, container *types.Container) Stat { var event Stat + event.Container = container - statsC := make(chan *docker.Stats) - errC := make(chan error, 1) - statsOptions := docker.StatsOptions{ - ID: container.ID, - Stats: statsC, - Stream: false, - Timeout: timeout, + containerStats, err := client.ContainerStats(ctx, container.ID, false) + if err != nil { + return event } - wg.Add(2) - go func() { - defer wg.Done() - errC <- client.Stats(statsOptions) - close(errC) - }() - go func() { - defer wg.Done() - stats := <-statsC - err := <-errC - if stats != nil && err == nil { - event.Stats = *stats - event.Container = *container - } else if err == nil && stats == nil { - logp.Warn("Container stopped when recovering stats: %v", container.ID) - } else { - logp.Err("An error occurred while getting docker stats: %v", err) - } - }() - wg.Wait() + defer containerStats.Body.Close() + decoder := json.NewDecoder(containerStats.Body) + 
decoder.Decode(&event.Stats) + return event } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/data.go index 607185fd..930f573b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/data.go @@ -1,9 +1,10 @@ package healthcheck import ( + "context" "strings" - dc "github.com/fsouza/go-dockerclient" + "github.com/docker/docker/api/types" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" @@ -11,7 +12,7 @@ import ( "github.com/elastic/beats/metricbeat/module/docker" ) -func eventsMapping(containers []dc.APIContainers, m *MetricSet) []common.MapStr { +func eventsMapping(containers []types.Container, m *MetricSet) []common.MapStr { var events []common.MapStr for _, container := range containers { event := eventMapping(&container, m) @@ -22,12 +23,12 @@ func eventsMapping(containers []dc.APIContainers, m *MetricSet) []common.MapStr return events } -func eventMapping(cont *dc.APIContainers, m *MetricSet) common.MapStr { +func eventMapping(cont *types.Container, m *MetricSet) common.MapStr { if !hasHealthCheck(cont.Status) { return nil } - container, err := m.dockerClient.InspectContainer(cont.ID) + container, err := m.dockerClient.ContainerInspect(context.TODO(), cont.ID) if err != nil { logp.Err("Error inpsecting container %v: %v", cont.ID, err) return nil @@ -41,7 +42,7 @@ func eventMapping(cont *dc.APIContainers, m *MetricSet) common.MapStr { return common.MapStr{ mb.ModuleDataKey: common.MapStr{ - "container": docker.NewContainer(cont).ToMapStr(), + "container": docker.NewContainer(cont, m.dedot).ToMapStr(), }, "status": container.State.Health.Status, "failingstreak": container.State.Health.FailingStreak, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go index ea461f12..6827f45f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go @@ -1,7 +1,10 @@ package healthcheck import ( - dc "github.com/fsouza/go-dockerclient" + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" @@ -9,19 +12,21 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("docker", "healthcheck", New, docker.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "healthcheck", New, + mb.WithHostParser(docker.HostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { mb.BaseMetricSet - dockerClient *dc.Client + dockerClient *client.Client + dedot bool } // New creates a new instance of the docker healthcheck MetricSet. 
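The stats collection shown above now goes through the official client's one-shot `ContainerStats` call followed by a plain JSON decode into `types.StatsJSON`. A minimal sketch of that flow under the same assumptions, with an illustrative container ID, a hypothetical timeout, and explicit error handling (the patch itself discards the `Decode` error):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// fetchOnce grabs a single (non-streaming) stats sample for one container,
// roughly what the patched exportContainerStats does. The container ID and
// timeout below are placeholders, not values from the patch.
func fetchOnce(cli *client.Client, id string) (types.StatsJSON, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	var s types.StatsJSON
	resp, err := cli.ContainerStats(ctx, id, false) // stream=false: one sample
	if err != nil {
		return s, err
	}
	defer resp.Body.Close()
	err = json.NewDecoder(resp.Body).Decode(&s)
	return s, err
}

func main() {
	cli, err := client.NewEnvClient() // reads DOCKER_HOST and friends
	if err != nil {
		panic(err)
	}
	s, err := fetchOnce(cli, "someContainerID")
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Read, s.MemoryStats.Usage)
}
```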
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - config := docker.Config{} + config := docker.DefaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -34,6 +39,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, dockerClient: client, + dedot: config.DeDot, }, nil } @@ -41,7 +47,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // This is based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/list-containers. func (m *MetricSet) Fetch() ([]common.MapStr, error) { // Fetch a list of all containers. - containers, err := m.dockerClient.ListContainers(dc.ListContainersOptions{}) + containers, err := m.dockerClient.ContainerList(context.TODO(), types.ContainerListOptions{}) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/helper.go index 285d8e2c..0b81b81d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/helper.go @@ -3,9 +3,10 @@ package docker import ( "strings" - "github.com/fsouza/go-dockerclient" + "github.com/docker/docker/api/types" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/safemapstr" ) type Container struct { @@ -26,11 +27,14 @@ func (c *Container) ToMapStr() common.MapStr { return m } -func NewContainer(container *docker.APIContainers) *Container { +// NewContainer converts Docker API container to an internal structure, it applies +// dedot to container labels if dedot is true, or stores them in a nested way if it's +// false +func NewContainer(container *types.Container, dedot bool) *Container { return &Container{ ID: container.ID, Name: ExtractContainerName(container.Names), - Labels: DeDotLabels(container.Labels), + Labels: DeDotLabels(container.Labels, dedot), } } @@ -48,14 +52,20 @@ func ExtractContainerName(names []string) string { } // DeDotLabels returns a new common.MapStr containing a copy of the labels -// where the dots in each label name have been changed to an underscore. -func DeDotLabels(labels map[string]string) common.MapStr { +// where the dots have been converted into nested structure, avoiding +// possible mapping errors +func DeDotLabels(labels map[string]string, dedot bool) common.MapStr { outputLabels := common.MapStr{} for k, v := range labels { - // This is necessary so that ES does not interpret '.' fields as new - // nested JSON objects, and also makes this compatible with ES 2.x. - label := common.DeDot(k) - outputLabels.Put(label, v) + if dedot { + // This is necessary so that ES does not interpret '.' fields as new + // nested JSON objects, and also makes this compatible with ES 2.x. 
+ label := common.DeDot(k) + outputLabels.Put(label, v) + } else { + // If we don't dedot we ensure there are no mapping errors with safemapstr + safemapstr.Put(outputLabels, k, v) + } } return outputLabels diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/helper_test.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/helper_test.go new file mode 100644 index 00000000..9e63a74b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/helper_test.go @@ -0,0 +1,43 @@ +package docker + +import ( + "testing" + + "github.com/elastic/beats/libbeat/common" + + "github.com/stretchr/testify/assert" +) + +func TestDeDotLabels(t *testing.T) { + labels := map[string]string{ + "com.docker.swarm.task": "", + "com.docker.swarm.task.id": "1", + "com.docker.swarm.task.name": "foobar", + } + + t.Run("dedot enabled", func(t *testing.T) { + result := DeDotLabels(labels, true) + assert.Equal(t, common.MapStr{ + "com_docker_swarm_task": "", + "com_docker_swarm_task_id": "1", + "com_docker_swarm_task_name": "foobar", + }, result) + }) + + t.Run("dedot disabled", func(t *testing.T) { + result := DeDotLabels(labels, false) + assert.Equal(t, common.MapStr{ + "com": common.MapStr{ + "docker": common.MapStr{ + "swarm": common.MapStr{ + "task": common.MapStr{ + "value": "", + "id": "1", + "name": "foobar", + }, + }, + }, + }, + }, result) + }) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/image/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/image/data.go index 0dc24770..2ab91cd6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/image/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/image/data.go @@ -3,21 +3,21 @@ package image import ( "time" + "github.com/docker/docker/api/types" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) -func eventsMapping(imagesList []dc.APIImages) []common.MapStr { +func eventsMapping(imagesList []types.ImageSummary, dedot bool) []common.MapStr { events := []common.MapStr{} for _, image := range imagesList { - events = append(events, eventMapping(&image)) + events = append(events, eventMapping(&image, dedot)) } return events } -func eventMapping(image *dc.APIImages) common.MapStr { +func eventMapping(image *types.ImageSummary, dedot bool) common.MapStr { event := common.MapStr{ "id": common.MapStr{ "current": image.ID, @@ -30,8 +30,8 @@ func eventMapping(image *dc.APIImages) common.MapStr { }, "tags": image.RepoTags, } - labels := docker.DeDotLabels(image.Labels) - if len(labels) > 0 { + if len(image.Labels) > 0 { + labels := docker.DeDotLabels(image.Labels, dedot) event["labels"] = labels } return event diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image.go index f0598376..7df1423b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image.go @@ -1,19 +1,22 @@ package image import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("docker", "image", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "image", New, + mb.WithHostParser(docker.HostParser), + ) } // MetricSet type defines all fields of the MetricSet @@ -22,14 +25,15 @@ func init() { // multiple fetch calls. type MetricSet struct { mb.BaseMetricSet - dockerClient *dc.Client + dockerClient *client.Client + dedot bool } // New create a new instance of the MetricSet // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - config := docker.Config{} + config := docker.DefaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -42,6 +46,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, dockerClient: client, + dedot: config.DeDot, }, nil } @@ -49,10 +54,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // It returns the event which is then forward to the output. In case of an error, a // descriptive error must be returned. func (m *MetricSet) Fetch() ([]common.MapStr, error) { - images, err := m.dockerClient.ListImages(dc.ListImagesOptions{}) + images, err := m.dockerClient.ImageList(context.TODO(), types.ImageListOptions{}) if err != nil { return nil, err } - return eventsMapping(images), nil + return eventsMapping(images, m.dedot), nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/data.go index f41e8307..93d9865e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/data.go @@ -1,12 +1,12 @@ package info import ( - "github.com/elastic/beats/libbeat/common" + "github.com/docker/docker/api/types" - dc "github.com/fsouza/go-dockerclient" + "github.com/elastic/beats/libbeat/common" ) -func eventMapping(info *dc.DockerInfo) common.MapStr { +func eventMapping(info *types.Info) common.MapStr { event := common.MapStr{ "id": info.ID, "containers": common.MapStr{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/info.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/info.go index 28de54b4..12fe1932 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/info/info.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/info/info.go @@ -1,22 +1,25 @@ package info import ( + "context" + + "github.com/docker/docker/client" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) func init() { - if err := mb.Registry.AddMetricSet("docker", "info", New, docker.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "info", New, + mb.WithHostParser(docker.HostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { mb.BaseMetricSet - dockerClient *dc.Client + dockerClient *client.Client } // New create a new instance of the docker info MetricSet. @@ -40,10 +43,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch creates a new event for info. 
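The image and info metricsets in this hunk now call the typed client API directly (`ImageList`, `Info`) with a context. A rough standalone sketch of those two calls, using `client.NewEnvClient` and a real timeout where the patch itself passes `context.TODO()`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Counterpart of the image metricset's ImageList call.
	images, err := cli.ImageList(ctx, types.ImageListOptions{})
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags, len(img.Labels))
	}

	// Counterpart of the info metricset's Info call; it returns a value,
	// which is why the patch passes &info to eventMapping.
	info, err := cli.Info(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.ID, info.Containers, info.Images)
}
```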
// See: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information func (m *MetricSet) Fetch() (common.MapStr, error) { - info, err := m.dockerClient.Info() + info, err := m.dockerClient.Info(context.TODO()) if err != nil { return nil, err } - return eventMapping(info), nil + return eventMapping(&info), nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/helper.go index 35624166..f91560b3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/helper.go @@ -19,24 +19,25 @@ type MemoryData struct { type MemoryService struct{} -func (s *MemoryService) getMemoryStatsList(rawStats []docker.Stat) []MemoryData { +func (s *MemoryService) getMemoryStatsList(rawStats []docker.Stat, dedot bool) []MemoryData { formattedStats := []MemoryData{} for _, myRawStats := range rawStats { - formattedStats = append(formattedStats, s.GetMemoryStats(myRawStats)) + formattedStats = append(formattedStats, s.getMemoryStats(myRawStats, dedot)) } return formattedStats } -func (s *MemoryService) GetMemoryStats(myRawStat docker.Stat) MemoryData { +func (s *MemoryService) getMemoryStats(myRawStat docker.Stat, dedot bool) MemoryData { + totalRSS := myRawStat.Stats.MemoryStats.Stats["total_rss"] return MemoryData{ Time: common.Time(myRawStat.Stats.Read), - Container: docker.NewContainer(&myRawStat.Container), + Container: docker.NewContainer(myRawStat.Container, dedot), Failcnt: myRawStat.Stats.MemoryStats.Failcnt, Limit: myRawStat.Stats.MemoryStats.Limit, MaxUsage: myRawStat.Stats.MemoryStats.MaxUsage, - TotalRss: myRawStat.Stats.MemoryStats.Stats.TotalRss, - TotalRssP: float64(myRawStat.Stats.MemoryStats.Stats.TotalRss) / float64(myRawStat.Stats.MemoryStats.Limit), + TotalRss: totalRSS, + TotalRssP: float64(totalRSS) / float64(myRawStat.Stats.MemoryStats.Limit), Usage: myRawStat.Stats.MemoryStats.Usage, UsageP: float64(myRawStat.Stats.MemoryStats.Usage) / float64(myRawStat.Stats.MemoryStats.Limit), } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go index 3d676332..7bb0e663 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go @@ -1,28 +1,30 @@ package memory import ( + "github.com/docker/docker/client" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) func init() { - if err := mb.Registry.AddMetricSet("docker", "memory", New, docker.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "memory", New, + mb.WithHostParser(docker.HostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { mb.BaseMetricSet memoryService *MemoryService - dockerClient *dc.Client + dockerClient *client.Client + dedot bool } // New creates a new instance of the docker memory MetricSet. 
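The memory helper above now reads cgroup counters from the `Stats` map on `types.MemoryStats` rather than from typed fields, so `total_rss` becomes a plain key lookup. A small sketch of that arithmetic; the sample values loosely follow the ones used in memory_test.go:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// rssPercentage mirrors the shape of getMemoryStats: total_rss is looked up
// in a map of cgroup counters and divided by the memory limit.
func rssPercentage(m types.MemoryStats) (uint64, float64) {
	totalRSS := m.Stats["total_rss"] // zero value if the counter is absent
	return totalRSS, float64(totalRSS) / float64(m.Limit)
}

func main() {
	// Hand-built sample, not real daemon output.
	m := types.MemoryStats{
		MaxUsage: 1,
		Usage:    2,
		Failcnt:  3,
		Limit:    4,
		Stats:    map[string]uint64{"total_rss": 5},
	}
	rss, pct := rssPercentage(m)
	fmt.Printf("total_rss=%d pct=%.2f\n", rss, pct)
}
```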
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - config := docker.Config{} + config := docker.DefaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -36,6 +38,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { BaseMetricSet: base, memoryService: &MemoryService{}, dockerClient: client, + dedot: config.DeDot, }, nil } @@ -46,6 +49,6 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return nil, err } - memoryStats := m.memoryService.getMemoryStatsList(stats) + memoryStats := m.memoryService.getMemoryStatsList(stats, m.dedot) return eventsMapping(memoryStats), nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory_test.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory_test.go index d4abad30..765f2ccc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" + "github.com/docker/docker/api/types" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/module/docker" - dc "github.com/fsouza/go-dockerclient" "github.com/stretchr/testify/assert" ) @@ -16,35 +17,41 @@ func TestMemoryService_GetMemoryStats(t *testing.T) { //Container + dockerstats containerID := "containerID" labels := map[string]string{ - "label1": "val1", - "label2": "val2", + "label1": "val1", + "label2": "val2", + "label2.foo": "val3", } - container := dc.APIContainers{ + container := types.Container{ ID: containerID, Image: "image", Command: "command", Created: 123789, Status: "Up", - Ports: []dc.APIPort{{PrivatePort: 1234, PublicPort: 4567, Type: "portType", IP: "123.456.879.1"}}, SizeRw: 123, SizeRootFs: 456, Names: []string{"/name1", "name1/fake"}, Labels: labels, - Networks: dc.NetworkList{}, } memoryService := &MemoryService{} memorystats := getMemoryStats(time.Now(), 1) memoryRawStats := docker.Stat{} - memoryRawStats.Container = container + memoryRawStats.Container = &container memoryRawStats.Stats = memorystats + totalRSS := memorystats.MemoryStats.Stats["total_rss"] expectedEvent := common.MapStr{ "_module": common.MapStr{ "container": common.MapStr{ - "id": containerID, - "name": "name1", - "labels": docker.DeDotLabels(labels), + "id": containerID, + "name": "name1", + "labels": common.MapStr{ + "label1": "val1", + "label2": common.MapStr{ + "foo": "val3", + "value": "val2", + }, + }, }, }, "fail": common.MapStr{ @@ -52,8 +59,8 @@ func TestMemoryService_GetMemoryStats(t *testing.T) { }, "limit": memorystats.MemoryStats.Limit, "rss": common.MapStr{ - "total": memorystats.MemoryStats.Stats.TotalRss, - "pct": float64(memorystats.MemoryStats.Stats.TotalRss) / float64(memorystats.MemoryStats.Limit), + "total": totalRSS, + "pct": float64(totalRSS) / float64(memorystats.MemoryStats.Limit), }, "usage": common.MapStr{ "total": memorystats.MemoryStats.Usage, @@ -62,7 +69,7 @@ func TestMemoryService_GetMemoryStats(t *testing.T) { }, } //WHEN - rawStats := memoryService.GetMemoryStats(memoryRawStats) + rawStats := memoryService.getMemoryStats(memoryRawStats, false) event := eventMapping(&rawStats) //THEN assert.True(t, equalEvent(expectedEvent, event)) @@ -70,58 +77,22 @@ func TestMemoryService_GetMemoryStats(t *testing.T) { t.Logf(" returned : %v", event) } -func getMemoryStats(read time.Time, number uint64) dc.Stats { - type memoryStatsStructure struct { - Stats struct { - TotalPgmafault uint64 
`json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" toml:"total_pgmafault,omitempty"` - Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty" toml:"cache,omitempty"` - MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" toml:"mapped_file,omitempty"` - TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" toml:"total_inactive_file,omitempty"` - Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" toml:"pgpgout,omitempty"` - Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty" toml:"rss,omitempty"` - TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" toml:"total_mapped_file,omitempty"` - Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty" toml:"writeback,omitempty"` - Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty" toml:"unevictable,omitempty"` - Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" toml:"pgpgin,omitempty"` - TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" toml:"total_unevictable,omitempty"` - Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" toml:"pgmajfault,omitempty"` - TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty" toml:"total_rss,omitempty"` - TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" toml:"total_rss_huge,omitempty"` - TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty" toml:"total_writeback,omitempty"` - TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" toml:"total_inactive_anon,omitempty"` - RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" toml:"rss_huge,omitempty"` - HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" toml:"hierarchical_memory_limit,omitempty"` - TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" toml:"total_pgfault,omitempty"` - TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty" toml:"total_active_file,omitempty"` - ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty" toml:"active_anon,omitempty"` - TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" toml:"total_active_anon,omitempty"` - TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" toml:"total_pgpgout,omitempty"` - TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty" toml:"total_cache,omitempty"` - InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"` - ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty" toml:"active_file,omitempty"` - Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty" toml:"pgfault,omitempty"` - InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" toml:"inactive_file,omitempty"` - TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"` - HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"` - Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"` - } 
`json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"` - MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"` - Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"` - Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty" toml:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty" toml:"limit,omitempty"` - } +func getMemoryStats(read time.Time, number uint64) types.StatsJSON { - myMemoryStats := dc.Stats{ - Read: read, - MemoryStats: memoryStatsStructure{ - MaxUsage: number, - Usage: number * 2, - Failcnt: number * 3, - Limit: number * 4, + myMemoryStats := types.StatsJSON{ + Stats: types.Stats{ + Read: read, + MemoryStats: types.MemoryStats{ + MaxUsage: number, + Usage: number * 2, + Failcnt: number * 3, + Limit: number * 4, + Stats: map[string]uint64{}, + }, }, } - myMemoryStats.MemoryStats.Stats.TotalRss = number * 5 + myMemoryStats.MemoryStats.Stats["total_rss"] = number * 5 return myMemoryStats } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/data.json index f7b4524c..ddb3c827 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/data.json @@ -6,19 +6,16 @@ }, "docker": { "container": { - "id": "da57ef738524e5a4f6ae17b477f134d30719603db7b96d2c01b6f34010412e66", + "id": "452523bf833fd9fd1a8425135b720de4cb9b5a32096deac5b52a97e97bb6d16d", "labels": { - "build-date": "20170911", - "com_docker_compose_config-hash": "13a74b89a90a6fdcb6bbbc7eb37b7cb0615bdaf8", + "com_docker_compose_config-hash": "68a840a9e1c606ca1026492e50620e139ca342c585f330025a90f39a5fd32538", "com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", "com_docker_compose_service": "elasticsearch", - "com_docker_compose_version": "1.5.0", - "license": "GPLv2", + "com_docker_compose_version": "1.21.0", "maintainer": "Elastic Docker Team \u003cdocker@elastic.co\u003e", - "name": "CentOS Base Image", - "vendor": "CentOS" + "org_label-schema_schema-version": "= 1.0 org.label-schema.name=CentOS Base Image org.label-schema.vendor=CentOS org.label-schema.license=GPLv2 org.label-schema.build-date=20180402" }, "name": "metricbeat_elasticsearch_1" }, @@ -29,12 +26,24 @@ "errors": 0, "packets": 0 }, + "inbound": { + "bytes": 61694097, + "dropped": 0, + "errors": 0, + "packets": 714036 + }, "interface": "eth0", "out": { "bytes": 0, "dropped": 0, "errors": 0, "packets": 0 + }, + "outbound": { + "bytes": 69114459, + "dropped": 0, + "errors": 0, + "packets": 713985 } } }, @@ -44,4 +53,4 @@ "name": "network", "rtt": 115 } -} \ No newline at end of file +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml index cb10ad1b..f66b8492 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/_meta/fields.yml @@ -11,8 +11,9 @@ Network interface name. - name: in type: group + deprecated: true description: > - Incoming network stats. + Incoming network stats per second. fields: - name: bytes type: long @@ -33,8 +34,9 @@ Total number of incoming packets. 
- name: out type: group + deprecated: true description: > - Outgoing network stats. + Outgoing network stats per second. fields: - name: bytes type: long @@ -53,3 +55,49 @@ type: long description: > Total number of outgoing packets. + - name: inbound + type: group + deprecated: true + description: > + Incoming network stats since the container started. + fields: + - name: bytes + type: long + format: bytes + description: > + Total number of incoming bytes. + - name: dropped + type: long + description: > + Total number of dropped incoming packets. + - name: errors + type: long + description: > + Total errors on incoming packets. + - name: packets + type: long + description: > + Total number of incoming packets. + - name: outbound + type: group + deprecated: true + description: > + Outgoing network stats since the container started. + fields: + - name: bytes + type: long + format: bytes + description: > + Total number of outgoing bytes. + - name: dropped + type: long + description: > + Total number of dropped outgoing packets. + - name: errors + type: long + description: > + Total errors on outgoing packets. + - name: packets + type: long + description: > + Total number of outgoing packets. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go index 0844e276..bb4bc9ff 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go @@ -5,32 +5,44 @@ import ( "github.com/elastic/beats/metricbeat/mb" ) -func eventsMapping(netsStatsList []NetStats) []common.MapStr { - myEvents := []common.MapStr{} +func eventsMapping(r mb.ReporterV2, netsStatsList []NetStats) { for _, netsStats := range netsStatsList { - myEvents = append(myEvents, eventMapping(&netsStats)) + eventMapping(r, &netsStats) } - return myEvents } -func eventMapping(stats *NetStats) common.MapStr { - event := common.MapStr{ - mb.ModuleDataKey: common.MapStr{ +func eventMapping(r mb.ReporterV2, stats *NetStats) { + // Deprecated fields + r.Event(mb.Event{ + ModuleFields: common.MapStr{ "container": stats.Container.ToMapStr(), }, - "interface": stats.NameInterface, - "in": common.MapStr{ - "bytes": stats.RxBytes, - "dropped": stats.RxDropped, - "errors": stats.RxErrors, - "packets": stats.RxPackets, + MetricSetFields: common.MapStr{ + "interface": stats.NameInterface, + "in": common.MapStr{ + "bytes": stats.RxBytes, + "dropped": stats.RxDropped, + "errors": stats.RxErrors, + "packets": stats.RxPackets, + }, + "out": common.MapStr{ + "bytes": stats.TxBytes, + "dropped": stats.TxDropped, + "errors": stats.TxErrors, + "packets": stats.TxPackets, + }, + "inbound": common.MapStr{ + "bytes": stats.Total.RxBytes, + "dropped": stats.Total.RxDropped, + "errors": stats.Total.RxErrors, + "packets": stats.Total.RxPackets, + }, + "outbound": common.MapStr{ + "bytes": stats.Total.TxBytes, + "dropped": stats.Total.TxDropped, + "errors": stats.Total.TxErrors, + "packets": stats.Total.TxPackets, + }, }, - "out": common.MapStr{ - "bytes": stats.TxBytes, - "dropped": stats.TxDropped, - "errors": stats.TxErrors, - "packets": stats.TxPackets, - }, - } - return event + }) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go index 7d357845..cc22146b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go @@ -3,9 +3,9 @@ package network import ( "time" - "github.com/elastic/beats/metricbeat/module/docker" + "github.com/docker/docker/api/types" - dc "github.com/fsouza/go-dockerclient" + "github.com/elastic/beats/metricbeat/module/docker" ) type NetService struct { @@ -47,27 +47,29 @@ type NetStats struct { TxDropped float64 TxErrors float64 TxPackets float64 + Total *types.NetworkStats } -func (n *NetService) getNetworkStatsPerContainer(rawStats []docker.Stat) []NetStats { +func (n *NetService) getNetworkStatsPerContainer(rawStats []docker.Stat, dedot bool) []NetStats { formattedStats := []NetStats{} for _, myStats := range rawStats { for nameInterface, rawnNetStats := range myStats.Stats.Networks { - formattedStats = append(formattedStats, n.getNetworkStats(nameInterface, &rawnNetStats, &myStats)) + formattedStats = append(formattedStats, n.getNetworkStats(nameInterface, &rawnNetStats, &myStats, dedot)) } } return formattedStats } -func (n *NetService) getNetworkStats(nameInterface string, rawNetStats *dc.NetworkStats, myRawstats *docker.Stat) NetStats { +func (n *NetService) getNetworkStats(nameInterface string, rawNetStats *types.NetworkStats, myRawstats *docker.Stat, dedot bool) NetStats { newNetworkStats := createNetRaw(myRawstats.Stats.Read, rawNetStats) oldNetworkStat, exist := n.NetworkStatPerContainer[myRawstats.Container.ID][nameInterface] netStats := NetStats{ - Container: docker.NewContainer(&myRawstats.Container), + Container: docker.NewContainer(myRawstats.Container, dedot), Time: myRawstats.Stats.Read, NameInterface: nameInterface, + Total: rawNetStats, } if exist { @@ -88,7 +90,7 @@ func (n *NetService) getNetworkStats(nameInterface string, rawNetStats *dc.Netwo return netStats } -func createNetRaw(time time.Time, stats *dc.NetworkStats) NetRaw { +func createNetRaw(time time.Time, stats *types.NetworkStats) NetRaw { return NetRaw{ Time: time, RxBytes: stats.RxBytes, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go index cbe3442b..568bf6fb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go @@ -1,28 +1,29 @@ package network import ( - "github.com/elastic/beats/libbeat/common" + "github.com/docker/docker/client" + "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" - - dc "github.com/fsouza/go-dockerclient" ) func init() { - if err := mb.Registry.AddMetricSet("docker", "network", New, docker.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("docker", "network", New, + mb.WithHostParser(docker.HostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { mb.BaseMetricSet netService *NetService - dockerClient *dc.Client + dockerClient *client.Client + dedot bool } // New creates a new instance of the docker network MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - config := docker.Config{} + config := docker.DefaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -38,16 +39,18 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { netService: &NetService{ NetworkStatPerContainer: make(map[string]map[string]NetRaw), }, + dedot: config.DeDot, }, nil } // Fetch methods creates a list of network events for each container. 
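Both the network helper above and the blkio helper earlier derive per-second figures by diffing two raw samples and dividing by the elapsed time, while the untouched raw totals feed the new inbound/outbound fields. A simplified stand-in for that calculation; the module's own calculatePerSecond helper may treat edge cases differently:

```go
package main

import (
	"fmt"
	"time"
)

// perSecond turns two raw counter readings taken at different times into a
// per-second rate. Counter resets and non-positive durations yield zero.
func perSecond(duration time.Duration, oldValue, newValue uint64) float64 {
	if duration <= 0 || newValue < oldValue {
		return 0
	}
	return float64(newValue-oldValue) / duration.Seconds()
}

func main() {
	prevTime := time.Now().Add(-10 * time.Second)
	now := time.Now()

	var prevRxBytes, curRxBytes uint64 = 1000, 6000
	rate := perSecond(now.Sub(prevTime), prevRxBytes, curRxBytes)
	fmt.Printf("rx bytes/s: %.1f\n", rate) // roughly 500 bytes/s
}
```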
-func (m *MetricSet) Fetch() ([]common.MapStr, error) { +func (m *MetricSet) Fetch(r mb.ReporterV2) { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return nil, err + r.Error(err) + return } - formattedStats := m.netService.getNetworkStatsPerContainer(stats) - return eventsMapping(formattedStats), nil + formattedStats := m.netService.getNetworkStatsPerContainer(stats, m.dedot) + eventsMapping(r, formattedStats) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network_integration_test.go index 55bdfe4f..fa380e20 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network_integration_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network_integration_test.go @@ -9,8 +9,8 @@ import ( ) func TestData(t *testing.T) { - f := mbtest.NewEventsFetcher(t, getConfig()) - err := mbtest.WriteEvents(f, t) + ms := mbtest.NewReportingMetricSetV2(t, getConfig()) + err := mbtest.WriteEventsReporterV2(ms, t) if err != nil { t.Fatal("write", err) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/AUTHORS deleted file mode 100644 index ca0dd999..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ /dev/null @@ -1,169 +0,0 @@ -# This is the official list of go-dockerclient authors for copyright purposes. - -Abhishek Chanda -Adam Bell-Hanssen -Adrien Kohlbecker -Aldrin Leal -Alex Dadgar -Alfonso Acosta -André Carvalho -Andreas Jaekle -Andrew Snodgrass -Andrews Medina -Andrey Sibiryov -Andy Goldstein -Anirudh Aithal -Antonio Murdaca -Artem Sidorenko -Arthur Rodrigues -Ben Marini -Ben McCann -Ben Parees -Benno van den Berg -Bradley Cicenas -Brendan Fosberry -Brian Lalor -Brian P. Hamachek -Brian Palmer -Bryan Boreham -Burke Libbey -Carlos Diaz-Padron -Carson A -Cássio Botaro -Cesar Wong -Cezar Sa Espinola -Changping Chen -Cheah Chu Yeow -cheneydeng -Chris Bednarski -Christian Stewart -CMGS -Colin Hebert -Craig Jellick -Dan Williams -Daniel, Dao Quang Minh -Daniel Garcia -Daniel Hiltgen -Darren Shepherd -Dave Choi -David Huie -Dawn Chen -Dinesh Subhraveti -Drew Wells -Ed -Elias G. 
Schneevoigt -Erez Horev -Eric Anderson -Eric Mountain -Erwin van Eyk -Ethan Mosbaugh -Ewout Prangsma -Fabio Rehm -Fatih Arslan -Felipe Oliveira -Flavia Missi -Florent Aide -Francisco Souza -Frank Groeneveld -George Moura -Grégoire Delattre -Guilherme Rezende -Guillermo Álvarez Fernández -Harry Zhang -He Simei -Ivan Mikushin -James Bardin -James Nugent -Jari Kolehmainen -Jason Wilder -Jawher Moussa -Jean-Baptiste Dalido -Jeff Mitchell -Jeffrey Hulten -Jen Andre -Jérôme Laurens -Jim Minter -Johan Euphrosine -John Hughes -Jorge Marey -Julian Einwag -Kamil Domanski -Karan Misra -Ken Herner -Kevin Lin -Kevin Xu -Kim, Hirokuni -Kostas Lekkas -Kyle Allan -Liron Levin -Lior Yankovich -Liu Peng -Lorenz Leutgeb -Lucas Clemente -Lucas Weiblen -Lyon Hill -Mantas Matelis -Marguerite des Trois Maisons -Mariusz Borsa -Martin Sweeney -Máximo Cuadros Ortiz -Michael Schmatz -Michal Fojtik -Mike Dillon -Mrunal Patel -Nate Jones -Nguyen Sy Thanh Son -Nicholas Van Wiggeren -Nick Ethier -niko83 -Omeid Matten -Orivej Desh -Paul Bellamy -Paul Morie -Paul Weil -Peter Edge -Peter Jihoon Kim -Phil Lu -Philippe Lafoucrière -Radek Simko -Rafe Colton -Raphaël Pinson -Reed Allman -RJ Catalano -Rob Miller -Robbert Klarenbeek -Robert Williamson -Roman Khlystik -Russell Haering -Salvador Gironès -Sam Rijs -Sami Wagiaalla -Samuel Archambault -Samuel Karp -Seth Jennings -Shane Xie -Silas Sewell -Simon Eskildsen -Simon Menke -Skolos -Soulou -Sridhar Ratnakumar -Summer Mousa -Sunjin Lee -Swaroop Ramachandra -Tarsis Azevedo -Tim Schindler -Timothy St. Clair -Tobi Knaup -Tom Wilkie -Tonic -ttyh061 -upccup -Victor Marmol -Vincenzo Prignano -Vlad Alexandru Ionescu -Weitao Zhou -Wiliam Souza -Ye Yin -Yu, Zou -Yuriy Bogdanov diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE deleted file mode 100644 index 70663447..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE +++ /dev/null @@ -1,6 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -You can find the Docker license at the following link: -https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/Makefile deleted file mode 100644 index 483aa1bb..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -.PHONY: \ - all \ - lint \ - vet \ - fmt \ - fmtcheck \ - pretest \ - test \ - integration \ - clean - -all: test - -lint: - @ go get -v github.com/golang/lint/golint - [ -z "$$(golint . | grep -v 'type name will be used as docker.DockerInfo' | grep -v 'context.Context should be the first' | tee /dev/stderr)" ] - -vet: - go vet ./... - -fmt: - gofmt -s -w . - -fmtcheck: - [ -z "$$(gofmt -s -d . | tee /dev/stderr)" ] - -testdeps: - go get -d -t ./... - -pretest: testdeps lint vet fmtcheck - -gotest: - go test $(GO_TEST_FLAGS) ./... - -test: pretest gotest - -integration: - go test -tags docker_integration -run TestIntegration -v - -clean: - go clean ./... 
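The README removed next documents go-dockerclient's NewClient/NewTLSClient entry points. For comparison, a hedged sketch of the equivalent construction with the official client, mirroring the NewDockerClient change earlier in this patch; host, certificate paths, and the pinned API version are placeholders, and the patch itself goes through libbeat's own docker.NewClient wrapper rather than calling client.NewClient directly:

```go
package main

import (
	"log"
	"net/http"

	"github.com/docker/docker/client"
	"github.com/docker/go-connections/tlsconfig"
)

// newTLSDockerClient builds a *tls.Config with go-connections/tlsconfig and
// hands an *http.Client to the official client constructor, much like the
// patched NewDockerClient does when TLS is enabled.
func newTLSDockerClient(host, ca, cert, key string) (*client.Client, error) {
	tlsc, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   ca,
		CertFile: cert,
		KeyFile:  key,
	})
	if err != nil {
		return nil, err
	}
	httpClient := &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsc},
	}
	return client.NewClient(host, "1.22", httpClient, nil)
}

func main() {
	cli, err := newTLSDockerClient("tcp://127.0.0.1:2376",
		"/path/ca.pem", "/path/cert.pem", "/path/key.pem")
	if err != nil {
		log.Fatal(err)
	}
	_ = cli
}
```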
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/README.markdown deleted file mode 100644 index 68434a03..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/README.markdown +++ /dev/null @@ -1,112 +0,0 @@ -# go-dockerclient - -[![Travis Build Status](https://travis-ci.org/fsouza/go-dockerclient.svg?branch=master)](https://travis-ci.org/fsouza/go-dockerclient) -[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/4m374pti06ubg2l7?svg=true)](https://ci.appveyor.com/project/fsouza/go-dockerclient) -[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) - -This package presents a client for the Docker remote API. It also provides -support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/). -It currently supports the Docker API up to version 1.23. - -This package also provides support for docker's network API, which is a simple -passthrough to the libnetwork remote API. Note that docker's network API is -only available in docker 1.8 and above, and only enabled in docker if -DOCKER_EXPERIMENTAL is defined during the docker build process. - -For more details, check the [remote API -documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/). - -## Example - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "unix:///var/run/docker.sock" - client, err := docker.NewClient(endpoint) - if err != nil { - panic(err) - } - imgs, err := client.ListImages(docker.ListImagesOptions{All: false}) - if err != nil { - panic(err) - } - for _, img := range imgs { - fmt.Println("ID: ", img.ID) - fmt.Println("RepoTags: ", img.RepoTags) - fmt.Println("Created: ", img.Created) - fmt.Println("Size: ", img.Size) - fmt.Println("VirtualSize: ", img.VirtualSize) - fmt.Println("ParentId: ", img.ParentID) - } -} -``` - -## Using with TLS - -In order to instantiate the client for a TLS-enabled daemon, you should use -NewTLSClient, passing the endpoint and path for key and certificates as -parameters. - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "tcp://[ip]:[port]" - path := os.Getenv("DOCKER_CERT_PATH") - ca := fmt.Sprintf("%s/ca.pem", path) - cert := fmt.Sprintf("%s/cert.pem", path) - key := fmt.Sprintf("%s/key.pem", path) - client, _ := docker.NewTLSClient(endpoint, cert, key, ca) - // use client -} -``` - -If using [docker-machine](https://docs.docker.com/machine/), or another -application that exports environment variables `DOCKER_HOST`, -`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, you can use NewClientFromEnv. - - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - client, _ := docker.NewClientFromEnv() - // use client -} -``` - -See the documentation for more details. - -## Developing - -All development commands can be seen in the [Makefile](Makefile). - -Commited code must pass: - -* [golint](https://github.com/golang/lint) (with some exceptions, see the Makefile). -* [go vet](https://golang.org/cmd/vet/) -* [gofmt](https://golang.org/cmd/gofmt) -* [go test](https://golang.org/cmd/go/#hdr-Test_packages) - -Running `make test` will check all of these. 
If your editor does not -automatically call ``gofmt -s``, `make fmt` will format all go files in this -repository. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/auth.go deleted file mode 100644 index 03d192b7..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/auth.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "strings" -) - -// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. -var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg") - -// AuthConfiguration represents authentication options to use in the PushImage -// method. It represents the authentication in the Docker index server. -type AuthConfiguration struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Email string `json:"email,omitempty"` - ServerAddress string `json:"serveraddress,omitempty"` -} - -// AuthConfigurations represents authentication options to use for the -// PushImage method accommodating the new X-Registry-Config header -type AuthConfigurations struct { - Configs map[string]AuthConfiguration `json:"configs"` -} - -// AuthConfigurations119 is used to serialize a set of AuthConfigurations -// for Docker API >= 1.19. -type AuthConfigurations119 map[string]AuthConfiguration - -// dockerConfig represents a registry authentation configuration from the -// .dockercfg file. -type dockerConfig struct { - Auth string `json:"auth"` - Email string `json:"email"` -} - -// NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON -// in the same format as the .dockercfg file. -func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - return NewAuthConfigurations(r) -} - -func cfgPaths(dockerConfigEnv string, homeEnv string) []string { - var paths []string - if dockerConfigEnv != "" { - paths = append(paths, path.Join(dockerConfigEnv, "config.json")) - } - if homeEnv != "" { - paths = append(paths, path.Join(homeEnv, ".docker", "config.json")) - paths = append(paths, path.Join(homeEnv, ".dockercfg")) - } - return paths -} - -// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from -// system config files. The following files are checked in the order listed: -// - $DOCKER_CONFIG/config.json if DOCKER_CONFIG set in the environment, -// - $HOME/.docker/config.json -// - $HOME/.dockercfg -func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { - err := fmt.Errorf("No docker configuration found") - var auths *AuthConfigurations - - pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME")) - for _, path := range pathsToTry { - auths, err = NewAuthConfigurationsFromFile(path) - if err == nil { - return auths, nil - } - } - return auths, err -} - -// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the -// same format as the .dockercfg file. 
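The credential files parsed by the auth.go being removed here store each registry entry as base64("username:password") under an auths key. A stripped-down illustration of that parse; the sample JSON, registry, and credentials are invented:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Made-up config.json fragment; "dXNlcjpzZWNyZXQ=" is base64("user:secret").
	raw := []byte(`{"auths":{"https://index.docker.io/v1/":` +
		`{"auth":"dXNlcjpzZWNyZXQ=","email":"user@example.com"}}}`)

	var cfg struct {
		Auths map[string]struct {
			Auth  string `json:"auth"`
			Email string `json:"email"`
		} `json:"auths"`
	}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	for registry, entry := range cfg.Auths {
		decoded, err := base64.StdEncoding.DecodeString(entry.Auth)
		if err != nil {
			panic(err)
		}
		parts := strings.SplitN(string(decoded), ":", 2)
		if len(parts) != 2 {
			panic("cannot parse auth entry")
		}
		fmt.Printf("registry=%s user=%s\n", registry, parts[0])
	}
}
```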
-func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { - var auth *AuthConfigurations - confs, err := parseDockerConfig(r) - if err != nil { - return nil, err - } - auth, err = authConfigs(confs) - if err != nil { - return nil, err - } - return auth, nil -} - -func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { - buf := new(bytes.Buffer) - buf.ReadFrom(r) - byteData := buf.Bytes() - - confsWrapper := struct { - Auths map[string]dockerConfig `json:"auths"` - }{} - if err := json.Unmarshal(byteData, &confsWrapper); err == nil { - if len(confsWrapper.Auths) > 0 { - return confsWrapper.Auths, nil - } - } - - var confs map[string]dockerConfig - if err := json.Unmarshal(byteData, &confs); err != nil { - return nil, err - } - return confs, nil -} - -// authConfigs converts a dockerConfigs map to a AuthConfigurations object. -func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { - c := &AuthConfigurations{ - Configs: make(map[string]AuthConfiguration), - } - for reg, conf := range confs { - data, err := base64.StdEncoding.DecodeString(conf.Auth) - if err != nil { - return nil, err - } - userpass := strings.SplitN(string(data), ":", 2) - if len(userpass) != 2 { - return nil, ErrCannotParseDockercfg - } - c.Configs[reg] = AuthConfiguration{ - Email: conf.Email, - Username: userpass[0], - Password: userpass[1], - ServerAddress: reg, - } - } - return c, nil -} - -// AuthStatus returns the authentication status for Docker API versions >= 1.23. -type AuthStatus struct { - Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` - IdentityToken string `json:"IdentityToken,omitempty" yaml:"IdentityToken,omitempty" toml:"IdentityToken,omitempty"` -} - -// AuthCheck validates the given credentials. It returns nil if successful. -// -// For Docker API versions >= 1.23, the AuthStatus struct will be populated, otherwise it will be empty.` -// -// See https://goo.gl/6nsZkH for more details. -func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { - var authStatus AuthStatus - if conf == nil { - return authStatus, errors.New("conf is nil") - } - resp, err := c.do("POST", "/auth", doOptions{data: conf}) - if err != nil { - return authStatus, err - } - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return authStatus, err - } - if len(data) == 0 { - return authStatus, nil - } - if err := json.Unmarshal(data, &authStatus); err != nil { - return authStatus, err - } - return authStatus, nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/change.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/change.go deleted file mode 100644 index 3f936b22..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/change.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import "fmt" - -// ChangeType is a type for constants indicating the type of change -// in a container -type ChangeType int - -const ( - // ChangeModify is the ChangeType for container modifications - ChangeModify ChangeType = iota - - // ChangeAdd is the ChangeType for additions to a container - ChangeAdd - - // ChangeDelete is the ChangeType for deletions from a container - ChangeDelete -) - -// Change represents a change in a container. -// -// See https://goo.gl/Wo0JJp for more details. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client.go deleted file mode 100644 index ca113552..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client.go +++ /dev/null @@ -1,1028 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package docker provides a client for the Docker remote API. -// -// See https://goo.gl/o2v3rk for more details on the remote API. -package docker - -import ( - "bufio" - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - "sync/atomic" - "time" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/stdcopy" - "github.com/hashicorp/go-cleanhttp" - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" -) - -const ( - userAgent = "go-dockerclient" - - unixProtocol = "unix" - namedPipeProtocol = "npipe" -) - -var ( - // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. - ErrInvalidEndpoint = errors.New("invalid endpoint") - - // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. - ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") - - // ErrInactivityTimeout is returned when a streamable call has been inactive for some time. - ErrInactivityTimeout = errors.New("inactivity time exceeded timeout") - - apiVersion112, _ = NewAPIVersion("1.12") - apiVersion119, _ = NewAPIVersion("1.19") - apiVersion124, _ = NewAPIVersion("1.24") - apiVersion125, _ = NewAPIVersion("1.25") -) - -// APIVersion is an internal representation of a version of the Remote API. -type APIVersion []int - -// NewAPIVersion returns an instance of APIVersion for the given string. -// -// The given string must be in the form .., where , -// and are integer numbers. 
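The APIVersion type removed below compares dotted version strings (presumably "<major>.<minor>" in the original comment, whose placeholders did not survive extraction) component by component as integers, treating a longer version as greater when the shared prefix matches. A compact stand-in showing the same idea, not the removed implementation itself:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// compareVersions returns -1, 0, or 1 depending on whether a is older than,
// equal to, or newer than b, comparing dotted components as integers.
func compareVersions(a, b string) int {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		ai, _ := strconv.Atoi(as[i])
		bi, _ := strconv.Atoi(bs[i])
		if ai != bi {
			if ai < bi {
				return -1
			}
			return 1
		}
	}
	switch {
	case len(as) < len(bs):
		return -1
	case len(as) > len(bs):
		return 1
	}
	return 0
}

func main() {
	fmt.Println(compareVersions("1.22", "1.24")) // -1: 1.22 is older
	fmt.Println(compareVersions("1.24", "1.24")) // 0
}
```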
-func NewAPIVersion(input string) (APIVersion, error) { - if !strings.Contains(input, ".") { - return nil, fmt.Errorf("Unable to parse version %q", input) - } - raw := strings.Split(input, "-") - arr := strings.Split(raw[0], ".") - ret := make(APIVersion, len(arr)) - var err error - for i, val := range arr { - ret[i], err = strconv.Atoi(val) - if err != nil { - return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val) - } - } - return ret, nil -} - -func (version APIVersion) String() string { - var str string - for i, val := range version { - str += strconv.Itoa(val) - if i < len(version)-1 { - str += "." - } - } - return str -} - -// LessThan is a function for comparing APIVersion structs -func (version APIVersion) LessThan(other APIVersion) bool { - return version.compare(other) < 0 -} - -// LessThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool { - return version.compare(other) <= 0 -} - -// GreaterThan is a function for comparing APIVersion structs -func (version APIVersion) GreaterThan(other APIVersion) bool { - return version.compare(other) > 0 -} - -// GreaterThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool { - return version.compare(other) >= 0 -} - -func (version APIVersion) compare(other APIVersion) int { - for i, v := range version { - if i <= len(other)-1 { - otherVersion := other[i] - - if v < otherVersion { - return -1 - } else if v > otherVersion { - return 1 - } - } - } - if len(version) > len(other) { - return 1 - } - if len(version) < len(other) { - return -1 - } - return 0 -} - -// Client is the basic type of this package. It provides methods for -// interaction with the API. -type Client struct { - SkipServerVersionCheck bool - HTTPClient *http.Client - TLSConfig *tls.Config - Dialer Dialer - - endpoint string - endpointURL *url.URL - eventMonitor *eventMonitoringState - requestedAPIVersion APIVersion - serverAPIVersion APIVersion - expectedAPIVersion APIVersion - nativeHTTPClient *http.Client -} - -// Dialer is an interface that allows network connections to be dialed -// (net.Dialer fulfills this interface) and named pipes (a shim using -// winio.DialPipe) -type Dialer interface { - Dial(network, address string) (net.Conn, error) -} - -// NewClient returns a Client instance ready for communication with the given -// server endpoint. It will use the latest remote API version available in the -// server. -func NewClient(endpoint string) (*Client, error) { - client, err := NewVersionedClient(endpoint, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClient returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates . It will use the latest remote API version -// available in the server. -func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) { - client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file). It will use the latest remote API version available in the server. 
-func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) { - client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClient returns a Client instance ready for communication with -// the given server endpoint, using a specific remote API version. -func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, false) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - c := &Client{ - HTTPClient: cleanhttp.DefaultClient(), - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - } - c.initializeNativeClient() - return c, nil -} - -// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient. -func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString) -} - -// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates, using a specific remote API version. -func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - var certPEMBlock []byte - var keyPEMBlock []byte - var caPEMCert []byte - if _, err := os.Stat(cert); !os.IsNotExist(err) { - certPEMBlock, err = ioutil.ReadFile(cert) - if err != nil { - return nil, err - } - } - if _, err := os.Stat(key); !os.IsNotExist(err) { - keyPEMBlock, err = ioutil.ReadFile(key) - if err != nil { - return nil, err - } - } - if _, err := os.Stat(ca); !os.IsNotExist(err) { - caPEMCert, err = ioutil.ReadFile(ca) - if err != nil { - return nil, err - } - } - return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString) -} - -// NewClientFromEnv returns a Client instance ready for communication created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. -func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv("") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH, -// and using a specific remote API version. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. 
-func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) { - dockerEnv, err := getDockerEnv() - if err != nil { - return nil, err - } - dockerHost := dockerEnv.dockerHost - if dockerEnv.dockerTLSVerify { - parts := strings.SplitN(dockerEnv.dockerHost, "://", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost) - } - cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem") - key := filepath.Join(dockerEnv.dockerCertPath, "key.pem") - ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem") - return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString) - } - return NewVersionedClient(dockerEnv.dockerHost, apiVersionString) -} - -// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file), using a specific remote API version. -func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, true) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - tlsConfig := &tls.Config{} - if certPEMBlock != nil && keyPEMBlock != nil { - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - if caPEMCert == nil { - tlsConfig.InsecureSkipVerify = true - } else { - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caPEMCert) { - return nil, errors.New("Could not add RootCA pem") - } - tlsConfig.RootCAs = caPool - } - tr := cleanhttp.DefaultTransport() - tr.TLSClientConfig = tlsConfig - if err != nil { - return nil, err - } - c := &Client{ - HTTPClient: &http.Client{Transport: tr}, - TLSConfig: tlsConfig, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - } - c.initializeNativeClient() - return c, nil -} - -// SetTimeout takes a timeout and applies it to both the HTTPClient and -// nativeHTTPClient. It should not be called concurrently with any other Client -// methods. -func (c *Client) SetTimeout(t time.Duration) { - if c.HTTPClient != nil { - c.HTTPClient.Timeout = t - } - if c.nativeHTTPClient != nil { - c.nativeHTTPClient.Timeout = t - } -} - -func (c *Client) checkAPIVersion() error { - serverAPIVersionString, err := c.getServerAPIVersionString() - if err != nil { - return err - } - c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString) - if err != nil { - return err - } - if c.requestedAPIVersion == nil { - c.expectedAPIVersion = c.serverAPIVersion - } else { - c.expectedAPIVersion = c.requestedAPIVersion - } - return nil -} - -// Endpoint returns the current endpoint. It's useful for getting the endpoint -// when using functions that get this data from the environment (like -// NewClientFromEnv. -func (c *Client) Endpoint() string { - return c.endpoint -} - -// Ping pings the docker server -// -// See https://goo.gl/wYfgY1 for more details. -func (c *Client) Ping() error { - return c.PingWithContext(nil) -} - -// PingWithContext pings the docker server -// The context object can be used to cancel the ping request. 
-// -// See https://goo.gl/wYfgY1 for more details. -func (c *Client) PingWithContext(ctx context.Context) error { - path := "/_ping" - resp, err := c.do("GET", path, doOptions{context: ctx}) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return newError(resp) - } - resp.Body.Close() - return nil -} - -func (c *Client) getServerAPIVersionString() (version string, err error) { - resp, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return "", err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode) - } - var versionResponse map[string]interface{} - if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil { - return "", err - } - if version, ok := (versionResponse["ApiVersion"]).(string); ok { - return version, nil - } - return "", nil -} - -type doOptions struct { - data interface{} - forceJSON bool - headers map[string]string - context context.Context -} - -func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) { - var params io.Reader - if doOptions.data != nil || doOptions.forceJSON { - buf, err := json.Marshal(doOptions.data) - if err != nil { - return nil, err - } - params = bytes.NewBuffer(buf) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return nil, err - } - } - httpClient := c.HTTPClient - protocol := c.endpointURL.Scheme - var u string - switch protocol { - case unixProtocol, namedPipeProtocol: - httpClient = c.nativeHTTPClient - u = c.getFakeNativeURL(path) - default: - u = c.getURL(path) - } - - req, err := http.NewRequest(method, u, params) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", userAgent) - if doOptions.data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - - for k, v := range doOptions.headers { - req.Header.Set(k, v) - } - - ctx := doOptions.context - if ctx == nil { - ctx = context.Background() - } - - resp, err := ctxhttp.Do(ctx, httpClient, req) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, ErrConnectionRefused - } - - return nil, chooseError(ctx, err) - } - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, newError(resp) - } - return resp, nil -} - -type streamOptions struct { - setRawTerminal bool - rawJSONStream bool - useJSONDecoder bool - headers map[string]string - in io.Reader - stdout io.Writer - stderr io.Writer - // timeout is the initial connection timeout - timeout time.Duration - // Timeout with no data is received, it's reset every time new data - // arrives - inactivityTimeout time.Duration - context context.Context -} - -// if error in context, return that instead of generic http error -func chooseError(ctx context.Context, err error) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - return err - } -} - -func (c *Client) stream(method, path string, streamOptions streamOptions) error { - if (method == "POST" || method == "PUT") && streamOptions.in == nil { - streamOptions.in = bytes.NewReader(nil) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return err - } - } - req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) - if err 
!= nil { - return err - } - req.Header.Set("User-Agent", userAgent) - if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - for key, val := range streamOptions.headers { - req.Header.Set(key, val) - } - var resp *http.Response - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if streamOptions.stdout == nil { - streamOptions.stdout = ioutil.Discard - } - if streamOptions.stderr == nil { - streamOptions.stderr = ioutil.Discard - } - - // make a sub-context so that our active cancellation does not affect parent - ctx := streamOptions.context - if ctx == nil { - ctx = context.Background() - } - subCtx, cancelRequest := context.WithCancel(ctx) - defer cancelRequest() - - if protocol == unixProtocol || protocol == namedPipeProtocol { - var dial net.Conn - dial, err = c.Dialer.Dial(protocol, address) - if err != nil { - return err - } - go func() { - <-subCtx.Done() - dial.Close() - }() - breader := bufio.NewReader(dial) - err = req.Write(dial) - if err != nil { - return chooseError(subCtx, err) - } - - // ReadResponse may hang if server does not replay - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Now().Add(streamOptions.timeout)) - } - - if resp, err = http.ReadResponse(breader, req); err != nil { - // Cancel timeout for future I/O operations - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Time{}) - } - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - - return chooseError(subCtx, err) - } - } else { - if resp, err = ctxhttp.Do(subCtx, c.HTTPClient, req); err != nil { - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - return chooseError(subCtx, err) - } - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return newError(resp) - } - var canceled uint32 - if streamOptions.inactivityTimeout > 0 { - var ch chan<- struct{} - resp.Body, ch = handleInactivityTimeout(resp.Body, streamOptions.inactivityTimeout, cancelRequest, &canceled) - defer close(ch) - } - err = handleStreamResponse(resp, &streamOptions) - if err != nil { - if atomic.LoadUint32(&canceled) != 0 { - return ErrInactivityTimeout - } - return chooseError(subCtx, err) - } - return nil -} - -func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) error { - var err error - if !streamOptions.useJSONDecoder && resp.Header.Get("Content-Type") != "application/json" { - if streamOptions.setRawTerminal { - _, err = io.Copy(streamOptions.stdout, resp.Body) - } else { - _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body) - } - return err - } - // if we want to get raw json stream, just copy it back to output - // without decoding it - if streamOptions.rawJSONStream { - _, err = io.Copy(streamOptions.stdout, resp.Body) - return err - } - - err = DisplayJSONMessagesStream(resp.Body, streamOptions.stdout, 0, false, nil) - - return err -} - -type proxyReader struct { - io.ReadCloser - calls uint64 -} - -func (p *proxyReader) callCount() uint64 { - return atomic.LoadUint64(&p.calls) -} - -func (p *proxyReader) Read(data []byte) (int, error) { - atomic.AddUint64(&p.calls, 1) - return p.ReadCloser.Read(data) -} - -func handleInactivityTimeout(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) { - done := make(chan struct{}) - proxyReader := &proxyReader{ReadCloser: reader} - go func() { - var lastCallCount uint64 - for { - select { - case <-time.After(timeout): - 
case <-done: - return - } - curCallCount := proxyReader.callCount() - if curCallCount == lastCallCount { - atomic.AddUint32(canceled, 1) - cancelRequest() - return - } - lastCallCount = curCallCount - } - }() - return proxyReader, done -} - -type hijackOptions struct { - success chan struct{} - setRawTerminal bool - in io.Reader - stdout io.Writer - stderr io.Writer - data interface{} -} - -// CloseWaiter is an interface with methods for closing the underlying resource -// and then waiting for it to finish processing. -type CloseWaiter interface { - io.Closer - Wait() error -} - -type waiterFunc func() error - -func (w waiterFunc) Wait() error { return w() } - -type closerFunc func() error - -func (c closerFunc) Close() error { return c() } - -func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) { - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return nil, err - } - } - var params io.Reader - if hijackOptions.data != nil { - buf, err := json.Marshal(hijackOptions.data) - if err != nil { - return nil, err - } - params = bytes.NewBuffer(buf) - } - req, err := http.NewRequest(method, c.getURL(path), params) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != unixProtocol && protocol != namedPipeProtocol { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - if c.TLSConfig != nil && protocol != unixProtocol && protocol != namedPipeProtocol { - netDialer, ok := c.Dialer.(*net.Dialer) - if !ok { - return nil, ErrTLSNotSupported - } - dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig) - if err != nil { - return nil, err - } - } else { - dial, err = c.Dialer.Dial(protocol, address) - if err != nil { - return nil, err - } - } - - errs := make(chan error, 1) - quit := make(chan struct{}) - go func() { - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - clientconn.Do(req) - if hijackOptions.success != nil { - hijackOptions.success <- struct{}{} - <-hijackOptions.success - } - rwc, br := clientconn.Hijack() - defer rwc.Close() - - errChanOut := make(chan error, 1) - errChanIn := make(chan error, 2) - if hijackOptions.stdout == nil && hijackOptions.stderr == nil { - close(errChanOut) - } else { - // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set. 
- // Otherwise, if the only stream you care about is stdin, your attach session - // will "hang" until the container terminates, even though you're not reading - // stdout/stderr - if hijackOptions.stdout == nil { - hijackOptions.stdout = ioutil.Discard - } - if hijackOptions.stderr == nil { - hijackOptions.stderr = ioutil.Discard - } - - go func() { - defer func() { - if hijackOptions.in != nil { - if closer, ok := hijackOptions.in.(io.Closer); ok { - closer.Close() - } - errChanIn <- nil - } - }() - - var err error - if hijackOptions.setRawTerminal { - _, err = io.Copy(hijackOptions.stdout, br) - } else { - _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) - } - errChanOut <- err - }() - } - - go func() { - var err error - if hijackOptions.in != nil { - _, err = io.Copy(rwc, hijackOptions.in) - } - errChanIn <- err - rwc.(interface { - CloseWrite() error - }).CloseWrite() - }() - - var errIn error - select { - case errIn = <-errChanIn: - case <-quit: - } - - var errOut error - select { - case errOut = <-errChanOut: - case <-quit: - } - - if errIn != nil { - errs <- errIn - } else { - errs <- errOut - } - }() - - return struct { - closerFunc - waiterFunc - }{ - closerFunc(func() error { close(quit); return nil }), - waiterFunc(func() error { return <-errs }), - }, nil -} - -func (c *Client) getURL(path string) string { - urlStr := strings.TrimRight(c.endpointURL.String(), "/") - if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { - urlStr = "" - } - if c.requestedAPIVersion != nil { - return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) - } - return fmt.Sprintf("%s%s", urlStr, path) -} - -// getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX -// domain socket to the given path. -func (c *Client) getFakeNativeURL(path string) string { - u := *c.endpointURL // Copy. - - // Override URL so that net/http will not complain. - u.Scheme = "http" - u.Host = "unix.sock" // Doesn't matter what this is - it's not used. 
- u.Path = "" - urlStr := strings.TrimRight(u.String(), "/") - if c.requestedAPIVersion != nil { - return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) - } - return fmt.Sprintf("%s%s", urlStr, path) -} - -type jsonMessage struct { - Status string `json:"status,omitempty"` - Progress string `json:"progress,omitempty"` - Error string `json:"error,omitempty"` - Stream string `json:"stream,omitempty"` -} - -func queryString(opts interface{}) string { - if opts == nil { - return "" - } - value := reflect.ValueOf(opts) - if value.Kind() == reflect.Ptr { - value = value.Elem() - } - if value.Kind() != reflect.Struct { - return "" - } - items := url.Values(map[string][]string{}) - for i := 0; i < value.NumField(); i++ { - field := value.Type().Field(i) - if field.PkgPath != "" { - continue - } - key := field.Tag.Get("qs") - if key == "" { - key = strings.ToLower(field.Name) - } else if key == "-" { - continue - } - addQueryStringValue(items, key, value.Field(i)) - } - return items.Encode() -} - -func addQueryStringValue(items url.Values, key string, v reflect.Value) { - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - items.Add(key, "1") - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if v.Int() > 0 { - items.Add(key, strconv.FormatInt(v.Int(), 10)) - } - case reflect.Float32, reflect.Float64: - if v.Float() > 0 { - items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) - } - case reflect.String: - if v.String() != "" { - items.Add(key, v.String()) - } - case reflect.Ptr: - if !v.IsNil() { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Map: - if len(v.MapKeys()) > 0 { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Array, reflect.Slice: - vLen := v.Len() - if vLen > 0 { - for i := 0; i < vLen; i++ { - addQueryStringValue(items, key, v.Index(i)) - } - } - } -} - -// Error represents failures in the API. It represents a failure from the API. 
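The Error type documented above carries the HTTP status code and response body of a failed API call. A minimal sketch of inspecting it, together with SetTimeout and Ping from earlier in this file; the timeout value is illustrative.

package main

import (
	"log"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Apply a request timeout to both underlying HTTP clients (the value is illustrative).
	client.SetTimeout(10 * time.Second)

	if err := client.Ping(); err != nil {
		// Error responses from the daemon surface as *docker.Error.
		if apiErr, ok := err.(*docker.Error); ok {
			log.Fatalf("daemon returned status %d: %s", apiErr.Status, apiErr.Message)
		}
		log.Fatalf("could not reach daemon: %v", err)
	}
	log.Println("daemon is reachable")
}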
-type Error struct { - Status int - Message string -} - -func newError(resp *http.Response) *Error { - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} - } - return &Error{Status: resp.StatusCode, Message: string(data)} -} - -func (e *Error) Error() string { - return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) -} - -func parseEndpoint(endpoint string, tls bool) (*url.URL, error) { - if endpoint != "" && !strings.Contains(endpoint, "://") { - endpoint = "tcp://" + endpoint - } - u, err := url.Parse(endpoint) - if err != nil { - return nil, ErrInvalidEndpoint - } - if tls && u.Scheme != "unix" { - u.Scheme = "https" - } - switch u.Scheme { - case unixProtocol, namedPipeProtocol: - return u, nil - case "http", "https", "tcp": - _, port, err := net.SplitHostPort(u.Host) - if err != nil { - if e, ok := err.(*net.AddrError); ok { - if e.Err == "missing port in address" { - return u, nil - } - } - return nil, ErrInvalidEndpoint - } - number, err := strconv.ParseInt(port, 10, 64) - if err == nil && number > 0 && number < 65536 { - if u.Scheme == "tcp" { - if tls { - u.Scheme = "https" - } else { - u.Scheme = "http" - } - } - return u, nil - } - return nil, ErrInvalidEndpoint - default: - return nil, ErrInvalidEndpoint - } -} - -type dockerEnv struct { - dockerHost string - dockerTLSVerify bool - dockerCertPath string -} - -func getDockerEnv() (*dockerEnv, error) { - dockerHost := os.Getenv("DOCKER_HOST") - var err error - if dockerHost == "" { - dockerHost = opts.DefaultHost - } - dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != "" - var dockerCertPath string - if dockerTLSVerify { - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - if dockerCertPath == "" { - home := homedir.Get() - if home == "" { - return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set") - } - dockerCertPath = filepath.Join(home, ".docker") - dockerCertPath, err = filepath.Abs(dockerCertPath) - if err != nil { - return nil, err - } - } - } - return &dockerEnv{ - dockerHost: dockerHost, - dockerTLSVerify: dockerTLSVerify, - dockerCertPath: dockerCertPath, - }, nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client_unix.go deleted file mode 100644 index dd153348..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !windows - -package docker - -import ( - "context" - "net" - "net/http" - - "github.com/hashicorp/go-cleanhttp" -) - -// initializeNativeClient initializes the native Unix domain socket client on -// Unix-style operating systems -func (c *Client) initializeNativeClient() { - if c.endpointURL.Scheme != unixProtocol { - return - } - socketPath := c.endpointURL.Path - tr := cleanhttp.DefaultTransport() - tr.Dial = func(network, addr string) (net.Conn, error) { - return c.Dialer.Dial(network, addr) - } - tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { - return c.Dialer.Dial(unixProtocol, socketPath) - } - c.nativeHTTPClient = &http.Client{Transport: tr} -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client_windows.go deleted file mode 100644 index 95ef56b4..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/client_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows - -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "context" - "net" - "net/http" - "time" - - "github.com/Microsoft/go-winio" - "github.com/hashicorp/go-cleanhttp" -) - -const namedPipeConnectTimeout = 2 * time.Second - -type pipeDialer struct { - dialFunc func(network, addr string) (net.Conn, error) -} - -func (p pipeDialer) Dial(network, address string) (net.Conn, error) { - return p.dialFunc(network, address) -} - -// initializeNativeClient initializes the native Named Pipe client for Windows -func (c *Client) initializeNativeClient() { - if c.endpointURL.Scheme != namedPipeProtocol { - return - } - namedPipePath := c.endpointURL.Path - dialFunc := func(network, addr string) (net.Conn, error) { - timeout := namedPipeConnectTimeout - return winio.DialPipe(namedPipePath, &timeout) - } - tr := cleanhttp.DefaultTransport() - tr.Dial = dialFunc - tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { - return dialFunc(network, addr) - } - c.Dialer = &pipeDialer{dialFunc} - c.nativeHTTPClient = &http.Client{Transport: tr} -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/container.go deleted file mode 100644 index 759a7ae2..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/container.go +++ /dev/null @@ -1,1545 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/go-units" - "golang.org/x/net/context" -) - -// ErrContainerAlreadyExists is the error returned by CreateContainer when the -// container already exists. -var ErrContainerAlreadyExists = errors.New("container already exists") - -// ListContainersOptions specify parameters to the ListContainers function. -// -// See https://goo.gl/kaOHGw for more details. 
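The options type introduced by the comment above feeds ListContainers, both of which are defined just below. A minimal sketch of a listing call; the fields printed are a small subset of APIContainers.

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// List all containers, including stopped ones.
	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image, c.State, c.Names)
	}
}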
-type ListContainersOptions struct { - All bool - Size bool - Limit int - Since string - Before string - Filters map[string][]string - Context context.Context -} - -// APIPort is a type that represents a port mapping returned by the Docker API -type APIPort struct { - PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty" toml:"PrivatePort,omitempty"` - PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty" toml:"PublicPort,omitempty"` - Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"` -} - -// APIMount represents a mount point for a container. -type APIMount struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` - Source string `json:"Source,omitempty" yaml:"Source,omitempty" toml:"Source,omitempty"` - Destination string `json:"Destination,omitempty" yaml:"Destination,omitempty" toml:"Destination,omitempty"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` - Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` - RW bool `json:"RW,omitempty" yaml:"RW,omitempty" toml:"RW,omitempty"` - Propogation string `json:"Propogation,omitempty" yaml:"Propogation,omitempty" toml:"Propogation,omitempty"` -} - -// APIContainers represents each container in the list returned by -// ListContainers. -type APIContainers struct { - ID string `json:"Id" yaml:"Id" toml:"Id"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"` - Command string `json:"Command,omitempty" yaml:"Command,omitempty" toml:"Command,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` - State string `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"` - Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` - Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"` - SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"` - SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"` - Names []string `json:"Names,omitempty" yaml:"Names,omitempty" toml:"Names,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` - Networks NetworkList `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"` - Mounts []APIMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` -} - -// NetworkList encapsulates a map of networks, as returned by the Docker API in -// ListContainers. -type NetworkList struct { - Networks map[string]ContainerNetwork `json:"Networks" yaml:"Networks,omitempty" toml:"Networks,omitempty"` -} - -// ListContainers returns a slice of containers matching the given criteria. -// -// See https://goo.gl/kaOHGw for more details. -func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { - path := "/containers/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var containers []APIContainers - if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil { - return nil, err - } - return containers, nil -} - -// Port represents the port number and the protocol, in the form -// /. 
For example: 80/tcp. -type Port string - -// Port returns the number of the port. -func (p Port) Port() string { - return strings.Split(string(p), "/")[0] -} - -// Proto returns the name of the protocol. -func (p Port) Proto() string { - parts := strings.Split(string(p), "/") - if len(parts) == 1 { - return "tcp" - } - return parts[1] -} - -// HealthCheck represents one check of health. -type HealthCheck struct { - Start time.Time `json:"Start,omitempty" yaml:"Start,omitempty" toml:"Start,omitempty"` - End time.Time `json:"End,omitempty" yaml:"End,omitempty" toml:"End,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"` - Output string `json:"Output,omitempty" yaml:"Output,omitempty" toml:"Output,omitempty"` -} - -// Health represents the health of a container. -type Health struct { - Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` - FailingStreak int `json:"FailingStreak,omitempty" yaml:"FailingStreak,omitempty" toml:"FailingStreak,omitempty"` - Log []HealthCheck `json:"Log,omitempty" yaml:"Log,omitempty" toml:"Log,omitempty"` -} - -// State represents the state of a container. -type State struct { - Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` - Running bool `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"` - Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty" toml:"Paused,omitempty"` - Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty" toml:"Restarting,omitempty"` - OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty" toml:"OOMKilled,omitempty"` - RemovalInProgress bool `json:"RemovalInProgress,omitempty" yaml:"RemovalInProgress,omitempty" toml:"RemovalInProgress,omitempty"` - Dead bool `json:"Dead,omitempty" yaml:"Dead,omitempty" toml:"Dead,omitempty"` - Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty" toml:"Pid,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"` - Error string `json:"Error,omitempty" yaml:"Error,omitempty" toml:"Error,omitempty"` - StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty" toml:"StartedAt,omitempty"` - FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty" toml:"FinishedAt,omitempty"` - Health Health `json:"Health,omitempty" yaml:"Health,omitempty" toml:"Health,omitempty"` -} - -// String returns a human-readable description of the state -func (s *State) String() string { - if s.Running { - if s.Paused { - return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) - } - if s.Restarting { - return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) - } - - return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) - } - - if s.RemovalInProgress { - return "Removal In Progress" - } - - if s.Dead { - return "Dead" - } - - if s.StartedAt.IsZero() { - return "Created" - } - - if s.FinishedAt.IsZero() { - return "" - } - - return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) -} - -// StateString returns a single string to describe state -func (s *State) StateString() string { - if s.Running { - if s.Paused { - return "paused" - } - if s.Restarting { - return "restarting" - } - return "running" - } - - if s.Dead { - return "dead" - } - - if s.StartedAt.IsZero() { - return "created" - } - 
- return "exited" -} - -// PortBinding represents the host/container port mapping as returned in the -// `docker inspect` json -type PortBinding struct { - HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty" toml:"HostIP,omitempty"` - HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty" toml:"HostPort,omitempty"` -} - -// PortMapping represents a deprecated field in the `docker inspect` output, -// and its value as found in NetworkSettings should always be nil -type PortMapping map[string]string - -// ContainerNetwork represents the networking settings of a container per network. -type ContainerNetwork struct { - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` - GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` - IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` - IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` - IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` - Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` - EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` - NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` -} - -// NetworkSettings contains network-related information about a container -type NetworkSettings struct { - Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty" toml:"Networks,omitempty"` - IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` - IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` - Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` - Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty" toml:"Bridge,omitempty"` - PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty" toml:"PortMapping,omitempty"` - Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"` - NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` - EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` - SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty" toml:"SandboxKey,omitempty"` - GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` - IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` - LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty" toml:"LinkLocalIPv6Address,omitempty"` - LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" 
yaml:"LinkLocalIPv6PrefixLen,omitempty" toml:"LinkLocalIPv6PrefixLen,omitempty"` - SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty" toml:"SecondaryIPAddresses,omitempty"` - SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty" toml:"SecondaryIPv6Addresses,omitempty"` -} - -// PortMappingAPI translates the port mappings as contained in NetworkSettings -// into the format in which they would appear when returned by the API -func (settings *NetworkSettings) PortMappingAPI() []APIPort { - var mapping []APIPort - for port, bindings := range settings.Ports { - p, _ := parsePort(port.Port()) - if len(bindings) == 0 { - mapping = append(mapping, APIPort{ - PrivatePort: int64(p), - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - p, _ := parsePort(port.Port()) - h, _ := parsePort(binding.HostPort) - mapping = append(mapping, APIPort{ - PrivatePort: int64(p), - PublicPort: int64(h), - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - return mapping -} - -func parsePort(rawPort string) (int, error) { - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// Config is the list of configuration options used when creating a container. -// Config does not contain the options that are specific to starting a container on a -// given host. Those are contained in HostConfig -type Config struct { - Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty" toml:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty" toml:"Domainname,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"` - MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"` - KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty" toml:"PortSpecs,omitempty"` - ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty" toml:"ExposedPorts,omitempty"` - PublishService string `json:"PublishService,omitempty" yaml:"PublishService,omitempty" toml:"PublishService,omitempty"` - StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty" toml:"StopSignal,omitempty"` - Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` - Cmd []string `json:"Cmd" yaml:"Cmd" toml:"Cmd"` - Healthcheck *HealthConfig `json:"Healthcheck,omitempty" yaml:"Healthcheck,omitempty" toml:"Healthcheck,omitempty"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"` - VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" 
toml:"VolumeDriver,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` - Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint" toml:"Entrypoint"` - SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty" toml:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty" toml:"OnBuild,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"` - ArgsEscaped bool `json:"ArgsEscaped,omitempty" yaml:"ArgsEscaped,omitempty" toml:"ArgsEscaped,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty" toml:"StdinOnce,omitempty"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty" toml:"NetworkDisabled,omitempty"` - - // This is no longer used and has been kept here for backward - // compatibility, please use HostConfig.VolumesFrom. - VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"` -} - -// Mount represents a mount point in the container. -// -// It has been added in the version 1.20 of the Docker API, available since -// Docker 1.8. -type Mount struct { - Name string - Source string - Destination string - Driver string - Mode string - RW bool -} - -// LogConfig defines the log driver type and the configuration for it. -type LogConfig struct { - Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` - Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` -} - -// ULimit defines system-wide resource limitations This can help a lot in -// system administration, e.g. when a user starts too many processes and -// therefore makes the system unresponsive for other users. -type ULimit struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` - Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty" toml:"Soft,omitempty"` - Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty" toml:"Hard,omitempty"` -} - -// SwarmNode containers information about which Swarm node the container is on. 
-type SwarmNode struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"` - Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty" toml:"Addr,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` - CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty" toml:"CPUs,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` -} - -// GraphDriver contains information about the GraphDriver used by the -// container. -type GraphDriver struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` - Data map[string]string `json:"Data,omitempty" yaml:"Data,omitempty" toml:"Data,omitempty"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature -// -// It has been added in the version 1.24 of the Docker API, available since -// Docker 1.12. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:"Test,omitempty" yaml:"Test,omitempty" toml:"Test,omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:"Interval,omitempty" yaml:"Interval,omitempty" toml:"Interval,omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:"Timeout,omitempty" yaml:"Timeout,omitempty" toml:"Timeout,omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:"Retries,omitempty" yaml:"Retries,omitempty" toml:"Retries,omitempty"` -} - -// Container is the type encompasing everything about a container - its config, -// hostconfig, etc. 
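A minimal sketch of filling in the HealthConfig structure above; the command and durations are illustrative.

package main

import (
	"fmt"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Run a shell command inside the container and allow three consecutive
	// failures before the container is reported unhealthy (values are illustrative).
	hc := docker.HealthConfig{
		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval: 30 * time.Second,
		Timeout:  5 * time.Second,
		Retries:  3,
	}
	fmt.Printf("%+v\n", hc)
}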
-type Container struct { - ID string `json:"Id" yaml:"Id" toml:"Id"` - - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` - - Path string `json:"Path,omitempty" yaml:"Path,omitempty" toml:"Path,omitempty"` - Args []string `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"` - - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` - State State `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"` - - Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty" toml:"Node,omitempty"` - - NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"` - - SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty" toml:"SysInitPath,omitempty"` - ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty" toml:"ResolvConfPath,omitempty"` - HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty" toml:"HostnamePath,omitempty"` - HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty" toml:"HostsPath,omitempty"` - LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty" toml:"LogPath,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` - - Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"` - VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty" toml:"VolumesRW,omitempty"` - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"` - ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty" toml:"ExecIDs,omitempty"` - GraphDriver *GraphDriver `json:"GraphDriver,omitempty" yaml:"GraphDriver,omitempty" toml:"GraphDriver,omitempty"` - - RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" toml:"RestartCount,omitempty"` - - AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" toml:"AppArmorProfile,omitempty"` -} - -// UpdateContainerOptions specify parameters to the UpdateContainer function. -// -// See https://goo.gl/Y6fXUy for more details. -type UpdateContainerOptions struct { - BlkioWeight int `json:"BlkioWeight"` - CPUShares int `json:"CpuShares"` - CPUPeriod int `json:"CpuPeriod"` - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` - CPUQuota int `json:"CpuQuota"` - CpusetCpus string `json:"CpusetCpus"` - CpusetMems string `json:"CpusetMems"` - Memory int `json:"Memory"` - MemorySwap int `json:"MemorySwap"` - MemoryReservation int `json:"MemoryReservation"` - KernelMemory int `json:"KernelMemory"` - RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"` - Context context.Context -} - -// UpdateContainer updates the container at ID with the options -// -// See https://goo.gl/Y6fXUy for more details. 
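A minimal sketch of reading the Container structure above back from the daemon via InspectContainer, which is defined further down in this file; the container name is illustrative.

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Look the container up by name or ID (the name here is illustrative).
	container, err := client.InspectContainer("my-container")
	if err != nil {
		if _, ok := err.(*docker.NoSuchContainer); ok {
			log.Fatal("no such container")
		}
		log.Fatal(err)
	}
	fmt.Println(container.Name, container.State.StateString())
	if container.NetworkSettings != nil {
		fmt.Println("IP:", container.NetworkSettings.IPAddress)
	}
}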
-func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{ - data: opts, - forceJSON: true, - context: opts.Context, - }) - if err != nil { - return err - } - defer resp.Body.Close() - return nil -} - -// RenameContainerOptions specify parameters to the RenameContainer function. -// -// See https://goo.gl/46inai for more details. -type RenameContainerOptions struct { - // ID of container to rename - ID string `qs:"-"` - - // New name - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Context context.Context -} - -// RenameContainer updates and existing containers name -// -// See https://goo.gl/46inai for more details. -func (c *Client) RenameContainer(opts RenameContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{ - context: opts.Context, - }) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// InspectContainer returns information about a container by its ID. -// -// See https://goo.gl/FaI5JT for more details. -func (c *Client) InspectContainer(id string) (*Container, error) { - return c.inspectContainer(id, doOptions{}) -} - -// InspectContainerWithContext returns information about a container by its ID. -// The context object can be used to cancel the inspect request. -// -// See https://goo.gl/FaI5JT for more details. -func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) { - return c.inspectContainer(id, doOptions{context: ctx}) -} - -func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) { - path := "/containers/" + id + "/json" - resp, err := c.do("GET", path, opts) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var container Container - if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { - return nil, err - } - return &container, nil -} - -// ContainerChanges returns changes in the filesystem of the given container. -// -// See https://goo.gl/15KKzh for more details. -func (c *Client) ContainerChanges(id string) ([]Change, error) { - path := "/containers/" + id + "/changes" - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var changes []Change - if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil { - return nil, err - } - return changes, nil -} - -// CreateContainerOptions specify parameters to the CreateContainer function. -// -// See https://goo.gl/tyzwVM for more details. -type CreateContainerOptions struct { - Name string - Config *Config `qs:"-"` - HostConfig *HostConfig `qs:"-"` - NetworkingConfig *NetworkingConfig `qs:"-"` - Context context.Context -} - -// CreateContainer creates a new container, returning the container instance, -// or an error in case of failure. -// -// The returned container instance contains only the container ID. To get more -// details about the container after creating it, use InspectContainer. -// -// See https://goo.gl/tyzwVM for more details. -func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { - path := "/containers/create?" 
+ queryString(opts) - resp, err := c.do( - "POST", - path, - doOptions{ - data: struct { - *Config - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"` - NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty" toml:"NetworkingConfig,omitempty"` - }{ - opts.Config, - opts.HostConfig, - opts.NetworkingConfig, - }, - context: opts.Context, - }, - ) - - if e, ok := err.(*Error); ok { - if e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - if e.Status == http.StatusConflict { - return nil, ErrContainerAlreadyExists - } - } - - if err != nil { - return nil, err - } - defer resp.Body.Close() - var container Container - if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { - return nil, err - } - - container.Name = opts.Name - - return &container, nil -} - -// KeyValuePair is a type for generic key/value pairs as used in the Lxc -// configuration -type KeyValuePair struct { - Key string `json:"Key,omitempty" yaml:"Key,omitempty" toml:"Key,omitempty"` - Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` -} - -// RestartPolicy represents the policy for automatically restarting a container. -// -// Possible values are: -// -// - always: the docker daemon will always restart the container -// - on-failure: the docker daemon will restart the container on failures, at -// most MaximumRetryCount times -// - unless-stopped: the docker daemon will always restart the container except -// when user has manually stopped the container -// - no: the docker daemon will not restart the container automatically -type RestartPolicy struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` - MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty" toml:"MaximumRetryCount,omitempty"` -} - -// AlwaysRestart returns a restart policy that tells the Docker daemon to -// always restart the container. -func AlwaysRestart() RestartPolicy { - return RestartPolicy{Name: "always"} -} - -// RestartOnFailure returns a restart policy that tells the Docker daemon to -// restart the container on failures, trying at most maxRetry times. -func RestartOnFailure(maxRetry int) RestartPolicy { - return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry} -} - -// RestartUnlessStopped returns a restart policy that tells the Docker daemon to -// always restart the container except when user has manually stopped the container. -func RestartUnlessStopped() RestartPolicy { - return RestartPolicy{Name: "unless-stopped"} -} - -// NeverRestart returns a restart policy that tells the Docker daemon to never -// restart the container on failures. -func NeverRestart() RestartPolicy { - return RestartPolicy{Name: "no"} -} - -// Device represents a device mapping between the Docker host and the -// container. 
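Pulling the pieces above together, a minimal sketch of CreateContainer with a restart policy and a port binding; the image, container name, and ports are illustrative.

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name: "web", // illustrative
		Config: &docker.Config{
			Image:        "nginx:latest", // illustrative
			ExposedPorts: map[docker.Port]struct{}{"80/tcp": {}},
		},
		HostConfig: &docker.HostConfig{
			RestartPolicy: docker.RestartOnFailure(3),
			PortBindings: map[docker.Port][]docker.PortBinding{
				"80/tcp": {{HostIP: "0.0.0.0", HostPort: "8080"}},
			},
		},
	})
	if err != nil {
		// ErrNoSuchImage and ErrContainerAlreadyExists are returned for the
		// corresponding HTTP statuses, as handled above.
		log.Fatal(err)
	}
	log.Println("created container", container.ID)
}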
-type Device struct { - PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty" toml:"PathOnHost,omitempty"` - PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty" toml:"PathInContainer,omitempty"` - CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty" toml:"CgroupPermissions,omitempty"` -} - -// BlockWeight represents a relative device weight for an individual device inside -// of a container -type BlockWeight struct { - Path string `json:"Path,omitempty"` - Weight string `json:"Weight,omitempty"` -} - -// BlockLimit represents a read/write limit in IOPS or Bandwidth for a device -// inside of a container -type BlockLimit struct { - Path string `json:"Path,omitempty"` - Rate int64 `json:"Rate,omitempty"` -} - -// HostConfig contains the container options related to starting a container on -// a given host -type HostConfig struct { - Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty" toml:"Binds,omitempty"` - CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty" toml:"CapAdd,omitempty"` - CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty" toml:"CapDrop,omitempty"` - GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty" toml:"GroupAdd,omitempty"` - ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty" toml:"ContainerIDFile,omitempty"` - LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty" toml:"LxcConf,omitempty"` - PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty" toml:"PortBindings,omitempty"` - Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.10 and above only - DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty" toml:"DnsOptions,omitempty"` - DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty" toml:"DnsSearch,omitempty"` - ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty" toml:"ExtraHosts,omitempty"` - VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"` - UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty" toml:"UsernsMode,omitempty"` - NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty" toml:"NetworkMode,omitempty"` - IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty" toml:"IpcMode,omitempty"` - PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty" toml:"PidMode,omitempty"` - UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty" toml:"UTSMode,omitempty"` - RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty" toml:"RestartPolicy,omitempty"` - Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"` - LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty" toml:"LogConfig,omitempty"` - SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" toml:"SecurityOpt,omitempty"` - Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty" toml:"Cgroup,omitempty"` - CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` - 
MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"` - KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"` - MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty" toml:"MemorySwappiness,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"` - CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty" toml:"CpusetCpus,omitempty"` - CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty" toml:"CpusetMems,omitempty"` - CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty" toml:"CpuQuota,omitempty"` - CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty" toml:"CpuPeriod,omitempty"` - CPURealtimePeriod int64 `json:"CpuRealtimePeriod,omitempty" yaml:"CpuRealtimePeriod,omitempty" toml:"CpuRealtimePeriod,omitempty"` - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime,omitempty" yaml:"CpuRealtimeRuntime,omitempty" toml:"CpuRealtimeRuntime,omitempty"` - BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight,omitempty" toml:"BlkioWeight,omitempty"` - BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice,omitempty" toml:"BlkioWeightDevice,omitempty"` - BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps,omitempty" toml:"BlkioDeviceReadBps,omitempty"` - BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps,omitempty" toml:"BlkioDeviceReadIOps,omitempty"` - BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps,omitempty" toml:"BlkioDeviceWriteBps,omitempty"` - BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps,omitempty" toml:"BlkioDeviceWriteIOps,omitempty"` - Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty" toml:"Ulimits,omitempty"` - VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"` - OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty" toml:"OomScoreAdj,omitempty"` - PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty" toml:"PidsLimit,omitempty"` - ShmSize int64 `json:"ShmSize,omitempty" yaml:"ShmSize,omitempty" toml:"ShmSize,omitempty"` - Tmpfs map[string]string `json:"Tmpfs,omitempty" yaml:"Tmpfs,omitempty" toml:"Tmpfs,omitempty"` - Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"` - PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty" toml:"PublishAllPorts,omitempty"` - ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty" toml:"ReadonlyRootfs,omitempty"` - OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty" toml:"OomKillDisable,omitempty"` - AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty" toml:"AutoRemove,omitempty"` - StorageOpt map[string]string `json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty" toml:"StorageOpt,omitempty"` - Sysctls map[string]string `json:"Sysctls,omitempty" 
yaml:"Sysctls,omitempty" toml:"Sysctls,omitempty"` -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointConfig `json:"EndpointsConfig" yaml:"EndpointsConfig" toml:"EndpointsConfig"` // Endpoint configs for each connecting network -} - -// StartContainer starts a container, returning an error in case of failure. -// -// Passing the HostConfig to this method has been deprecated in Docker API 1.22 -// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine -// 1.12.x). The client will ignore the parameter when communicating with Docker -// API 1.24 or greater. -// -// See https://goo.gl/fbOSZy for more details. -func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { - return c.startContainer(id, hostConfig, doOptions{}) -} - -// StartContainerWithContext starts a container, returning an error in case of -// failure. The context can be used to cancel the outstanding start container -// request. -// -// Passing the HostConfig to this method has been deprecated in Docker API 1.22 -// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine -// 1.12.x). The client will ignore the parameter when communicating with Docker -// API 1.24 or greater. -// -// See https://goo.gl/fbOSZy for more details. -func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error { - return c.startContainer(id, hostConfig, doOptions{context: ctx}) -} - -func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOptions) error { - path := "/containers/" + id + "/start" - if c.serverAPIVersion == nil { - c.checkAPIVersion() - } - if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) { - opts.data = hostConfig - opts.forceJSON = true - } - resp, err := c.do("POST", path, opts) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id, Err: err} - } - return err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return &ContainerAlreadyRunning{ID: id} - } - return nil -} - -// StopContainer stops a container, killing it after the given timeout (in -// seconds). -// -// See https://goo.gl/R9dZcV for more details. -func (c *Client) StopContainer(id string, timeout uint) error { - return c.stopContainer(id, timeout, doOptions{}) -} - -// StopContainerWithContext stops a container, killing it after the given -// timeout (in seconds). The context can be used to cancel the stop -// container request. -// -// See https://goo.gl/R9dZcV for more details. -func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error { - return c.stopContainer(id, timeout, doOptions{context: ctx}) -} - -func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error { - path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) - resp, err := c.do("POST", path, opts) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return &ContainerNotRunning{ID: id} - } - return nil -} - -// RestartContainer stops a container, killing it after the given timeout (in -// seconds), during the stop process. 
-// -// See https://goo.gl/MrAKQ5 for more details. -func (c *Client) RestartContainer(id string, timeout uint) error { - path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// PauseContainer pauses the given container. -// -// See https://goo.gl/D1Yaii for more details. -func (c *Client) PauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/pause", id) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// UnpauseContainer unpauses the given container. -// -// See https://goo.gl/sZ2faO for more details. -func (c *Client) UnpauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/unpause", id) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// TopResult represents the list of processes running in a container, as -// returned by /containers//top. -// -// See https://goo.gl/FLwpPl for more details. -type TopResult struct { - Titles []string - Processes [][]string -} - -// TopContainer returns processes running inside a container -// -// See https://goo.gl/FLwpPl for more details. -func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { - var args string - var result TopResult - if psArgs != "" { - args = fmt.Sprintf("?ps_args=%s", psArgs) - } - path := fmt.Sprintf("/containers/%s/top%s", id, args) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return result, &NoSuchContainer{ID: id} - } - return result, err - } - defer resp.Body.Close() - err = json.NewDecoder(resp.Body).Decode(&result) - return result, err -} - -// Stats represents container statistics, returned by /containers//stats. -// -// See https://goo.gl/Dk3Xio for more details. 
-type Stats struct { - Read time.Time `json:"read,omitempty" yaml:"read,omitempty" toml:"read,omitempty"` - PidsStats struct { - Current uint64 `json:"current,omitempty" yaml:"current,omitempty"` - } `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty" toml:"pids_stats,omitempty"` - Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty" toml:"network,omitempty"` - Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty" toml:"networks,omitempty"` - MemoryStats struct { - Stats struct { - TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" toml:"total_pgmafault,omitempty"` - Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty" toml:"cache,omitempty"` - MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" toml:"mapped_file,omitempty"` - TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" toml:"total_inactive_file,omitempty"` - Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" toml:"pgpgout,omitempty"` - Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty" toml:"rss,omitempty"` - TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" toml:"total_mapped_file,omitempty"` - Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty" toml:"writeback,omitempty"` - Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty" toml:"unevictable,omitempty"` - Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" toml:"pgpgin,omitempty"` - TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" toml:"total_unevictable,omitempty"` - Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" toml:"pgmajfault,omitempty"` - TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty" toml:"total_rss,omitempty"` - TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" toml:"total_rss_huge,omitempty"` - TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty" toml:"total_writeback,omitempty"` - TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" toml:"total_inactive_anon,omitempty"` - RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" toml:"rss_huge,omitempty"` - HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" toml:"hierarchical_memory_limit,omitempty"` - TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" toml:"total_pgfault,omitempty"` - TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty" toml:"total_active_file,omitempty"` - ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty" toml:"active_anon,omitempty"` - TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" toml:"total_active_anon,omitempty"` - TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" toml:"total_pgpgout,omitempty"` - TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty" toml:"total_cache,omitempty"` - InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"` - ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty" toml:"active_file,omitempty"` - Pgfault uint64 
`json:"pgfault,omitempty" yaml:"pgfault,omitempty" toml:"pgfault,omitempty"` - InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" toml:"inactive_file,omitempty"` - TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"` - HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"` - Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"` - } `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"` - MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"` - Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"` - Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty" toml:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty" toml:"limit,omitempty"` - } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty" toml:"memory_stats,omitempty"` - BlkioStats struct { - IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty" toml:"io_service_bytes_recursive,omitempty"` - IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty" toml:"io_serviced_recursive,omitempty"` - IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty" toml:"io_queue_recursive,omitempty"` - IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty" toml:"io_service_time_recursive,omitempty"` - IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty" toml:"io_wait_time_recursive,omitempty"` - IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty" toml:"io_merged_recursive,omitempty"` - IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty" toml:"io_time_recursive,omitempty"` - SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty" toml:"sectors_recursive,omitempty"` - } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty" toml:"blkio_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty" toml:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` -} - -// NetworkStats is a stats entry for network stats -type NetworkStats struct { - RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty" toml:"rx_dropped,omitempty"` - RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty" toml:"rx_bytes,omitempty"` - RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty" toml:"rx_errors,omitempty"` - TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty" toml:"tx_packets,omitempty"` - TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty" toml:"tx_dropped,omitempty"` - RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty" toml:"rx_packets,omitempty"` - TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty" toml:"tx_errors,omitempty"` - TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty" toml:"tx_bytes,omitempty"` -} - -// CPUStats is a stats entry 
for cpu stats -type CPUStats struct { - CPUUsage struct { - PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty" toml:"percpu_usage,omitempty"` - UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty" toml:"usage_in_usermode,omitempty"` - TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty" toml:"total_usage,omitempty"` - UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty" toml:"usage_in_kernelmode,omitempty"` - } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty" toml:"cpu_usage,omitempty"` - SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty" toml:"system_cpu_usage,omitempty"` - ThrottlingData struct { - Periods uint64 `json:"periods,omitempty"` - ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` - ThrottledTime uint64 `json:"throttled_time,omitempty"` - } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty" toml:"throttling_data,omitempty"` -} - -// BlkioStatsEntry is a stats entry for blkio_stats -type BlkioStatsEntry struct { - Major uint64 `json:"major,omitempty" yaml:"major,omitempty" toml:"major,omitempty"` - Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty" toml:"minor,omitempty"` - Op string `json:"op,omitempty" yaml:"op,omitempty" toml:"op,omitempty"` - Value uint64 `json:"value,omitempty" yaml:"value,omitempty" toml:"value,omitempty"` -} - -// StatsOptions specify parameters to the Stats function. -// -// See https://goo.gl/Dk3Xio for more details. -type StatsOptions struct { - ID string - Stats chan<- *Stats - Stream bool - // A flag that enables stopping the stats operation - Done <-chan bool - // Initial connection timeout - Timeout time.Duration - // Timeout with no data is received, it's reset every time new data - // arrives - InactivityTimeout time.Duration `qs:"-"` - Context context.Context -} - -// Stats sends container statistics for the given container to the given channel. -// -// This function is blocking, similar to a streaming call for logs, and should be run -// on a separate goroutine from the caller. Note that this function will block until -// the given container is removed, not just exited. When finished, this function -// will close the given channel. Alternatively, function can be stopped by -// signaling on the Done channel. -// -// See https://goo.gl/Dk3Xio for more details. 
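A minimal sketch of how the streaming Stats call above is typically consumed: the blocking Stats call runs on its own goroutine and the caller drains the channel until it is closed. The NewClientFromEnv constructor, the upstream import path, and the container ID are assumptions, not part of this diff.

    package main

    import (
        "log"

        docker "github.com/fsouza/go-dockerclient" // upstream path of the vendored package
    )

    func main() {
        client, err := docker.NewClientFromEnv() // assumed constructor; reads DOCKER_HOST et al.
        if err != nil {
            log.Fatal(err)
        }
        statsC := make(chan *docker.Stats)
        done := make(chan bool)
        go func() {
            // Stats blocks until the container is removed or Done is signalled,
            // and closes statsC when it returns.
            if err := client.Stats(docker.StatsOptions{
                ID:     "my-container", // placeholder ID or name
                Stats:  statsC,
                Stream: true,
                Done:   done,
            }); err != nil {
                log.Println("stats stream ended:", err)
            }
        }()
        for s := range statsC {
            log.Printf("mem=%dB cpu=%d", s.MemoryStats.Usage, s.CPUStats.CPUUsage.TotalUsage)
        }
    }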
-func (c *Client) Stats(opts StatsOptions) (retErr error) { - errC := make(chan error, 1) - readCloser, writeCloser := io.Pipe() - - defer func() { - close(opts.Stats) - - select { - case err := <-errC: - if err != nil && retErr == nil { - retErr = err - } - default: - // No errors - } - - if err := readCloser.Close(); err != nil && retErr == nil { - retErr = err - } - }() - - go func() { - err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ - rawJSONStream: true, - useJSONDecoder: true, - stdout: writeCloser, - timeout: opts.Timeout, - inactivityTimeout: opts.InactivityTimeout, - context: opts.Context, - }) - if err != nil { - dockerError, ok := err.(*Error) - if ok { - if dockerError.Status == http.StatusNotFound { - err = &NoSuchContainer{ID: opts.ID} - } - } - } - if closeErr := writeCloser.Close(); closeErr != nil && err == nil { - err = closeErr - } - errC <- err - close(errC) - }() - - quit := make(chan struct{}) - defer close(quit) - go func() { - // block here waiting for the signal to stop function - select { - case <-opts.Done: - readCloser.Close() - case <-quit: - return - } - }() - - decoder := json.NewDecoder(readCloser) - stats := new(Stats) - for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) { - if err != nil { - return err - } - opts.Stats <- stats - stats = new(Stats) - } - return nil -} - -// KillContainerOptions represents the set of options that can be used in a -// call to KillContainer. -// -// See https://goo.gl/JnTxXZ for more details. -type KillContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // The signal to send to the container. When omitted, Docker server - // will assume SIGKILL. - Signal Signal - Context context.Context -} - -// KillContainer sends a signal to a container, returning an error in case of -// failure. -// -// See https://goo.gl/JnTxXZ for more details. -func (c *Client) KillContainer(opts KillContainerOptions) error { - path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} - -// RemoveContainerOptions encapsulates options to remove a container. -// -// See https://goo.gl/hL5IPC for more details. -type RemoveContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // A flag that indicates whether Docker should remove the volumes - // associated to the container. - RemoveVolumes bool `qs:"v"` - - // A flag that indicates whether Docker should remove the container - // even if it is currently running. - Force bool - Context context.Context -} - -// RemoveContainer removes a container, returning an error in case of failure. -// -// See https://goo.gl/hL5IPC for more details. -func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { - path := "/containers/" + opts.ID + "?" + queryString(opts) - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} - -// UploadToContainerOptions is the set of options that can be used when -// uploading an archive into a container. -// -// See https://goo.gl/g25o7u for more details. 
-type UploadToContainerOptions struct { - InputStream io.Reader `json:"-" qs:"-"` - Path string `qs:"path"` - NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"` - Context context.Context -} - -// UploadToContainer uploads a tar archive to be extracted to a path in the -// filesystem of the container. -// -// See https://goo.gl/g25o7u for more details. -func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("PUT", url, streamOptions{ - in: opts.InputStream, - context: opts.Context, - }) -} - -// DownloadFromContainerOptions is the set of options that can be used when -// downloading resources from a container. -// -// See https://goo.gl/W49jxK for more details. -type DownloadFromContainerOptions struct { - OutputStream io.Writer `json:"-" qs:"-"` - Path string `qs:"path"` - InactivityTimeout time.Duration `qs:"-"` - Context context.Context -} - -// DownloadFromContainer downloads a tar archive of files or folders in a container. -// -// See https://goo.gl/W49jxK for more details. -func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("GET", url, streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - inactivityTimeout: opts.InactivityTimeout, - context: opts.Context, - }) -} - -// CopyFromContainerOptions has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/nWk2YQ for more details. -type CopyFromContainerOptions struct { - OutputStream io.Writer `json:"-"` - Container string `json:"-"` - Resource string - Context context.Context `json:"-"` -} - -// CopyFromContainer has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/nWk2YQ for more details. -func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - if c.serverAPIVersion == nil { - c.checkAPIVersion() - } - if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion124) { - return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead") - } - url := fmt.Sprintf("/containers/%s/copy", opts.Container) - resp, err := c.do("POST", url, doOptions{ - data: opts, - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.Container} - } - return err - } - defer resp.Body.Close() - _, err = io.Copy(opts.OutputStream, resp.Body) - return err -} - -// WaitContainer blocks until the given container stops, return the exit code -// of the container status. -// -// See https://goo.gl/4AGweZ for more details. -func (c *Client) WaitContainer(id string) (int, error) { - return c.waitContainer(id, doOptions{}) -} - -// WaitContainerWithContext blocks until the given container stops, return the exit code -// of the container status. The context object can be used to cancel the -// inspect request. -// -// See https://goo.gl/4AGweZ for more details. 
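A hedged sketch of the usual start/wait/remove lifecycle built from the calls above (StartContainer, WaitContainer, RemoveContainer). The NewClientFromEnv constructor, the import path, and the container ID are assumptions; the nil HostConfig reflects the deprecation note above.

    package main

    import (
        "log"

        docker "github.com/fsouza/go-dockerclient" // upstream path of the vendored package
    )

    func main() {
        client, err := docker.NewClientFromEnv() // assumed constructor
        if err != nil {
            log.Fatal(err)
        }
        id := "my-container" // placeholder: an already-created container ID or name
        // The HostConfig parameter is ignored for Docker API 1.24+, so nil is typical.
        if err := client.StartContainer(id, nil); err != nil {
            log.Fatal(err)
        }
        code, err := client.WaitContainer(id) // blocks until the container stops
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("container exited with status %d", code)
        err = client.RemoveContainer(docker.RemoveContainerOptions{ID: id, RemoveVolumes: true})
        if err != nil {
            log.Fatal(err)
        }
    }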
-func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) { - return c.waitContainer(id, doOptions{context: ctx}) -} - -func (c *Client) waitContainer(id string, opts doOptions) (int, error) { - resp, err := c.do("POST", "/containers/"+id+"/wait", opts) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return 0, &NoSuchContainer{ID: id} - } - return 0, err - } - defer resp.Body.Close() - var r struct{ StatusCode int } - if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { - return 0, err - } - return r.StatusCode, nil -} - -// CommitContainerOptions aggregates parameters to the CommitContainer method. -// -// See https://goo.gl/CzIguf for more details. -type CommitContainerOptions struct { - Container string - Repository string `qs:"repo"` - Tag string - Message string `qs:"comment"` - Author string - Run *Config `qs:"-"` - Context context.Context -} - -// CommitContainer creates a new image from a container's changes. -// -// See https://goo.gl/CzIguf for more details. -func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { - path := "/commit?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ - data: opts.Run, - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - return nil, err - } - defer resp.Body.Close() - var image Image - if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { - return nil, err - } - return &image, nil -} - -// AttachToContainerOptions is the set of options that can be used when -// attaching to a container. -// -// See https://goo.gl/JF10Zk for more details. -type AttachToContainerOptions struct { - Container string `qs:"-"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` - - // Get container logs, sending it to OutputStream. - Logs bool - - // Stream the response? - Stream bool - - // Attach to stdin, and use InputStream. - Stdin bool - - // Attach to stdout, and use OutputStream. - Stdout bool - - // Attach to stderr, and use ErrorStream. - Stderr bool -} - -// AttachToContainer attaches to a container, using the given options. -// -// See https://goo.gl/JF10Zk for more details. -func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { - cw, err := c.AttachToContainerNonBlocking(opts) - if err != nil { - return err - } - return cw.Wait() -} - -// AttachToContainerNonBlocking attaches to a container, using the given options. -// This function does not block. -// -// See https://goo.gl/NKpkFk for more details. -func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) { - if opts.Container == "" { - return nil, &NoSuchContainer{ID: opts.Container} - } - path := "/containers/" + opts.Container + "/attach?" 
+ queryString(opts) - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - }) -} - -// LogsOptions represents the set of options used when getting logs from a -// container. -// -// See https://goo.gl/krK0ZH for more details. -type LogsOptions struct { - Context context.Context - Container string `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - InactivityTimeout time.Duration `qs:"-"` - Tail string - - Since int64 - Follow bool - Stdout bool - Stderr bool - Timestamps bool - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` -} - -// Logs gets stdout and stderr logs from the specified container. -// -// When LogsOptions.RawTerminal is set to false, go-dockerclient will multiplex -// the streams and send the containers stdout to LogsOptions.OutputStream, and -// stderr to LogsOptions.ErrorStream. -// -// When LogsOptions.RawTerminal is true, callers will get the raw stream on -// LogOptions.OutputStream. The caller can use libraries such as dlog -// (github.com/ahmetalpbalkan/dlog). -// -// See https://goo.gl/krK0ZH for more details. -func (c *Client) Logs(opts LogsOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - if opts.Tail == "" { - opts.Tail = "all" - } - path := "/containers/" + opts.Container + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ - setRawTerminal: opts.RawTerminal, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - inactivityTimeout: opts.InactivityTimeout, - context: opts.Context, - }) -} - -// ResizeContainerTTY resizes the terminal to the given height and width. -// -// See https://goo.gl/FImjeq for more details. -func (c *Client) ResizeContainerTTY(id string, height, width int) error { - params := make(url.Values) - params.Set("h", strconv.Itoa(height)) - params.Set("w", strconv.Itoa(width)) - resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ExportContainerOptions is the set of parameters to the ExportContainer -// method. -// -// See https://goo.gl/yGJCIh for more details. -type ExportContainerOptions struct { - ID string - OutputStream io.Writer - InactivityTimeout time.Duration `qs:"-"` - Context context.Context -} - -// ExportContainer export the contents of container id as tar archive -// and prints the exported contents to stdout. -// -// See https://goo.gl/yGJCIh for more details. -func (c *Client) ExportContainer(opts ExportContainerOptions) error { - if opts.ID == "" { - return &NoSuchContainer{ID: opts.ID} - } - url := fmt.Sprintf("/containers/%s/export", opts.ID) - return c.stream("GET", url, streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - inactivityTimeout: opts.InactivityTimeout, - context: opts.Context, - }) -} - -// PruneContainersOptions specify parameters to the PruneContainers function. -// -// See https://goo.gl/wnkgDT for more details. -type PruneContainersOptions struct { - Filters map[string][]string - Context context.Context -} - -// PruneContainersResults specify results from the PruneContainers function. -// -// See https://goo.gl/wnkgDT for more details. -type PruneContainersResults struct { - ContainersDeleted []string - SpaceReclaimed int64 -} - -// PruneContainers deletes containers which are stopped. 
-// -// See https://goo.gl/wnkgDT for more details. -func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) { - path := "/containers/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var results PruneContainersResults - if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { - return nil, err - } - return &results, nil -} - -// NoSuchContainer is the error returned when a given container does not exist. -type NoSuchContainer struct { - ID string - Err error -} - -func (err *NoSuchContainer) Error() string { - if err.Err != nil { - return err.Err.Error() - } - return "No such container: " + err.ID -} - -// ContainerAlreadyRunning is the error returned when a given container is -// already running. -type ContainerAlreadyRunning struct { - ID string -} - -func (err *ContainerAlreadyRunning) Error() string { - return "Container already running: " + err.ID -} - -// ContainerNotRunning is the error returned when a given container is not -// running. -type ContainerNotRunning struct { - ID string -} - -func (err *ContainerNotRunning) Error() string { - return "Container not running: " + err.ID -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/env.go deleted file mode 100644 index c54b0b0e..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/env.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package docker - -import ( - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -// Env represents a list of key-pair represented in the form KEY=VALUE. -type Env []string - -// Get returns the string value of the given key. -func (env *Env) Get(key string) (value string) { - return env.Map()[key] -} - -// Exists checks whether the given key is defined in the internal Env -// representation. -func (env *Env) Exists(key string) bool { - _, exists := env.Map()[key] - return exists -} - -// GetBool returns a boolean representation of the given key. The key is false -// whenever its value if 0, no, false, none or an empty string. Any other value -// will be interpreted as true. -func (env *Env) GetBool(key string) (value bool) { - s := strings.ToLower(strings.Trim(env.Get(key), " \t")) - if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { - return false - } - return true -} - -// SetBool defines a boolean value to the given key. -func (env *Env) SetBool(key string, value bool) { - if value { - env.Set(key, "1") - } else { - env.Set(key, "0") - } -} - -// GetInt returns the value of the provided key, converted to int. -// -// It the value cannot be represented as an integer, it returns -1. -func (env *Env) GetInt(key string) int { - return int(env.GetInt64(key)) -} - -// SetInt defines an integer value to the given key. -func (env *Env) SetInt(key string, value int) { - env.Set(key, strconv.Itoa(value)) -} - -// GetInt64 returns the value of the provided key, converted to int64. -// -// It the value cannot be represented as an integer, it returns -1. 
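A small illustrative sketch of the Env helpers above; Env is just a []string of KEY=VALUE pairs, so no daemon connection is needed. The upstream import path is an assumption here.

    package main

    import (
        "fmt"

        docker "github.com/fsouza/go-dockerclient"
    )

    func main() {
        var env docker.Env
        env.Set("HOME", "/root")
        env.SetInt("RETRIES", 3)
        env.SetBool("DEBUG", true)
        // Get returns "" for missing keys; GetInt returns -1 for non-integer values.
        fmt.Println(env.Get("HOME"), env.GetInt("RETRIES"), env.GetBool("DEBUG"), env.Exists("PATH"))
    }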
-func (env *Env) GetInt64(key string) int64 { - s := strings.Trim(env.Get(key), " \t") - val, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return -1 - } - return val -} - -// SetInt64 defines an integer (64-bit wide) value to the given key. -func (env *Env) SetInt64(key string, value int64) { - env.Set(key, strconv.FormatInt(value, 10)) -} - -// GetJSON unmarshals the value of the provided key in the provided iface. -// -// iface is a value that can be provided to the json.Unmarshal function. -func (env *Env) GetJSON(key string, iface interface{}) error { - sval := env.Get(key) - if sval == "" { - return nil - } - return json.Unmarshal([]byte(sval), iface) -} - -// SetJSON marshals the given value to JSON format and stores it using the -// provided key. -func (env *Env) SetJSON(key string, value interface{}) error { - sval, err := json.Marshal(value) - if err != nil { - return err - } - env.Set(key, string(sval)) - return nil -} - -// GetList returns a list of strings matching the provided key. It handles the -// list as a JSON representation of a list of strings. -// -// If the given key matches to a single string, it will return a list -// containing only the value that matches the key. -func (env *Env) GetList(key string) []string { - sval := env.Get(key) - if sval == "" { - return nil - } - var l []string - if err := json.Unmarshal([]byte(sval), &l); err != nil { - l = append(l, sval) - } - return l -} - -// SetList stores the given list in the provided key, after serializing it to -// JSON format. -func (env *Env) SetList(key string, value []string) error { - return env.SetJSON(key, value) -} - -// Set defines the value of a key to the given string. -func (env *Env) Set(key, value string) { - *env = append(*env, key+"="+value) -} - -// Decode decodes `src` as a json dictionary, and adds each decoded key-value -// pair to the environment. -// -// If `src` cannot be decoded as a json dictionary, an error is returned. -func (env *Env) Decode(src io.Reader) error { - m := make(map[string]interface{}) - if err := json.NewDecoder(src).Decode(&m); err != nil { - return err - } - for k, v := range m { - env.SetAuto(k, v) - } - return nil -} - -// SetAuto will try to define the Set* method to call based on the given value. -func (env *Env) SetAuto(key string, value interface{}) { - if fval, ok := value.(float64); ok { - env.SetInt64(key, int64(fval)) - } else if sval, ok := value.(string); ok { - env.Set(key, sval) - } else if val, err := json.Marshal(value); err == nil { - env.Set(key, string(val)) - } else { - env.Set(key, fmt.Sprintf("%v", value)) - } -} - -// Map returns the map representation of the env. -func (env *Env) Map() map[string]string { - if len(*env) == 0 { - return nil - } - m := make(map[string]string) - for _, kv := range *env { - parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = parts[1] - } - return m -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/event.go deleted file mode 100644 index 007d2b22..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/event.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "net" - "net/http" - "net/http/httputil" - "sync" - "sync/atomic" - "time" -) - -// APIEvents represents events coming from the Docker API -// The fields in the Docker API changed in API version 1.22, and -// events for more than images and containers are now fired off. -// To maintain forward and backward compatibility, go-dockerclient -// replicates the event in both the new and old format as faithfully as possible. -// -// For events that only exist in 1.22 in later, `Status` is filled in as -// `"Type:Action"` instead of just `Action` to allow for older clients to -// differentiate and not break if they rely on the pre-1.22 Status types. -// -// The transformEvent method can be consulted for more information about how -// events are translated from new/old API formats -type APIEvents struct { - // New API Fields in 1.22 - Action string `json:"action,omitempty"` - Type string `json:"type,omitempty"` - Actor APIActor `json:"actor,omitempty"` - - // Old API fields for < 1.22 - Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - - // Fields in both - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} - -// APIActor represents an actor that accomplishes something for an event -type APIActor struct { - ID string `json:"id,omitempty"` - Attributes map[string]string `json:"attributes,omitempty"` -} - -type eventMonitoringState struct { - // `sync/atomic` expects the first word in an allocated struct to be 64-bit - // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details. - lastSeen int64 - sync.RWMutex - sync.WaitGroup - enabled bool - C chan *APIEvents - errC chan error - listeners []chan<- *APIEvents -} - -const ( - maxMonitorConnRetries = 5 - retryInitialWaitTime = 10. -) - -var ( - // ErrNoListeners is the error returned when no listeners are available - // to receive an event. - ErrNoListeners = errors.New("no listeners present to receive event") - - // ErrListenerAlreadyExists is the error returned when the listerner already - // exists. - ErrListenerAlreadyExists = errors.New("listener already exists for docker events") - - // ErrTLSNotSupported is the error returned when the client does not support - // TLS (this applies to the Windows named pipe client). - ErrTLSNotSupported = errors.New("tls not supported by this client") - - // EOFEvent is sent when the event listener receives an EOF error. - EOFEvent = &APIEvents{ - Type: "EOF", - Status: "EOF", - } -) - -// AddEventListener adds a new listener to container events in the Docker API. -// -// The parameter is a channel through which events will be sent. -func (c *Client) AddEventListener(listener chan<- *APIEvents) error { - var err error - if !c.eventMonitor.isEnabled() { - err = c.eventMonitor.enableEventMonitoring(c) - if err != nil { - return err - } - } - return c.eventMonitor.addListener(listener) -} - -// RemoveEventListener removes a listener from the monitor. 
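A minimal sketch of the listener API described here: register a channel, range over incoming events, and deregister on exit. NewClientFromEnv, the import path, and the channel buffer size are assumptions; the field names come from the APIEvents type above.

    package main

    import (
        "log"

        docker "github.com/fsouza/go-dockerclient"
    )

    func main() {
        client, err := docker.NewClientFromEnv() // assumed constructor
        if err != nil {
            log.Fatal(err)
        }
        events := make(chan *docker.APIEvents, 16)
        if err := client.AddEventListener(events); err != nil {
            log.Fatal(err)
        }
        defer client.RemoveEventListener(events)
        for ev := range events {
            // Both the post-1.22 fields (Type/Action/Actor) and the legacy
            // Status/ID/From fields are populated, as described above.
            log.Printf("%s %s id=%s", ev.Type, ev.Action, ev.Actor.ID)
        }
    }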
-func (c *Client) RemoveEventListener(listener chan *APIEvents) error { - err := c.eventMonitor.removeListener(listener) - if err != nil { - return err - } - if c.eventMonitor.listernersCount() == 0 { - c.eventMonitor.disableEventMonitoring() - } - return nil -} - -func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - return ErrListenerAlreadyExists - } - eventState.Add(1) - eventState.listeners = append(eventState.listeners, listener) - return nil -} - -func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - var newListeners []chan<- *APIEvents - for _, l := range eventState.listeners { - if l != listener { - newListeners = append(newListeners, l) - } - } - eventState.listeners = newListeners - eventState.Add(-1) - } - return nil -} - -func (eventState *eventMonitoringState) closeListeners() { - for _, l := range eventState.listeners { - close(l) - eventState.Add(-1) - } - eventState.listeners = nil -} - -func (eventState *eventMonitoringState) listernersCount() int { - eventState.RLock() - defer eventState.RUnlock() - return len(eventState.listeners) -} - -func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { - for _, b := range *list { - if b == a { - return true - } - } - return false -} - -func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { - eventState.Lock() - defer eventState.Unlock() - if !eventState.enabled { - eventState.enabled = true - atomic.StoreInt64(&eventState.lastSeen, 0) - eventState.C = make(chan *APIEvents, 100) - eventState.errC = make(chan error, 1) - go eventState.monitorEvents(c) - } - return nil -} - -func (eventState *eventMonitoringState) disableEventMonitoring() error { - eventState.Lock() - defer eventState.Unlock() - - eventState.closeListeners() - - eventState.Wait() - - if eventState.enabled { - eventState.enabled = false - close(eventState.C) - close(eventState.errC) - } - return nil -} - -func (eventState *eventMonitoringState) monitorEvents(c *Client) { - var err error - for eventState.noListeners() { - time.Sleep(10 * time.Millisecond) - } - if err = eventState.connectWithRetry(c); err != nil { - // terminate if connect failed - eventState.disableEventMonitoring() - return - } - for eventState.isEnabled() { - timeout := time.After(100 * time.Millisecond) - select { - case ev, ok := <-eventState.C: - if !ok { - return - } - if ev == EOFEvent { - eventState.disableEventMonitoring() - return - } - eventState.updateLastSeen(ev) - go eventState.sendEvent(ev) - case err = <-eventState.errC: - if err == ErrNoListeners { - eventState.disableEventMonitoring() - return - } else if err != nil { - defer func() { go eventState.monitorEvents(c) }() - return - } - case <-timeout: - continue - } - } -} - -func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { - var retries int - eventState.RLock() - eventChan := eventState.C - errChan := eventState.errC - eventState.RUnlock() - err := c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) - for ; err != nil && retries < maxMonitorConnRetries; retries++ { - waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) - time.Sleep(time.Duration(waitTime) * time.Millisecond) - eventState.RLock() - eventChan = eventState.C - errChan = 
eventState.errC - eventState.RUnlock() - err = c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) - } - return err -} - -func (eventState *eventMonitoringState) noListeners() bool { - eventState.RLock() - defer eventState.RUnlock() - return len(eventState.listeners) == 0 -} - -func (eventState *eventMonitoringState) isEnabled() bool { - eventState.RLock() - defer eventState.RUnlock() - return eventState.enabled -} - -func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { - eventState.RLock() - defer eventState.RUnlock() - eventState.Add(1) - defer eventState.Done() - if eventState.enabled { - if len(eventState.listeners) == 0 { - eventState.errC <- ErrNoListeners - return - } - - for _, listener := range eventState.listeners { - listener <- event - } - } -} - -func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { - eventState.Lock() - defer eventState.Unlock() - if atomic.LoadInt64(&eventState.lastSeen) < e.Time { - atomic.StoreInt64(&eventState.lastSeen, e.Time) - } -} - -func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { - uri := "/events" - if startTime != 0 { - uri += fmt.Sprintf("?since=%d", startTime) - } - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != "unix" && protocol != "npipe" { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - var err error - if c.TLSConfig == nil { - dial, err = c.Dialer.Dial(protocol, address) - } else { - netDialer, ok := c.Dialer.(*net.Dialer) - if !ok { - return ErrTLSNotSupported - } - dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig) - } - if err != nil { - return err - } - conn := httputil.NewClientConn(dial, nil) - req, err := http.NewRequest("GET", uri, nil) - if err != nil { - return err - } - res, err := conn.Do(req) - if err != nil { - return err - } - go func(res *http.Response, conn *httputil.ClientConn) { - defer conn.Close() - defer res.Body.Close() - decoder := json.NewDecoder(res.Body) - for { - var event APIEvents - if err = decoder.Decode(&event); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - c.eventMonitor.RLock() - if c.eventMonitor.enabled && c.eventMonitor.C == eventChan { - // Signal that we're exiting. 
- eventChan <- EOFEvent - } - c.eventMonitor.RUnlock() - break - } - errChan <- err - } - if event.Time == 0 { - continue - } - if !c.eventMonitor.isEnabled() || c.eventMonitor.C != eventChan { - return - } - transformEvent(&event) - eventChan <- &event - } - }(res, conn) - return nil -} - -// transformEvent takes an event and determines what version it is from -// then populates both versions of the event -func transformEvent(event *APIEvents) { - // if event version is <= 1.21 there will be no Action and no Type - if event.Action == "" && event.Type == "" { - event.Action = event.Status - event.Actor.ID = event.ID - event.Actor.Attributes = map[string]string{} - switch event.Status { - case "delete", "import", "pull", "push", "tag", "untag": - event.Type = "image" - default: - event.Type = "container" - if event.From != "" { - event.Actor.Attributes["image"] = event.From - } - } - } else { - if event.Status == "" { - if event.Type == "image" || event.Type == "container" { - event.Status = event.Action - } else { - // Because just the Status has been overloaded with different Types - // if an event is not for an image or a container, we prepend the type - // to avoid problems for people relying on actions being only for - // images and containers - event.Status = event.Type + ":" + event.Action - } - } - if event.ID == "" { - event.ID = event.Actor.ID - } - if event.From == "" { - event.From = event.Actor.Attributes["image"] - } - } -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/exec.go deleted file mode 100644 index b935c689..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/exec.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - - "golang.org/x/net/context" -) - -// Exec is the type representing a `docker exec` instance and containing the -// instance ID -type Exec struct { - ID string `json:"Id,omitempty" yaml:"Id,omitempty"` -} - -// CreateExecOptions specify parameters to the CreateExecContainer function. -// -// See https://goo.gl/60TeBP for more details -type CreateExecOptions struct { - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"` - Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty" toml:"Cmd,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` - Context context.Context `json:"-"` - Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"` -} - -// CreateExec sets up an exec instance in a running container `id`, returning the exec -// instance, or an error in case of failure. 
-// -// See https://goo.gl/60TeBP for more details -func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { - path := fmt.Sprintf("/containers/%s/exec", opts.Container) - resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - return nil, err - } - defer resp.Body.Close() - var exec Exec - if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { - return nil, err - } - - return &exec, nil -} - -// StartExecOptions specify parameters to the StartExecContainer function. -// -// See https://goo.gl/1EeDWi for more details -type StartExecOptions struct { - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty" toml:"Detach,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"` - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} `json:"-"` - - Context context.Context `json:"-"` -} - -// StartExec starts a previously set up exec instance id. If opts.Detach is -// true, it returns after starting the exec command. Otherwise, it sets up an -// interactive session with the exec command. -// -// See https://goo.gl/1EeDWi for more details -func (c *Client) StartExec(id string, opts StartExecOptions) error { - cw, err := c.StartExecNonBlocking(id, opts) - if err != nil { - return err - } - if cw != nil { - return cw.Wait() - } - return nil -} - -// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is -// true, it returns after starting the exec command. Otherwise, it sets up an -// interactive session with the exec command. -// -// See https://goo.gl/1EeDWi for more details -func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) { - if id == "" { - return nil, &NoSuchExec{ID: id} - } - - path := fmt.Sprintf("/exec/%s/start", id) - - if opts.Detach { - resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchExec{ID: id} - } - return nil, err - } - defer resp.Body.Close() - return nil, nil - } - - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - data: opts, - }) -} - -// ResizeExecTTY resizes the tty session used by the exec command id. This API -// is valid only if Tty was specified as part of creating and starting the exec -// command. -// -// See https://goo.gl/Mo5bxx for more details -func (c *Client) ResizeExecTTY(id string, height, width int) error { - params := make(url.Values) - params.Set("h", strconv.Itoa(height)) - params.Set("w", strconv.Itoa(width)) - - path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode()) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ExecProcessConfig is a type describing the command associated to a Exec -// instance. 
It's used in the ExecInspect type. -type ExecProcessConfig struct { - User string `json:"user,omitempty" yaml:"user,omitempty" toml:"user,omitempty"` - Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty" toml:"privileged,omitempty"` - Tty bool `json:"tty,omitempty" yaml:"tty,omitempty" toml:"tty,omitempty"` - EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty" toml:"entrypoint,omitempty"` - Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty" toml:"arguments,omitempty"` -} - -// ExecInspect is a type with details about a exec instance, including the -// exit code if the command has finished running. It's returned by a api -// call to /exec/(id)/json -// -// See https://goo.gl/ctMUiW for more details -type ExecInspect struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"` - Running bool `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"` - OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty" toml:"OpenStderr,omitempty"` - OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty" toml:"OpenStdout,omitempty"` - ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty" toml:"ProcessConfig,omitempty"` - ContainerID string `json:"ContainerID,omitempty" yaml:"ContainerID,omitempty" toml:"ContainerID,omitempty"` - DetachKeys string `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"` - CanRemove bool `json:"CanRemove,omitempty" yaml:"CanRemove,omitempty" toml:"CanRemove,omitempty"` -} - -// InspectExec returns low-level information about the exec command id. -// -// See https://goo.gl/ctMUiW for more details -func (c *Client) InspectExec(id string) (*ExecInspect, error) { - path := fmt.Sprintf("/exec/%s/json", id) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchExec{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var exec ExecInspect - if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { - return nil, err - } - return &exec, nil -} - -// NoSuchExec is the error returned when a given exec instance does not exist. -type NoSuchExec struct { - ID string -} - -func (err *NoSuchExec) Error() string { - return "No such exec instance: " + err.ID -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/image.go deleted file mode 100644 index d686e413..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/image.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "strings" - "time" - - "golang.org/x/net/context" -) - -// APIImages represent an image returned in the ListImages call. 
-type APIImages struct { - ID string `json:"Id" yaml:"Id" toml:"Id"` - RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"` - ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty" toml:"ParentId,omitempty"` - RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` -} - -// RootFS represents the underlying layers used by an image -type RootFS struct { - Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` - Layers []string `json:"Layers,omitempty" yaml:"Layers,omitempty" toml:"Layers,omitempty"` -} - -// Image is the type representing a docker image and its various properties -type Image struct { - ID string `json:"Id" yaml:"Id" toml:"Id"` - RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"` - Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty" toml:"Parent,omitempty"` - Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"` - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` - ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty" toml:"ContainerConfig,omitempty"` - DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty" toml:"DockerVersion,omitempty"` - Author string `json:"Author,omitempty" yaml:"Author,omitempty" toml:"Author,omitempty"` - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` - Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"` - RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"` - RootFS *RootFS `json:"RootFS,omitempty" yaml:"RootFS,omitempty" toml:"RootFS,omitempty"` -} - -// ImagePre012 serves the same purpose as the Image type except that it is for -// earlier versions of the Docker API (pre-012 to be specific) -type ImagePre012 struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` -} - -var ( - // ErrNoSuchImage is the error returned when the image does not exist. - ErrNoSuchImage = errors.New("no such image") - - // ErrMissingRepo is the error returned when the remote repository is - // missing. - ErrMissingRepo = errors.New("missing remote repository e.g. 
'github.com/user/repo'") - - // ErrMissingOutputStream is the error returned when no output stream - // is provided to some calls, like BuildImage. - ErrMissingOutputStream = errors.New("missing output stream") - - // ErrMultipleContexts is the error returned when both a ContextDir and - // InputStream are provided in BuildImageOptions - ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") - - // ErrMustSpecifyNames is the error rreturned when the Names field on - // ExportImagesOptions is nil or empty - ErrMustSpecifyNames = errors.New("must specify at least one name to export") -) - -// ListImagesOptions specify parameters to the ListImages function. -// -// See https://goo.gl/BVzauZ for more details. -type ListImagesOptions struct { - Filters map[string][]string - All bool - Digests bool - Filter string - Context context.Context -} - -// ListImages returns the list of available images in the server. -// -// See https://goo.gl/BVzauZ for more details. -func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { - path := "/images/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var images []APIImages - if err := json.NewDecoder(resp.Body).Decode(&images); err != nil { - return nil, err - } - return images, nil -} - -// ImageHistory represent a layer in an image's history returned by the -// ImageHistory call. -type ImageHistory struct { - ID string `json:"Id" yaml:"Id" toml:"Id"` - Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty" toml:"Tags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Tags,omitempty"` - CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` -} - -// ImageHistory returns the history of the image by its name or ID. -// -// See https://goo.gl/fYtxQa for more details. -func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { - resp, err := c.do("GET", "/images/"+name+"/history", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - return nil, err - } - defer resp.Body.Close() - var history []ImageHistory - if err := json.NewDecoder(resp.Body).Decode(&history); err != nil { - return nil, err - } - return history, nil -} - -// RemoveImage removes an image by its name or ID. -// -// See https://goo.gl/Vd2Pck for more details. -func (c *Client) RemoveImage(name string) error { - resp, err := c.do("DELETE", "/images/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return ErrNoSuchImage - } - return err - } - resp.Body.Close() - return nil -} - -// RemoveImageOptions present the set of options available for removing an image -// from a registry. -// -// See https://goo.gl/Vd2Pck for more details. -type RemoveImageOptions struct { - Force bool `qs:"force"` - NoPrune bool `qs:"noprune"` - Context context.Context -} - -// RemoveImageExtended removes an image by its name or ID. -// Extra params can be passed, see RemoveImageOptions -// -// See https://goo.gl/Vd2Pck for more details. 
-func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { - uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) - resp, err := c.do("DELETE", uri, doOptions{context: opts.Context}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return ErrNoSuchImage - } - return err - } - resp.Body.Close() - return nil -} - -// InspectImage returns an image by its name or ID. -// -// See https://goo.gl/ncLTG8 for more details. -func (c *Client) InspectImage(name string) (*Image, error) { - resp, err := c.do("GET", "/images/"+name+"/json", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - return nil, err - } - defer resp.Body.Close() - - var image Image - - // if the caller elected to skip checking the server's version, assume it's the latest - if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { - if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { - return nil, err - } - } else { - var imagePre012 ImagePre012 - if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil { - return nil, err - } - - image.ID = imagePre012.ID - image.Parent = imagePre012.Parent - image.Comment = imagePre012.Comment - image.Created = imagePre012.Created - image.Container = imagePre012.Container - image.ContainerConfig = imagePre012.ContainerConfig - image.DockerVersion = imagePre012.DockerVersion - image.Author = imagePre012.Author - image.Config = imagePre012.Config - image.Architecture = imagePre012.Architecture - image.Size = imagePre012.Size - } - - return &image, nil -} - -// PushImageOptions represents options to use in the PushImage method. -// -// See https://goo.gl/BZemGg for more details. -type PushImageOptions struct { - // Name of the image - Name string - - // Tag of the image - Tag string - - // Registry server to push the image - Registry string - - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` - InactivityTimeout time.Duration `qs:"-"` - - Context context.Context -} - -// PushImage pushes an image to a remote registry, logging progress to w. -// -// An empty instance of AuthConfiguration may be used for unauthenticated -// pushes. -// -// See https://goo.gl/BZemGg for more details. -func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { - if opts.Name == "" { - return ErrNoSuchImage - } - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - name := opts.Name - opts.Name = "" - path := "/images/" + name + "/push?" + queryString(&opts) - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - rawJSONStream: opts.RawJSONStream, - headers: headers, - stdout: opts.OutputStream, - inactivityTimeout: opts.InactivityTimeout, - context: opts.Context, - }) -} - -// PullImageOptions present the set of options available for pulling an image -// from a registry. -// -// See https://goo.gl/qkoSsn for more details. -type PullImageOptions struct { - Repository string `qs:"fromImage"` - Tag string - - // Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21 - // and Docker Engine < 1.9 - // This parameter was removed in Docker Engine 1.11 - Registry string - - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` - InactivityTimeout time.Duration `qs:"-"` - Context context.Context -} - -// PullImage pulls an image from a remote registry, logging progress to -// opts.OutputStream. 
-// -// See https://goo.gl/qkoSsn for more details. -func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - if opts.Tag == "" && strings.Contains(opts.Repository, "@") { - parts := strings.SplitN(opts.Repository, "@", 2) - opts.Repository = parts[0] - opts.Tag = parts[1] - } - return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) -} - -func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { - path := "/images/create?" + qs - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - headers: headers, - in: in, - stdout: w, - rawJSONStream: rawJSONStream, - inactivityTimeout: timeout, - context: context, - }) -} - -// LoadImageOptions represents the options for LoadImage Docker API Call -// -// See https://goo.gl/rEsBV3 for more details. -type LoadImageOptions struct { - InputStream io.Reader - OutputStream io.Writer - Context context.Context -} - -// LoadImage imports a tarball docker image -// -// See https://goo.gl/rEsBV3 for more details. -func (c *Client) LoadImage(opts LoadImageOptions) error { - return c.stream("POST", "/images/load", streamOptions{ - setRawTerminal: true, - in: opts.InputStream, - stdout: opts.OutputStream, - context: opts.Context, - }) -} - -// ExportImageOptions represent the options for ExportImage Docker API call. -// -// See https://goo.gl/AuySaA for more details. -type ExportImageOptions struct { - Name string - OutputStream io.Writer - InactivityTimeout time.Duration - Context context.Context -} - -// ExportImage exports an image (as a tar file) into the stream. -// -// See https://goo.gl/AuySaA for more details. -func (c *Client) ExportImage(opts ExportImageOptions) error { - return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - inactivityTimeout: opts.InactivityTimeout, - context: opts.Context, - }) -} - -// ExportImagesOptions represent the options for ExportImages Docker API call -// -// See https://goo.gl/N9XlDn for more details. -type ExportImagesOptions struct { - Names []string - OutputStream io.Writer `qs:"-"` - InactivityTimeout time.Duration `qs:"-"` - Context context.Context -} - -// ExportImages exports one or more images (as a tar file) into the stream -// -// See https://goo.gl/N9XlDn for more details. -func (c *Client) ExportImages(opts ExportImagesOptions) error { - if opts.Names == nil || len(opts.Names) == 0 { - return ErrMustSpecifyNames - } - return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - inactivityTimeout: opts.InactivityTimeout, - }) -} - -// ImportImageOptions present the set of informations available for importing -// an image from a source file or the stdin. -// -// See https://goo.gl/qkoSsn for more details. -type ImportImageOptions struct { - Repository string `qs:"repo"` - Source string `qs:"fromSrc"` - Tag string `qs:"tag"` - - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` - InactivityTimeout time.Duration `qs:"-"` - Context context.Context -} - -// ImportImage imports an image from a url, a file or stdin -// -// See https://goo.gl/qkoSsn for more details. 
-func (c *Client) ImportImage(opts ImportImageOptions) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - if opts.Source != "-" { - opts.InputStream = nil - } - if opts.Source != "-" && !isURL(opts.Source) { - f, err := os.Open(opts.Source) - if err != nil { - return err - } - opts.InputStream = f - opts.Source = "-" - } - return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) -} - -// BuildImageOptions present the set of informations available for building an -// image from a tarfile with a Dockerfile in it. -// -// For more details about the Docker building process, see -// https://goo.gl/4nYHwV. -type BuildImageOptions struct { - Name string `qs:"t"` - Dockerfile string `qs:"dockerfile"` - NoCache bool `qs:"nocache"` - CacheFrom []string `qs:"-"` - SuppressOutput bool `qs:"q"` - Pull bool `qs:"pull"` - RmTmpContainer bool `qs:"rm"` - ForceRmTmpContainer bool `qs:"forcerm"` - RawJSONStream bool `qs:"-"` - Memory int64 `qs:"memory"` - Memswap int64 `qs:"memswap"` - CPUShares int64 `qs:"cpushares"` - CPUQuota int64 `qs:"cpuquota"` - CPUPeriod int64 `qs:"cpuperiod"` - CPUSetCPUs string `qs:"cpusetcpus"` - Labels map[string]string `qs:"labels"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - Remote string `qs:"remote"` - Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header - AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header - ContextDir string `qs:"-"` - Ulimits []ULimit `qs:"-"` - BuildArgs []BuildArg `qs:"-"` - NetworkMode string `qs:"networkmode"` - InactivityTimeout time.Duration `qs:"-"` - CgroupParent string `qs:"cgroupparent"` - Context context.Context -} - -// BuildArg represents arguments that can be passed to the image when building -// it from a Dockerfile. -// -// For more details about the Docker building process, see -// https://goo.gl/4nYHwV. -type BuildArg struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` - Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` -} - -func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { - if c.serverAPIVersion == nil { - c.checkAPIVersion() - } - if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { - return AuthConfigurations119(authConfigs.Configs) - } - return authConfigs -} - -// TagImageOptions present the set of options to tag an image. -// -// See https://goo.gl/prHrvo for more details. -type TagImageOptions struct { - Repo string - Tag string - Force bool - Context context.Context -} - -// TagImage adds a tag to the image identified by the given name. -// -// See https://goo.gl/prHrvo for more details. 
-func (c *Client) TagImage(name string, opts TagImageOptions) error { - if name == "" { - return ErrNoSuchImage - } - resp, err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{ - context: opts.Context, - }) - - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - return ErrNoSuchImage - } - - return err -} - -func isURL(u string) bool { - p, err := url.Parse(u) - if err != nil { - return false - } - return p.Scheme == "http" || p.Scheme == "https" -} - -func headersWithAuth(auths ...interface{}) (map[string]string, error) { - var headers = make(map[string]string) - - for _, auth := range auths { - switch auth.(type) { - case AuthConfiguration: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - case AuthConfigurations, AuthConfigurations119: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - } - } - - return headers, nil -} - -// APIImageSearch reflect the result of a search on the Docker Hub. -// -// See https://goo.gl/KLO9IZ for more details. -type APIImageSearch struct { - Description string `json:"description,omitempty" yaml:"description,omitempty" toml:"description,omitempty"` - IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty" toml:"is_official,omitempty"` - IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty" toml:"is_automated,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty" toml:"name,omitempty"` - StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty" toml:"star_count,omitempty"` -} - -// SearchImages search the docker hub with a specific given term. -// -// See https://goo.gl/KLO9IZ for more details. -func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { - resp, err := c.do("GET", "/images/search?term="+term, doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var searchResult []APIImageSearch - if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { - return nil, err - } - return searchResult, nil -} - -// SearchImagesEx search the docker hub with a specific given term and authentication. -// -// See https://goo.gl/KLO9IZ for more details. -func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) { - headers, err := headersWithAuth(auth) - if err != nil { - return nil, err - } - - resp, err := c.do("GET", "/images/search?term="+term, doOptions{ - headers: headers, - }) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - var searchResult []APIImageSearch - if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { - return nil, err - } - - return searchResult, nil -} - -// PruneImagesOptions specify parameters to the PruneImages function. -// -// See https://goo.gl/qfZlbZ for more details. -type PruneImagesOptions struct { - Filters map[string][]string - Context context.Context -} - -// PruneImagesResults specify results from the PruneImages function. -// -// See https://goo.gl/qfZlbZ for more details. -type PruneImagesResults struct { - ImagesDeleted []struct{ Untagged, Deleted string } - SpaceReclaimed int64 -} - -// PruneImages deletes images which are unused. -// -// See https://goo.gl/qfZlbZ for more details. 
-func (c *Client) PruneImages(opts PruneImagesOptions) (*PruneImagesResults, error) { - path := "/images/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var results PruneImagesResults - if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { - return nil, err - } - return &results, nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/jsonmessage.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/jsonmessage.go deleted file mode 100644 index 5a54967f..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/jsonmessage.go +++ /dev/null @@ -1,225 +0,0 @@ -package docker - -// NOTE: This is a modified copy of the file https://github.com/moby/moby/tree/master/pkg/jsonmessage -// This heavily reduces the number of packages which are a dependency. - -import ( - "encoding/json" - "fmt" - "io" - "time" - - "github.com/docker/docker/pkg/jsonlog" -) - -// JSONError wraps a concrete Code and Message, `Code` is -// is an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. -type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` -} - -func (p *JSONProgress) String() string { - return "" -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ -type termInfo interface { - Parse(attr string, params ...interface{}) (string, error) -} - -type noTermInfo struct{} // canary used when no terminfo. - -func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { - return "", fmt.Errorf("noTermInfo") -} - -func clearLine(out io.Writer, ti termInfo) { - // el2 (clear whole line) is not exposed by terminfo. 
- - // First clear line from beginning to cursor - if attr, err := ti.Parse("el1"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[1K") - } - // Then clear line from cursor to end - if attr, err := ti.Parse("el"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[K") - } -} - -func cursorUp(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cuu", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dA", l) - } -} - -func cursorDown(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cud", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dB", l) - } -} - -// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") - } - return jm.Error - } - var endl string - if termInfo != nil && jm.Stream == "" && jm.Progress != nil { - clearLine(out, termInfo) - endl = "\r" - fmt.Fprintf(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && termInfo != nil { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - ) - - var termInfo termInfo - - for { - diff := 0 - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm.Aux) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. 
- line = len(ids) - ids[jm.ID] = line - if termInfo != nil { - fmt.Fprintf(out, "\n") - } - } - diff = len(ids) - line - if termInfo != nil { - cursorUp(out, termInfo, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). - ids = make(map[string]int) - } - err := jm.Display(out, termInfo) - if jm.ID != "" && termInfo != nil { - cursorDown(out, termInfo, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/misc.go deleted file mode 100644 index aef595fe..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/misc.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "net" - "strings" - - "github.com/docker/docker/api/types/swarm" -) - -// Version returns version information about the docker server. -// -// See https://goo.gl/mU7yje for more details. -func (c *Client) Version() (*Env, error) { - resp, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var env Env - if err := env.Decode(resp.Body); err != nil { - return nil, err - } - return &env, nil -} - -// DockerInfo contains information about the Docker server -// -// See https://goo.gl/bHUoz9 for more details. -type DockerInfo struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - OomKillDisable bool - ExperimentalBuild bool - NFd int - NGoroutines int - SystemTime string - ExecutionDriver string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ServerVersion string - ClusterStore string - ClusterAdvertise string - Swarm swarm.Info -} - -// PluginsInfo is a struct with the plugins registered with the docker daemon -// -// for more information, see: https://goo.gl/bHUoz9 -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ServiceConfig stores daemon registry services configuration. 
-// -// for more information, see: https://goo.gl/7iFFDz -type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet - IndexConfigs map[string]*IndexInfo - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON. -// -// for more information, see: https://goo.gl/7iFFDz -type NetIPNet net.IPNet - -// MarshalJSON returns the JSON representation of the IPNet. -// -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON. -// -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry. -// -// for more information, see: https://goo.gl/7iFFDz -type IndexInfo struct { - Name string - Mirrors []string - Secure bool - Official bool -} - -// Info returns system-wide information about the Docker server. -// -// See https://goo.gl/ElTHi2 for more details. -func (c *Client) Info() (*DockerInfo, error) { - resp, err := c.do("GET", "/info", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var info DockerInfo - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { - return nil, err - } - return &info, nil -} - -// ParseRepositoryTag gets the name of the repository and returns it splitted -// in two parts: the repository and the tag. It ignores the digest when it is -// present. -// -// Some examples: -// -// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest -// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, "" -// busybox:latest@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92 -> busybox, latest -func ParseRepositoryTag(repoTag string) (repository string, tag string) { - parts := strings.SplitN(repoTag, "@", 2) - repoTag = parts[0] - n := strings.LastIndex(repoTag, ":") - if n < 0 { - return repoTag, "" - } - if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { - return repoTag[:n], tag - } - return repoTag, "" -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/network.go deleted file mode 100644 index 295efd56..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/network.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - - "golang.org/x/net/context" -) - -// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the -// network already exists. -var ErrNetworkAlreadyExists = errors.New("network already exists") - -// Network represents a network. -// -// See https://goo.gl/6GugX3 for more details. 
-type Network struct { - Name string - ID string `json:"Id"` - Scope string - Driver string - IPAM IPAMOptions - Containers map[string]Endpoint - Options map[string]string - Internal bool - EnableIPv6 bool `json:"EnableIPv6"` - Labels map[string]string -} - -// Endpoint contains network resources allocated and used for a container in a network -// -// See https://goo.gl/6GugX3 for more details. -type Endpoint struct { - Name string - ID string `json:"EndpointID"` - MacAddress string - IPv4Address string - IPv6Address string -} - -// ListNetworks returns all networks. -// -// See https://goo.gl/6GugX3 for more details. -func (c *Client) ListNetworks() ([]Network, error) { - resp, err := c.do("GET", "/networks", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var networks []Network - if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { - return nil, err - } - return networks, nil -} - -// NetworkFilterOpts is an aggregation of key=value that Docker -// uses to filter networks -type NetworkFilterOpts map[string]map[string]bool - -// FilteredListNetworks returns all networks with the filters applied -// -// See goo.gl/zd2mx4 for more details. -func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) { - params, err := json.Marshal(opts) - if err != nil { - return nil, err - } - path := "/networks?filters=" + string(params) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var networks []Network - if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { - return nil, err - } - return networks, nil -} - -// NetworkInfo returns information about a network by its ID. -// -// See https://goo.gl/6GugX3 for more details. -func (c *Client) NetworkInfo(id string) (*Network, error) { - path := "/networks/" + id - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchNetwork{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var network Network - if err := json.NewDecoder(resp.Body).Decode(&network); err != nil { - return nil, err - } - return &network, nil -} - -// CreateNetworkOptions specify parameters to the CreateNetwork function and -// (for now) is the expected body of the "create network" http request message -// -// See https://goo.gl/6GugX3 for more details. -type CreateNetworkOptions struct { - Name string `json:"Name" yaml:"Name" toml:"Name"` - Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` - IPAM IPAMOptions `json:"IPAM" yaml:"IPAM" toml:"IPAM"` - Options map[string]interface{} `json:"Options" yaml:"Options" toml:"Options"` - Labels map[string]string `json:"Labels" yaml:"Labels" toml:"Labels"` - CheckDuplicate bool `json:"CheckDuplicate" yaml:"CheckDuplicate" toml:"CheckDuplicate"` - Internal bool `json:"Internal" yaml:"Internal" toml:"Internal"` - EnableIPv6 bool `json:"EnableIPv6" yaml:"EnableIPv6" toml:"EnableIPv6"` - Context context.Context `json:"-"` -} - -// IPAMOptions controls IP Address Management when creating a network -// -// See https://goo.gl/T8kRVH for more details. -type IPAMOptions struct { - Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` - Config []IPAMConfig `json:"Config" yaml:"Config" toml:"Config"` -} - -// IPAMConfig represents IPAM configurations -// -// See https://goo.gl/T8kRVH for more details. 
-type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// CreateNetwork creates a new network, returning the network instance, -// or an error in case of failure. -// -// See https://goo.gl/6GugX3 for more details. -func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { - resp, err := c.do( - "POST", - "/networks/create", - doOptions{ - data: opts, - context: opts.Context, - }, - ) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - type createNetworkResponse struct { - ID string - } - var ( - network Network - cnr createNetworkResponse - ) - if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil { - return nil, err - } - - network.Name = opts.Name - network.ID = cnr.ID - network.Driver = opts.Driver - - return &network, nil -} - -// RemoveNetwork removes a network or returns an error in case of failure. -// -// See https://goo.gl/6GugX3 for more details. -func (c *Client) RemoveNetwork(id string) error { - resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchNetwork{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// NetworkConnectionOptions specify parameters to the ConnectNetwork and -// DisconnectNetwork function. -// -// See https://goo.gl/RV7BJU for more details. -type NetworkConnectionOptions struct { - Container string - - // EndpointConfig is only applicable to the ConnectNetwork call - EndpointConfig *EndpointConfig `json:"EndpointConfig,omitempty"` - - // Force is only applicable to the DisconnectNetwork call - Force bool - - Context context.Context `json:"-"` -} - -// EndpointConfig stores network endpoint details -// -// See https://goo.gl/RV7BJU for more details. -type EndpointConfig struct { - IPAMConfig *EndpointIPAMConfig `json:"IPAMConfig,omitempty" yaml:"IPAMConfig,omitempty" toml:"IPAMConfig,omitempty"` - Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"` - Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty" toml:"Aliases,omitempty"` - NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` - EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` - Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` - IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` - IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` - IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` - GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for an -// endpoint -// -// See https://goo.gl/RV7BJU for more details. 
-type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` -} - -// ConnectNetwork adds a container to a network or returns an error in case of -// failure. -// -// See https://goo.gl/6GugX3 for more details. -func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { - resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{ - data: opts, - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} - } - return err - } - resp.Body.Close() - return nil -} - -// DisconnectNetwork removes a container from a network or returns an error in -// case of failure. -// -// See https://goo.gl/6GugX3 for more details. -func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { - resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} - } - return err - } - resp.Body.Close() - return nil -} - -// PruneNetworksOptions specify parameters to the PruneNetworks function. -// -// See https://goo.gl/kX0S9h for more details. -type PruneNetworksOptions struct { - Filters map[string][]string - Context context.Context -} - -// PruneNetworksResults specify results from the PruneNetworks function. -// -// See https://goo.gl/kX0S9h for more details. -type PruneNetworksResults struct { - NetworksDeleted []string -} - -// PruneNetworks deletes networks which are unused. -// -// See https://goo.gl/kX0S9h for more details. -func (c *Client) PruneNetworks(opts PruneNetworksOptions) (*PruneNetworksResults, error) { - path := "/networks/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var results PruneNetworksResults - if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { - return nil, err - } - return &results, nil -} - -// NoSuchNetwork is the error returned when a given network does not exist. -type NoSuchNetwork struct { - ID string -} - -func (err *NoSuchNetwork) Error() string { - return fmt.Sprintf("No such network: %s", err.ID) -} - -// NoSuchNetworkOrContainer is the error returned when a given network or -// container does not exist. -type NoSuchNetworkOrContainer struct { - NetworkID string - ContainerID string -} - -func (err *NoSuchNetworkOrContainer) Error() string { - return fmt.Sprintf("No such network (%s) or container (%s)", err.NetworkID, err.ContainerID) -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/node.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/node.go deleted file mode 100644 index 84340254..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/node.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// NoSuchNode is the error returned when a given node does not exist. -type NoSuchNode struct { - ID string - Err error -} - -func (err *NoSuchNode) Error() string { - if err.Err != nil { - return err.Err.Error() - } - return "No such node: " + err.ID -} - -// ListNodesOptions specify parameters to the ListNodes function. -// -// See http://goo.gl/3K4GwU for more details. -type ListNodesOptions struct { - Filters map[string][]string - Context context.Context -} - -// ListNodes returns a slice of nodes matching the given criteria. -// -// See http://goo.gl/3K4GwU for more details. -func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) { - path := "/nodes?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var nodes []swarm.Node - if err := json.NewDecoder(resp.Body).Decode(&nodes); err != nil { - return nil, err - } - return nodes, nil -} - -// InspectNode returns information about a node by its ID. -// -// See http://goo.gl/WjkTOk for more details. -func (c *Client) InspectNode(id string) (*swarm.Node, error) { - resp, err := c.do("GET", "/nodes/"+id, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchNode{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var node swarm.Node - if err := json.NewDecoder(resp.Body).Decode(&node); err != nil { - return nil, err - } - return &node, nil -} - -// UpdateNodeOptions specify parameters to the NodeUpdate function. -// -// See http://goo.gl/VPBFgA for more details. -type UpdateNodeOptions struct { - swarm.NodeSpec - Version uint64 - Context context.Context -} - -// UpdateNode updates a node. -// -// See http://goo.gl/VPBFgA for more details. -func (c *Client) UpdateNode(id string, opts UpdateNodeOptions) error { - params := make(url.Values) - params.Set("version", strconv.FormatUint(opts.Version, 10)) - path := "/nodes/" + id + "/update?" + params.Encode() - resp, err := c.do("POST", path, doOptions{ - context: opts.Context, - forceJSON: true, - data: opts.NodeSpec, - }) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchNode{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// RemoveNodeOptions specify parameters to the RemoveNode function. -// -// See http://goo.gl/0SNvYg for more details. -type RemoveNodeOptions struct { - ID string - Force bool - Context context.Context -} - -// RemoveNode removes a node. -// -// See http://goo.gl/0SNvYg for more details. -func (c *Client) RemoveNode(opts RemoveNodeOptions) error { - params := make(url.Values) - params.Set("force", strconv.FormatBool(opts.Force)) - path := "/nodes/" + opts.ID + "?" 
+ params.Encode() - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchNode{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/service.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/service.go deleted file mode 100644 index fa6c96d8..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/service.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// NoSuchService is the error returned when a given service does not exist. -type NoSuchService struct { - ID string - Err error -} - -func (err *NoSuchService) Error() string { - if err.Err != nil { - return err.Err.Error() - } - return "No such service: " + err.ID -} - -// CreateServiceOptions specify parameters to the CreateService function. -// -// See https://goo.gl/KrVjHz for more details. -type CreateServiceOptions struct { - Auth AuthConfiguration `qs:"-"` - swarm.ServiceSpec - Context context.Context -} - -// CreateService creates a new service, returning the service instance -// or an error in case of failure. -// -// See https://goo.gl/KrVjHz for more details. -func (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error) { - headers, err := headersWithAuth(opts.Auth) - if err != nil { - return nil, err - } - path := "/services/create?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ - headers: headers, - data: opts.ServiceSpec, - forceJSON: true, - context: opts.Context, - }) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var service swarm.Service - if err := json.NewDecoder(resp.Body).Decode(&service); err != nil { - return nil, err - } - return &service, nil -} - -// RemoveServiceOptions encapsulates options to remove a service. -// -// See https://goo.gl/Tqrtya for more details. -type RemoveServiceOptions struct { - ID string `qs:"-"` - Context context.Context -} - -// RemoveService removes a service, returning an error in case of failure. -// -// See https://goo.gl/Tqrtya for more details. -func (c *Client) RemoveService(opts RemoveServiceOptions) error { - path := "/services/" + opts.ID - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchService{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} - -// UpdateServiceOptions specify parameters to the UpdateService function. -// -// See https://goo.gl/wu3MmS for more details. -type UpdateServiceOptions struct { - Auth AuthConfiguration `qs:"-"` - swarm.ServiceSpec - Context context.Context - Version uint64 -} - -// UpdateService updates the service at ID with the options -// -// See https://goo.gl/wu3MmS for more details. 
-func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error { - headers, err := headersWithAuth(opts.Auth) - if err != nil { - return err - } - params := make(url.Values) - params.Set("version", strconv.FormatUint(opts.Version, 10)) - resp, err := c.do("POST", "/services/"+id+"/update?"+params.Encode(), doOptions{ - headers: headers, - data: opts.ServiceSpec, - forceJSON: true, - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchService{ID: id} - } - return err - } - defer resp.Body.Close() - return nil -} - -// InspectService returns information about a service by its ID. -// -// See https://goo.gl/dHmr75 for more details. -func (c *Client) InspectService(id string) (*swarm.Service, error) { - path := "/services/" + id - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchService{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var service swarm.Service - if err := json.NewDecoder(resp.Body).Decode(&service); err != nil { - return nil, err - } - return &service, nil -} - -// ListServicesOptions specify parameters to the ListServices function. -// -// See https://goo.gl/DwvNMd for more details. -type ListServicesOptions struct { - Filters map[string][]string - Context context.Context -} - -// ListServices returns a slice of services matching the given criteria. -// -// See https://goo.gl/DwvNMd for more details. -func (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) { - path := "/services?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var services []swarm.Service - if err := json.NewDecoder(resp.Body).Decode(&services); err != nil { - return nil, err - } - return services, nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/signal.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/signal.go deleted file mode 100644 index 16aa0038..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/signal.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -// Signal represents a signal that can be send to the container on -// KillContainer call. -type Signal int - -// These values represent all signals available on Linux, where containers will -// be running. 
-const ( - SIGABRT = Signal(0x6) - SIGALRM = Signal(0xe) - SIGBUS = Signal(0x7) - SIGCHLD = Signal(0x11) - SIGCLD = Signal(0x11) - SIGCONT = Signal(0x12) - SIGFPE = Signal(0x8) - SIGHUP = Signal(0x1) - SIGILL = Signal(0x4) - SIGINT = Signal(0x2) - SIGIO = Signal(0x1d) - SIGIOT = Signal(0x6) - SIGKILL = Signal(0x9) - SIGPIPE = Signal(0xd) - SIGPOLL = Signal(0x1d) - SIGPROF = Signal(0x1b) - SIGPWR = Signal(0x1e) - SIGQUIT = Signal(0x3) - SIGSEGV = Signal(0xb) - SIGSTKFLT = Signal(0x10) - SIGSTOP = Signal(0x13) - SIGSYS = Signal(0x1f) - SIGTERM = Signal(0xf) - SIGTRAP = Signal(0x5) - SIGTSTP = Signal(0x14) - SIGTTIN = Signal(0x15) - SIGTTOU = Signal(0x16) - SIGUNUSED = Signal(0x1f) - SIGURG = Signal(0x17) - SIGUSR1 = Signal(0xa) - SIGUSR2 = Signal(0xc) - SIGVTALRM = Signal(0x1a) - SIGWINCH = Signal(0x1c) - SIGXCPU = Signal(0x18) - SIGXFSZ = Signal(0x19) -) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/swarm.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/swarm.go deleted file mode 100644 index 6d9086a5..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/swarm.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "net/http" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -var ( - // ErrNodeAlreadyInSwarm is the error returned by InitSwarm and JoinSwarm - // when the node is already part of a Swarm. - ErrNodeAlreadyInSwarm = errors.New("node already in a Swarm") - - // ErrNodeNotInSwarm is the error returned by LeaveSwarm and UpdateSwarm - // when the node is not part of a Swarm. - ErrNodeNotInSwarm = errors.New("node is not in a Swarm") -) - -// InitSwarmOptions specify parameters to the InitSwarm function. -// See https://goo.gl/hzkgWu for more details. -type InitSwarmOptions struct { - swarm.InitRequest - Context context.Context -} - -// InitSwarm initializes a new Swarm and returns the node ID. -// See https://goo.gl/ZWyG1M for more details. -func (c *Client) InitSwarm(opts InitSwarmOptions) (string, error) { - path := "/swarm/init" - resp, err := c.do("POST", path, doOptions{ - data: opts.InitRequest, - forceJSON: true, - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { - return "", ErrNodeAlreadyInSwarm - } - return "", err - } - defer resp.Body.Close() - var response string - if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - return "", err - } - return response, nil -} - -// JoinSwarmOptions specify parameters to the JoinSwarm function. -// See https://goo.gl/TdhJWU for more details. -type JoinSwarmOptions struct { - swarm.JoinRequest - Context context.Context -} - -// JoinSwarm joins an existing Swarm. -// See https://goo.gl/N59IP1 for more details. 
-func (c *Client) JoinSwarm(opts JoinSwarmOptions) error { - path := "/swarm/join" - resp, err := c.do("POST", path, doOptions{ - data: opts.JoinRequest, - forceJSON: true, - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { - return ErrNodeAlreadyInSwarm - } - } - resp.Body.Close() - return err -} - -// LeaveSwarmOptions specify parameters to the LeaveSwarm function. -// See https://goo.gl/UWDlLg for more details. -type LeaveSwarmOptions struct { - Force bool - Context context.Context -} - -// LeaveSwarm leaves a Swarm. -// See https://goo.gl/FTX1aD for more details. -func (c *Client) LeaveSwarm(opts LeaveSwarmOptions) error { - params := make(url.Values) - params.Set("force", strconv.FormatBool(opts.Force)) - path := "/swarm/leave?" + params.Encode() - resp, err := c.do("POST", path, doOptions{ - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { - return ErrNodeNotInSwarm - } - } - resp.Body.Close() - return err -} - -// UpdateSwarmOptions specify parameters to the UpdateSwarm function. -// See https://goo.gl/vFbq36 for more details. -type UpdateSwarmOptions struct { - Version int - RotateWorkerToken bool - RotateManagerToken bool - Swarm swarm.Spec - Context context.Context -} - -// UpdateSwarm updates a Swarm. -// See https://goo.gl/iJFnsw for more details. -func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error { - params := make(url.Values) - params.Set("version", strconv.Itoa(opts.Version)) - params.Set("rotateWorkerToken", strconv.FormatBool(opts.RotateWorkerToken)) - params.Set("rotateManagerToken", strconv.FormatBool(opts.RotateManagerToken)) - path := "/swarm/update?" + params.Encode() - resp, err := c.do("POST", path, doOptions{ - data: opts.Swarm, - forceJSON: true, - context: opts.Context, - }) - if err != nil { - if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { - return ErrNodeNotInSwarm - } - } - resp.Body.Close() - return err -} - -// InspectSwarm inspects a Swarm. -// See https://goo.gl/MFwgX9 for more details. -func (c *Client) InspectSwarm(ctx context.Context) (swarm.Swarm, error) { - response := swarm.Swarm{} - resp, err := c.do("GET", "/swarm", doOptions{ - context: ctx, - }) - if err != nil { - if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { - return response, ErrNodeNotInSwarm - } - return response, err - } - defer resp.Body.Close() - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/tar.go deleted file mode 100644 index f68ff947..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/tar.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/fileutils" -) - -// validateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read. -// Symlinks which point to non-existing files don't trigger an error -func validateContextDirectory(srcPath string, excludes []string) error { - return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, relErr := filepath.Rel(srcPath, filePath); relErr != nil { - return relErr - } else if skip, matchErr := fileutils.Matches(relFilePath, excludes); matchErr != nil { - return matchErr - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hanging on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -func parseDockerignore(root string) ([]string, error) { - var excludes []string - ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err) - } - excludes = strings.Split(string(ignore), "\n") - - return excludes, nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/task.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/task.go deleted file mode 100644 index b1dad4b2..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/task.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "net/http" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// NoSuchTask is the error returned when a given task does not exist. -type NoSuchTask struct { - ID string - Err error -} - -func (err *NoSuchTask) Error() string { - if err.Err != nil { - return err.Err.Error() - } - return "No such task: " + err.ID -} - -// ListTasksOptions specify parameters to the ListTasks function. -// -// See http://goo.gl/rByLzw for more details. -type ListTasksOptions struct { - Filters map[string][]string - Context context.Context -} - -// ListTasks returns a slice of tasks matching the given criteria. -// -// See http://goo.gl/rByLzw for more details. -func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) { - path := "/tasks?" 
+ queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var tasks []swarm.Task - if err := json.NewDecoder(resp.Body).Decode(&tasks); err != nil { - return nil, err - } - return tasks, nil -} - -// InspectTask returns information about a task by its ID. -// -// See http://goo.gl/kyziuq for more details. -func (c *Client) InspectTask(id string) (*swarm.Task, error) { - resp, err := c.do("GET", "/tasks/"+id, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchTask{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var task swarm.Task - if err := json.NewDecoder(resp.Body).Decode(&task); err != nil { - return nil, err - } - return &task, nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/tls.go deleted file mode 100644 index bb5790b5..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/tls.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// The content is borrowed from Docker's own source code to provide a simple -// tls based dialer - -package docker - -import ( - "crypto/tls" - "errors" - "net" - "strings" - "time" -) - -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if cwc, ok := c.rawConn.(interface { - CloseWrite() error - }); ok { - return cwc.CloseWrite() - } - return nil -} - -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - rawConn, err := dialer.Dial(network, addr) - if err != nil { - return nil, err - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - config = copyTLSConfig(config) - config.ServerName = hostname - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. 
- return &tlsClientCon{conn, rawConn}, nil -} - -// this exists to silent an error message in go vet -func copyTLSConfig(cfg *tls.Config) *tls.Config { - return &tls.Config{ - Certificates: cfg.Certificates, - CipherSuites: cfg.CipherSuites, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - ClientSessionCache: cfg.ClientSessionCache, - CurvePreferences: cfg.CurvePreferences, - InsecureSkipVerify: cfg.InsecureSkipVerify, - MaxVersion: cfg.MaxVersion, - MinVersion: cfg.MinVersion, - NameToCertificate: cfg.NameToCertificate, - NextProtos: cfg.NextProtos, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - Rand: cfg.Rand, - RootCAs: cfg.RootCAs, - ServerName: cfg.ServerName, - SessionTicketKey: cfg.SessionTicketKey, - SessionTicketsDisabled: cfg.SessionTicketsDisabled, - } -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/volume.go deleted file mode 100644 index 3c7bdeaa..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/volume.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "net/http" - - "golang.org/x/net/context" -) - -var ( - // ErrNoSuchVolume is the error returned when the volume does not exist. - ErrNoSuchVolume = errors.New("no such volume") - - // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use. - ErrVolumeInUse = errors.New("volume in use and cannot be removed") -) - -// Volume represents a volume. -// -// See https://goo.gl/FZA4BK for more details. -type Volume struct { - Name string `json:"Name" yaml:"Name" toml:"Name"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` - Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty" toml:"Mountpoint,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` -} - -// ListVolumesOptions specify parameters to the ListVolumes function. -// -// See https://goo.gl/FZA4BK for more details. -type ListVolumesOptions struct { - Filters map[string][]string - Context context.Context -} - -// ListVolumes returns a list of available volumes in the server. -// -// See https://goo.gl/FZA4BK for more details. -func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { - resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{ - context: opts.Context, - }) - if err != nil { - return nil, err - } - defer resp.Body.Close() - m := make(map[string]interface{}) - if err = json.NewDecoder(resp.Body).Decode(&m); err != nil { - return nil, err - } - var volumes []Volume - volumesJSON, ok := m["Volumes"] - if !ok { - return volumes, nil - } - data, err := json.Marshal(volumesJSON) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, &volumes); err != nil { - return nil, err - } - return volumes, nil -} - -// CreateVolumeOptions specify parameters to the CreateVolume function. -// -// See https://goo.gl/pBUbZ9 for more details. 
-type CreateVolumeOptions struct { - Name string - Driver string - DriverOpts map[string]string - Context context.Context `json:"-"` - Labels map[string]string -} - -// CreateVolume creates a volume on the server. -// -// See https://goo.gl/pBUbZ9 for more details. -func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { - resp, err := c.do("POST", "/volumes/create", doOptions{ - data: opts, - context: opts.Context, - }) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var volume Volume - if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { - return nil, err - } - return &volume, nil -} - -// InspectVolume returns a volume by its name. -// -// See https://goo.gl/0g9A6i for more details. -func (c *Client) InspectVolume(name string) (*Volume, error) { - resp, err := c.do("GET", "/volumes/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchVolume - } - return nil, err - } - defer resp.Body.Close() - var volume Volume - if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { - return nil, err - } - return &volume, nil -} - -// RemoveVolume removes a volume by its name. -// -// See https://goo.gl/79GNQz for more details. -func (c *Client) RemoveVolume(name string) error { - resp, err := c.do("DELETE", "/volumes/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok { - if e.Status == http.StatusNotFound { - return ErrNoSuchVolume - } - if e.Status == http.StatusConflict { - return ErrVolumeInUse - } - } - return nil - } - defer resp.Body.Close() - return nil -} - -// PruneVolumesOptions specify parameters to the PruneVolumes function. -// -// See https://goo.gl/pFN1Hj for more details. -type PruneVolumesOptions struct { - Filters map[string][]string - Context context.Context -} - -// PruneVolumesResults specify results from the PruneVolumes function. -// -// See https://goo.gl/pFN1Hj for more details. -type PruneVolumesResults struct { - VolumesDeleted []string - SpaceReclaimed int64 -} - -// PruneVolumes deletes volumes which are unused. -// -// See https://goo.gl/pFN1Hj for more details. -func (c *Client) PruneVolumes(opts PruneVolumesOptions) (*PruneVolumesResults, error) { - path := "/volumes/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var results PruneVolumesResults - if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { - return nil, err - } - return &results, nil -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. 
"Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e5313..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. 
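As a rough illustration of the README above, here is a minimal sketch of swapping a shared `http.DefaultClient` for a cleanhttp client. It assumes only the `cleanhttp.DefaultClient` constructor defined in the cleanhttp.go file below; the URL and timeout are placeholders, not part of this changeset.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/hashicorp/go-cleanhttp"
)

func main() {
	// DefaultClient returns an *http.Client backed by its own Transport,
	// so adjusting it here cannot race with, or silently reconfigure,
	// code elsewhere that relies on http.DefaultClient/http.DefaultTransport.
	client := cleanhttp.DefaultClient()
	client.Timeout = 10 * time.Second

	resp, err := client.Get("http://localhost:8080/metrics") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```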
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go deleted file mode 100644 index 7d8a57c2..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ /dev/null @@ -1,56 +0,0 @@ -package cleanhttp - -import ( - "net" - "net/http" - "runtime" - "time" -) - -// DefaultTransport returns a new http.Transport with similar default values to -// http.DefaultTransport, but with idle connections and keepalives disabled. -func DefaultTransport() *http.Transport { - transport := DefaultPooledTransport() - transport.DisableKeepAlives = true - transport.MaxIdleConnsPerHost = -1 - return transport -} - -// DefaultPooledTransport returns a new http.Transport with similar default -// values to http.DefaultTransport. Do not use this for transient transports as -// it can leak file descriptors over time. Only use this for transports that -// will be re-used for the same host(s). -func DefaultPooledTransport() *http.Transport { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, - } - return transport -} - -// DefaultClient returns a new http.Client with similar default values to -// http.Client, but with a non-shared Transport, idle connections disabled, and -// keepalives disabled. -func DefaultClient() *http.Client { - return &http.Client{ - Transport: DefaultTransport(), - } -} - -// DefaultPooledClient returns a new http.Client with similar default values to -// http.Client, but with a shared Transport. Do not use this function for -// transient clients as it can leak file descriptors over time. Only use this -// for clients that will be re-used for the same host(s). -func DefaultPooledClient() *http.Client { - return &http.Client{ - Transport: DefaultPooledTransport(), - } -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/doc.go deleted file mode 100644 index 05841092..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/hashicorp/go-cleanhttp/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package cleanhttp offers convenience utilities for acquiring "clean" -// http.Transport and http.Client structs. -// -// Values set on http.DefaultClient and http.DefaultTransport affect all -// callers. This can have detrimental effects, esepcially in TLS contexts, -// where client or root certificates set to talk to multiple endpoints can end -// up displacing each other, leading to hard-to-debug issues. This package -// provides non-shared http.Client and http.Transport structs to ensure that -// the configuration will not be overwritten by other parts of the application -// or dependencies. -// -// The DefaultClient and DefaultTransport functions disable idle connections -// and keepalives. 
Without ensuring that idle connections are closed before -// garbage collection, short-term clients/transports can leak file descriptors, -// eventually leading to "too many open files" errors. If you will be -// connecting to the same hosts repeatedly from the same client, you can use -// DefaultPooledClient to receive a client that has connection pooling -// semantics similar to http.DefaultClient. -// -package cleanhttp diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/vendor.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/vendor.json deleted file mode 100644 index 3f6fb300..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/vendor.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "comment": "", - "ignore": "test github.com/fsouza/go-dockerclient", - "package": [ - { - "checksumSHA1": "QndA9F9cJjk2u2SZ/T1xsHSfmrU=", - "origin": "github.com/ruflin/go-dockerclient", - "path": "github.com/fsouza/go-dockerclient", - "revision": "ba365ff5e4281feb28654e4ca599a1defd063497", - "revisionTime": "2017-05-08T06:34:48Z", - "version": "beats-branch", - "versionExact": "beats-branch" - }, - { - "checksumSHA1": "b8F628srIitj5p7Y130xc9k0QWs=", - "path": "github.com/hashicorp/go-cleanhttp", - "revision": "3573b8b52aa7b37b9358d966a898feb387f62437", - "revisionTime": "2017-02-11T01:34:15Z" - } - ], - "rootPath": "github.com/elastic/beats/metricbeat/module/docker" -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/config.reference.yml new file mode 100644 index 00000000..ac8266c4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/config.reference.yml @@ -0,0 +1,7 @@ +- module: dropwizard + metricsets: ["collector"] + period: 10s + hosts: ["localhost:8080"] + metrics_path: /metrics/metrics + namespace: example + enabled: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/config.yml index 6dfa3755..bc989151 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/config.yml @@ -1,6 +1,6 @@ - module: dropwizard - metricsets: ["collector"] - period: 10s hosts: ["localhost:8080"] metrics_path: /metrics/metrics namespace: example + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/docs.asciidoc index c5c4ba56..31cef104 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/docs.asciidoc @@ -1,2 +1,2 @@ -This is the http://dropwizard.io[Dropwizard] module. +This is the http://dropwizard.io[Dropwizard] module. The default metricset is `collector`. 
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/collector/collector.go b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/collector/collector.go index e2adb471..4355c25a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/collector/collector.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/collector/collector.go @@ -28,9 +28,10 @@ var ( // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("dropwizard", "collector", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("dropwizard", "collector", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -56,9 +57,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, namespace: config.Namespace, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile index c0aeb366..a9b0b8cc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile @@ -1,2 +1,2 @@ -FROM docker.elastic.co/elasticsearch/elasticsearch:6.0.0 +FROM docker.elastic.co/elasticsearch/elasticsearch:6.2.4 HEALTHCHECK --interval=1s --retries=90 CMD curl -f http://localhost:9200 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config.reference.yml new file mode 100644 index 00000000..a792d2be --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config.reference.yml @@ -0,0 +1,4 @@ +- module: elasticsearch + metricsets: ["node", "node_stats"] + period: 10s + hosts: ["localhost:9200"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config.yml index a792d2be..2531498f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config.yml @@ -1,4 +1,4 @@ - module: elasticsearch - metricsets: ["node", "node_stats"] - period: 10s hosts: ["localhost:9200"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/docs.asciidoc index c56628f1..9e6909c5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/docs.asciidoc @@ -1 +1,3 @@ -The Elasticsearch module contains a minimal set of metrics to enable monitoring of Elasticsearch across multiple versions. To monitor more Elasticsearch metrics, use our {monitoringdoc}/xpack-monitoring.html[X-Pack monitoring] which is available under a free basic license. +The Elasticsearch module contains a minimal set of metrics to enable monitoring of Elasticsearch across multiple versions. 
To monitor more Elasticsearch metrics, use our {monitoringdoc}/xpack-monitoring.html[monitoring] feature. + +The default metricsets are `node` and `node_stats`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node/node.go index f3ca3534..c7b38bbd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node/node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node/node.go @@ -11,9 +11,10 @@ import ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("elasticsearch", "node", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("elasticsearch", "node", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } var ( @@ -35,9 +36,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The elasticsearch node metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/_meta/docs.asciidoc index 33bea6eb..e059f318 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/_meta/docs.asciidoc @@ -1,5 +1,3 @@ -=== Elasticsearch node_stats metricset - The `node_stats` metricset interrogates the https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html[Cluster API endpoint] of Elasticsearch to get the cluster nodes statistics. The data received is only for the local node so this Metricbeat has diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/node_stats.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/node_stats.go index db876844..db44f224 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/node_stats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats/node_stats.go @@ -11,9 +11,10 @@ import ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("elasticsearch", "node_stats", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("elasticsearch", "node_stats", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } var ( @@ -35,9 +36,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The elasticsearch node_stats metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/test_elasticsearch.py b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/test_elasticsearch.py new file mode 100644 index 00000000..87192696 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/test_elasticsearch.py @@ -0,0 +1,31 @@ +import re +import sys +import os +import unittest + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../tests/system')) + +import metricbeat + + +class Test(metricbeat.BaseTest): + + COMPOSE_SERVICES = ['elasticsearch'] + + @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + def test_node(self): + """ + elasticsearch node metricset test + """ + self.check_metricset("elasticsearch", "node", self.get_hosts()) + + @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + def test_node_stats(self): + """ + elasticsearch node_stats metricset test + """ + self.check_metricset("elasticsearch", "node_stats", self.get_hosts()) + + def get_hosts(self): + return [os.getenv('ES_HOST', 'localhost') + ':' + + os.getenv('ES_PORT', '9200')] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/config.reference.yml new file mode 100644 index 00000000..f269a2b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/config.reference.yml @@ -0,0 +1,4 @@ +- module: etcd + metricsets: ["leader", "self", "store"] + period: 10s + hosts: ["localhost:2379"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/config.yml index 6a5d97c6..00ac532e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/config.yml @@ -1,5 +1,4 @@ - module: etcd - metricsets: ["leader", "self", "store"] - period: 10s hosts: ["localhost:2379"] - + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/docs.asciidoc index c346daa5..06493114 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/docs.asciidoc @@ -1,3 +1,3 @@ This is the Etcd Module. The Etcd module uses https://coreos.com/etcd/docs/latest/v2/api.html [Etcd v2 API] to collect metrics. - +The default metricsets are `leader`, `self` and `store`. 
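The metricset changes above (dropwizard, elasticsearch) and below (etcd, graphite, haproxy) all replace the old `AddMetricSet` + `panic(err)` registration with `MustAddMetricSet` plus functional options. The following is a minimal, self-contained sketch of the new idiom; the `example`/`status` names, the trivial host parser, and the stub `Fetch` are placeholders for illustration only, not part of this changeset.

```go
package status

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/mb"
)

func init() {
	// MustAddMetricSet panics internally when registration fails, so the
	// old `if err := mb.Registry.AddMetricSet(...); err != nil { panic(err) }`
	// wrapper seen in the removed lines is no longer needed.
	mb.Registry.MustAddMetricSet("example", "status", New,
		mb.WithHostParser(hostParser),
		mb.DefaultMetricSet(), // run this metricset when none are listed in the config
	)
}

// hostParser is a trivial stand-in for the per-module parsers used in the diff.
func hostParser(module mb.Module, host string) (mb.HostData, error) {
	return mb.HostData{URI: host, Host: host}, nil
}

// MetricSet is a minimal metricset carrying only the embedded base.
type MetricSet struct {
	mb.BaseMetricSet
}

// New constructs the placeholder metricset.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{base}, nil
}

// Fetch returns a single placeholder event.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	return common.MapStr{"status": "ok"}, nil
}
```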
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/leader/leader.go b/vendor/github.com/elastic/beats/metricbeat/module/etcd/leader/leader.go index 3e9c1277..415c4c55 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/etcd/leader/leader.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/leader/leader.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("etcd", "leader", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("etcd", "leader", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -38,9 +39,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/self/self.go b/vendor/github.com/elastic/beats/metricbeat/module/etcd/self/self.go index 7958e82b..f890c98e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/etcd/self/self.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/self/self.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("etcd", "self", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("etcd", "self", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -39,9 +40,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/store/store.go b/vendor/github.com/elastic/beats/metricbeat/module/etcd/store/store.go index 93660611..842fb4c2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/etcd/store/store.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/store/store.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("etcd", "store", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("etcd", "store", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -39,9 +40,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/fields.yml index f980a17c..c0b1b36f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/fields.yml @@ -3,7 +3,7 @@ description: > Golang module short_config: false - release: experimental + release: beta settings: ["ssl"] fields: - name: golang diff --git a/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/kibana/6/dashboard/Metricbeat-golang-overview.json b/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/kibana/6/dashboard/Metricbeat-golang-overview.json index 838a295d..44555c2e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/kibana/6/dashboard/Metricbeat-golang-overview.json +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/golang/_meta/kibana/6/dashboard/Metricbeat-golang-overview.json @@ -9,7 +9,7 @@ "title": "Heap Summary [Metricbeat Golang]", "uiStateJSON": "{}", "version": 1, - "visState": "{\"title\":\"Heap Summary [Metricbeat Golang]\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.total\\\").label(\\\"System Total Memory\\\").yaxis(label=\\\"Bytes\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"min:golang.heap.allocations.allocated\\\").label(\\\"Bytes Allocated(min)\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"max:golang.heap.allocations.allocated\\\").label(\\\"Bytes Allocated(max)\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.gc.next_gc_limit\\\").label(\\\"GC Limit\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.gc.pause.count\\\").condition(lt,1, null).points().label(\\\"GC Cycles(count)\\\").yaxis(2,label=\\\"Count\\\")\",\"interval\":\"10s\"},\"aggs\":[]}" + "visState": "{\"title\":\"Heap Summary [Metricbeat Golang]\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.total\\\").label(\\\"System Total Memory\\\").yaxis(label=\\\"Bytes\\\",units=bytes),.es(index=\\\"metricbeat*\\\",metric=\\\"min:golang.heap.allocations.allocated\\\").label(\\\"Bytes Allocated(min)\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"max:golang.heap.allocations.allocated\\\").label(\\\"Bytes Allocated(max)\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.gc.next_gc_limit\\\").label(\\\"GC Limit\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.gc.pause.count\\\").condition(lt,1, null).points().label(\\\"GC Cycles(count)\\\").yaxis(2,label=\\\"Count\\\")\",\"interval\":\"10s\"},\"aggs\":[]}" }, "id": "58000780-f529-11e6-844d-b170e2f0a07e", "type": "visualization", @@ -21,10 +21,10 @@ "kibanaSavedObjectMeta": { "searchSourceJSON": "{\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"language\":\"lucene\"},\"filter\":[]}" }, - "title": "Heap [Metricbeat Golang]", + "title": "Heap [Metricbeat Golang]", "uiStateJSON": "{}", "version": 1, - "visState": "{\"title\":\"Heap [Metricbeat Golang]\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.total\\\").label(\\\"Heap Total\\\").derivative().movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.active\\\").label(\\\"Heap Inuse\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.allocated\\\").label(\\\"Heap Allocated\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.idle\\\").label(\\\"Heap Idle\\\").movingaverage(30)\",\"interval\":\"10s\"},\"aggs\":[]}" + "visState": "{\"title\":\"Heap [Metricbeat Golang]\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.total\\\").label(\\\"Heap Total\\\").derivative().movingaverage(30).yaxis(label=\\\"Bytes\\\",units=bytes),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.active\\\").label(\\\"Heap Inuse\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.allocated\\\").label(\\\"Heap 
Allocated\\\").movingaverage(30),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.allocations.idle\\\").label(\\\"Heap Idle\\\").movingaverage(30)\",\"interval\":\"10s\"},\"aggs\":[]}" }, "id": "95388680-f52a-11e6-969c-518c48c913e4", "type": "visualization", @@ -51,10 +51,10 @@ "kibanaSavedObjectMeta": { "searchSourceJSON": "{\"query\":{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"language\":\"lucene\"},\"filter\":[]}" }, - "title": "System [Metricbeat Golang]", + "title": "System [Metricbeat Golang]", "uiStateJSON": "{}", "version": 1, - "visState": "{\"title\":\"System [Metricbeat Golang]\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.total\\\").label(\\\"System Total\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.obtained\\\").label(\\\"System Obtained\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.stack\\\").label(\\\"System Stack\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.released\\\").label(\\\"System Released\\\")\",\"interval\":\"10s\"},\"aggs\":[]}" + "visState": "{\"title\":\"System [Metricbeat Golang]\",\"type\":\"timelion\",\"params\":{\"expression\":\".es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.total\\\").label(\\\"System Total\\\").yaxis(label=\\\"Bytes\\\",units=bytes),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.obtained\\\").label(\\\"System Obtained\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.stack\\\").label(\\\"System Stack\\\"),.es(index=\\\"metricbeat*\\\",metric=\\\"avg:golang.heap.system.released\\\").label(\\\"System Released\\\")\",\"interval\":\"10s\"},\"aggs\":[]}" }, "id": "9a9a8bf0-f52a-11e6-969c-518c48c913e4", "type": "visualization", diff --git a/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/_meta/fields.yml index a6b740ab..b316b605 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/_meta/fields.yml @@ -2,7 +2,7 @@ type: group description: > expvar - release: experimental + release: beta fields: - name: cmdline type: keyword diff --git a/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/expvar.go b/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/expvar.go index 1ffcff81..e7c7dbf4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/expvar.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/golang/expvar/expvar.go @@ -44,7 +44,7 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The golang expvar metricset is experimental") + cfgwarn.Beta("The golang expvar metricset is beta") config := struct { Namespace string `config:"expvar.namespace" validate:"required"` @@ -54,9 +54,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, namespace: config.Namespace, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/_meta/fields.yml index 7675349d..b37a746b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/_meta/fields.yml @@ -2,7 +2,7 @@ type: group description: > The golang program heap information exposed by expvar. - release: experimental + release: beta fields: - name: cmdline type: keyword diff --git a/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/heap.go b/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/heap.go index 54beb5a9..61da13df 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/heap.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/golang/heap/heap.go @@ -48,11 +48,15 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The golang heap metricset is experimental") + cfgwarn.Beta("The golang heap metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/config.reference.yml new file mode 100644 index 00000000..29bb1a47 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/config.reference.yml @@ -0,0 +1,22 @@ +- module: graphite + metricsets: ["server"] + enabled: true + + # Host address to listen on. Default localhost. + #host: localhost + + # Listening port. Default 2003. + #port: 2003 + + # Protocol to listen on. This can be udp or tcp. Default udp. 
+ #protocol: "udp" + + # Receive buffer size in bytes + #receive_buffer_size: 1024 + + #templates: + # - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats + # namespace: "test" + # template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash + # delimiter: "_" + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/config.yml index b5318ecd..2c6ac0e7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/config.yml @@ -1,10 +1 @@ - module: graphite - metricsets: ["server"] - enabled: true -# protocol: "udp" -# templates: -# - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats -# namespace: "test" -# template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash -# delimiter: "_" - diff --git a/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/docs.asciidoc index 38e61c08..0ddac8ff 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/graphite/_meta/docs.asciidoc @@ -1,2 +1,3 @@ This is the graphite Module. +The default metricset is `server`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/graphite/server/server.go b/vendor/github.com/elastic/beats/metricbeat/module/graphite/server/server.go index 6a3973aa..caa2f9bc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/graphite/server/server.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/graphite/server/server.go @@ -14,9 +14,9 @@ import ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("graphite", "server", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("graphite", "server", New, + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile index 911edb33..995a9a94 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM haproxy:1.6 +FROM haproxy:1.8 RUN apt-get update && apt-get install -y netcat HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 14567 COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.6 b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.6 new file mode 100644 index 00000000..911edb33 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.6 @@ -0,0 +1,5 @@ +FROM haproxy:1.6 +RUN apt-get update && apt-get install -y netcat +HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 14567 +COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg +EXPOSE 14567 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.7 b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.7 new file mode 100644 index 00000000..29867269 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.7 @@ -0,0 +1,5 @@ +FROM haproxy:1.7 +RUN apt-get update && apt-get install -y netcat +HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 14567 +COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg +EXPOSE 14567 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/config.reference.yml new file mode 100644 index 00000000..bf45b4dc --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: haproxy + metricsets: ["info", "stat"] + period: 10s + hosts: ["tcp://127.0.0.1:14567"] + enabled: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/config.yml index febce8d2..203e1cc7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/config.yml @@ -1,4 +1,2 @@ - module: haproxy - metricsets: ["info", "stat"] - period: 10s hosts: ["tcp://127.0.0.1:14567"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc index cb88103f..791954a3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/docs.asciidoc @@ -25,13 +25,15 @@ required authentication add this to the haproxy config: [source,haproxy] ---- listen stats - bind 0.0.0.0:14569 + bind 0.0.0.0:14567 stats enable stats uri /stats stats auth admin:admin ---- +The default metricsets are 
`info` and `stat`. + [float] === Compatibility -The HAProxy metricsets were tested with HAProxy 1.6 and are expected to work with all 1.6 versions. +The HAProxy metricsets are tested with HAProxy versions 1.6, 1.7, and 1.8. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/env b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/env index 34526c1b..c0762e53 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/env +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/env @@ -1,2 +1,4 @@ +HAPROXY_1_6_HOST=haproxy_1_6 +HAPROXY_1_7_HOST=haproxy_1_7 HAPROXY_HOST=haproxy HAPROXY_PORT=14567 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/info.go b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/info.go index e5243d8f..02b30107 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/info.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/info/info.go @@ -19,9 +19,10 @@ var ( // init registers the haproxy info MetricSet. func init() { - if err := mb.Registry.AddMetricSet("haproxy", "info", New, haproxy.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("haproxy", "info", New, + mb.WithHostParser(haproxy.HostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for haproxy info. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat.go b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat.go index d7fd1443..8b74cac2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat.go @@ -19,9 +19,10 @@ var ( // init registers the haproxy stat MetricSet. func init() { - if err := mb.Registry.AddMetricSet("haproxy", statsMethod, New, haproxy.HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("haproxy", statsMethod, New, + mb.WithHostParser(haproxy.HostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for haproxy stats.
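For context on the `tcp://127.0.0.1:14567` hosts entry used in the config above: both haproxy metricsets talk to HAProxy's stats interface over that address. The following is a minimal sketch, not the module code, assuming a stats socket is bound on 127.0.0.1:14567 as in the example configuration; `show stat` and `show info` are standard HAProxy stats-socket commands.

[source,go]
----
// Minimal sketch: issue "show stat" against an HAProxy stats socket over TCP.
// Assumes HAProxy exposes the socket on 127.0.0.1:14567 (illustrative address).
package main

import (
	"bufio"
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "127.0.0.1:14567", 5*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// "show stat" returns CSV rows; "show info" would return key/value pairs.
	fmt.Fprint(conn, "show stat\n")

	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		fmt.Println(scanner.Text()) // first line is the CSV header (# pxname,svname,...)
	}
}
----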
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat_ingetration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat_integration_test.go similarity index 100% rename from vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat_ingetration_test.go rename to vendor/github.com/elastic/beats/metricbeat/module/haproxy/stat/stat_integration_test.go diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/Dockerfile index 3cb26850..b422466c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.9.2 +FROM golang:1.9.4 COPY test/main.go main.go diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/config.yml index 02c71778..a7e4e4eb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/config.yml @@ -6,8 +6,11 @@ path: "/" #body: "" #method: "GET" + #username: "user" + #password: "secret" #request.enabled: false #response.enabled: false + #json.is_array: false #dedot.enabled: false - module: http diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/fields.yml index 70a1d9b5..0a3805e0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/fields.yml @@ -2,7 +2,7 @@ title: "HTTP" description: > HTTP module - release: beta + release: ga settings: ["ssl"] fields: - name: http diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/test/main.go b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/test/main.go index 9dc520f6..40e02dee 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/test/main.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/_meta/test/main.go @@ -7,13 +7,20 @@ import ( ) func main() { - http.HandleFunc("/", serve) + http.HandleFunc("/jsonarr", serveJSONArr) + http.HandleFunc("/jsonobj", serveJSONObj) + http.HandleFunc("/", serveJSONObj) + err := http.ListenAndServe(":8080", nil) if err != nil { log.Fatal("ListenAndServe: ", err) } } -func serve(w http.ResponseWriter, r *http.Request) { +func serveJSONArr(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, `[{"hello1":"world1"}, {"hello2": "world2"}]`) +} + +func serveJSONObj(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, `{"hello":"world"}`) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/data.json index c6c86579..af93a154 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/data.json @@ -5,15 +5,14 @@ "name": "host.example.com" }, "http": { - "testnamespace": { + "json": { "hello": "world" } }, "metricset": { - "host": "http:8080", + "host": "127.0.0.1:8080", "module": "http", "name": "json", - "namespace": "testnamespace", "rtt": 115 } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/fields.yml 
b/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/fields.yml index 5804d87a..efaa0638 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/fields.yml @@ -2,5 +2,5 @@ type: group description: > json metricset - release: beta + release: ga fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/test/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/test/config.yml index bec83bfa..57eaafb4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/test/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/json/_meta/test/config.yml @@ -13,6 +13,7 @@ metricbeat.modules: headers: Accept: application/json request.enabled: true + json.is_array: false response.enabled: true #================================ Outputs ===================================== diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/json/json.go b/vendor/github.com/elastic/beats/metricbeat/module/http/json/json.go index 09441b7d..532fa493 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/json/json.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/json/json.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/metricbeat/helper" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" @@ -50,6 +49,7 @@ type MetricSet struct { body string requestEnabled bool responseEnabled bool + jsonIsArray bool deDotEnabled bool } @@ -57,7 +57,6 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Beta("The http json metricset is in beta.") config := struct { Namespace string `config:"namespace" validate:"required"` @@ -65,12 +64,14 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { Body string `config:"body"` RequestEnabled bool `config:"request.enabled"` ResponseEnabled bool `config:"response.enabled"` + JSONIsArray bool `config:"json.is_array"` DeDotEnabled bool `config:"dedot.enabled"` }{ Method: "GET", Body: "", RequestEnabled: false, ResponseEnabled: false, + JSONIsArray: false, DeDotEnabled: false, } @@ -78,7 +79,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetMethod(config.Method) http.SetBody([]byte(config.Body)) @@ -90,37 +94,18 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { http: http, requestEnabled: config.RequestEnabled, responseEnabled: config.ResponseEnabled, + jsonIsArray: config.JSONIsArray, deDotEnabled: config.DeDotEnabled, }, nil } -// Fetch methods implements the data gathering and data conversion to the right format -// It returns the event which is then forward to the output. In case of an error, a -// descriptive error must be returned. 
-func (m *MetricSet) Fetch() (common.MapStr, error) { - response, err := m.http.FetchResponse() - if err != nil { - return nil, err - } - defer response.Body.Close() - - var jsonBody map[string]interface{} - var event map[string]interface{} - - body, err := ioutil.ReadAll(response.Body) - if err != nil { - return nil, err - } - - err = json.Unmarshal(body, &jsonBody) - if err != nil { - return nil, err - } +func (m *MetricSet) processBody(response *http.Response, jsonBody interface{}) common.MapStr { + var event common.MapStr if m.deDotEnabled { - event = common.DeDotJSON(jsonBody).(map[string]interface{}) + event = common.DeDotJSON(jsonBody).(common.MapStr) } else { - event = jsonBody + event = jsonBody.(common.MapStr) } if m.requestEnabled { @@ -147,7 +132,49 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { // Set dynamic namespace event["_namespace"] = m.namespace - return event, nil + return event +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + response, err := m.http.FetchResponse() + if err != nil { + return nil, err + } + defer response.Body.Close() + + var jsonBody common.MapStr + var jsonBodyArr []common.MapStr + var events []common.MapStr + + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } + + if m.jsonIsArray { + err = json.Unmarshal(body, &jsonBodyArr) + if err != nil { + return nil, err + } + + for _, obj := range jsonBodyArr { + event := m.processBody(response, obj) + events = append(events, event) + } + } else { + err = json.Unmarshal(body, &jsonBody) + if err != nil { + return nil, err + } + + event := m.processBody(response, jsonBody) + events = append(events, event) + } + + return events, nil } func (m *MetricSet) getHeaders(header http.Header) map[string]string { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/json/json_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/http/json/json_integration_test.go index 30c7361e..44e19ae4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/json/json_integration_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/json/json_integration_test.go @@ -12,10 +12,10 @@ import ( mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) -func TestFetch(t *testing.T) { +func TestFetchObject(t *testing.T) { compose.EnsureUp(t, "http") - f := mbtest.NewEventFetcher(t, getConfig()) + f := mbtest.NewEventsFetcher(t, getConfig("object")) event, err := f.Fetch() if !assert.NoError(t, err) { t.FailNow() @@ -24,23 +24,47 @@ func TestFetch(t *testing.T) { t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event) } +func TestFetchArray(t *testing.T) { + compose.EnsureUp(t, "http") + + f := mbtest.NewEventsFetcher(t, getConfig("array")) + event, err := f.Fetch() + if !assert.NoError(t, err) { + t.FailNow() + } + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event) +} func TestData(t *testing.T) { compose.EnsureUp(t, "http") - f := mbtest.NewEventFetcher(t, getConfig()) - err := mbtest.WriteEvent(f, t) + f := mbtest.NewEventsFetcher(t, getConfig("object")) + err := mbtest.WriteEvents(f, t) if err != nil { t.Fatal("write", err) } + } -func getConfig() map[string]interface{} { +func getConfig(jsonType string) map[string]interface{} { + var path string + var responseIsArray bool + switch jsonType { + 
case "object": + path = "/jsonobj" + responseIsArray = false + case "array": + path = "/jsonarr" + responseIsArray = true + } + return map[string]interface{}{ - "module": "http", - "metricsets": []string{"json"}, - "hosts": []string{getEnvHost() + ":" + getEnvPort()}, - "path": "/", - "namespace": "testnamespace", + "module": "http", + "metricsets": []string{"json"}, + "hosts": []string{getEnvHost() + ":" + getEnvPort()}, + "path": path, + "namespace": "testnamespace", + "json.is_array": responseIsArray, } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/server/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/http/server/_meta/fields.yml index d531fca8..028dd892 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/server/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/server/_meta/fields.yml @@ -2,5 +2,5 @@ type: group description: > server - release: experimental + release: beta fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/http/server/server.go b/vendor/github.com/elastic/beats/metricbeat/module/http/server/server.go index 562908d4..85418c1b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/http/server/server.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/http/server/server.go @@ -29,7 +29,7 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("EXPERIMENTAL: The http server metricset is experimental") + cfgwarn.Beta("The http server metricset is beta") config := defaultHttpServerConfig() if err := base.Module().UnpackConfig(&config); err != nil { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile index b555e9f7..0e642174 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile @@ -5,7 +5,7 @@ ENV TC apache-tomcat-${TOMCAT_VERSION} HEALTHCHECK --interval=1s --retries=90 CMD curl -f curl localhost:8778/jolokia/ EXPOSE 8778 -RUN wget http://archive.apache.org/dist/tomcat/tomcat-7/v${TOMCAT_VERSION}/bin/${TC}.tar.gz +RUN wget -q http://archive.apache.org/dist/tomcat/tomcat-7/v${TOMCAT_VERSION}/bin/${TC}.tar.gz RUN tar xzf ${TC}.tar.gz -C /opt CMD env CATALINA_OPTS=$(jolokia_opts) /opt/${TC}/bin/catalina.sh run diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/config.yml index de331606..9075615f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/config.yml @@ -3,7 +3,23 @@ period: 10s hosts: ["localhost"] namespace: "metrics" - path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" - jmx.mapping: - jmx.application: - jmx.instance: + #username: "user" + #password: "secret" + jmx.mappings: + - mbean: 'java.lang:type=Runtime' + attributes: + - attr: Uptime + field: uptime + - mbean: 'java.lang:type=Memory' + attributes: + - attr: HeapMemoryUsage + field: memory.heap_usage + - attr: NonHeapMemoryUsage + field: memory.non_heap_usage + # GC Metrics - this depends on what is available on your JVM + # - mbean: 'java.lang:type=GarbageCollector,name=ConcurrentMarkSweep' + # 
attributes: + # - attr: CollectionTime + # field: gc.cms_collection_time + # - attr: CollectionCount + # field: gc.cms_collection_count diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/fields.yml index 84b8f267..529544d1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/fields.yml @@ -3,7 +3,7 @@ description: > Jolokia module short_config: false - release: beta + release: ga settings: ["ssl"] fields: - name: jolokia diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/docs.asciidoc index 3956b4cc..f328bd24 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/docs.asciidoc @@ -22,6 +22,7 @@ mapping: attributes: - attr: Uptime field: uptime + event: uptime --- In case the underlying attribute is an object (e.g. see HeapMemoryUsage attribute in java.lang:type=Memory) its @@ -34,11 +35,16 @@ All metrics from a single mapping will be POSTed to the defined host/port and se To make it possible to differentiate between metrics from multiple similar applications running on the same host, please configure multiple modules. +When wildcards are used, an event is sent to Elastic for each matching mbean, and an `mbean` field is added to that event. + +Optionally, an `event` name can be added to each attribute; this groups all metrics with the same `event` +name into the same event when they are sent to Elastic. + It is required to set a namespace in the general module config section. [float] === Limitations -No authentication against Jolokia is supported yet. No wildcards in Jolokia requests supported yet. +No authentication against Jolokia is supported yet. All Jolokia requests have canonicalNaming set to true (see details here: https://jolokia.org/reference/html/protocol.html).
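The jmx metricset sends its query as a JSON array of `read` request blocks POSTed to `/jolokia/`. The sketch below is illustrative only: it marshals one such block for a wildcard MBean, mirroring the `RequestBlock` structure and the per-request `config` introduced in the `config.go` changes further down; the MBean and attribute names are taken from the wildcard test fixture in this change set.

[source,go]
----
// Illustrative sketch of the request body shape POSTed to Jolokia: a bulk
// "read" for a wildcard MBean, with ignoreErrors and canonicalNaming passed
// as per-request config (field names mirror the RequestBlock struct below).
package main

import (
	"encoding/json"
	"fmt"
)

type requestBlock struct {
	Type      string                 `json:"type"`
	MBean     string                 `json:"mbean"`
	Attribute []string               `json:"attribute"`
	Config    map[string]interface{} `json:"config"`
}

func main() {
	body, err := json.MarshalIndent([]requestBlock{{
		Type:      "read",
		MBean:     "Catalina:name=*,type=ThreadPool",
		Attribute: []string{"port", "maxConnections"},
		Config: map[string]interface{}{
			"ignoreErrors":    true,
			"canonicalNaming": true,
		},
	}}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // this JSON array is what gets POSTed to /jolokia/
}
----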
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/fields.yml index 8033a27f..a927f3fc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/fields.yml @@ -1 +1 @@ -- release: beta +- release: ga diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/test/jolokia_response_wildcard.json b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/test/jolokia_response_wildcard.json new file mode 100644 index 00000000..47d9cdf9 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/_meta/test/jolokia_response_wildcard.json @@ -0,0 +1,24 @@ +[ + { + "value": { + "Catalina:name=\"http-bio-8080\",type=ThreadPool": { + "maxConnections": 200, + "port": 8080 + }, + "Catalina:name=\"ajp-bio-8009\",type=ThreadPool": { + "maxConnections": 200, + "port": 8009 + } + }, + "request": { + "type": "read", + "attribute": [ + "port", + "maxConnections" + ], + "mbean": "Catalina:name=*,type=ThreadPool" + }, + "status": 200, + "timestamp": 1520469345 + } +] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/config.go b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/config.go index a711f305..a91e3f4c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/config.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/config.go @@ -1,6 +1,12 @@ package jmx -import "encoding/json" +import ( + "encoding/json" + "fmt" + "regexp" + "sort" + "strings" +) type JMXMapping struct { MBean string @@ -10,6 +16,7 @@ type JMXMapping struct { type Attribute struct { Attr string Field string + Event string } // RequestBlock is used to build the request blocks of the following format: @@ -32,24 +39,87 @@ type Attribute struct { // } // ] type RequestBlock struct { - Type string `json:"type"` - MBean string `json:"mbean"` - Attribute []string `json:"attribute"` + Type string `json:"type"` + MBean string `json:"mbean"` + Attribute []string `json:"attribute"` + Config map[string]interface{} `json:"config"` } -func buildRequestBodyAndMapping(mappings []JMXMapping) ([]byte, map[string]string, error) { - responseMapping := map[string]string{} +type attributeMappingKey struct { + mbean, attr string +} + +// AttributeMapping contains the mapping information between attributes in Jolokia +// responses and fields in metricbeat events +type AttributeMapping map[attributeMappingKey]Attribute + +// Get the mapping options for the attribute of an mbean +func (m AttributeMapping) Get(mbean, attr string) (Attribute, bool) { + a, found := m[attributeMappingKey{mbean, attr}] + return a, found +} + +// Parse strings with properties with the format key=value, being: +// - key a nonempty string of characters which may not contain any of the characters, +// comma (,), equals (=), colon, asterisk, or question mark. +// - value a string that can be quoted or unquoted, if unquoted it cannot be empty and +// cannot contain any of the characters comma, equals, colon, or quote. 
+var propertyRegexp = regexp.MustCompile("[^,=:*?]+=([^,=:\"]+|\".*\")") + +func canonicalizeMBeanName(name string) (string, error) { + // From https://docs.oracle.com/javase/8/docs/api/javax/management/ObjectName.html#getCanonicalName-- + // + // Returns the canonical form of the name; that is, a string representation where the + // properties are sorted in lexical order. + // The canonical form of the name is a String consisting of the domain part, + // a colon (:), the canonical key property list, and a pattern indication. + // + parts := strings.SplitN(name, ":", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return name, fmt.Errorf("domain and properties needed in mbean name: %s", name) + } + domain := parts[0] + + // Using this regexp instead of just splitting by commas because values can be quoted + // and contain commas, which complicates the parsing. + properties := propertyRegexp.FindAllString(parts[1], -1) + propertyList := strings.Join(properties, ",") + if len(propertyList) != len(parts[1]) { + // Some property didn't match + return name, fmt.Errorf("mbean properties must be in the form key=value: %s", name) + } + + sort.Strings(properties) + return domain + ":" + strings.Join(properties, ","), nil +} + +func buildRequestBodyAndMapping(mappings []JMXMapping) ([]byte, AttributeMapping, error) { + responseMapping := make(AttributeMapping) var blocks []RequestBlock + // At least Jolokia 1.5 responds with canonicalized MBean names when using + // wildcards, even when canonicalNaming is set to false, which makes mappings fail. + // So use canonicalized names everywhere. + // If Jolokia returns non-canonicalized MBean names, then we'll need to canonicalize + // them or change our approach to mappings. + config := map[string]interface{}{ + "ignoreErrors": true, + "canonicalNaming": true, + } for _, mapping := range mappings { + mbean, err := canonicalizeMBeanName(mapping.MBean) + if err != nil { + return nil, nil, err + } rb := RequestBlock{ - Type: "read", - MBean: mapping.MBean, + Type: "read", + MBean: mbean, + Config: config, } for _, attribute := range mapping.Attributes { rb.Attribute = append(rb.Attribute, attribute.Attr) - responseMapping[mapping.MBean+"_"+attribute.Attr] = attribute.Field + responseMapping[attributeMappingKey{mbean, attribute.Attr}] = attribute } blocks = append(blocks, rb) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/config_test.go b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/config_test.go new file mode 100644 index 00000000..4e1f3f21 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/config_test.go @@ -0,0 +1,81 @@ +package jmx + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCanonicalMBeanName(t *testing.T) { + cases := []struct { + mbean string + expected string + ok bool + }{ + { + mbean: ``, + ok: false, + }, + { + mbean: `type=Runtime`, + ok: false, + }, + { + mbean: `java.lang`, + ok: false, + }, + { + mbean: `java.lang:`, + ok: false, + }, + { + mbean: `java.lang:type=Runtime,name`, + ok: false, + }, + { + mbean: `java.lang:type=Runtime`, + expected: `java.lang:type=Runtime`, + ok: true, + }, + { + mbean: `java.lang:name=Foo,type=Runtime`, + expected: `java.lang:name=Foo,type=Runtime`, + ok: true, + }, + { + mbean: `java.lang:type=Runtime,name=Foo`, + expected: `java.lang:name=Foo,type=Runtime`, + ok: true, + }, + { + mbean: `java.lang:type=Runtime,name=Foo*`, + expected: `java.lang:name=Foo*,type=Runtime`, + ok: true,
+ }, + { + mbean: `java.lang:type=Runtime,name=*`, + expected: `java.lang:name=*,type=Runtime`, + ok: true, + }, + { + mbean: `java.lang:type=Runtime,name="foo,bar"`, + expected: `java.lang:name="foo,bar",type=Runtime`, + ok: true, + }, + { + mbean: `Catalina:type=RequestProcessor,worker="http-nio-8080",name=HttpRequest1`, + expected: `Catalina:name=HttpRequest1,type=RequestProcessor,worker="http-nio-8080"`, + ok: true, + }, + } + + for _, c := range cases { + canonical, err := canonicalizeMBeanName(c.mbean) + if c.ok { + assert.NoError(t, err, "failed parsing for: "+c.mbean) + assert.Equal(t, c.expected, canonical, "mbean: "+c.mbean) + } else { + assert.Error(t, err, "should have failed for: "+c.mbean) + } + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data.go b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data.go index 294c4260..61ea8e36 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data.go @@ -2,11 +2,17 @@ package jmx import ( "encoding/json" + "strings" "github.com/joeshaw/multierror" "github.com/pkg/errors" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +const ( + mbeanEventKey = "mbean" ) type Entry struct { @@ -47,56 +53,126 @@ type Entry struct { // "status": 200 // } // ] -func eventMapping(content []byte, mapping map[string]string) (common.MapStr, error) { +// +// With wildcards there is an additional nesting level: +// +// [ +// { +// "request": { +// "type": "read", +// "attribute": "maxConnections", +// "mbean": "Catalina:name=*,type=ThreadPool" +// }, +// "value": { +// "Catalina:name=\"http-bio-8080\",type=ThreadPool": { +// "maxConnections": 200 +// }, +// "Catalina:name=\"ajp-bio-8009\",type=ThreadPool": { +// "maxConnections": 200 +// } +// }, +// "timestamp": 1519409583 +// "status": 200, +// } +// } +type eventKey struct { + mbean, event string +} + +func eventMapping(content []byte, mapping AttributeMapping) ([]common.MapStr, error) { var entries []Entry if err := json.Unmarshal(content, &entries); err != nil { return nil, errors.Wrapf(err, "failed to unmarshal jolokia JSON response '%v'", string(content)) } - event := common.MapStr{} + // Generate a different event for each wildcard mbean, and and additional one + // for non-wildcard requested mbeans, group them by event name if defined + mbeanEvents := make(map[eventKey]common.MapStr) var errs multierror.Errors for _, v := range entries { + hasWildcard := strings.Contains(v.Request.Mbean, "*") for attribute, value := range v.Value { - // Extend existing event - err := parseResponseEntry(v.Request.Mbean, attribute, value, event, mapping) - if err != nil { - errs = append(errs, err) + if !hasWildcard { + err := parseResponseEntry(v.Request.Mbean, v.Request.Mbean, attribute, value, mbeanEvents, mapping) + if err != nil { + errs = append(errs, err) + } + continue + } + + // If there was a wildcard, we are going to have an additional + // nesting level in response values, and attribute here is going + // to be actually the matching mbean name + values, ok := value.(map[string]interface{}) + if !ok { + errs = append(errs, errors.Errorf("expected map of values for %s", v.Request.Mbean)) + continue + } + + responseMbean := attribute + for attribute, value := range values { + err := parseResponseEntry(v.Request.Mbean, responseMbean, attribute, value, mbeanEvents, mapping) + if err != nil { + errs = append(errs, err) + } } } } - 
return event, errs.Err() + var events []common.MapStr + for _, event := range mbeanEvents { + events = append(events, event) + } + + return events, errs.Err() +} + +func selectEvent(events map[eventKey]common.MapStr, key eventKey) common.MapStr { + event, found := events[key] + if !found { + event = common.MapStr{} + if key.mbean != "" { + event.Put(mbeanEventKey, key.mbean) + } + events[key] = event + } + return event } func parseResponseEntry( - mbeanName string, + requestMbeanName string, + responseMbeanName string, attributeName string, - attibuteValue interface{}, - event common.MapStr, - mapping map[string]string, + attributeValue interface{}, + events map[eventKey]common.MapStr, + mapping AttributeMapping, ) error { - // Create metric name by merging mbean and attribute fields. - var metricName = mbeanName + "_" + attributeName - - key, exists := mapping[metricName] + field, exists := mapping.Get(requestMbeanName, attributeName) if !exists { - return errors.Errorf("metric key '%v' not found in response", metricName) + // This shouldn't ever happen, if it does it is probably that some of our + // assumptions when building the request and the mapping is wrong. + logp.Debug("jolokia.jmx", "mapping: %+v", mapping) + return errors.Errorf("metric key '%v' for mbean '%s' not found in mapping", attributeName, requestMbeanName) } - var err error + var key eventKey + key.event = field.Event + if responseMbeanName != requestMbeanName { + key.mbean = responseMbeanName + } + event := selectEvent(events, key) // In case the attributeValue is a map the keys are dedotted - c, ok := attibuteValue.(map[string]interface{}) + data := attributeValue + c, ok := data.(map[string]interface{}) if ok { newData := map[string]interface{}{} for k, v := range c { newData[common.DeDot(k)] = v } - _, err = event.Put(key, newData) - } else { - _, err = event.Put(key, attibuteValue) + data = newData } - + _, err := event.Put(field.Field, data) return err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data_test.go b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data_test.go index 864fbc41..06b5e992 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/data_test.go @@ -20,45 +20,198 @@ func TestEventMapper(t *testing.T) { assert.Nil(t, err) - var mapping = map[string]string{ - "java.lang:type=Runtime_Uptime": "uptime", - "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep_CollectionTime": "gc.cms_collection_time", - "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep_CollectionCount": "gc.cms_collection_count", - "java.lang:type=Memory_HeapMemoryUsage": "memory.heap_usage", - "java.lang:type=Memory_NonHeapMemoryUsage": "memory.non_heap_usage", - "org.springframework.boot:type=Endpoint,name=metricsEndpoint_Metrics": "metrics", + var mapping = AttributeMapping{ + attributeMappingKey{"java.lang:type=Runtime", "Uptime"}: Attribute{ + Attr: "Uptime", Field: "uptime"}, + attributeMappingKey{"java.lang:type=GarbageCollector,name=ConcurrentMarkSweep", "CollectionTime"}: Attribute{ + Attr: "CollectionTime", Field: "gc.cms_collection_time"}, + attributeMappingKey{"java.lang:type=GarbageCollector,name=ConcurrentMarkSweep", "CollectionCount"}: Attribute{ + Attr: "CollectionCount", Field: "gc.cms_collection_count"}, + attributeMappingKey{"java.lang:type=Memory", "HeapMemoryUsage"}: Attribute{ + Attr: "HeapMemoryUsage", Field: "memory.heap_usage"}, + 
attributeMappingKey{"java.lang:type=Memory", "NonHeapMemoryUsage"}: Attribute{ + Attr: "NonHEapMemoryUsage", Field: "memory.non_heap_usage"}, + attributeMappingKey{"org.springframework.boot:type=Endpoint,name=metricsEndpoint", "Metrics"}: Attribute{ + Attr: "Metrics", Field: "metrics"}, } - event, err := eventMapping(jolokiaResponse, mapping) + events, err := eventMapping(jolokiaResponse, mapping) assert.Nil(t, err) - expected := common.MapStr{ - "uptime": float64(47283), - "gc": common.MapStr{ - "cms_collection_time": float64(53), - "cms_collection_count": float64(1), + expected := []common.MapStr{ + { + "uptime": float64(47283), + "gc": common.MapStr{ + "cms_collection_time": float64(53), + "cms_collection_count": float64(1), + }, + "memory": common.MapStr{ + "heap_usage": map[string]interface{}{ + "init": float64(1073741824), + "committed": float64(1037959168), + "max": float64(1037959168), + "used": float64(227420472), + }, + "non_heap_usage": map[string]interface{}{ + "init": float64(2555904), + "committed": float64(53477376), + "max": float64(-1), + "used": float64(50519768), + }, + }, + "metrics": map[string]interface{}{ + "atomikos_nbTransactions": float64(0), + "classes": float64(18857), + "classes_loaded": float64(19127), + "classes_unloaded": float64(270), + }, + }, + } + + assert.ElementsMatch(t, expected, events) +} + +func TestEventGroupingMapper(t *testing.T) { + absPath, err := filepath.Abs("./_meta/test") + + assert.NotNil(t, absPath) + assert.Nil(t, err) + + jolokiaResponse, err := ioutil.ReadFile(absPath + "/jolokia_response.json") + + assert.Nil(t, err) + + var mapping = AttributeMapping{ + attributeMappingKey{"java.lang:type=Runtime", "Uptime"}: Attribute{ + Attr: "Uptime", Field: "uptime"}, + attributeMappingKey{"java.lang:type=GarbageCollector,name=ConcurrentMarkSweep", "CollectionTime"}: Attribute{ + Attr: "CollectionTime", Field: "gc.cms_collection_time", Event: "gc"}, + attributeMappingKey{"java.lang:type=GarbageCollector,name=ConcurrentMarkSweep", "CollectionCount"}: Attribute{ + Attr: "CollectionCount", Field: "gc.cms_collection_count", Event: "gc"}, + attributeMappingKey{"java.lang:type=Memory", "HeapMemoryUsage"}: Attribute{ + Attr: "HeapMemoryUsage", Field: "memory.heap_usage", Event: "memory"}, + attributeMappingKey{"java.lang:type=Memory", "NonHeapMemoryUsage"}: Attribute{ + Attr: "NonHEapMemoryUsage", Field: "memory.non_heap_usage", Event: "memory"}, + attributeMappingKey{"org.springframework.boot:type=Endpoint,name=metricsEndpoint", "Metrics"}: Attribute{ + Attr: "Metrics", Field: "metrics"}, + } + + events, err := eventMapping(jolokiaResponse, mapping) + assert.Nil(t, err) + + expected := []common.MapStr{ + { + "uptime": float64(47283), + "metrics": map[string]interface{}{ + "atomikos_nbTransactions": float64(0), + "classes": float64(18857), + "classes_loaded": float64(19127), + "classes_unloaded": float64(270), + }, }, - "memory": common.MapStr{ - "heap_usage": map[string]interface{}{ - "init": float64(1073741824), - "committed": float64(1037959168), - "max": float64(1037959168), - "used": float64(227420472), + { + "gc": common.MapStr{ + "cms_collection_time": float64(53), + "cms_collection_count": float64(1), }, - "non_heap_usage": map[string]interface{}{ - "init": float64(2555904), - "committed": float64(53477376), - "max": float64(-1), - "used": float64(50519768), + }, + { + "memory": common.MapStr{ + "heap_usage": map[string]interface{}{ + "init": float64(1073741824), + "committed": float64(1037959168), + "max": float64(1037959168), + "used": 
float64(227420472), + }, + "non_heap_usage": map[string]interface{}{ + "init": float64(2555904), + "committed": float64(53477376), + "max": float64(-1), + "used": float64(50519768), + }, }, }, - "metrics": map[string]interface{}{ - "atomikos_nbTransactions": float64(0), - "classes": float64(18857), - "classes_loaded": float64(19127), - "classes_unloaded": float64(270), + } + + assert.ElementsMatch(t, expected, events) +} + +func TestEventMapperWithWildcard(t *testing.T) { + absPath, err := filepath.Abs("./_meta/test") + + assert.NotNil(t, absPath) + assert.Nil(t, err) + + jolokiaResponse, err := ioutil.ReadFile(absPath + "/jolokia_response_wildcard.json") + + assert.Nil(t, err) + + var mapping = AttributeMapping{ + attributeMappingKey{"Catalina:name=*,type=ThreadPool", "port"}: Attribute{ + Attr: "port", Field: "port"}, + attributeMappingKey{"Catalina:name=*,type=ThreadPool", "maxConnections"}: Attribute{ + Attr: "maxConnections", Field: "max_connections"}, + } + + events, err := eventMapping(jolokiaResponse, mapping) + assert.Nil(t, err) + assert.Equal(t, 2, len(events)) + + expected := []common.MapStr{ + { + "mbean": "Catalina:name=\"http-bio-8080\",type=ThreadPool", + "max_connections": float64(200), + "port": float64(8080), + }, + { + "mbean": "Catalina:name=\"ajp-bio-8009\",type=ThreadPool", + "max_connections": float64(200), + "port": float64(8009), + }, + } + + assert.ElementsMatch(t, expected, events) +} + +func TestEventGroupingMapperWithWildcard(t *testing.T) { + absPath, err := filepath.Abs("./_meta/test") + + assert.NotNil(t, absPath) + assert.Nil(t, err) + + jolokiaResponse, err := ioutil.ReadFile(absPath + "/jolokia_response_wildcard.json") + + assert.Nil(t, err) + + var mapping = AttributeMapping{ + attributeMappingKey{"Catalina:name=*,type=ThreadPool", "port"}: Attribute{ + Attr: "port", Field: "port", Event: "port"}, + attributeMappingKey{"Catalina:name=*,type=ThreadPool", "maxConnections"}: Attribute{ + Attr: "maxConnections", Field: "max_connections", Event: "network"}, + } + + events, err := eventMapping(jolokiaResponse, mapping) + assert.Nil(t, err) + assert.Equal(t, 4, len(events)) + + expected := []common.MapStr{ + { + "mbean": "Catalina:name=\"http-bio-8080\",type=ThreadPool", + "port": float64(8080), + }, + { + "mbean": "Catalina:name=\"http-bio-8080\",type=ThreadPool", + "max_connections": float64(200), + }, + { + "mbean": "Catalina:name=\"ajp-bio-8009\",type=ThreadPool", + "port": float64(8009), + }, + { + "mbean": "Catalina:name=\"ajp-bio-8009\",type=ThreadPool", + "max_connections": float64(200), }, } - assert.Equal(t, expected, event) + assert.ElementsMatch(t, expected, events) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx.go b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx.go index 5f0f4c10..10a502fd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx.go @@ -1,8 +1,9 @@ package jmx import ( + "github.com/joeshaw/multierror" + "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/helper" "github.com/elastic/beats/metricbeat/mb" @@ -11,7 +12,6 @@ import ( var ( metricsetName = "jolokia.jmx" - debugf = logp.MakeDebug(metricsetName) ) // init registers the MetricSet with the central registry. 
@@ -22,12 +22,8 @@ func init() { } const ( - // defaultScheme is the default scheme to use when it is not specified in - // the host config. defaultScheme = "http" - - // defaultPath is the default path to the ngx_http_stub_status_module endpoint on Nginx. - defaultPath = "/jolokia/?ignoreErrors=true&canonicalNaming=false" + defaultPath = "/jolokia/" ) var ( @@ -41,16 +37,14 @@ var ( // MetricSet type defines all fields of the MetricSet type MetricSet struct { mb.BaseMetricSet - mapping map[string]string + mapping AttributeMapping namespace string http *helper.HTTP + log *logp.Logger } // New create a new instance of the MetricSet func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Beta("The jolokia jmx metricset is beta") - - // Additional configuration options config := struct { Namespace string `config:"namespace" validate:"required"` Mappings []JMXMapping `config:"jmx.mappings" validate:"required"` @@ -65,13 +59,18 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetMethod("POST") http.SetBody(body) + log := logp.NewLogger(metricsetName).With("host", base.HostData().Host) + if logp.IsDebug(metricsetName) { - debugf("The body for POST requests to jolokia host %v is: %v", - base.HostData().Host, string(body)) + log.Debugw("Jolokia request body", + "body", string(body), "type", "request") } return &MetricSet{ @@ -79,28 +78,35 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { mapping: mapping, namespace: config.Namespace, http: http, + log: log, }, nil } // Fetch methods implements the data gathering and data conversion to the right format -func (m *MetricSet) Fetch() (common.MapStr, error) { +func (m *MetricSet) Fetch() ([]common.MapStr, error) { body, err := m.http.FetchContent() if err != nil { return nil, err } if logp.IsDebug(metricsetName) { - debugf("The response body from jolokia host %v is: %v", - m.HostData().Host, string(body)) + m.log.Debugw("Jolokia response body", + "host", m.HostData().Host, "body", string(body), "type", "response") } - event, err := eventMapping(body, m.mapping) + events, err := eventMapping(body, m.mapping) if err != nil { return nil, err } // Set dynamic namespace. 
- event[mb.NamespaceKey] = m.namespace + var errs multierror.Errors + for _, event := range events { + _, err = event.Put(mb.NamespaceKey, m.namespace) + if err != nil { + errs = append(errs, err) + } + } - return event, nil + return events, errs.Err() } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx_integration_test.go index 9dcd5492..9dc6970a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx_integration_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/jmx/jmx_integration_test.go @@ -15,62 +15,91 @@ import ( func TestFetch(t *testing.T) { compose.EnsureUp(t, "jolokia") - f := mbtest.NewEventFetcher(t, getConfig()) - event, err := f.Fetch() - if !assert.NoError(t, err) { - t.FailNow() + for _, config := range getConfigs() { + f := mbtest.NewEventsFetcher(t, config) + events, err := f.Fetch() + if !assert.NoError(t, err) { + t.FailNow() + } + t.Logf("%s/%s events: %+v", f.Module().Name(), f.Name(), events) + if len(events) == 0 || len(events[0]) <= 1 { + t.Fatal("Empty events") + } } - - t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event) } func TestData(t *testing.T) { - f := mbtest.NewEventFetcher(t, getConfig()) - err := mbtest.WriteEvent(f, t) - if err != nil { - t.Fatal("write", err) + for _, config := range getConfigs() { + f := mbtest.NewEventsFetcher(t, config) + err := mbtest.WriteEvents(f, t) + if err != nil { + t.Fatal("write", err) + } } } -func getConfig() map[string]interface{} { - return map[string]interface{}{ - "module": "jolokia", - "metricsets": []string{"jmx"}, - "hosts": []string{getEnvHost() + ":" + getEnvPort()}, - "namespace": "testnamespace", - "jmx.mappings": []map[string]interface{}{ - { - "mbean": "java.lang:type=Runtime", - "attributes": []map[string]string{ - { - "attr": "Uptime", - "field": "uptime", +func getConfigs() []map[string]interface{} { + return []map[string]interface{}{ + { + "module": "jolokia", + "metricsets": []string{"jmx"}, + "hosts": []string{getEnvHost() + ":" + getEnvPort()}, + "namespace": "testnamespace", + "jmx.mappings": []map[string]interface{}{ + { + "mbean": "java.lang:type=Runtime", + "attributes": []map[string]string{ + { + "attr": "Uptime", + "field": "uptime", + }, }, }, - }, - { - "mbean": "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep", - "attributes": []map[string]string{ - { - "attr": "CollectionTime", - "field": "gc.cms_collection_time", + { + "mbean": "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep", + "attributes": []map[string]string{ + { + "attr": "CollectionTime", + "field": "gc.cms_collection_time", + }, + { + "attr": "CollectionCount", + "field": "gc.cms_collection_count", + }, }, - { - "attr": "CollectionCount", - "field": "gc.cms_collection_count", + }, + { + "mbean": "java.lang:type=Memory", + "attributes": []map[string]string{ + { + "attr": "HeapMemoryUsage", + "field": "memory.heap_usage", + }, + { + "attr": "NonHeapMemoryUsage", + "field": "memory.non_heap_usage", + }, }, }, }, - { - "mbean": "java.lang:type=Memory", - "attributes": []map[string]string{ - { - "attr": "HeapMemoryUsage", - "field": "memory.heap_usage", - }, - { - "attr": "NonHeapMemoryUsage", - "field": "memory.non_heap_usage", + }, + { + "module": "jolokia", + "metricsets": []string{"jmx"}, + "hosts": []string{getEnvHost() + ":" + getEnvPort()}, + "namespace": "testnamespace", + "jmx.mappings": []map[string]interface{}{ + { + "mbean": 
"Catalina:name=*,type=ThreadPool", + "attributes": []map[string]string{ + { + "attr": "maxConnections", + "field": "max_connections", + }, + { + "attr": "port", + "field": "port", + }, }, }, }, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.reference.yml new file mode 100644 index 00000000..0a9d9fe8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.reference.yml @@ -0,0 +1,26 @@ +- module: kafka + metricsets: ["consumergroup", "partition"] + period: 10s + hosts: ["localhost:9092"] + enabled: true + + #client_id: metricbeat + #retries: 3 + #backoff: 250ms + + # List of Topics to query metadata for. If empty, all topics will be queried. + #topics: [] + + # Optional SSL. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # SASL authentication + #username: "" + #password: "" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml index 91ce183a..f9db3711 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/config.yml @@ -1,25 +1,2 @@ - module: kafka - metricsets: ["partition"] - period: 10s hosts: ["localhost:9092"] - - #client_id: metricbeat - #retries: 3 - #backoff: 250ms - - # List of Topics to query metadata for. If empty, all topics will be queried. - #topics: [] - - # Optional SSL. By default is off. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - - # SASL authentication - #username: "" - #password: "" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc index 9eb0eb94..7840da1e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/docs.asciidoc @@ -1,2 +1,5 @@ This is the Kafka module. +The default metricsets are `consumergroup` and `partition`. + +This module is tested with 0.10.2. 
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/run.sh b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/run.sh index 2d44882e..adec90dd 100755 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/run.sh +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/run.sh @@ -21,7 +21,7 @@ mkdir -p ${KAFKA_LOGS_DIR} ${KAFKA_HOME}/bin/kafka-server-start.sh ${KAFKA_HOME}/config/server.properties \ --override delete.topic.enable=true --override advertised.host.name=${KAFKA_ADVERTISED_HOST} \ --override listeners=PLAINTEXT://0.0.0.0:9092 \ - --override logs.dir=${KAFKA_LOGS_DIR} --override log.flush.interval.ms=200 & + --override logs.dir=${KAFKA_LOGS_DIR} & wait_for_port 9092 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/consumergroup.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/consumergroup.go index 083d357b..97298a35 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/consumergroup.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/consumergroup/consumergroup.go @@ -13,9 +13,9 @@ import ( // init registers the MetricSet with the central registry. func init() { - if err := mb.Registry.AddMetricSet("kafka", "consumergroup", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kafka", "consumergroup", New, + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go index 84e22827..f5f4859f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/partition/partition.go @@ -17,9 +17,10 @@ import ( // init registers the partition MetricSet with the central registry. 
func init() { - if err := mb.Registry.AddMetricSet("kafka", "partition", New, parse.PassThruHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kafka", "partition", New, + mb.WithHostParser(parse.PassThruHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the partition MetricSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile index 3fd697cb..94c508b5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile @@ -1,2 +1,2 @@ -FROM docker.elastic.co/kibana/kibana:6.0.0-rc1 +FROM docker.elastic.co/kibana/kibana:6.2.4 HEALTHCHECK --interval=1s --retries=300 CMD curl -f http://localhost:5601/api/status | grep '"disconnects"' diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/config.reference.yml new file mode 100644 index 00000000..05d0b4d3 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: kibana + metricsets: ["status"] + period: 10s + hosts: ["localhost:5601"] + enabled: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/config.yml index 09a30295..e7629293 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/config.yml @@ -1,4 +1,4 @@ - module: kibana - metricsets: ["status"] - period: 10s hosts: ["localhost:5601"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/docs.asciidoc index 9859f2b0..a979535a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/docs.asciidoc @@ -1 +1,3 @@ -The Kibana module only tracks the high-level metrics. To monitor more Kibana metrics, use our {monitoringdoc}/xpack-monitoring.html[X-Pack monitoring] which is available under a free basic license. +The Kibana module only tracks the high-level metrics. To monitor more Kibana metrics, use our {monitoringdoc}/xpack-monitoring.html[monitoring] feature. + +The default metricset is `status`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kibana/status/status.go b/vendor/github.com/elastic/beats/metricbeat/module/kibana/status/status.go index 736e827a..e193ed80 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kibana/status/status.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kibana/status/status.go @@ -11,9 +11,10 @@ import ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("kibana", "status", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kibana", "status", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } var ( @@ -33,9 +34,14 @@ type MetricSet struct { // New create a new instance of the MetricSet func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The kafka partition metricset is beta") + + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.reference.yml new file mode 100644 index 00000000..4f38fa02 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.reference.yml @@ -0,0 +1,35 @@ +# Node metrics, from kubelet: +- module: kubernetes + metricsets: + - container + - node + - pod + - system + - volume + period: 10s + hosts: ["localhost:10255"] + enabled: true + #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + #ssl.certificate: "/etc/pki/client/cert.pem" + #ssl.key: "/etc/pki/client/cert.key" + +# State metrics from kube-state-metrics service: +- module: kubernetes + enabled: true + metricsets: + - state_node + - state_deployment + - state_replicaset + - state_statefulset + - state_pod + - state_container + period: 10s + hosts: ["kube-state-metrics:8080"] + +# Kubernetes events +- module: kubernetes + enabled: true + metricsets: + - event diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml index cf00d10c..48a0bfdc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml @@ -1,28 +1,13 @@ -# Node metrics, from kubelet: - module: kubernetes metricsets: + - container - node - - system - pod - - container + - system - volume - period: 10s hosts: ["localhost:10255"] - -# State metrics from kube-state-metrics service: -- module: kubernetes - enabled: false - metricsets: - - state_node - - state_deployment - - state_replicaset - - state_pod - - state_container - period: 10s - hosts: ["kube-state-metrics:8080"] - -# Kubernetes events -- module: kubernetes - enabled: false - metricsets: - - event + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + ssl.certificate_authorities: + - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/docs.asciidoc index 662ef817..30909d41 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/docs.asciidoc @@ -4,3 +4,5 @@ agent and https://github.com/kubernetes/kube-state-metrics[kube-state-metrics] s All metricsets with the `state_` prefix require `hosts` field pointing to kube-stat-metrics service within the cluster, while 
the rest should be pointed to kubelet service. Check the example configuration on how to do it. + +The default metricsets are `container`, `node`, `pod`, `system` and `volume`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/kube-state-metrics b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/kube-state-metrics index ec85a6b2..80624f18 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/kube-state-metrics +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/kube-state-metrics @@ -82,91 +82,114 @@ go_memstats_sys_bytes 2.6564856e+07 # HELP kube_deployment_metadata_generation Sequence number representing a specific generation of the desired state. # TYPE kube_deployment_metadata_generation gauge kube_deployment_metadata_generation{deployment="jumpy-owl-redis",namespace="default"} 1 +kube_deployment_metadata_generation{deployment="jumpy-owl-redis",namespace="test"} 2 kube_deployment_metadata_generation{deployment="kube-state-metrics",namespace="kube-system"} 1 kube_deployment_metadata_generation{deployment="tiller-deploy",namespace="kube-system"} 1 kube_deployment_metadata_generation{deployment="wise-lynx-jenkins",namespace="jenkins"} 1 # HELP kube_deployment_spec_paused Whether the deployment is paused and will not be processed by the deployment controller. # TYPE kube_deployment_spec_paused gauge kube_deployment_spec_paused{deployment="jumpy-owl-redis",namespace="default"} 0 +kube_deployment_spec_paused{deployment="jumpy-owl-redis",namespace="test"} 1 kube_deployment_spec_paused{deployment="kube-state-metrics",namespace="kube-system"} 0 kube_deployment_spec_paused{deployment="tiller-deploy",namespace="kube-system"} 0 kube_deployment_spec_paused{deployment="wise-lynx-jenkins",namespace="jenkins"} 0 # HELP kube_deployment_spec_replicas Number of desired pods for a deployment. # TYPE kube_deployment_spec_replicas gauge kube_deployment_spec_replicas{deployment="jumpy-owl-redis",namespace="default"} 1 +kube_deployment_spec_replicas{deployment="jumpy-owl-redis",namespace="test"} 2 kube_deployment_spec_replicas{deployment="kube-state-metrics",namespace="kube-system"} 2 kube_deployment_spec_replicas{deployment="tiller-deploy",namespace="kube-system"} 1 kube_deployment_spec_replicas{deployment="wise-lynx-jenkins",namespace="jenkins"} 1 # HELP kube_deployment_spec_strategy_rollingupdate_max_unavailable Maximum number of unavailable replicas during a rolling update of a deployment. # TYPE kube_deployment_spec_strategy_rollingupdate_max_unavailable gauge kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="jumpy-owl-redis",namespace="default"} 1 +kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="jumpy-owl-redis",namespace="test"} 3 kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="kube-state-metrics",namespace="kube-system"} 1 kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="tiller-deploy",namespace="kube-system"} 1 kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="wise-lynx-jenkins",namespace="jenkins"} 1 # HELP kube_deployment_status_observed_generation The generation observed by the deployment controller. 
# TYPE kube_deployment_status_observed_generation gauge kube_deployment_status_observed_generation{deployment="jumpy-owl-redis",namespace="default"} 1 +kube_deployment_status_observed_generation{deployment="jumpy-owl-redis",namespace="test"} 4 kube_deployment_status_observed_generation{deployment="kube-state-metrics",namespace="kube-system"} 1 kube_deployment_status_observed_generation{deployment="tiller-deploy",namespace="kube-system"} 1 kube_deployment_status_observed_generation{deployment="wise-lynx-jenkins",namespace="jenkins"} 1 # HELP kube_deployment_status_replicas The number of replicas per deployment. # TYPE kube_deployment_status_replicas gauge kube_deployment_status_replicas{deployment="jumpy-owl-redis",namespace="default"} 1 +kube_deployment_status_replicas{deployment="jumpy-owl-redis",namespace="test"} 5 kube_deployment_status_replicas{deployment="kube-state-metrics",namespace="kube-system"} 2 kube_deployment_status_replicas{deployment="tiller-deploy",namespace="kube-system"} 1 kube_deployment_status_replicas{deployment="wise-lynx-jenkins",namespace="jenkins"} 1 # HELP kube_deployment_status_replicas_available The number of available replicas per deployment. # TYPE kube_deployment_status_replicas_available gauge kube_deployment_status_replicas_available{deployment="jumpy-owl-redis",namespace="default"} 0 +kube_deployment_status_replicas_available{deployment="jumpy-owl-redis",namespace="test"} 6 kube_deployment_status_replicas_available{deployment="kube-state-metrics",namespace="kube-system"} 1 kube_deployment_status_replicas_available{deployment="tiller-deploy",namespace="kube-system"} 1 kube_deployment_status_replicas_available{deployment="wise-lynx-jenkins",namespace="jenkins"} 1 # HELP kube_deployment_status_replicas_unavailable The number of unavailable replicas per deployment. # TYPE kube_deployment_status_replicas_unavailable gauge kube_deployment_status_replicas_unavailable{deployment="jumpy-owl-redis",namespace="default"} 1 +kube_deployment_status_replicas_unavailable{deployment="jumpy-owl-redis",namespace="test"} 7 kube_deployment_status_replicas_unavailable{deployment="kube-state-metrics",namespace="kube-system"} 1 kube_deployment_status_replicas_unavailable{deployment="tiller-deploy",namespace="kube-system"} 0 kube_deployment_status_replicas_unavailable{deployment="wise-lynx-jenkins",namespace="jenkins"} 0 # HELP kube_deployment_status_replicas_updated The number of updated replicas per deployment. # TYPE kube_deployment_status_replicas_updated gauge kube_deployment_status_replicas_updated{deployment="jumpy-owl-redis",namespace="default"} 1 +kube_deployment_status_replicas_updated{deployment="jumpy-owl-redis",namespace="test"} 8 kube_deployment_status_replicas_updated{deployment="kube-state-metrics",namespace="kube-system"} 2 kube_deployment_status_replicas_updated{deployment="tiller-deploy",namespace="kube-system"} 1 kube_deployment_status_replicas_updated{deployment="wise-lynx-jenkins",namespace="jenkins"} 1 # HELP kube_node_info Information about a cluster node. # TYPE kube_node_info gauge kube_node_info{container_runtime_version="docker://1.11.1",kernel_version="4.7.2",kubelet_version="v1.5.3",kubeproxy_version="v1.5.3",node="minikube",os_image="Buildroot 2016.08"} 1 +kube_node_info{container_runtime_version="docker://1.11.1",kernel_version="4.7.2",kubelet_version="v1.5.3",kubeproxy_version="v1.5.3",node="minikube-test",os_image="Buildroot 2016.08"} 1 # HELP kube_node_spec_unschedulable Whether a node can schedule new pods. 
# TYPE kube_node_spec_unschedulable gauge kube_node_spec_unschedulable{node="minikube"} 0 +kube_node_spec_unschedulable{node="minikube-test"} 1 # HELP kube_node_status_allocatable_cpu_cores The CPU resources of a node that are available for scheduling. # TYPE kube_node_status_allocatable_cpu_cores gauge kube_node_status_allocatable_cpu_cores{node="minikube"} 2 +kube_node_status_allocatable_cpu_cores{node="minikube-test"} 3 # HELP kube_node_status_allocatable_memory_bytes The memory resources of a node that are available for scheduling. # TYPE kube_node_status_allocatable_memory_bytes gauge kube_node_status_allocatable_memory_bytes{node="minikube"} 2.09778688e+09 +kube_node_status_allocatable_memory_bytes{node="minikube-test"} 3.09778688e+09 # HELP kube_node_status_allocatable_pods The pod resources of a node that are available for scheduling. # TYPE kube_node_status_allocatable_pods gauge kube_node_status_allocatable_pods{node="minikube"} 110 +kube_node_status_allocatable_pods{node="minikube-test"} 210 # HELP kube_node_status_capacity_cpu_cores The total CPU resources of the node. # TYPE kube_node_status_capacity_cpu_cores gauge kube_node_status_capacity_cpu_cores{node="minikube"} 2 +kube_node_status_capacity_cpu_cores{node="minikube-test"} 4 # HELP kube_node_status_capacity_memory_bytes The total memory resources of the node. # TYPE kube_node_status_capacity_memory_bytes gauge kube_node_status_capacity_memory_bytes{node="minikube"} 2.09778688e+09 +kube_node_status_capacity_memory_bytes{node="minikube-test"} 4.09778688e+09 # HELP kube_node_status_capacity_pods The total pod resources of the node. # TYPE kube_node_status_capacity_pods gauge kube_node_status_capacity_pods{node="minikube"} 110 +kube_node_status_capacity_pods{node="minikube-test"} 310 # HELP kube_node_status_out_of_disk Whether the node is out of disk space # TYPE kube_node_status_out_of_disk gauge kube_node_status_out_of_disk{condition="false",node="minikube"} 1 kube_node_status_out_of_disk{condition="true",node="minikube"} 0 kube_node_status_out_of_disk{condition="unknown",node="minikube"} 0 +kube_node_status_out_of_disk{condition="false",node="minikube-test"} 1 +kube_node_status_out_of_disk{condition="true",node="minikube-test"} 0 +kube_node_status_out_of_disk{condition="unknown",node="minikube-test"} 0 # HELP kube_node_status_ready The ready status of a cluster node. # TYPE kube_node_status_ready gauge kube_node_status_ready{condition="false",node="minikube"} 0 kube_node_status_ready{condition="true",node="minikube"} 1 kube_node_status_ready{condition="unknown",node="minikube"} 0 +kube_node_status_ready{condition="false",node="minikube-test"} 0 +kube_node_status_ready{condition="true",node="minikube-test"} 1 +kube_node_status_ready{condition="unknown",node="minikube-test"} 0 # HELP kube_pod_container_info Information about a container in a pod. 
# TYPE kube_pod_container_info gauge kube_pod_container_info{container="dnsmasq",container_id="docker://9a4c9462cd078d7be4f0a9b94bcfeb69d5fdd76bff67142df3f58367ac7e8d61",image="gcr.io/google_containers/kube-dnsmasq-amd64:1.4",image_id="docker://sha256:3ec65756a89b70b4095e43a340a6e2d5696cac7a93a29619ff5c4b6be9af2773",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 1 @@ -175,6 +198,7 @@ kube_pod_container_info{container="jumpy-owl-redis",container_id="docker://4fa22 kube_pod_container_info{container="kube-addon-manager",container_id="docker://91fdd43f6b1b4c3dd133cfca53e0b1210bc557c2ae56006026b5ccdb5f52826f",image="gcr.io/google-containers/kube-addon-manager:v6.3",image_id="docker://sha256:79eb64bc98df10a9af7e39f70df817e1862f8a5ec7657714df68439a617ee9ec",namespace="kube-system",pod="kube-addon-manager-minikube"} 1 kube_pod_container_info{container="kube-state-metrics",container_id="docker://973cbe45982c5126a5caf8c58d964c0ab1d5bb2c165ccc59715fcc1ebd58ab3d",image="gcr.io/google_containers/kube-state-metrics:v0.4.1",image_id="docker://sha256:be329a05c2e77e7d067b4e1dbefa1567a91d0487d3500d608171489369bfd945",namespace="kube-system",pod="kube-state-metrics-1303537707-7ncd1"} 1 kube_pod_container_info{container="kubedns",container_id="docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62",image="gcr.io/google_containers/kubedns-amd64:1.9",image_id="docker://sha256:26cf1ed9b14486b93acd70c060a17fea13620393d3aa8e76036b773197c47a05",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 1 +kube_pod_container_info{container="kubedns",container_id="docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test",image="gcr.io/google_containers/kubedns-amd64:1.9-test",image_id="docker://sha256:26cf1ed9b14486b93acd70c060a17fea13620393d3aa8e76036b773197c47a05",namespace="test",pod="kube-dns-v20-5g5cb-test"} 0 kube_pod_container_info{container="kubernetes-dashboard",container_id="docker://3aaee8bdd311c015240e99fa2a5a5f2f26b11b51236a683b39d8c1902e423978",image="gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1",image_id="docker://sha256:1180413103fdfd00a7882d3d8653a220d88c6ea4466fb860e98376c45ee1a1d0",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 1 kube_pod_container_info{container="tiller",container_id="docker://469f5d2b7854eb52e5d13dc0cd3e664c1b682b157aabaf596ffe4984f1516902",image="gcr.io/kubernetes-helm/tiller:v2.3.1",image_id="docker://sha256:38527daf791dbe472c37ecb1e8b13a62e31c00d9ff4c8a1f019d7022a96a43da",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 1 kube_pod_container_info{container="wise-lynx-jenkins",container_id="docker://e2ee1c2c7b8d4e5fd8c834b83cba8377d6b0e39da18157688ccc1a06b7c53117",image="jenkinsci/jenkins:2.46.1",image_id="docker://sha256:36023b9defd066ee53c03e33ba3add7225aee8447cb3154133012b1e152153c0",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 1 @@ -188,6 +212,7 @@ kube_pod_container_resource_limits_memory_bytes{container="healthz",namespace="k kube_pod_container_resource_limits_memory_bytes{container="kube-state-metrics",namespace="kube-system",node="",pod="kube-state-metrics-1303537707-mnzbp"} 5.24288e+07 kube_pod_container_resource_limits_memory_bytes{container="kube-state-metrics",namespace="kube-system",node="minikube",pod="kube-state-metrics-1303537707-7ncd1"} 5.24288e+07 kube_pod_container_resource_limits_memory_bytes{container="kubedns",namespace="kube-system",node="minikube",pod="kube-dns-v20-5g5cb"} 1.7825792e+08 
+kube_pod_container_resource_limits_memory_bytes{container="kubedns",namespace="test",node="minikube-test",pod="kube-dns-v20-5g5cb-test"} 2.7825792e+08 # HELP kube_pod_container_resource_requests_cpu_cores The number of requested cpu cores by a container. # TYPE kube_pod_container_resource_requests_cpu_cores gauge kube_pod_container_resource_requests_cpu_cores{container="healthz",namespace="kube-system",node="minikube",pod="kube-dns-v20-5g5cb"} 0.01 @@ -196,6 +221,7 @@ kube_pod_container_resource_requests_cpu_cores{container="kube-addon-manager",na kube_pod_container_resource_requests_cpu_cores{container="kube-state-metrics",namespace="kube-system",node="",pod="kube-state-metrics-1303537707-mnzbp"} 0.1 kube_pod_container_resource_requests_cpu_cores{container="kube-state-metrics",namespace="kube-system",node="minikube",pod="kube-state-metrics-1303537707-7ncd1"} 0.1 kube_pod_container_resource_requests_cpu_cores{container="kubedns",namespace="kube-system",node="minikube",pod="kube-dns-v20-5g5cb"} 0.1 +kube_pod_container_resource_requests_cpu_cores{container="kubedns",namespace="test",node="minikube-test",pod="kube-dns-v20-5g5cb-test"} 0.2 kube_pod_container_resource_requests_cpu_cores{container="wise-lynx-jenkins",namespace="jenkins",node="minikube",pod="wise-lynx-jenkins-1616735317-svn6k"} 0.2 # HELP kube_pod_container_resource_requests_memory_bytes The number of requested memory bytes by a container. # TYPE kube_pod_container_resource_requests_memory_bytes gauge @@ -205,6 +231,7 @@ kube_pod_container_resource_requests_memory_bytes{container="kube-addon-manager" kube_pod_container_resource_requests_memory_bytes{container="kube-state-metrics",namespace="kube-system",node="",pod="kube-state-metrics-1303537707-mnzbp"} 3.145728e+07 kube_pod_container_resource_requests_memory_bytes{container="kube-state-metrics",namespace="kube-system",node="minikube",pod="kube-state-metrics-1303537707-7ncd1"} 3.145728e+07 kube_pod_container_resource_requests_memory_bytes{container="kubedns",namespace="kube-system",node="minikube",pod="kube-dns-v20-5g5cb"} 7.340032e+07 +kube_pod_container_resource_requests_memory_bytes{container="kubedns",namespace="test",node="minikube-test",pod="kube-dns-v20-5g5cb-test"} 8.340032e+07 kube_pod_container_resource_requests_memory_bytes{container="wise-lynx-jenkins",namespace="jenkins",node="minikube",pod="wise-lynx-jenkins-1616735317-svn6k"} 2.68435456e+08 # HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded. 
# TYPE kube_pod_container_status_ready gauge @@ -214,6 +241,7 @@ kube_pod_container_status_ready{container="jumpy-owl-redis",namespace="default", kube_pod_container_status_ready{container="kube-addon-manager",namespace="kube-system",pod="kube-addon-manager-minikube"} 1 kube_pod_container_status_ready{container="kube-state-metrics",namespace="kube-system",pod="kube-state-metrics-1303537707-7ncd1"} 1 kube_pod_container_status_ready{container="kubedns",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 1 +kube_pod_container_status_ready{container="kubedns",namespace="test",pod="kube-dns-v20-5g5cb-test"} 0 kube_pod_container_status_ready{container="kubernetes-dashboard",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 1 kube_pod_container_status_ready{container="tiller",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 1 kube_pod_container_status_ready{container="wise-lynx-jenkins",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 1 @@ -225,6 +253,7 @@ kube_pod_container_status_restarts{container="jumpy-owl-redis",namespace="defaul kube_pod_container_status_restarts{container="kube-addon-manager",namespace="kube-system",pod="kube-addon-manager-minikube"} 2 kube_pod_container_status_restarts{container="kube-state-metrics",namespace="kube-system",pod="kube-state-metrics-1303537707-7ncd1"} 1 kube_pod_container_status_restarts{container="kubedns",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 2 +kube_pod_container_status_restarts{container="kubedns",namespace="test",pod="kube-dns-v20-5g5cb-test"} 3 kube_pod_container_status_restarts{container="kubernetes-dashboard",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 2 kube_pod_container_status_restarts{container="tiller",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 1 kube_pod_container_status_restarts{container="wise-lynx-jenkins",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 1 @@ -236,6 +265,7 @@ kube_pod_container_status_running{container="jumpy-owl-redis",namespace="default kube_pod_container_status_running{container="kube-addon-manager",namespace="kube-system",pod="kube-addon-manager-minikube"} 1 kube_pod_container_status_running{container="kube-state-metrics",namespace="kube-system",pod="kube-state-metrics-1303537707-7ncd1"} 1 kube_pod_container_status_running{container="kubedns",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 1 +kube_pod_container_status_running{container="kubedns",namespace="test",pod="kube-dns-v20-5g5cb-test"} 0 kube_pod_container_status_running{container="kubernetes-dashboard",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 1 kube_pod_container_status_running{container="tiller",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 1 kube_pod_container_status_running{container="wise-lynx-jenkins",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 1 @@ -247,6 +277,7 @@ kube_pod_container_status_terminated{container="jumpy-owl-redis",namespace="defa kube_pod_container_status_terminated{container="kube-addon-manager",namespace="kube-system",pod="kube-addon-manager-minikube"} 0 kube_pod_container_status_terminated{container="kube-state-metrics",namespace="kube-system",pod="kube-state-metrics-1303537707-7ncd1"} 0 kube_pod_container_status_terminated{container="kubedns",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 0 +kube_pod_container_status_terminated{container="kubedns",namespace="test",pod="kube-dns-v20-5g5cb-test"} 1 
kube_pod_container_status_terminated{container="kubernetes-dashboard",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 0 kube_pod_container_status_terminated{container="tiller",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 0 kube_pod_container_status_terminated{container="wise-lynx-jenkins",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 0 @@ -258,6 +289,7 @@ kube_pod_container_status_waiting{container="jumpy-owl-redis",namespace="default kube_pod_container_status_waiting{container="kube-addon-manager",namespace="kube-system",pod="kube-addon-manager-minikube"} 0 kube_pod_container_status_waiting{container="kube-state-metrics",namespace="kube-system",pod="kube-state-metrics-1303537707-7ncd1"} 0 kube_pod_container_status_waiting{container="kubedns",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 0 +kube_pod_container_status_waiting{container="kubedns",namespace="test",pod="kube-dns-v20-5g5cb"} 0 kube_pod_container_status_waiting{container="kubernetes-dashboard",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 0 kube_pod_container_status_waiting{container="tiller",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 0 kube_pod_container_status_waiting{container="wise-lynx-jenkins",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 0 @@ -265,6 +297,7 @@ kube_pod_container_status_waiting{container="wise-lynx-jenkins",namespace="jenki # TYPE kube_pod_info gauge kube_pod_info{host_ip="",namespace="kube-system",node="",pod="kube-state-metrics-1303537707-mnzbp",pod_ip=""} 1 kube_pod_info{host_ip="192.168.99.100",namespace="default",node="minikube",pod="jumpy-owl-redis-3481028193-s78x9",pod_ip="172.17.0.4"} 1 +kube_pod_info{host_ip="192.168.99.200",namespace="test",node="minikube-test",pod="jumpy-owl-redis-3481028193-s78x9",pod_ip="172.17.0.5"} 1 kube_pod_info{host_ip="192.168.99.100",namespace="jenkins",node="minikube",pod="wise-lynx-jenkins-1616735317-svn6k",pod_ip="172.17.0.7"} 1 kube_pod_info{host_ip="192.168.99.100",namespace="kube-system",node="minikube",pod="kube-addon-manager-minikube",pod_ip="192.168.99.100"} 1 kube_pod_info{host_ip="192.168.99.100",namespace="kube-system",node="minikube",pod="kube-dns-v20-5g5cb",pod_ip="172.17.0.6"} 1 @@ -276,6 +309,9 @@ kube_pod_info{host_ip="192.168.99.100",namespace="kube-system",node="minikube",p kube_pod_status_phase{namespace="default",phase="Running",pod="jumpy-owl-redis-3481028193-s78x9"} 0 kube_pod_status_phase{namespace="default",phase="Succeeded",pod="jumpy-owl-redis-3481028193-s78x9"} 1 kube_pod_status_phase{namespace="default",phase="Unknown",pod="jumpy-owl-redis-3481028193-s78x9"} 0 +kube_pod_status_phase{namespace="test",phase="Running",pod="jumpy-owl-redis-3481028193-s78x9"} 1 +kube_pod_status_phase{namespace="test",phase="Succeeded",pod="jumpy-owl-redis-3481028193-s78x9"} 0 +kube_pod_status_phase{namespace="test",phase="Unknown",pod="jumpy-owl-redis-3481028193-s78x9"} 0 kube_pod_status_phase{namespace="jenkins",phase="Running",pod="wise-lynx-jenkins-1616735317-svn6k"} 1 kube_pod_status_phase{namespace="kube-system",phase="Pending",pod="kube-state-metrics-1303537707-mnzbp"} 1 kube_pod_status_phase{namespace="kube-system",phase="Running",pod="kube-addon-manager-minikube"} 1 @@ -286,6 +322,7 @@ kube_pod_status_phase{namespace="kube-system",phase="Running",pod="tiller-deploy # HELP kube_pod_status_ready Describes whether the pod is ready to serve requests. 
# TYPE kube_pod_status_ready gauge kube_pod_status_ready{condition="false",namespace="default",pod="jumpy-owl-redis-3481028193-s78x9"} 1 +kube_pod_status_ready{condition="false",namespace="test",pod="jumpy-owl-redis-3481028193-s78x9"} 0 kube_pod_status_ready{condition="false",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 0 kube_pod_status_ready{condition="false",namespace="kube-system",pod="kube-addon-manager-minikube"} 0 kube_pod_status_ready{condition="false",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 0 @@ -293,6 +330,7 @@ kube_pod_status_ready{condition="false",namespace="kube-system",pod="kube-state- kube_pod_status_ready{condition="false",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 0 kube_pod_status_ready{condition="false",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 0 kube_pod_status_ready{condition="true",namespace="default",pod="jumpy-owl-redis-3481028193-s78x9"} 0 +kube_pod_status_ready{condition="true",namespace="test",pod="jumpy-owl-redis-3481028193-s78x9"} 1 kube_pod_status_ready{condition="true",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 1 kube_pod_status_ready{condition="true",namespace="kube-system",pod="kube-addon-manager-minikube"} 1 kube_pod_status_ready{condition="true",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 1 @@ -300,6 +338,7 @@ kube_pod_status_ready{condition="true",namespace="kube-system",pod="kube-state-m kube_pod_status_ready{condition="true",namespace="kube-system",pod="kubernetes-dashboard-vw0l6"} 1 kube_pod_status_ready{condition="true",namespace="kube-system",pod="tiller-deploy-3067024529-9lpmb"} 1 kube_pod_status_ready{condition="unknown",namespace="default",pod="jumpy-owl-redis-3481028193-s78x9"} 0 +kube_pod_status_ready{condition="unknown",namespace="test",pod="jumpy-owl-redis-3481028193-s78x9"} 0 kube_pod_status_ready{condition="unknown",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 0 kube_pod_status_ready{condition="unknown",namespace="kube-system",pod="kube-addon-manager-minikube"} 0 kube_pod_status_ready{condition="unknown",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 0 @@ -309,6 +348,7 @@ kube_pod_status_ready{condition="unknown",namespace="kube-system",pod="tiller-de # HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod. 
# TYPE kube_pod_status_scheduled gauge kube_pod_status_scheduled{condition="false",namespace="default",pod="jumpy-owl-redis-3481028193-s78x9"} 0 +kube_pod_status_scheduled{condition="false",namespace="test",pod="jumpy-owl-redis-3481028193-s78x9"} 1 kube_pod_status_scheduled{condition="false",namespace="jenkins",pod="wise-lynx-jenkins-1616735317-svn6k"} 0 kube_pod_status_scheduled{condition="false",namespace="kube-system",pod="kube-addon-manager-minikube"} 0 kube_pod_status_scheduled{condition="false",namespace="kube-system",pod="kube-dns-v20-5g5cb"} 0 @@ -337,36 +377,42 @@ kube_pod_status_scheduled{condition="unknown",namespace="kube-system",pod="tille kube_replicaset_metadata_generation{namespace="default",replicaset="jumpy-owl-redis-3481028193"} 1 kube_replicaset_metadata_generation{namespace="jenkins",replicaset="wise-lynx-jenkins-1616735317"} 1 kube_replicaset_metadata_generation{namespace="kube-system",replicaset="kube-state-metrics-1303537707"} 1 +kube_replicaset_metadata_generation{namespace="test",replicaset="kube-state-metrics-1303537707"} 1 kube_replicaset_metadata_generation{namespace="kube-system",replicaset="tiller-deploy-3067024529"} 1 # HELP kube_replicaset_spec_replicas Number of desired pods for a ReplicaSet. # TYPE kube_replicaset_spec_replicas gauge kube_replicaset_spec_replicas{namespace="default",replicaset="jumpy-owl-redis-3481028193"} 1 kube_replicaset_spec_replicas{namespace="jenkins",replicaset="wise-lynx-jenkins-1616735317"} 1 kube_replicaset_spec_replicas{namespace="kube-system",replicaset="kube-state-metrics-1303537707"} 2 +kube_replicaset_spec_replicas{namespace="test",replicaset="kube-state-metrics-1303537707"} 3 kube_replicaset_spec_replicas{namespace="kube-system",replicaset="tiller-deploy-3067024529"} 1 # HELP kube_replicaset_status_fully_labeled_replicas The number of fully labeled replicas per ReplicaSet. # TYPE kube_replicaset_status_fully_labeled_replicas gauge kube_replicaset_status_fully_labeled_replicas{namespace="default",replicaset="jumpy-owl-redis-3481028193"} 1 kube_replicaset_status_fully_labeled_replicas{namespace="jenkins",replicaset="wise-lynx-jenkins-1616735317"} 1 kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="kube-state-metrics-1303537707"} 2 +kube_replicaset_status_fully_labeled_replicas{namespace="test",replicaset="kube-state-metrics-1303537707"} 4 kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="tiller-deploy-3067024529"} 1 # HELP kube_replicaset_status_observed_generation The generation observed by the ReplicaSet controller. # TYPE kube_replicaset_status_observed_generation gauge kube_replicaset_status_observed_generation{namespace="default",replicaset="jumpy-owl-redis-3481028193"} 1 kube_replicaset_status_observed_generation{namespace="jenkins",replicaset="wise-lynx-jenkins-1616735317"} 1 kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="kube-state-metrics-1303537707"} 1 +kube_replicaset_status_observed_generation{namespace="test",replicaset="kube-state-metrics-1303537707"} 5 kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="tiller-deploy-3067024529"} 1 # HELP kube_replicaset_status_ready_replicas The number of ready replicas per ReplicaSet. 
# TYPE kube_replicaset_status_ready_replicas gauge kube_replicaset_status_ready_replicas{namespace="default",replicaset="jumpy-owl-redis-3481028193"} 0 kube_replicaset_status_ready_replicas{namespace="jenkins",replicaset="wise-lynx-jenkins-1616735317"} 1 kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="kube-state-metrics-1303537707"} 1 +kube_replicaset_status_ready_replicas{namespace="test",replicaset="kube-state-metrics-1303537707"} 6 kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="tiller-deploy-3067024529"} 1 # HELP kube_replicaset_status_replicas The number of replicas per ReplicaSet. # TYPE kube_replicaset_status_replicas gauge kube_replicaset_status_replicas{namespace="default",replicaset="jumpy-owl-redis-3481028193"} 1 kube_replicaset_status_replicas{namespace="jenkins",replicaset="wise-lynx-jenkins-1616735317"} 1 kube_replicaset_status_replicas{namespace="kube-system",replicaset="kube-state-metrics-1303537707"} 2 +kube_replicaset_status_replicas{namespace="test",replicaset="kube-state-metrics-1303537707"} 7 kube_replicaset_status_replicas{namespace="kube-system",replicaset="tiller-deploy-3067024529"} 1 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter @@ -386,3 +432,34 @@ process_start_time_seconds 1.4939719827e+09 # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge process_virtual_memory_bytes 5.2932608e+07 +# HELP kube_statefulset_created Unix creation timestamp +# TYPE kube_statefulset_created gauge +kube_statefulset_created{namespace="default",statefulset="elasticsearch"} 1.511973651e+09 +kube_statefulset_created{namespace="default",statefulset="mysql"} 1.511989697e+09 +kube_statefulset_created{namespace="custom",statefulset="mysql"} 1.511999697e+09 +# HELP kube_statefulset_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_statefulset_labels gauge +kube_statefulset_labels{label_app="oci",label_io_kompose_service="elasticsearch",namespace="default",statefulset="elasticsearch"} 1 +kube_statefulset_labels{label_app="oci",label_custom_pod="true",label_io_kompose_service="s-mysql",namespace="default",statefulset="mysql"} 1 +kube_statefulset_labels{label_app="oci",label_custom_pod="true",label_io_kompose_service="s-mysql",namespace="custom",statefulset="mysql"} 1 +# HELP kube_statefulset_metadata_generation Sequence number representing a specific generation of the desired state for the StatefulSet. +# TYPE kube_statefulset_metadata_generation gauge +kube_statefulset_metadata_generation{namespace="default",statefulset="elasticsearch"} 3 +kube_statefulset_metadata_generation{namespace="default",statefulset="mysql"} 4 +kube_statefulset_metadata_generation{namespace="custom",statefulset="mysql"} 5 +# HELP kube_statefulset_replicas Number of desired pods for a StatefulSet. +# TYPE kube_statefulset_replicas gauge +kube_statefulset_replicas{namespace="default",statefulset="elasticsearch"} 4 +kube_statefulset_replicas{namespace="default",statefulset="mysql"} 5 +kube_statefulset_replicas{namespace="custom",statefulset="mysql"} 6 +# HELP kube_statefulset_status_observed_generation The generation observed by the StatefulSet controller. 
+# TYPE kube_statefulset_status_observed_generation gauge +kube_statefulset_status_observed_generation{namespace="default",statefulset="elasticsearch"} 1 +kube_statefulset_status_observed_generation{namespace="default",statefulset="mysql"} 2 +kube_statefulset_status_observed_generation{namespace="custom",statefulset="mysql"} 3 +# HELP kube_statefulset_status_replicas The number of replicas per StatefulSet. +# TYPE kube_statefulset_status_replicas gauge +kube_statefulset_status_replicas{namespace="default",statefulset="elasticsearch"} 1 +kube_statefulset_status_replicas{namespace="default",statefulset="mysql"} 2 +kube_statefulset_status_replicas{namespace="custom",statefulset="mysql"} 3 + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/stats_summary.json b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/stats_summary.json index 17c72c64..dfb10513 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/stats_summary.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/stats_summary.json @@ -76,7 +76,7 @@ "startTime": "2017-04-18T16:47:44Z", "cpu": { "time": "2017-04-20T08:06:34Z", - "usageNanoCores": 0, + "usageNanoCores": 11263994, "usageCoreNanoSeconds": 43959424 }, "memory": { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/_meta/fields.yml index 1b497211..a0795098 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/_meta/fields.yml @@ -27,6 +27,16 @@ type: long description: > CPU used nanocores + - name: node.pct + type: scaled_float + format: percentage + description: > + CPU usage as a percentage of the total node allocatable CPU + - name: limit.pct + type: scaled_float + format: percentage + description: > + CPU usage as a percentage of the defined limit for the container (or total node allocatable CPU if unlimited) - name: logs type: group description: > @@ -90,6 +100,16 @@ format: bytes description: > Total memory usage + - name: node.pct + type: scaled_float + format: percentage + description: > + Memory usage as a percentage of the total node allocatable memory + - name: limit.pct + type: scaled_float + format: percentage + description: > + Memory usage as a percentage of the defined limit for the container (or total node allocatable memory if unlimited) - name: rss type: group fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go index 655cae09..d85a32c3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go @@ -5,6 +5,7 @@ import ( "github.com/elastic/beats/metricbeat/helper" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" + "github.com/elastic/beats/metricbeat/module/kubernetes/util" ) const ( @@ -22,9 +23,10 @@ var ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("kubernetes", "container", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kubernetes", "container", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -40,9 +42,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } @@ -55,7 +61,7 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return nil, err } - events, err := eventMapping(body) + events, err := eventMapping(body, util.PerfMetrics) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container_test.go index a200f8b3..025256bb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/module/kubernetes/util" ) const testFile = "../_meta/test/stats_summary.json" @@ -21,14 +22,19 @@ func TestEventMapping(t *testing.T) { body, err := ioutil.ReadAll(f) assert.NoError(t, err, "cannot read test file "+testFile) - events, err := eventMapping(body) + cache := util.NewPerfMetricsCache() + cache.NodeCoresAllocatable.Set("gke-beats-default-pool-a5b33e2e-hdww", 2) + cache.NodeMemAllocatable.Set("gke-beats-default-pool-a5b33e2e-hdww", 146227200) + cache.ContainerMemLimit.Set(util.ContainerUID("default", "nginx-deployment-2303442956-pcqfc", "nginx"), 14622720) + + events, err := eventMapping(body, cache) assert.NoError(t, err, "error mapping "+testFile) assert.Len(t, events, 1, "got wrong number of events") testCases := map[string]interface{}{ "cpu.usage.core.ns": 43959424, - "cpu.usage.nanocores": 0, + "cpu.usage.nanocores": 11263994, "logs.available.bytes": 98727014400, "logs.capacity.bytes": 101258067968, @@ -44,6 +50,12 @@ func TestEventMapping(t *testing.T) { "memory.pagefaults": 841, "memory.majorpagefaults": 0, + // calculated pct fields: + "cpu.usage.node.pct": 0.005631997, + "cpu.usage.limit.pct": 0.005631997, + "memory.usage.node.pct": 0.01, + "memory.usage.limit.pct": 0.1, + "name": "nginx", "rootfs.available.bytes": 98727014400, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/data.go index 375dc6f9..d38bca96 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/data.go @@ -7,9 +7,10 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/kubernetes" + "github.com/elastic/beats/metricbeat/module/kubernetes/util" ) -func eventMapping(content []byte) ([]common.MapStr, error) { +func eventMapping(content []byte, perfMetrics 
*util.PerfMetricsCache) ([]common.MapStr, error) { events := []common.MapStr{} var summary kubernetes.Summary @@ -19,6 +20,8 @@ func eventMapping(content []byte) ([]common.MapStr, error) { } node := summary.Node + nodeCores := perfMetrics.NodeCoresAllocatable.Get(node.NodeName) + nodeMem := perfMetrics.NodeMemAllocatable.Get(node.NodeName) for _, pod := range summary.Pods { for _, container := range pod.Containers { containerEvent := common.MapStr{ @@ -93,6 +96,27 @@ func eventMapping(content []byte) ([]common.MapStr, error) { }, }, } + + if nodeCores > 0 { + containerEvent.Put("cpu.usage.node.pct", float64(container.CPU.UsageNanoCores)/1e9/nodeCores) + } + + if nodeMem > 0 { + containerEvent.Put("memory.usage.node.pct", float64(container.Memory.UsageBytes)/nodeMem) + } + + cuid := util.ContainerUID(pod.PodRef.Namespace, pod.PodRef.Name, container.Name) + coresLimit := perfMetrics.ContainerCoresLimit.GetWithDefault(cuid, nodeCores) + memLimit := perfMetrics.ContainerMemLimit.GetWithDefault(cuid, nodeMem) + + if coresLimit > 0 { + containerEvent.Put("cpu.usage.limit.pct", float64(container.CPU.UsageNanoCores)/1e9/coresLimit) + } + + if memLimit > 0 { + containerEvent.Put("memory.usage.limit.pct", float64(container.Memory.UsageBytes)/memLimit) + } + events = append(events, containerEvent) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml index 5f1da37e..82f09cfd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml @@ -3,7 +3,7 @@ description: > The Kubernetes events metricset collects events that are generated by objects running inside of Kubernetes - release: experimental + release: beta fields: - name: count type: long diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/config.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/config.go index 5c65f2e2..9599be0f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/config.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/config.go @@ -16,7 +16,7 @@ type Enabled struct { Enabled bool `config:"enabled"` } -func defaultKuberentesEventsConfig() kubeEventsConfig { +func defaultKubernetesEventsConfig() kubeEventsConfig { return kubeEventsConfig{ InCluster: true, SyncPeriod: 1 * time.Second, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/event.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/event.go index 50d64e39..551e5bb9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/event.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/event.go @@ -2,14 +2,13 @@ package event import ( "fmt" - "io/ioutil" + "time" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/common/kubernetes" + "github.com/elastic/beats/libbeat/common/safemapstr" "github.com/elastic/beats/metricbeat/mb" - - "github.com/ericchiang/k8s" - "github.com/ghodss/yaml" ) // init registers the MetricSet with the central registry. @@ -25,46 +24,34 @@ func init() { // MetricSet implements the mb.PushMetricSet interface, and therefore does not rely on polling. 
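// An illustrative sketch, not part of the vendored patch: it restates the pct
// formulas introduced above in module/kubernetes/container/data.go
// (usage nanocores / 1e9 / allocatable cores, usage bytes / allocatable or
// limit bytes) using the fixture numbers asserted in container_test.go.
// The constant names below are the sketch's own, not identifiers from the code.
package main

import "fmt"

func main() {
	const (
		usageNanoCores = 11263994.0  // container.CPU.UsageNanoCores from stats_summary.json
		usageMemBytes  = 1462272.0   // container.Memory.UsageBytes
		nodeCores      = 2.0         // NodeCoresAllocatable cache entry set in the test
		nodeMemBytes   = 146227200.0 // NodeMemAllocatable cache entry
		memLimitBytes  = 14622720.0  // ContainerMemLimit cache entry
	)
	fmt.Println(usageNanoCores / 1e9 / nodeCores) // cpu.usage.node.pct -> 0.005631997
	fmt.Println(usageMemBytes / nodeMemBytes)     // memory.usage.node.pct -> 0.01
	fmt.Println(usageMemBytes / memLimitBytes)    // memory.usage.limit.pct -> 0.1
}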
type MetricSet struct { mb.BaseMetricSet - watcher *Watcher + watcher kubernetes.Watcher } // New create a new instance of the MetricSet // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The kubernetes event metricset is experimental") + cfgwarn.Beta("The kubernetes event metricset is beta") - config := defaultKuberentesEventsConfig() + config := defaultKubernetesEventsConfig() err := base.Module().UnpackConfig(&config) if err != nil { return nil, fmt.Errorf("fail to unpack the kubernetes event configuration: %s", err) } - var client *k8s.Client - if config.InCluster == true { - client, err = k8s.NewInClusterClient() - if err != nil { - return nil, fmt.Errorf("Unable to get in cluster configuration") - } - } else { - data, err := ioutil.ReadFile(config.KubeConfig) - if err != nil { - return nil, fmt.Errorf("read kubeconfig: %v", err) - } - - // Unmarshal YAML into a Kubernetes config object. - var config k8s.Config - if err = yaml.Unmarshal(data, &config); err != nil { - return nil, fmt.Errorf("unmarshal kubeconfig: %v", err) - } - client, err = k8s.NewClient(&config) - if err != nil { - return nil, err - } + client, err := kubernetes.GetKubernetesClient(config.InCluster, config.KubeConfig) + if err != nil { + return nil, fmt.Errorf("fail to get kubernetes client: %s", err.Error()) } - watcher := NewWatcher(client, config.SyncPeriod, config.Namespace) + watcher, err := kubernetes.NewWatcher(client, &kubernetes.Event{}, kubernetes.WatchOptions{ + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + }) + if err != nil { + return nil, fmt.Errorf("fail to init kubernetes watcher: %s", err.Error()) + } return &MetricSet{ BaseMetricSet: base, @@ -74,24 +61,36 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Run method provides the Kubernetes event watcher with a reporter with which events can be reported. 
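// An illustrative sketch, not part of the vendored patch: the filtering rule
// used by the FilterFunc added below drops events whose LastTimestamp predates
// the metricset start time, so a freshly started watcher does not re-report the
// cluster's historical events. time.Time stands in here for the event timestamp
// type used by the real watcher.
package main

import (
	"fmt"
	"time"
)

// keep mirrors the FilterFunc: only events at or after the start time are reported.
func keep(lastTimestamp, started time.Time) bool {
	return !lastTimestamp.Before(started)
}

func main() {
	started := time.Now()
	fmt.Println(keep(started.Add(-time.Hour), started))  // false: old event, skipped
	fmt.Println(keep(started.Add(time.Minute), started)) // true: new event, reported
}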
func (m *MetricSet) Run(reporter mb.PushReporter) { - // Start event watcher - m.watcher.Run() - - for { - select { - case <-reporter.Done(): - m.watcher.Stop() - return - case msg := <-m.watcher.eventQueue: - // Ignore events that are deleted - if msg.Metadata.DeletionTimestamp == nil { - reporter.Event(generateMapStrFromEvent(msg)) - } - } + now := time.Now() + handler := kubernetes.ResourceEventHandlerFuncs{ + AddFunc: func(obj kubernetes.Resource) { + reporter.Event(generateMapStrFromEvent(obj.(*kubernetes.Event))) + }, + UpdateFunc: func(obj kubernetes.Resource) { + reporter.Event(generateMapStrFromEvent(obj.(*kubernetes.Event))) + }, + // ignore events that are deleted + DeleteFunc: nil, } + m.watcher.AddEventHandler(kubernetes.FilteringResourceEventHandler{ + // skip events happened before watch + FilterFunc: func(obj kubernetes.Resource) bool { + eve := obj.(*kubernetes.Event) + if eve.LastTimestamp.Before(now) { + return false + } + return true + }, + Handler: handler, + }) + // start event watcher + m.watcher.Start() + <-reporter.Done() + m.watcher.Stop() + return } -func generateMapStrFromEvent(eve *Event) common.MapStr { +func generateMapStrFromEvent(eve *kubernetes.Event) common.MapStr { eventMeta := common.MapStr{ "timestamp": common.MapStr{ "created": eve.Metadata.CreationTimestamp, @@ -107,7 +106,7 @@ func generateMapStrFromEvent(eve *Event) common.MapStr { if len(eve.Metadata.Labels) != 0 { labels := make(common.MapStr, len(eve.Metadata.Labels)) for k, v := range eve.Metadata.Labels { - labels[k] = v + safemapstr.Put(labels, k, v) } eventMeta["labels"] = labels @@ -116,7 +115,7 @@ func generateMapStrFromEvent(eve *Event) common.MapStr { if len(eve.Metadata.Annotations) != 0 { annotations := make(common.MapStr, len(eve.Metadata.Annotations)) for k, v := range eve.Metadata.Annotations { - annotations[k] = v + safemapstr.Put(annotations, k, v) } eventMeta["annotations"] = annotations diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/types.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/types.go deleted file mode 100644 index 2abb51bf..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/types.go +++ /dev/null @@ -1,46 +0,0 @@ -package event - -import "time" - -type ObjectMeta struct { - Annotations map[string]string `json:"annotations"` - CreationTimestamp *time.Time `json:"creationTimestamp"` - DeletionTimestamp *time.Time `json:"deletionTimestamp"` - GenerateName string `json:"generateName"` - Labels map[string]string `json:"labels"` - Name string `json:"name"` - Namespace string `json:"namespace"` - OwnerReferences []struct { - APIVersion string `json:"apiVersion"` - Controller bool `json:"controller"` - Kind string `json:"kind"` - Name string `json:"name"` - UID string `json:"uid"` - } `json:"ownerReferences"` - ResourceVersion string `json:"resourceVersion"` - SelfLink string `json:"selfLink"` - UID string `json:"uid"` -} - -type Event struct { - APIVersion string `json:"apiVersion"` - Count int64 `json:"count"` - FirstTimestamp *time.Time `json:"firstTimestamp"` - InvolvedObject struct { - APIVersion string `json:"apiVersion"` - Kind string `json:"kind"` - Name string `json:"name"` - ResourceVersion string `json:"resourceVersion"` - UID string `json:"uid"` - } `json:"involvedObject"` - Kind string `json:"kind"` - LastTimestamp *time.Time `json:"lastTimestamp"` - Message string `json:"message"` - Metadata ObjectMeta `json:"metadata"` - Reason string `json:"reason"` - Source struct { - 
Component string `json:"component"` - Host string `json:"host"` - } `json:"source"` - Type string `json:"type"` -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/watcher.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/watcher.go deleted file mode 100644 index 3ebe7d7d..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/watcher.go +++ /dev/null @@ -1,118 +0,0 @@ -package event - -import ( - "context" - "encoding/json" - "time" - - "github.com/elastic/beats/libbeat/logp" - - "github.com/ericchiang/k8s" - corev1 "github.com/ericchiang/k8s/api/v1" -) - -// Watcher is a controller that synchronizes Pods. -type Watcher struct { - kubeClient *k8s.Client - namespace string - syncPeriod time.Duration - eventQueue chan *Event - lastResourceVersion string - ctx context.Context - stop context.CancelFunc -} - -// NewWatcher initializes the watcher client to provide a local state of -// pods from the cluster (filtered to the given host) -func NewWatcher(kubeClient *k8s.Client, syncPeriod time.Duration, namespace string) *Watcher { - ctx, cancel := context.WithCancel(context.Background()) - return &Watcher{ - kubeClient: kubeClient, - namespace: namespace, - syncPeriod: syncPeriod, - eventQueue: make(chan *Event, 10), - lastResourceVersion: "0", - ctx: ctx, - stop: cancel, - } -} - -// watchEvents watches on the Kubernetes API server and puts them onto a channel. -// watchEvents only starts from the most recent event. -func (w *Watcher) watchEvents() { - for { - //To avoid writing old events, list events to get last resource version - events, err := w.kubeClient.CoreV1().ListEvents( - w.ctx, - w.namespace, - ) - - if err != nil { - //if listing fails try again after sometime - logp.Err("kubernetes: List API error %v", err) - // Sleep for a second to prevent API server from being bombarded - // API server could be down - time.Sleep(time.Second) - continue - } - - w.lastResourceVersion = events.Metadata.GetResourceVersion() - - logp.Info("kubernetes: %s", "Watching API for events") - watcher, err := w.kubeClient.CoreV1().WatchEvents( - w.ctx, - w.namespace, - k8s.ResourceVersion(w.lastResourceVersion), - ) - if err != nil { - //watch events failures should be logged and gracefully failed over as metadata retrieval - //should never stop. 
- logp.Err("kubernetes: Watching API eror %v", err) - // Sleep for a second to prevent API server from being bombarded - // API server could be down - time.Sleep(time.Second) - continue - } - - for { - _, eve, err := watcher.Next() - if err != nil { - logp.Err("kubernetes: Watching API error %v", err) - break - } - - event := w.getEventMeta(eve) - if event != nil { - w.eventQueue <- event - } - - } - } -} - -func (w *Watcher) Run() { - // Start watching on events - go w.watchEvents() -} - -func (w *Watcher) getEventMeta(pod *corev1.Event) *Event { - bytes, err := json.Marshal(pod) - if err != nil { - logp.Warn("Unable to marshal %v", pod.String()) - return nil - } - - eve := &Event{} - err = json.Unmarshal(bytes, eve) - if err != nil { - logp.Warn("Unable to marshal %v", pod.String()) - return nil - } - - return eve -} - -func (w *Watcher) Stop() { - w.stop() - close(w.eventQueue) -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go index 17c3d7af..c5f5809b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go @@ -22,9 +22,10 @@ var ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("kubernetes", "node", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kubernetes", "node", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -40,9 +41,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/_meta/fields.yml index d5f24bf9..fc2b4d62 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/_meta/fields.yml @@ -35,3 +35,46 @@ type: long description: > Tx errors + - name: cpu + type: group + description: > + CPU usage metrics + fields: + - name: usage + type: group + fields: + - name: nanocores + type: long + description: > + CPU used nanocores + - name: node.pct + type: scaled_float + format: percentage + description: > + CPU usage as a percentage of the total node CPU + - name: limit.pct + type: scaled_float + format: percentage + description: > + CPU usage as a percentage of the defined limit for the pod containers (or total node CPU if unlimited) + - name: memory + type: group + fields: + - name: usage + type: group + fields: + - name: bytes + type: long + format: bytes + description: > + Total memory usage + - name: node.pct + type: scaled_float + format: percentage + description: > + Memory usage as a percentage of the total node allocatable memory + - name: limit.pct + type: scaled_float + format: percentage + description: > + Memory usage as a percentage of the defined limit for the pod containers (or total node allocatable memory if unlimited) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/data.go index d85a1d93..0a8b7683 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/data.go @@ -7,9 +7,10 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/kubernetes" + "github.com/elastic/beats/metricbeat/module/kubernetes/util" ) -func eventMapping(content []byte) ([]common.MapStr, error) { +func eventMapping(content []byte, perfMetrics *util.PerfMetricsCache) ([]common.MapStr, error) { events := []common.MapStr{} var summary kubernetes.Summary @@ -19,8 +20,20 @@ func eventMapping(content []byte) ([]common.MapStr, error) { } node := summary.Node - + nodeCores := perfMetrics.NodeCoresAllocatable.Get(node.NodeName) + nodeMem := perfMetrics.NodeMemAllocatable.Get(node.NodeName) for _, pod := range summary.Pods { + var usageNanoCores, usageMem int64 + var coresLimit, memLimit float64 + + for _, cont := range pod.Containers { + cuid := util.ContainerUID(pod.PodRef.Namespace, pod.PodRef.Name, cont.Name) + usageNanoCores += cont.CPU.UsageNanoCores + usageMem += cont.Memory.UsageBytes + coresLimit += perfMetrics.ContainerCoresLimit.GetWithDefault(cuid, nodeCores) + memLimit += perfMetrics.ContainerMemLimit.GetWithDefault(cuid, nodeMem) + } + podEvent := common.MapStr{ mb.ModuleDataKey: common.MapStr{ "namespace": pod.PodRef.Namespace, @@ -31,6 +44,18 @@ func eventMapping(content []byte) ([]common.MapStr, error) { "name": pod.PodRef.Name, "start_time": pod.StartTime, + "cpu": common.MapStr{ + "usage": common.MapStr{ + "nanocores": usageNanoCores, + }, + }, + + "memory": common.MapStr{ + "usage": common.MapStr{ 
+ "bytes": usageMem, + }, + }, + "network": common.MapStr{ "rx": common.MapStr{ "bytes": pod.Network.RxBytes, @@ -42,6 +67,31 @@ func eventMapping(content []byte) ([]common.MapStr, error) { }, }, } + + if coresLimit > nodeCores { + coresLimit = nodeCores + } + + if memLimit > nodeMem { + memLimit = nodeMem + } + + if nodeCores > 0 { + podEvent.Put("cpu.usage.node.pct", float64(usageNanoCores)/1e9/nodeCores) + } + + if nodeMem > 0 { + podEvent.Put("memory.usage.node.pct", float64(usageMem)/nodeMem) + } + + if coresLimit > 0 { + podEvent.Put("cpu.usage.limit.pct", float64(usageNanoCores)/1e9/coresLimit) + } + + if memLimit > 0 { + podEvent.Put("memory.usage.limit.pct", float64(usageMem)/memLimit) + } + events = append(events, podEvent) } return events, nil diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go index ebab685f..84be743c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go @@ -5,6 +5,7 @@ import ( "github.com/elastic/beats/metricbeat/helper" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" + "github.com/elastic/beats/metricbeat/module/kubernetes/util" ) const ( @@ -22,9 +23,10 @@ var ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("kubernetes", "pod", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kubernetes", "pod", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -40,9 +42,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
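// An illustrative sketch, not part of the vendored patch: the aggregation added
// in module/kubernetes/pod/data.go sums per-container usage, lets per-container
// limits fall back to the node allocatable values when none is cached
// (GetWithDefault), caps the summed limits at the node allocatable values, and
// then applies the same pct formulas as the container metricset. The types and
// names below are the sketch's own; inputs come from pod_test.go.
package main

import "fmt"

// containerSample holds per-container inputs; a zero limit stands in for
// "no limit cached", which is an assumption of this sketch.
type containerSample struct {
	usageNanoCores, usageMemBytes float64
	coresLimit, memLimitBytes     float64
}

func podPcts(containers []containerSample, nodeCores, nodeMemBytes float64) map[string]float64 {
	var usageCores, usageMem, coresLimit, memLimit float64
	for _, c := range containers {
		usageCores += c.usageNanoCores / 1e9
		usageMem += c.usageMemBytes
		if c.coresLimit > 0 {
			coresLimit += c.coresLimit
		} else {
			coresLimit += nodeCores // fall back to node allocatable cores
		}
		if c.memLimitBytes > 0 {
			memLimit += c.memLimitBytes
		} else {
			memLimit += nodeMemBytes // fall back to node allocatable memory
		}
	}
	if coresLimit > nodeCores { // summed limits are capped at node allocatable
		coresLimit = nodeCores
	}
	if memLimit > nodeMemBytes {
		memLimit = nodeMemBytes
	}
	return map[string]float64{
		"cpu.usage.node.pct":     usageCores / nodeCores,
		"memory.usage.node.pct":  usageMem / nodeMemBytes,
		"cpu.usage.limit.pct":    usageCores / coresLimit,
		"memory.usage.limit.pct": usageMem / memLimit,
	}
}

func main() {
	// One nginx container; node with 2 allocatable cores and 146227200 allocatable
	// bytes; container memory limit 14622720 bytes (values set in pod_test.go).
	pod := []containerSample{{usageNanoCores: 11263994, usageMemBytes: 1462272, memLimitBytes: 14622720}}
	fmt.Println(podPcts(pod, 2, 146227200))
	// approx. map[cpu.usage.limit.pct:0.005631997 cpu.usage.node.pct:0.005631997
	//             memory.usage.limit.pct:0.1 memory.usage.node.pct:0.01]
}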
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } @@ -55,7 +61,7 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { return nil, err } - events, err := eventMapping(body) + events, err := eventMapping(body, util.PerfMetrics) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod_test.go index d0ed5ffb..e527c64a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/module/kubernetes/util" ) const testFile = "../_meta/test/stats_summary.json" @@ -21,7 +22,12 @@ func TestEventMapping(t *testing.T) { body, err := ioutil.ReadAll(f) assert.NoError(t, err, "cannot read test file "+testFile) - events, err := eventMapping(body) + cache := util.NewPerfMetricsCache() + cache.NodeCoresAllocatable.Set("gke-beats-default-pool-a5b33e2e-hdww", 2) + cache.NodeMemAllocatable.Set("gke-beats-default-pool-a5b33e2e-hdww", 146227200) + cache.ContainerMemLimit.Set(util.ContainerUID("default", "nginx-deployment-2303442956-pcqfc", "nginx"), 14622720) + + events, err := eventMapping(body, cache) assert.NoError(t, err, "error mapping "+testFile) assert.Len(t, events, 1, "got wrong number of events") @@ -33,6 +39,15 @@ func TestEventMapping(t *testing.T) { "network.rx.errors": 0, "network.tx.bytes": 72447, "network.tx.errors": 0, + + // calculated pct fields: + "cpu.usage.nanocores": 11263994, + "cpu.usage.node.pct": 0.005631997, + "cpu.usage.limit.pct": 0.005631997, + + "memory.usage.bytes": 1462272, + "memory.usage.node.pct": 0.01, + "memory.usage.limit.pct": 0.1, } for k, v := range testCases { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/data.go index efc22b28..d3b8b35d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/data.go @@ -21,11 +21,16 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { if container == "" { continue } - event, ok := eventsMap[container] + + namespace := util.GetLabel(metric, "namespace") + pod := util.GetLabel(metric, "pod") + containerKey := namespace + "::" + pod + "::" + container + event, ok := eventsMap[containerKey] if !ok { event = common.MapStr{} - eventsMap[container] = event + eventsMap[containerKey] = event } + switch family.GetName() { case "kube_pod_container_info": event.Put(mb.ModuleDataKey+".pod.name", util.GetLabel(metric, "pod")) @@ -39,6 +44,8 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { case "kube_pod_container_resource_limits_cpu_cores": event.Put(mb.ModuleDataKey+".node.name", util.GetLabel(metric, "node")) event.Put("cpu.limit.nanocores", metric.GetGauge().GetValue()*nanocores) + cuid := util.ContainerUID(util.GetLabel(metric, "namespace"), util.GetLabel(metric, "pod"), util.GetLabel(metric, "container")) + util.PerfMetrics.ContainerCoresLimit.Set(cuid, 
metric.GetGauge().GetValue()) case "kube_pod_container_resource_requests_cpu_cores": event.Put(mb.ModuleDataKey+".node.name", util.GetLabel(metric, "node")) @@ -47,6 +54,8 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { case "kube_pod_container_resource_limits_memory_bytes": event.Put(mb.ModuleDataKey+".node.name", util.GetLabel(metric, "node")) event.Put("memory.limit.bytes", metric.GetGauge().GetValue()) + cuid := util.ContainerUID(util.GetLabel(metric, "namespace"), util.GetLabel(metric, "pod"), util.GetLabel(metric, "container")) + util.PerfMetrics.ContainerMemLimit.Set(cuid, metric.GetGauge().GetValue()) case "kube_pod_container_resource_requests_memory_bytes": event.Put(mb.ModuleDataKey+".node.name", util.GetLabel(metric, "node")) @@ -80,7 +89,8 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { } } - var events []common.MapStr + // initialize, populate events array from values in eventsMap + events := make([]common.MapStr, 0, len(eventsMap)) for _, event := range eventsMap { events = append(events, event) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go index 29cfb87a..9ad7a845 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go @@ -40,9 +40,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + prometheus, err := helper.NewPrometheusClient(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - prometheus: helper.NewPrometheusClient(base), + prometheus: prometheus, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container_test.go index 6adb69bc..4de73cc0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container_test.go @@ -44,38 +44,96 @@ func TestEventMapping(t *testing.T) { events, err := f.Fetch() assert.NoError(t, err) - assert.Equal(t, 9, len(events), "Wrong number of returned events") - - testCases := map[string]interface{}{ - "_module.namespace": "kube-system", - "_module.node.name": "minikube", - "_module.pod.name": "kube-dns-v20-5g5cb", - - "image": "gcr.io/google_containers/exechealthz-amd64:1.2", - "status.phase": "running", - "status.ready": true, - "status.restarts": 2, - - "memory.limit.bytes": 52428800, - "memory.request.bytes": 52428800, - "cpu.request.nanocores": 10000000, - } + assert.Equal(t, 12, len(events), "Wrong number of returned events") + testCases := testCases() for _, event := range events { name, err := event.GetValue("name") - if err == nil && name == "healthz" { - for k, v := range testCases { - testValue(t, event, k, v) + if err == nil { + namespace, err := event.GetValue("_module.namespace") + if err != nil { + continue + } + pod, err := event.GetValue("_module.pod.name") + if err != nil { + continue + } + eventKey := namespace.(string) + "@" + pod.(string) + "@" + name.(string) + oneTestCase, 
oneTestCaseFound := testCases[eventKey] + if oneTestCaseFound { + for k, v := range oneTestCase { + testValue(eventKey, t, event, k, v) + } + delete(testCases, eventKey) } - return } } - t.Error("Test reference event not found") + if len(testCases) > 0 { + t.Errorf("Test reference events not found: %v, \n\ngot: %v", testCases, events) + } } -func testValue(t *testing.T, event common.MapStr, field string, expected interface{}) { +func testValue(eventKey string, t *testing.T, event common.MapStr, field string, expected interface{}) { data, err := event.GetValue(field) - assert.NoError(t, err, "Could not read field "+field) - assert.EqualValues(t, expected, data, "Wrong value for field "+field) + assert.NoError(t, err, eventKey+": Could not read field "+field) + assert.EqualValues(t, expected, data, eventKey+": Wrong value for field "+field) +} + +// Test cases built to match 3 examples in 'module/kubernetes/_meta/test/kube-state-metrics'. +// In particular, test same named containers in different namespaces +func testCases() map[string]map[string]interface{} { + return map[string]map[string]interface{}{ + "kube-system@kube-dns-v20-5g5cb@kubedns": { + "_namespace": "container", + "_module.namespace": "kube-system", + "_module.node.name": "minikube", + "_module.pod.name": "kube-dns-v20-5g5cb", + "name": "kubedns", + "id": "docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62", + + "image": "gcr.io/google_containers/kubedns-amd64:1.9", + "status.phase": "running", + "status.ready": true, + "status.restarts": 2, + + "memory.limit.bytes": 178257920, + "memory.request.bytes": 73400320, + "cpu.request.nanocores": float64(1e+08), + }, + "test@kube-dns-v20-5g5cb-test@kubedns": { + "_namespace": "container", + "_module.namespace": "test", + "_module.node.name": "minikube-test", + "_module.pod.name": "kube-dns-v20-5g5cb-test", + "name": "kubedns", + "id": "docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test", + + "image": "gcr.io/google_containers/kubedns-amd64:1.9-test", + "status.phase": "terminate", + "status.ready": false, + "status.restarts": 3, + + "memory.limit.bytes": 278257920, + "memory.request.bytes": 83400320, + "cpu.request.nanocores": float64(2e+08), + }, + "kube-system@kube-dns-v20-5g5cb@healthz": { + "_namespace": "container", + "_module.namespace": "kube-system", + "_module.node.name": "minikube", + "_module.pod.name": "kube-dns-v20-5g5cb", + "name": "healthz", + "id": "docker://52fa55e051dc5b68e44c027588685b7edd85aaa03b07f7216d399249ff4fc821", + + "image": "gcr.io/google_containers/exechealthz-amd64:1.2", + "status.phase": "running", + "status.ready": true, + "status.restarts": 2, + + "memory.limit.bytes": 52428800, + "memory.request.bytes": 52428800, + "cpu.request.nanocores": float64(1e+07), + }, + } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/data.go index 9634de2d..a9b21660 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/data.go @@ -21,11 +21,14 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { if deployment == "" { continue } - event, ok := eventsMap[deployment] + namespace := util.GetLabel(metric, "namespace") + deploymentKey := namespace + "::" + deployment + event, ok := eventsMap[deploymentKey] if !ok { event = common.MapStr{} - 
eventsMap[deployment] = event + eventsMap[deploymentKey] = event } + switch family.GetName() { case "kube_deployment_metadata_generation": event.Put(mb.ModuleDataKey+".namespace", util.GetLabel(metric, "namespace")) @@ -54,7 +57,8 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { } } - var events []common.MapStr + // initialize, populate events array from values in eventsMap + events := make([]common.MapStr, 0, len(eventsMap)) for _, event := range eventsMap { events = append(events, event) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment.go index 3c507045..f4a51dd9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment.go @@ -40,9 +40,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + prometheus, err := helper.NewPrometheusClient(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - prometheus: helper.NewPrometheusClient(base), + prometheus: prometheus, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment_test.go index 69f2cd73..3a2761a3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment/state_deployment_test.go @@ -44,35 +44,76 @@ func TestEventMapping(t *testing.T) { events, err := f.Fetch() assert.NoError(t, err) - assert.Equal(t, 4, len(events), "Wrong number of returned events") - - testCases := map[string]interface{}{ - "_module.namespace": "default", - - "name": "jumpy-owl-redis", - "paused": false, - - "replicas.available": 0, - "replicas.desired": 1, - "replicas.unavailable": 1, - "replicas.updated": 1, - } + assert.Equal(t, 5, len(events), "Wrong number of returned events") + testCases := testCases() for _, event := range events { name, err := event.GetValue("name") - if err == nil && name == "jumpy-owl-redis" { - for k, v := range testCases { - testValue(t, event, k, v) + if err == nil { + namespace, err := event.GetValue("_module.namespace") + if err == nil { + eventKey := namespace.(string) + "@" + name.(string) + oneTestCase, oneTestCaseFound := testCases[eventKey] + if oneTestCaseFound { + for k, v := range oneTestCase { + testValue(eventKey, t, event, k, v) + } + delete(testCases, eventKey) + } } - return } } - t.Error("Test reference event not found") + if len(testCases) > 0 { + t.Errorf("Test reference events not found: %v, \n\ngot: %v", testCases, events) + } } -func testValue(t *testing.T, event common.MapStr, field string, expected interface{}) { +func testValue(eventKey string, t *testing.T, event common.MapStr, field string, expected interface{}) { data, err := event.GetValue(field) - assert.NoError(t, err, "Could not read field "+field) - assert.EqualValues(t, expected, data, "Wrong value for field "+field) + assert.NoError(t, err, eventKey+": Could not read field "+field) + assert.EqualValues(t, expected, data, eventKey+": Wrong 
value for field "+field) +} + +// Test cases built to match 3 examples in 'module/kubernetes/_meta/test/kube-state-metrics'. +// In particular, test same named deployments in different namespaces +func testCases() map[string]map[string]interface{} { + return map[string]map[string]interface{}{ + "default@jumpy-owl-redis": { + "_namespace": "deployment", + "_module.namespace": "default", + + "name": "jumpy-owl-redis", + "paused": false, + + "replicas.available": 0, + "replicas.desired": 1, + "replicas.unavailable": 1, + "replicas.updated": 1, + }, + "test@jumpy-owl-redis": { + "_namespace": "deployment", + "_module.namespace": "test", + + "name": "jumpy-owl-redis", + "paused": true, + + "replicas.available": 6, + "replicas.desired": 2, + "replicas.unavailable": 7, + "replicas.updated": 8, + }, + "kube-system@tiller-deploy": { + "_namespace": "deployment", + "_module.namespace": "kube-system", + + "name": "tiller-deploy", + "paused": false, + + "replicas.available": 1, + "replicas.desired": 1, + "replicas.unavailable": 0, + "replicas.updated": 1, + }, + } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/data.go index 138a5625..ce336e29 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/data.go @@ -28,12 +28,14 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { case "kube_node_status_allocatable_cpu_cores": event.Put("cpu.allocatable.cores", metric.GetGauge().GetValue()) + util.PerfMetrics.NodeCoresAllocatable.Set(util.GetLabel(metric, "node"), metric.GetGauge().GetValue()) case "kube_node_status_capacity_cpu_cores": event.Put("cpu.capacity.cores", metric.GetGauge().GetValue()) case "kube_node_status_allocatable_memory_bytes": event.Put("memory.allocatable.bytes", metric.GetGauge().GetValue()) + util.PerfMetrics.NodeMemAllocatable.Set(util.GetLabel(metric, "node"), metric.GetGauge().GetValue()) case "kube_node_status_capacity_memory_bytes": event.Put("memory.capacity.bytes", metric.GetGauge().GetValue()) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go index 302fe592..52f05aa7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go @@ -40,9 +40,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + prometheus, err := helper.NewPrometheusClient(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - prometheus: helper.NewPrometheusClient(base), + prometheus: prometheus, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node_test.go index 321b0566..ed282bbc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node_test.go @@ -44,32 +44,67 @@ func TestEventMapping(t *testing.T) { events, err := f.Fetch() assert.NoError(t, err) - assert.Equal(t, 1, len(events), "Wrong number of returned events") + assert.Equal(t, 2, len(events), "Wrong number of returned events") + + testCases := testCases() + for _, event := range events { + name, err := event.GetValue("name") + if err == nil { + eventKey := name.(string) + oneTestCase, oneTestCaseFound := testCases[eventKey] + if oneTestCaseFound { + for k, v := range oneTestCase { + testValue(eventKey, t, event, k, v) + } + delete(testCases, eventKey) + } + } + } - testCases := map[string]interface{}{ - "_namespace": "node", - "name": "minikube", + if len(testCases) > 0 { + t.Errorf("Test reference events not found: %v, \n\ngot: %v", testCases, events) + } +} - "status.ready": "true", - "status.unschedulable": false, +func testValue(eventKey string, t *testing.T, event common.MapStr, field string, expected interface{}) { + data, err := event.GetValue(field) + assert.NoError(t, err, eventKey+": Could not read field "+field) + assert.EqualValues(t, expected, data, eventKey+": Wrong value for field "+field) +} - "cpu.allocatable.cores": 2, - "cpu.capacity.cores": 2, +func testCases() map[string]map[string]interface{} { + return map[string]map[string]interface{}{ + "minikube": { + "_namespace": "node", + "name": "minikube", - "memory.allocatable.bytes": 2097786880, - "memory.capacity.bytes": 2097786880, + "status.ready": "true", + "status.unschedulable": false, - "pod.allocatable.total": 110, - "pod.capacity.total": 110, - } + "cpu.allocatable.cores": 2, + "cpu.capacity.cores": 2, - for k, v := range testCases { - testValue(t, events[0], k, v) - } -} + "memory.allocatable.bytes": 2097786880, + "memory.capacity.bytes": 2097786880, -func testValue(t *testing.T, event common.MapStr, field string, expected interface{}) { - data, err := event.GetValue(field) - assert.NoError(t, err, "Could not read field "+field) - assert.EqualValues(t, expected, data, "Wrong value for field "+field) + "pod.allocatable.total": 110, + "pod.capacity.total": 110, + }, + "minikube-test": { + "_namespace": "node", + "name": "minikube-test", + + "status.ready": "true", + "status.unschedulable": true, + + "cpu.allocatable.cores": 3, + "cpu.capacity.cores": 4, + + "memory.allocatable.bytes": 3097786880, + "memory.capacity.bytes": 4097786880, + + "pod.allocatable.total": 210, + "pod.capacity.total": 310, + }, + } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/data.go index 6bd76937..7a2790c5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/data.go @@ -18,11 +18,14 @@ func 
eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { if pod == "" { continue } - event, ok := eventsMap[pod] + namespace := util.GetLabel(metric, "namespace") + podKey := namespace + "::" + pod + event, ok := eventsMap[podKey] if !ok { event = common.MapStr{} - eventsMap[pod] = event + eventsMap[podKey] = event } + switch family.GetName() { case "kube_pod_info": event.Put(mb.ModuleDataKey+".node.name", util.GetLabel(metric, "node")) @@ -62,7 +65,8 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { } } - var events []common.MapStr + // initialize, populate events array from values in eventsMap + events := make([]common.MapStr, 0, len(eventsMap)) for _, event := range eventsMap { events = append(events, event) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod.go index d830bd42..0fbddda4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod.go @@ -40,9 +40,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + prometheus, err := helper.NewPrometheusClient(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - prometheus: helper.NewPrometheusClient(base), + prometheus: prometheus, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod_test.go index 7035a491..39fa60fc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_pod/state_pod_test.go @@ -44,36 +44,79 @@ func TestEventMapping(t *testing.T) { events, err := f.Fetch() assert.NoError(t, err) - assert.Equal(t, 8, len(events), "Wrong number of returned events") - - testCases := map[string]interface{}{ - "_module.namespace": "default", - "_module.node.name": "minikube", - "name": "jumpy-owl-redis-3481028193-s78x9", - - "host_ip": "192.168.99.100", - "ip": "172.17.0.4", - - "status.phase": "succeeded", - "status.ready": "false", - "status.scheduled": "true", - } + assert.Equal(t, 11, len(events), "Wrong number of returned events") + testCases := testCases() for _, event := range events { name, err := event.GetValue("name") - if err == nil && name == "jumpy-owl-redis-3481028193-s78x9" { - for k, v := range testCases { - testValue(t, event, k, v) + if err == nil { + namespace, err := event.GetValue("_module.namespace") + if err == nil { + eventKey := namespace.(string) + "@" + name.(string) + oneTestCase, oneTestCaseFound := testCases[eventKey] + if oneTestCaseFound { + for k, v := range oneTestCase { + testValue(eventKey, t, event, k, v) + } + delete(testCases, eventKey) + } } - return } } - t.Error("Test reference event not found") + if len(testCases) > 0 { + t.Errorf("Test reference events not found: %v\n\n got: %v", testCases, events) + } } -func testValue(t *testing.T, event common.MapStr, field string, expected interface{}) { +func testValue(eventKey string, t *testing.T, event common.MapStr, field string, expected interface{}) { data, err := event.GetValue(field) - assert.NoError(t, err, "Could not read 
field "+field) - assert.EqualValues(t, expected, data, "Wrong value for field "+field) + assert.NoError(t, err, eventKey+": Could not read field "+field) + assert.EqualValues(t, expected, data, eventKey+": Wrong value for field "+field) +} + +// Test cases built to match 3 examples in 'module/kubernetes/_meta/test/kube-state-metrics'. +// In particular, test same named pods in different namespaces +func testCases() map[string]map[string]interface{} { + return map[string]map[string]interface{}{ + "default@jumpy-owl-redis-3481028193-s78x9": { + "_namespace": "pod", + "_module.namespace": "default", + "_module.node.name": "minikube", + "name": "jumpy-owl-redis-3481028193-s78x9", + + "host_ip": "192.168.99.100", + "ip": "172.17.0.4", + + "status.phase": "succeeded", + "status.ready": "false", + "status.scheduled": "true", + }, + "test@jumpy-owl-redis-3481028193-s78x9": { + "_namespace": "pod", + "_module.namespace": "test", + "_module.node.name": "minikube-test", + "name": "jumpy-owl-redis-3481028193-s78x9", + + "host_ip": "192.168.99.200", + "ip": "172.17.0.5", + + "status.phase": "running", + "status.ready": "true", + "status.scheduled": "false", + }, + "jenkins@wise-lynx-jenkins-1616735317-svn6k": { + "_namespace": "pod", + "_module.namespace": "jenkins", + "_module.node.name": "minikube", + "name": "wise-lynx-jenkins-1616735317-svn6k", + + "host_ip": "192.168.99.100", + "ip": "172.17.0.7", + + "status.phase": "running", + "status.ready": "true", + "status.scheduled": "true", + }, + } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/data.go index 16bddcd9..b4e82bcf 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/data.go @@ -16,11 +16,14 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { if replicaset == "" { continue } - event, ok := eventsMap[replicaset] + namespace := util.GetLabel(metric, "namespace") + replicasetKey := namespace + "::" + replicaset + event, ok := eventsMap[replicasetKey] if !ok { event = common.MapStr{} - eventsMap[replicaset] = event + eventsMap[replicasetKey] = event } + switch family.GetName() { case "kube_replicaset_metadata_generation": event.Put(mb.ModuleDataKey+".namespace", util.GetLabel(metric, "namespace")) @@ -50,7 +53,8 @@ func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { } } - var events []common.MapStr + // initialize, populate events array from values in eventsMap + events := make([]common.MapStr, 0, len(eventsMap)) for _, event := range eventsMap { events = append(events, event) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset.go index 12afb657..d9f663b0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset.go @@ -40,9 +40,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + prometheus, err := helper.NewPrometheusClient(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - prometheus: helper.NewPrometheusClient(base), + prometheus: prometheus, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset_test.go index 802419e9..1cef891a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset/state_replicaset_test.go @@ -44,34 +44,70 @@ func TestEventMapping(t *testing.T) { events, err := f.Fetch() assert.NoError(t, err) - assert.Equal(t, 4, len(events), "Wrong number of returned events") - - testCases := map[string]interface{}{ - "_module.namespace": "kube-system", - "name": "kube-state-metrics-1303537707", - - "replicas.labeled": 2, - "replicas.observed": 1, - "replicas.ready": 1, - "replicas.available": 2, - "replicas.desired": 2, - } + assert.Equal(t, 5, len(events), "Wrong number of returned events") + testCases := testCases() for _, event := range events { name, err := event.GetValue("name") - if err == nil && name == "kube-state-metrics-1303537707" { - for k, v := range testCases { - testValue(t, event, k, v) + if err == nil { + namespace, err := event.GetValue("_module.namespace") + if err == nil { + eventKey := namespace.(string) + "@" + name.(string) + oneTestCase, oneTestCaseFound := testCases[eventKey] + if oneTestCaseFound { + for k, v := range oneTestCase { + testValue(eventKey, t, event, k, v) + } + delete(testCases, eventKey) + } } - return } } - t.Error("Test reference event not found") + if len(testCases) > 0 { + t.Errorf("Test reference events not found: %v", testCases) + } } -func testValue(t *testing.T, event common.MapStr, field string, expected interface{}) { +func testValue(eventKey string, t *testing.T, event common.MapStr, field string, expected interface{}) { data, err := event.GetValue(field) - assert.NoError(t, err, "Could not read field "+field) - assert.EqualValues(t, expected, data, "Wrong value for field "+field) + assert.NoError(t, err, eventKey+": Could not read field "+field) + assert.EqualValues(t, expected, data, eventKey+": Wrong value for field "+field) +} + +// Test cases built to match 3 examples in 'module/kubernetes/_meta/test/kube-state-metrics'. 
+// In particular, test same named replica sets in different namespaces +func testCases() map[string]map[string]interface{} { + return map[string]map[string]interface{}{ + "kube-system@kube-state-metrics-1303537707": { + "_module.namespace": "kube-system", + "name": "kube-state-metrics-1303537707", + + "replicas.labeled": 2, + "replicas.observed": 1, + "replicas.ready": 1, + "replicas.available": 2, + "replicas.desired": 2, + }, + "test@kube-state-metrics-1303537707": { + "_module.namespace": "test", + "name": "kube-state-metrics-1303537707", + + "replicas.labeled": 4, + "replicas.observed": 5, + "replicas.ready": 6, + "replicas.available": 7, + "replicas.desired": 3, + }, + "kube-system@tiller-deploy-3067024529": { + "_module.namespace": "kube-system", + "name": "tiller-deploy-3067024529", + + "replicas.labeled": 1, + "replicas.observed": 1, + "replicas.ready": 1, + "replicas.available": 1, + "replicas.desired": 1, + }, + } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/data.json new file mode 100644 index 00000000..4cba9b59 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/data.json @@ -0,0 +1,30 @@ +{ + "@timestamp": "2017-05-10T16:46:37.821Z", + "beat": { + "hostname": "X1", + "name": "X1", + "version": "6.0.0-alpha1" + }, + "kubernetes": { + "namespace": "jenkins", + "statefulset": { + "name": "wise-lynx-jenkins-1616735317", + "created": 123454, + "replicas": { + "desired": 1, + "observed": 1 + }, + "generation": { + "desired": 1, + "observed": 1 + } + } + }, + "metricset": { + "host": "192.168.99.100:18080", + "module": "kubernetes", + "name": "state_statefulset", + "namespace": "statefulset", + "rtt": 6719 + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/docs.asciidoc new file mode 100644 index 00000000..d2f15f21 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/docs.asciidoc @@ -0,0 +1 @@ +This is the `state_statefulset` metricset of the Kubernetes module.
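The sample event and docs above introduce the new state_statefulset metricset; its field definitions and the kube_statefulset_* mapping follow below. As a hedged illustration of how the new replica and generation fields fit together, here is a small helper that is not part of the patch, built on the common.MapStr API already used throughout these metricsets (the upstream import path is shown; the vendored path differs):

package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/common"
)

// rolloutConverged treats a StatefulSet as settled once the observed
// generation and replica count have caught up with the desired values.
// Purely illustrative; it only shows how the fields from data.json relate.
func rolloutConverged(event common.MapStr) bool {
	pairs := [][2]string{
		{"generation.observed", "generation.desired"},
		{"replicas.observed", "replicas.desired"},
	}
	for _, p := range pairs {
		observed, err1 := event.GetValue(p[0])
		desired, err2 := event.GetValue(p[1])
		if err1 != nil || err2 != nil || observed != desired {
			return false
		}
	}
	return true
}

func main() {
	event := common.MapStr{}
	event.Put("generation.observed", 1)
	event.Put("generation.desired", 1)
	event.Put("replicas.observed", 1)
	event.Put("replicas.desired", 1)
	fmt.Println(rolloutConverged(event)) // true for the sample event above
}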
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/fields.yml new file mode 100644 index 00000000..c524aa07 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/_meta/fields.yml @@ -0,0 +1,40 @@ +- name: statefulset + type: group + description: > + kubernetes stateful set metrics + release: ga + fields: + - name: name + type: keyword + description: > + Kubernetes stateful set name + - name: created + type: long + description: > + The creation timestamp (epoch) for StatefulSet + - name: replicas + type: group + description: > + Kubernetes stateful set replicas status + fields: + - name: observed + type: long + description: > + The number of observed replicas per StatefulSet + - name: desired + type: long + description: > + The number of desired replicas per StatefulSet + - name: generation + type: group + description: > + Kubernetes stateful set generation information + fields: + - name: observed + type: long + description: > + The observed generation per StatefulSet + - name: desired + type: long + description: > + The desired generation per StatefulSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/data.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/data.go new file mode 100644 index 00000000..aa0d3833 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/data.go @@ -0,0 +1,53 @@ +package state_statefulset + +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/kubernetes/util" + + dto "github.com/prometheus/client_model/go" +) + +func eventMapping(families []*dto.MetricFamily) ([]common.MapStr, error) { + eventsMap := map[string]common.MapStr{} + for _, family := range families { + for _, metric := range family.GetMetric() { + statefulset := util.GetLabel(metric, "statefulset") + if statefulset == "" { + continue + } + namespace := util.GetLabel(metric, "namespace") + statefulsetKey := namespace + "::" + statefulset + event, ok := eventsMap[statefulsetKey] + if !ok { + event = common.MapStr{} + eventsMap[statefulsetKey] = event + } + switch family.GetName() { + case "kube_statefulset_metadata_generation": + event.Put(mb.ModuleDataKey+".namespace", util.GetLabel(metric, "namespace")) + event.Put(mb.NamespaceKey, "statefulset") + event.Put("name", util.GetLabel(metric, "statefulset")) + event.Put("generation.desired", metric.GetGauge().GetValue()) + case "kube_statefulset_status_observed_generation": + event.Put("generation.observed", metric.GetGauge().GetValue()) + case "kube_statefulset_created": + event.Put("created", metric.GetGauge().GetValue()) + case "kube_statefulset_replicas": + event.Put("replicas.desired", metric.GetGauge().GetValue()) + case "kube_statefulset_status_replicas": + event.Put("replicas.observed", metric.GetGauge().GetValue()) + default: + // Ignore unknown metric + continue + } + } + } + + // initialize, populate events array from values in eventsMap + events := make([]common.MapStr, 0, len(eventsMap)) + for _, event := range eventsMap { + events = append(events, event) + } + return events, nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/state_statefulset.go 
b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/state_statefulset.go new file mode 100644 index 00000000..bfeba9f1 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/state_statefulset.go @@ -0,0 +1,63 @@ +package state_statefulset + +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/helper" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/metrics" +) + +var ( + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + }.Build() +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("kubernetes", "state_statefulset", New, hostParser); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + prometheus *helper.Prometheus +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + prometheus, err := helper.NewPrometheusClient(base) + if err != nil { + return nil, err + } + return &MetricSet{ + BaseMetricSet: base, + prometheus: prometheus, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. 
+func (m *MetricSet) Fetch() ([]common.MapStr, error) { + families, err := m.prometheus.GetFamilies() + if err != nil { + return nil, err + } + + return eventMapping(families) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/state_statefulset_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/state_statefulset_test.go new file mode 100644 index 00000000..2e6dd8a8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset/state_statefulset_test.go @@ -0,0 +1,111 @@ +// +build !integration + +package state_statefulset + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" +) + +const testFile = "../_meta/test/kube-state-metrics" + +func TestEventMapping(t *testing.T) { + file, err := os.Open(testFile) + assert.NoError(t, err, "cannot open test file "+testFile) + + body, err := ioutil.ReadAll(file) + assert.NoError(t, err, "cannot read test file "+testFile) + + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "text/plain; charset=ISO-8859-1") + w.Write([]byte(body)) + })) + + server.Start() + defer server.Close() + + config := map[string]interface{}{ + "module": "kubernetes", + "metricsets": []string{"state_statefulset"}, + "hosts": []string{server.URL}, + } + + f := mbtest.NewEventsFetcher(t, config) + + events, err := f.Fetch() + assert.NoError(t, err) + + assert.Equal(t, 3, len(events), "Wrong number of returned events") + + testCases := testCases() + for _, event := range events { + name, err := event.GetValue("name") + if err == nil { + namespace, err := event.GetValue("_module.namespace") + if err == nil { + eventKey := namespace.(string) + "@" + name.(string) + oneTestCase, oneTestCaseFound := testCases[eventKey] + if oneTestCaseFound { + for k, v := range oneTestCase { + testValue(t, event, k, v) + } + delete(testCases, eventKey) + } + } + } + } + + if len(testCases) > 0 { + t.Errorf("Test reference events not found: %v", testCases) + } +} + +func testValue(t *testing.T, event common.MapStr, field string, expected interface{}) { + data, err := event.GetValue(field) + assert.NoError(t, err, "Could not read field "+field) + assert.EqualValues(t, expected, data, "Wrong value for field "+field) +} + +func testCases() map[string]map[string]interface{} { + return map[string]map[string]interface{}{ + "default@elasticsearch": { + "_module.namespace": "default", + "name": "elasticsearch", + + "created": 1511973651, + "replicas.observed": 1, + "replicas.desired": 4, + "generation.observed": 1, + "generation.desired": 3, + }, + "default@mysql": { + "_module.namespace": "default", + "name": "mysql", + + "created": 1511989697, + "replicas.observed": 2, + "replicas.desired": 5, + "generation.observed": 2, + "generation.desired": 4, + }, + "custom@mysql": { + "_module.namespace": "custom", + "name": "mysql", + + "created": 1511999697, + "replicas.observed": 3, + "replicas.desired": 6, + "generation.observed": 3, + "generation.desired": 5, + }, + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go index 2be38d0e..d63ea771 100644 --- 
a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go @@ -22,9 +22,10 @@ var ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("kubernetes", "system", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kubernetes", "system", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -40,9 +41,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/metrics_cache.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/metrics_cache.go new file mode 100644 index 00000000..481ef9e9 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/metrics_cache.go @@ -0,0 +1,105 @@ +package util + +import ( + "sync" + "time" +) + +// PerfMetrics stores known metrics from Kubernetes nodes and containers +var PerfMetrics = NewPerfMetricsCache() + +const defaultTimeout = 120 * time.Second + +var now = time.Now +var sleep = time.Sleep + +// NewPerfMetricsCache initializes and returns a new PerfMetricsCache +func NewPerfMetricsCache() *PerfMetricsCache { + return &PerfMetricsCache{ + NodeMemAllocatable: newValueMap(defaultTimeout), + NodeCoresAllocatable: newValueMap(defaultTimeout), + + ContainerMemLimit: newValueMap(defaultTimeout), + ContainerCoresLimit: newValueMap(defaultTimeout), + } +} + +// PerfMetricsCache stores known metrics from Kubernetes nodes and containers +type PerfMetricsCache struct { + mutex sync.RWMutex + NodeMemAllocatable *valueMap + NodeCoresAllocatable *valueMap + + ContainerMemLimit *valueMap + ContainerCoresLimit *valueMap +} + +func newValueMap(timeout time.Duration) *valueMap { + return &valueMap{ + values: map[string]value{}, + timeout: timeout, + } +} + +type valueMap struct { + sync.RWMutex + running bool + timeout time.Duration + values map[string]value +} + +type value struct { + value float64 + expires int64 +} + +// ContainerUID creates an unique ID for from namespace, pod name and container name +func ContainerUID(namespace, pod, container string) string { + return namespace + "-" + pod + "-" + container +} + +// Get value +func (m *valueMap) Get(name string) float64 { + m.RLock() + defer m.RUnlock() + return m.values[name].value +} + +// Get value +func (m *valueMap) GetWithDefault(name string, def float64) float64 { + m.RLock() + defer m.RUnlock() + val, ok := m.values[name] + if ok { + return val.value + } + return def +} + +// Set value +func (m *valueMap) Set(name string, val float64) { + m.Lock() + defer m.Unlock() + m.ensureCleanupWorker() + m.values[name] = value{val, now().Add(m.timeout).Unix()} +} + +func (m *valueMap) ensureCleanupWorker() { + if !m.running { + // Run worker to cleanup expired entries + m.running = true + go func() { + for { + sleep(m.timeout) + m.Lock() + now := now().Unix() + for name, val := range m.values { + if now > val.expires { + delete(m.values, name) 
+ } + } + m.Unlock() + } + }() + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/metrics_cache_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/metrics_cache_test.go new file mode 100644 index 00000000..488af7b8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/metrics_cache_test.go @@ -0,0 +1,69 @@ +package util + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestTimeout(t *testing.T) { + // Mocknotonic time: + fakeTimeCh := make(chan int64) + go func() { + fakeTime := time.Now().Unix() + for { + fakeTime++ + fakeTimeCh <- fakeTime + } + }() + + now = func() time.Time { + return time.Unix(<-fakeTimeCh, 0) + } + + // Blocking sleep: + sleepCh := make(chan struct{}) + sleep = func(time.Duration) { + <-sleepCh + } + + test := newValueMap(1 * time.Second) + + test.Set("foo", 3.14) + + // Let cleanup do its job + sleepCh <- struct{}{} + sleepCh <- struct{}{} + sleepCh <- struct{}{} + + // Check it expired + assert.Equal(t, 0.0, test.Get("foo")) +} + +func TestValueMap(t *testing.T) { + test := newValueMap(defaultTimeout) + + // no value + assert.Equal(t, 0.0, test.Get("foo")) + + // Set and test + test.Set("foo", 3.14) + assert.Equal(t, 3.14, test.Get("foo")) +} + +func TestGetWithDefault(t *testing.T) { + test := newValueMap(defaultTimeout) + + // Empty + default + assert.Equal(t, 0.0, test.Get("foo")) + assert.Equal(t, 3.14, test.GetWithDefault("foo", 3.14)) + + // Defined value + test.Set("foo", 38.2) + assert.Equal(t, 38.2, test.GetWithDefault("foo", 3.14)) +} + +func TestContainerUID(t *testing.T) { + assert.Equal(t, "a-b-c", ContainerUID("a", "b", "c")) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go index 42a3ca7b..1d0e1057 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go @@ -22,9 +22,10 @@ var ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("kubernetes", "volume", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("kubernetes", "volume", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -40,9 +41,13 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
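metrics_cache.go and its test above add a process-wide, expiring cache: state_node stores per-node allocatable CPU and memory, state_container stores per-container limits, and the pod metricset reads both back when computing the new pct fields. A sketch of that flow, not part of the patch, using only the exported API added above; the node and container names come from the test fixtures, while the 0.5-core limit is invented for illustration:

package main

import (
	"fmt"

	"github.com/elastic/beats/metricbeat/module/kubernetes/util"
)

// Producer/consumer flow around the shared PerfMetricsCache: in the real code
// the producers are the state_node and state_container eventMapping functions
// and the consumer is the pod metricset's eventMapping.
func main() {
	cache := util.NewPerfMetricsCache()

	// Producers:
	cache.NodeCoresAllocatable.Set("minikube", 2)
	cuid := util.ContainerUID("kube-system", "kube-dns-v20-5g5cb", "kubedns")
	cache.ContainerCoresLimit.Set(cuid, 0.5)

	// Consumer: fall back to the node total when a container has no limit.
	nodeCores := cache.NodeCoresAllocatable.Get("minikube")
	limited := cache.ContainerCoresLimit.GetWithDefault(cuid, nodeCores)
	unlimited := cache.ContainerCoresLimit.GetWithDefault(
		util.ContainerUID("kube-system", "kube-dns-v20-5g5cb", "healthz"), nodeCores)

	fmt.Println(nodeCores, limited, unlimited) // 2 0.5 2
}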
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/config.yml new file mode 100644 index 00000000..1e459638 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/config.yml @@ -0,0 +1,8 @@ +- module: kvm + metricsets: ["dommemstat"] + enabled: false + period: 10s + hosts: ["localhost"] + + # Timeout to connect to Libvirt server + #timeout: 1s diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/docs.asciidoc new file mode 100644 index 00000000..b4d11cbf --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/docs.asciidoc @@ -0,0 +1,4 @@ +== kvm module + +This is the kvm module. + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/fields.yml new file mode 100644 index 00000000..396dfdd9 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/_meta/fields.yml @@ -0,0 +1,11 @@ +- key: kvm + title: "kvm" + description: > + experimental[] + + kvm module + fields: + - name: kvm + type: group + description: > + fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/doc.go new file mode 100644 index 00000000..fb772efa --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/doc.go @@ -0,0 +1,2 @@ +// Package kvm is a Metricbeat module that contains MetricSets. +package kvm diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/data.json new file mode 100644 index 00000000..4779095e --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/data.json @@ -0,0 +1,19 @@ +{ + "@timestamp":"2016-05-23T08:05:34.853Z", + "beat":{ + "hostname":"beathost", + "name":"beathost" + }, + "metricset":{ + "host":"localhost", + "module":"kvm", + "name":"dommemstat", + "rtt":44269 + }, + "kvm":{ + "dommemstat":{ + "example": "dommemstat" + } + }, + "type":"metricsets" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/docs.asciidoc new file mode 100644 index 00000000..9b34db19 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/docs.asciidoc @@ -0,0 +1,3 @@ +=== kvm dommemstat MetricSet + +This is the dommemstat metricset of the module kvm. 
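The new kvm dommemstat metricset reads domain memory statistics from libvirtd through digitalocean/go-libvirt over a plain network connection. A condensed, hedged sketch of that flow follows; the real implementation is the Fetch method in dommemstat.go further down, and the unix socket path used here is an assumption rather than something the patch configures:

package main

import (
	"fmt"
	"log"
	"net"
	"time"

	"github.com/digitalocean/go-libvirt"
)

// Dial libvirt, list the defined domains and print their memory statistics.
// Error handling is flattened to log.Fatal; the metricset reports errors
// through mb.ReporterV2 instead.
func main() {
	// The socket path is an assumption; the metricset derives scheme and
	// address from the configured host URI.
	c, err := net.DialTimeout("unix", "/var/run/libvirt/libvirt-sock", time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	l := libvirt.New(c)
	if err := l.Connect(); err != nil {
		log.Fatal(err)
	}
	defer l.Disconnect()

	domains, err := l.Domains()
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range domains {
		stats, err := l.DomainMemoryStats(d, 11, 0) // 11 stats max, flags 0, as in the metricset
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range stats {
			fmt.Println(d.Name, s.Tag, s.Val)
		}
	}
}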
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/fields.yml new file mode 100644 index 00000000..7281ea70 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/_meta/fields.yml @@ -0,0 +1,26 @@ +- name: dommemstat + type: group + description: > + dommemstat + fields: + - name: stat + type: group + description: > + Memory stat + fields: + - name: name + type: keyword + description: > + Memory stat name + - name: value + type: long + description: > + Memory stat value + - name: id + type: long + description: > + Domain id + - name: name + type: keyword + description: > + Domain name diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/dommemstat.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/dommemstat.go new file mode 100644 index 00000000..750ed0df --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/dommemstat.go @@ -0,0 +1,158 @@ +package dommemstat + +import ( + "errors" + "net" + "net/url" + "time" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/metricbeat/mb" + + "github.com/digitalocean/go-libvirt" + "github.com/digitalocean/go-libvirt/libvirttest" +) + +const ( + // maximum number of memory stats to be collected + // limit is defined by REMOTE_DOMAIN_MEMORY_STATS_MAX + // based on https://github.com/libvirt/libvirt/blob/5bb07527c11a6123e044a5dfc48bdeccee144994/src/remote/remote_protocol.x#L136 + maximumStats = 11 + // flag VIR_DOMAIN_AFFECT_CURRENT passed to collect memory stats + // based on https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainModificationImpact + flags = 0 +) + +// init registers the MetricSet with the central registry as soon as the program +// starts. The New function will be called later to instantiate an instance of +// the MetricSet for each host defined in the module's configuration. After the +// MetricSet has been created then Fetch will begin to be called periodically. +func init() { + mb.Registry.MustAddMetricSet("kvm", "dommemstat", New) +} + +// MetricSet holds any configuration or state information. It must implement +// the mb.MetricSet interface. And this is best achieved by embedding +// mb.BaseMetricSet because it implements all of the required mb.MetricSet +// interface methods except for Fetch. +type MetricSet struct { + mb.BaseMetricSet + Timeout time.Duration + HostURL *url.URL +} + +// New creates a new instance of the MetricSet. New is responsible for unpacking +// any MetricSet specific configuration options if there are any. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + cfgwarn.Experimental("The kvm dommemstat metricset is experimental.") + + u, err := url.Parse(base.HostData().URI) + if err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + Timeout: base.Module().Config().Timeout, + HostURL: u, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right +// format. It publishes the event which is then forwarded to the output. In case +// of an error set the Error field of mb.Event or simply call report.Error(). 
+func (m *MetricSet) Fetch(report mb.ReporterV2) { + + var ( + c net.Conn + err error + ) + + u := m.HostURL + + if u.Scheme == "test" { + // when running tests, a mock Libvirt server is used + c = libvirttest.New() + } else { + address := u.Host + if u.Host == "" { + address = u.Path + } + + c, err = net.DialTimeout(u.Scheme, address, m.Timeout) + if err != nil { + report.Error(err) + } + } + + defer c.Close() + + l := libvirt.New(c) + if err := l.Connect(); err != nil { + report.Error(err) + } + + domains, err := l.Domains() + if err != nil { + report.Error(err) + } + + for _, d := range domains { + gotDomainMemoryStats, err := l.DomainMemoryStats(d, maximumStats, flags) + if err != nil { + report.Error(err) + } + + if len(gotDomainMemoryStats) == 0 { + report.Error(errors.New("no domain memory stats found")) + } + + for i := range gotDomainMemoryStats { + report.Event(mb.Event{ + MetricSetFields: common.MapStr{ + "id": d.ID, + "name": d.Name, + "stat": common.MapStr{ + "name": getDomainMemoryStatName(gotDomainMemoryStats[i].Tag), + "value": gotDomainMemoryStats[i].Val, + }, + }, + }) + } + } + + if err := l.Disconnect(); err != nil { + report.Error(errors.New("failed to disconnect")) + } +} + +func getDomainMemoryStatName(tag int32) string { + // this is based on https://github.com/digitalocean/go-libvirt/blob/59d541f19311883ad82708651353009fb207d8a9/const.gen.go#L718 + switch tag { + case 0: + return "swapin" + case 1: + return "swapout" + case 2: + return "majorfault" + case 3: + return "minorfault" + case 4: + return "unused" + case 5: + return "available" + case 6: + return "actualballon" + case 7: + return "rss" + case 8: + return "usable" + case 9: + return "lastupdate" + case 10: + return "nr" + default: + return "unidentified" + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/dommemstat_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/dommemstat_test.go new file mode 100644 index 00000000..adddf57d --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/dommemstat/dommemstat_test.go @@ -0,0 +1,60 @@ +package dommemstat + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/digitalocean/go-libvirt/libvirttest" +) + +func TestFetchEventContents(t *testing.T) { + conn := libvirttest.New() + + f := mbtest.NewReportingMetricSetV2(t, getConfig(conn)) + + events, errs := mbtest.ReportingFetchV2(f) + if len(errs) > 0 { + t.Fatal(errs) + } + if len(events) == 0 { + t.Fatal("no events received") + } + + for _, e := range events { + if e.Error != nil { + t.Fatalf("received error: %+v", e.Error) + } + } + if len(events) == 0 { + t.Fatal("received no events") + } + + e := events[0] + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), e) + + statName, err := e.MetricSetFields.GetValue("stat.name") + if err == nil { + assert.EqualValues(t, statName.(string), "actualballon") + } else { + t.Errorf("error while getting value from event: %v", err) + } + + statValue, err := e.MetricSetFields.GetValue("stat.value") + if err == nil { + assert.EqualValues(t, statValue, uint64(1048576)) + } else { + t.Errorf("error while getting value from event: %v", err) + } +} + +func getConfig(conn *libvirttest.MockLibvirt) map[string]interface{} { + return map[string]interface{}{ + "module": "kvm", + "metricsets": []string{"dommemstat"}, + "hosts": []string{"test://" + conn.RemoteAddr().String() + ":123"}, + } +} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/LICENSE similarity index 84% rename from vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE rename to vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/LICENSE index 2a7cfd2b..0cc3543c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012-2013 Dave Collins +Copyright (c) 2012-2014 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -10,4 +10,4 @@ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/decode.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/decode.go new file mode 100644 index 00000000..494dae64 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/decode.go @@ -0,0 +1,873 @@ +/* + * Copyright (c) 2012-2014 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package xdr + +import ( + "fmt" + "io" + "math" + "reflect" + "time" +) + +var ( + errMaxSlice = "data exceeds max slice limit" + errIODecode = "%s while decoding %d bytes" +) + +/* +Unmarshal parses XDR-encoded data into the value pointed to by v reading from +reader r and returning the total number of bytes read. An addressable pointer +must be provided since Unmarshal needs to both store the result of the decode as +well as obtain target type information. Unmarhsal traverses v recursively and +automatically indirects pointers through arbitrary depth, allocating them as +necessary, to decode the data into the underlying value pointed to. 
+ +Unmarshal uses reflection to determine the type of the concrete value contained +by v and performs a mapping of underlying XDR types to Go types as follows: + + Go Type <- XDR Type + -------------------- + int8, int16, int32, int <- XDR Integer + uint8, uint16, uint32, uint <- XDR Unsigned Integer + int64 <- XDR Hyper Integer + uint64 <- XDR Unsigned Hyper Integer + bool <- XDR Boolean + float32 <- XDR Floating-Point + float64 <- XDR Double-Precision Floating-Point + string <- XDR String + byte <- XDR Integer + []byte <- XDR Variable-Length Opaque Data + [#]byte <- XDR Fixed-Length Opaque Data + [] <- XDR Variable-Length Array + [#] <- XDR Fixed-Length Array + struct <- XDR Structure + map <- XDR Variable-Length Array of two-element XDR Structures + time.Time <- XDR String encoded with RFC3339 nanosecond precision + +Notes and Limitations: + + * Automatic unmarshalling of variable and fixed-length arrays of uint8s + requires a special struct tag `xdropaque:"false"` since byte slices + and byte arrays are assumed to be opaque data and byte is a Go alias + for uint8 thus indistinguishable under reflection + * Cyclic data structures are not supported and will result in infinite + loops + +If any issues are encountered during the unmarshalling process, an +UnmarshalError is returned with a human readable description as well as +an ErrorCode value for further inspection from sophisticated callers. Some +potential issues are unsupported Go types, attempting to decode a value which is +too large to fit into a specified Go type, and exceeding max slice limitations. +*/ +func Unmarshal(r io.Reader, v interface{}) (int, error) { + d := Decoder{r: r} + return d.Decode(v) +} + +// UnmarshalLimited is identical to Unmarshal but it sets maxReadSize in order +// to cap reads. +func UnmarshalLimited(r io.Reader, v interface{}, maxSize uint) (int, error) { + d := Decoder{r: r, maxReadSize: maxSize} + return d.Decode(v) +} + +// A Decoder wraps an io.Reader that is expected to provide an XDR-encoded byte +// stream and provides several exposed methods to manually decode various XDR +// primitives without relying on reflection. The NewDecoder function can be +// used to get a new Decoder directly. +// +// Typically, Unmarshal should be used instead of manual decoding. A Decoder +// is exposed so it is possible to perform manual decoding should it be +// necessary in complex scenarios where automatic reflection-based decoding +// won't work. +type Decoder struct { + r io.Reader + + // maxReadSize is the default maximum bytes an element can contain. 0 + // is unlimited and provides backwards compatability. Setting it to a + // non-zero value caps reads. + maxReadSize uint +} + +// DecodeInt treats the next 4 bytes as an XDR encoded integer and returns the +// result as an int32 along with the number of bytes actually read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining. 
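A short usage sketch of the read cap described above. The payload bytes and the 1 MiB limit are arbitrary choices for illustration, and the upstream import path github.com/davecgh/go-xdr/xdr2 is shown rather than the vendored one:

package main

import (
	"bytes"
	"log"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	// XDR string "hi": 4-byte length (2), two data bytes, two pad bytes.
	payload := []byte{0x00, 0x00, 0x00, 0x02, 'h', 'i', 0x00, 0x00}

	var msg string
	// Refuse to allocate more than 1 MiB for any single element.
	n, err := xdr.UnmarshalLimited(bytes.NewReader(payload), &msg, 1<<20)
	if err != nil {
		log.Fatalf("decode failed after %d bytes: %v", n, err)
	}
	log.Printf("decoded %q from %d bytes", msg, n)
}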
+// +// Reference: +// RFC Section 4.1 - Integer +// 32-bit big-endian signed integer in range [-2147483648, 2147483647] +func (d *Decoder) DecodeInt() (int32, int, error) { + var buf [4]byte + n, err := io.ReadFull(d.r, buf[:]) + if err != nil { + msg := fmt.Sprintf(errIODecode, err.Error(), 4) + err := unmarshalError("DecodeInt", ErrIO, msg, buf[:n], err) + return 0, n, err + } + + rv := int32(buf[3]) | int32(buf[2])<<8 | + int32(buf[1])<<16 | int32(buf[0])<<24 + return rv, n, nil +} + +// DecodeUint treats the next 4 bytes as an XDR encoded unsigned integer and +// returns the result as a uint32 along with the number of bytes actually read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining. +// +// Reference: +// RFC Section 4.2 - Unsigned Integer +// 32-bit big-endian unsigned integer in range [0, 4294967295] +func (d *Decoder) DecodeUint() (uint32, int, error) { + var buf [4]byte + n, err := io.ReadFull(d.r, buf[:]) + if err != nil { + msg := fmt.Sprintf(errIODecode, err.Error(), 4) + err := unmarshalError("DecodeUint", ErrIO, msg, buf[:n], err) + return 0, n, err + } + + rv := uint32(buf[3]) | uint32(buf[2])<<8 | + uint32(buf[1])<<16 | uint32(buf[0])<<24 + return rv, n, nil +} + +// DecodeEnum treats the next 4 bytes as an XDR encoded enumeration value and +// returns the result as an int32 after verifying that the value is in the +// provided map of valid values. It also returns the number of bytes actually +// read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining or +// the parsed enumeration value is not one of the provided valid values. +// +// Reference: +// RFC Section 4.3 - Enumeration +// Represented as an XDR encoded signed integer +func (d *Decoder) DecodeEnum(validEnums map[int32]bool) (int32, int, error) { + val, n, err := d.DecodeInt() + if err != nil { + return 0, n, err + } + + if !validEnums[val] { + err := unmarshalError("DecodeEnum", ErrBadEnumValue, + "invalid enum", val, nil) + return 0, n, err + } + return val, n, nil +} + +// DecodeBool treats the next 4 bytes as an XDR encoded boolean value and +// returns the result as a bool along with the number of bytes actually read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining or +// the parsed value is not a 0 or 1. +// +// Reference: +// RFC Section 4.4 - Boolean +// Represented as an XDR encoded enumeration where 0 is false and 1 is true +func (d *Decoder) DecodeBool() (bool, int, error) { + val, n, err := d.DecodeInt() + if err != nil { + return false, n, err + } + switch val { + case 0: + return false, n, nil + case 1: + return true, n, nil + } + + err = unmarshalError("DecodeBool", ErrBadEnumValue, "bool not 0 or 1", + val, nil) + return false, n, err +} + +// DecodeHyper treats the next 8 bytes as an XDR encoded hyper value and +// returns the result as an int64 along with the number of bytes actually read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining. 
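A minimal sketch of manual decoding with the primitives above, using a decoder over a fixed byte buffer (the bytes are chosen for illustration):

package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	// An XDR integer 1 followed by XDR TRUE (also encoded as 1).
	buf := bytes.NewReader([]byte{0, 0, 0, 1, 0, 0, 0, 1})
	dec := xdr.NewDecoder(buf)

	i, _, err := dec.DecodeInt()
	if err != nil {
		panic(err)
	}
	b, _, err := dec.DecodeBool()
	if err != nil {
		panic(err)
	}
	fmt.Println(i, b) // 1 true
}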
+// +// Reference: +// RFC Section 4.5 - Hyper Integer +// 64-bit big-endian signed integer in range [-9223372036854775808, 9223372036854775807] +func (d *Decoder) DecodeHyper() (int64, int, error) { + var buf [8]byte + n, err := io.ReadFull(d.r, buf[:]) + if err != nil { + msg := fmt.Sprintf(errIODecode, err.Error(), 8) + err := unmarshalError("DecodeHyper", ErrIO, msg, buf[:n], err) + return 0, n, err + } + + rv := int64(buf[7]) | int64(buf[6])<<8 | + int64(buf[5])<<16 | int64(buf[4])<<24 | + int64(buf[3])<<32 | int64(buf[2])<<40 | + int64(buf[1])<<48 | int64(buf[0])<<56 + return rv, n, err +} + +// DecodeUhyper treats the next 8 bytes as an XDR encoded unsigned hyper value +// and returns the result as a uint64 along with the number of bytes actually +// read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining. +// +// Reference: +// RFC Section 4.5 - Unsigned Hyper Integer +// 64-bit big-endian unsigned integer in range [0, 18446744073709551615] +func (d *Decoder) DecodeUhyper() (uint64, int, error) { + var buf [8]byte + n, err := io.ReadFull(d.r, buf[:]) + if err != nil { + msg := fmt.Sprintf(errIODecode, err.Error(), 8) + err := unmarshalError("DecodeUhyper", ErrIO, msg, buf[:n], err) + return 0, n, err + } + + rv := uint64(buf[7]) | uint64(buf[6])<<8 | + uint64(buf[5])<<16 | uint64(buf[4])<<24 | + uint64(buf[3])<<32 | uint64(buf[2])<<40 | + uint64(buf[1])<<48 | uint64(buf[0])<<56 + return rv, n, nil +} + +// DecodeFloat treats the next 4 bytes as an XDR encoded floating point and +// returns the result as a float32 along with the number of bytes actually read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining. +// +// Reference: +// RFC Section 4.6 - Floating Point +// 32-bit single-precision IEEE 754 floating point +func (d *Decoder) DecodeFloat() (float32, int, error) { + var buf [4]byte + n, err := io.ReadFull(d.r, buf[:]) + if err != nil { + msg := fmt.Sprintf(errIODecode, err.Error(), 4) + err := unmarshalError("DecodeFloat", ErrIO, msg, buf[:n], err) + return 0, n, err + } + + val := uint32(buf[3]) | uint32(buf[2])<<8 | + uint32(buf[1])<<16 | uint32(buf[0])<<24 + return math.Float32frombits(val), n, nil +} + +// DecodeDouble treats the next 8 bytes as an XDR encoded double-precision +// floating point and returns the result as a float64 along with the number of +// bytes actually read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining. +// +// Reference: +// RFC Section 4.7 - Double-Precision Floating Point +// 64-bit double-precision IEEE 754 floating point +func (d *Decoder) DecodeDouble() (float64, int, error) { + var buf [8]byte + n, err := io.ReadFull(d.r, buf[:]) + if err != nil { + msg := fmt.Sprintf(errIODecode, err.Error(), 8) + err := unmarshalError("DecodeDouble", ErrIO, msg, buf[:n], err) + return 0, n, err + } + + val := uint64(buf[7]) | uint64(buf[6])<<8 | + uint64(buf[5])<<16 | uint64(buf[4])<<24 | + uint64(buf[3])<<32 | uint64(buf[2])<<40 | + uint64(buf[1])<<48 | uint64(buf[0])<<56 + return math.Float64frombits(val), n, nil +} + +// RFC Section 4.8 - Quadruple-Precision Floating Point +// 128-bit quadruple-precision floating point +// Not Implemented + +// DecodeFixedOpaque treats the next 'size' bytes as XDR encoded opaque data and +// returns the result as a byte slice along with the number of bytes actually +// read. 
+// +// An UnmarshalError is returned if there are insufficient bytes remaining to +// satisfy the passed size, including the necessary padding to make it a +// multiple of 4. +// +// Reference: +// RFC Section 4.9 - Fixed-Length Opaque Data +// Fixed-length uninterpreted data zero-padded to a multiple of four +func (d *Decoder) DecodeFixedOpaque(size int32) ([]byte, int, error) { + // Nothing to do if size is 0. + if size == 0 { + return nil, 0, nil + } + + pad := (4 - (size % 4)) % 4 + paddedSize := size + pad + if uint(paddedSize) > uint(math.MaxInt32) { + err := unmarshalError("DecodeFixedOpaque", ErrOverflow, + errMaxSlice, paddedSize, nil) + return nil, 0, err + } + + buf := make([]byte, paddedSize) + n, err := io.ReadFull(d.r, buf) + if err != nil { + msg := fmt.Sprintf(errIODecode, err.Error(), paddedSize) + err := unmarshalError("DecodeFixedOpaque", ErrIO, msg, buf[:n], + err) + return nil, n, err + } + return buf[0:size], n, nil +} + +// DecodeOpaque treats the next bytes as variable length XDR encoded opaque +// data and returns the result as a byte slice along with the number of bytes +// actually read. +// +// An UnmarshalError is returned if there are insufficient bytes remaining or +// the opaque data is larger than the max length of a Go slice. +// +// Reference: +// RFC Section 4.10 - Variable-Length Opaque Data +// Unsigned integer length followed by fixed opaque data of that length +func (d *Decoder) DecodeOpaque() ([]byte, int, error) { + dataLen, n, err := d.DecodeUint() + if err != nil { + return nil, n, err + } + if uint(dataLen) > uint(math.MaxInt32) || + (d.maxReadSize != 0 && uint(dataLen) > d.maxReadSize) { + err := unmarshalError("DecodeOpaque", ErrOverflow, errMaxSlice, + dataLen, nil) + return nil, n, err + } + + rv, n2, err := d.DecodeFixedOpaque(int32(dataLen)) + n += n2 + if err != nil { + return nil, n, err + } + return rv, n, nil +} + +// DecodeString treats the next bytes as a variable length XDR encoded string +// and returns the result as a string along with the number of bytes actually +// read. Character encoding is assumed to be UTF-8 and therefore ASCII +// compatible. If the underlying character encoding is not compatibile with +// this assumption, the data can instead be read as variable-length opaque data +// (DecodeOpaque) and manually converted as needed. +// +// An UnmarshalError is returned if there are insufficient bytes remaining or +// the string data is larger than the max length of a Go slice. +// +// Reference: +// RFC Section 4.11 - String +// Unsigned integer length followed by bytes zero-padded to a multiple of +// four +func (d *Decoder) DecodeString() (string, int, error) { + dataLen, n, err := d.DecodeUint() + if err != nil { + return "", n, err + } + if uint(dataLen) > uint(math.MaxInt32) || + (d.maxReadSize != 0 && uint(dataLen) > d.maxReadSize) { + err = unmarshalError("DecodeString", ErrOverflow, errMaxSlice, + dataLen, nil) + return "", n, err + } + + opaque, n2, err := d.DecodeFixedOpaque(int32(dataLen)) + n += n2 + if err != nil { + return "", n, err + } + return string(opaque), n, nil +} + +// decodeFixedArray treats the next bytes as a series of XDR encoded elements +// of the same type as the array represented by the reflection value and decodes +// each element into the passed array. The ignoreOpaque flag controls whether +// or not uint8 (byte) elements should be decoded individually or as a fixed +// sequence of opaque data. It returns the the number of bytes actually read. 
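To make the length-plus-padding layout concrete, a small sketch that decodes three bytes of variable-length opaque data (the byte values are arbitrary):

package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	// Length 3, then 3 data bytes and 1 pad byte to reach a multiple of
	// four: 4 + 3 + 1 = 8 bytes on the wire.
	wire := []byte{0x00, 0x00, 0x00, 0x03, 0xDE, 0xAD, 0xBE, 0x00}

	dec := xdr.NewDecoder(bytes.NewReader(wire))
	data, n, err := dec.DecodeOpaque()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% X (consumed %d bytes)\n", data, n) // DE AD BE (consumed 8 bytes)
}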
+// +// An UnmarshalError is returned if any issues are encountered while decoding +// the array elements. +// +// Reference: +// RFC Section 4.12 - Fixed-Length Array +// Individually XDR encoded array elements +func (d *Decoder) decodeFixedArray(v reflect.Value, ignoreOpaque bool) (int, error) { + // Treat [#]byte (byte is alias for uint8) as opaque data unless + // ignored. + if !ignoreOpaque && v.Type().Elem().Kind() == reflect.Uint8 { + data, n, err := d.DecodeFixedOpaque(int32(v.Len())) + if err != nil { + return n, err + } + reflect.Copy(v, reflect.ValueOf(data)) + return n, nil + } + + // Decode each array element. + var n int + for i := 0; i < v.Len(); i++ { + n2, err := d.decode(v.Index(i)) + n += n2 + if err != nil { + return n, err + } + } + return n, nil +} + +// decodeArray treats the next bytes as a variable length series of XDR encoded +// elements of the same type as the array represented by the reflection value. +// The number of elements is obtained by first decoding the unsigned integer +// element count. Then each element is decoded into the passed array. The +// ignoreOpaque flag controls whether or not uint8 (byte) elements should be +// decoded individually or as a variable sequence of opaque data. It returns +// the number of bytes actually read. +// +// An UnmarshalError is returned if any issues are encountered while decoding +// the array elements. +// +// Reference: +// RFC Section 4.13 - Variable-Length Array +// Unsigned integer length followed by individually XDR encoded array +// elements +func (d *Decoder) decodeArray(v reflect.Value, ignoreOpaque bool) (int, error) { + dataLen, n, err := d.DecodeUint() + if err != nil { + return n, err + } + if uint(dataLen) > uint(math.MaxInt32) || + (d.maxReadSize != 0 && uint(dataLen) > d.maxReadSize) { + err := unmarshalError("decodeArray", ErrOverflow, errMaxSlice, + dataLen, nil) + return n, err + } + + // Allocate storage for the slice elements (the underlying array) if + // existing slice does not have enough capacity. + sliceLen := int(dataLen) + if v.Cap() < sliceLen { + v.Set(reflect.MakeSlice(v.Type(), sliceLen, sliceLen)) + } + if v.Len() < sliceLen { + v.SetLen(sliceLen) + } + + // Treat []byte (byte is alias for uint8) as opaque data unless ignored. + if !ignoreOpaque && v.Type().Elem().Kind() == reflect.Uint8 { + data, n2, err := d.DecodeFixedOpaque(int32(sliceLen)) + n += n2 + if err != nil { + return n, err + } + v.SetBytes(data) + return n, nil + } + + // Decode each slice element. + for i := 0; i < sliceLen; i++ { + n2, err := d.decode(v.Index(i)) + n += n2 + if err != nil { + return n, err + } + } + return n, nil +} + +// decodeStruct treats the next bytes as a series of XDR encoded elements +// of the same type as the exported fields of the struct represented by the +// passed reflection value. Pointers are automatically indirected and +// allocated as necessary. It returns the the number of bytes actually read. +// +// An UnmarshalError is returned if any issues are encountered while decoding +// the elements. +// +// Reference: +// RFC Section 4.14 - Structure +// XDR encoded elements in the order of their declaration in the struct +func (d *Decoder) decodeStruct(v reflect.Value) (int, error) { + var n int + vt := v.Type() + for i := 0; i < v.NumField(); i++ { + // Skip unexported fields. + vtf := vt.Field(i) + if vtf.PkgPath != "" { + continue + } + + // Indirect through pointers allocating them as needed and + // ensure the field is settable. 
+ vf := v.Field(i) + vf, err := d.indirect(vf) + if err != nil { + return n, err + } + if !vf.CanSet() { + msg := fmt.Sprintf("can't decode to unsettable '%v'", + vf.Type().String()) + err := unmarshalError("decodeStruct", ErrNotSettable, + msg, nil, nil) + return n, err + } + + // Handle non-opaque data to []uint8 and [#]uint8 based on + // struct tag. + tag := vtf.Tag.Get("xdropaque") + if tag == "false" { + switch vf.Kind() { + case reflect.Slice: + n2, err := d.decodeArray(vf, true) + n += n2 + if err != nil { + return n, err + } + continue + + case reflect.Array: + n2, err := d.decodeFixedArray(vf, true) + n += n2 + if err != nil { + return n, err + } + continue + } + } + + // Decode each struct field. + n2, err := d.decode(vf) + n += n2 + if err != nil { + return n, err + } + } + + return n, nil +} + +// RFC Section 4.15 - Discriminated Union +// RFC Section 4.16 - Void +// RFC Section 4.17 - Constant +// RFC Section 4.18 - Typedef +// RFC Section 4.19 - Optional data +// RFC Sections 4.15 though 4.19 only apply to the data specification language +// which is not implemented by this package. In the case of discriminated +// unions, struct tags are used to perform a similar function. + +// decodeMap treats the next bytes as an XDR encoded variable array of 2-element +// structures whose fields are of the same type as the map keys and elements +// represented by the passed reflection value. Pointers are automatically +// indirected and allocated as necessary. It returns the the number of bytes +// actually read. +// +// An UnmarshalError is returned if any issues are encountered while decoding +// the elements. +func (d *Decoder) decodeMap(v reflect.Value) (int, error) { + dataLen, n, err := d.DecodeUint() + if err != nil { + return n, err + } + + // Allocate storage for the underlying map if needed. + vt := v.Type() + if v.IsNil() { + v.Set(reflect.MakeMap(vt)) + } + + // Decode each key and value according to their type. + keyType := vt.Key() + elemType := vt.Elem() + for i := uint32(0); i < dataLen; i++ { + key := reflect.New(keyType).Elem() + n2, err := d.decode(key) + n += n2 + if err != nil { + return n, err + } + + val := reflect.New(elemType).Elem() + n2, err = d.decode(val) + n += n2 + if err != nil { + return n, err + } + v.SetMapIndex(key, val) + } + return n, nil +} + +// decodeInterface examines the interface represented by the passed reflection +// value to detect whether it is an interface that can be decoded into and +// if it is, extracts the underlying value to pass back into the decode function +// for decoding according to its type. It returns the the number of bytes +// actually read. +// +// An UnmarshalError is returned if any issues are encountered while decoding +// the interface. +func (d *Decoder) decodeInterface(v reflect.Value) (int, error) { + if v.IsNil() || !v.CanInterface() { + msg := fmt.Sprintf("can't decode to nil interface") + err := unmarshalError("decodeInterface", ErrNilInterface, msg, + nil, nil) + return 0, err + } + + // Extract underlying value from the interface and indirect through + // pointers allocating them as needed. + ve := reflect.ValueOf(v.Interface()) + ve, err := d.indirect(ve) + if err != nil { + return 0, err + } + if !ve.CanSet() { + msg := fmt.Sprintf("can't decode to unsettable '%v'", + ve.Type().String()) + err := unmarshalError("decodeInterface", ErrNotSettable, msg, + nil, nil) + return 0, err + } + return d.decode(ve) +} + +// decode is the main workhorse for unmarshalling via reflection. 
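A sketch of the `xdropaque:"false"` behaviour handled above. The Counters type and its wire bytes are assumptions made up for the example; they show a []uint8 field decoded as individual XDR unsigned integers instead of opaque data:

package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

// Counters is decoded as a variable-length array of XDR unsigned integers
// because of the xdropaque:"false" tag; without the tag the same []uint8
// field would be treated as variable-length opaque data.
type Counters struct {
	Values []uint8 `xdropaque:"false"`
}

func main() {
	// Element count 2, then the XDR unsigned integers 7 and 9.
	wire := []byte{
		0x00, 0x00, 0x00, 0x02,
		0x00, 0x00, 0x00, 0x07,
		0x00, 0x00, 0x00, 0x09,
	}

	var c Counters
	if _, err := xdr.Unmarshal(bytes.NewReader(wire), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Values) // [7 9]
}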
It uses +// the passed reflection value to choose the XDR primitives to decode from +// the encapsulated reader. It is a recursive function, +// so cyclic data structures are not supported and will result in an infinite +// loop. It returns the the number of bytes actually read. +func (d *Decoder) decode(v reflect.Value) (int, error) { + if !v.IsValid() { + msg := fmt.Sprintf("type '%s' is not valid", v.Kind().String()) + err := unmarshalError("decode", ErrUnsupportedType, msg, nil, nil) + return 0, err + } + + // Indirect through pointers allocating them as needed. + ve, err := d.indirect(v) + if err != nil { + return 0, err + } + + // Handle time.Time values by decoding them as an RFC3339 formatted + // string with nanosecond precision. Check the type string rather + // than doing a full blown conversion to interface and type assertion + // since checking a string is much quicker. + if ve.Type().String() == "time.Time" { + // Read the value as a string and parse it. + timeString, n, err := d.DecodeString() + if err != nil { + return n, err + } + ttv, err := time.Parse(time.RFC3339, timeString) + if err != nil { + err := unmarshalError("decode", ErrParseTime, + err.Error(), timeString, err) + return n, err + } + ve.Set(reflect.ValueOf(ttv)) + return n, nil + } + + // Handle native Go types. + switch ve.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int: + i, n, err := d.DecodeInt() + if err != nil { + return n, err + } + if ve.OverflowInt(int64(i)) { + msg := fmt.Sprintf("signed integer too large to fit '%s'", + ve.Kind().String()) + err = unmarshalError("decode", ErrOverflow, msg, i, nil) + return n, err + } + ve.SetInt(int64(i)) + return n, nil + + case reflect.Int64: + i, n, err := d.DecodeHyper() + if err != nil { + return n, err + } + ve.SetInt(i) + return n, nil + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint: + ui, n, err := d.DecodeUint() + if err != nil { + return n, err + } + if ve.OverflowUint(uint64(ui)) { + msg := fmt.Sprintf("unsigned integer too large to fit '%s'", + ve.Kind().String()) + err = unmarshalError("decode", ErrOverflow, msg, ui, nil) + return n, err + } + ve.SetUint(uint64(ui)) + return n, nil + + case reflect.Uint64: + ui, n, err := d.DecodeUhyper() + if err != nil { + return n, err + } + ve.SetUint(ui) + return n, nil + + case reflect.Bool: + b, n, err := d.DecodeBool() + if err != nil { + return n, err + } + ve.SetBool(b) + return n, nil + + case reflect.Float32: + f, n, err := d.DecodeFloat() + if err != nil { + return n, err + } + ve.SetFloat(float64(f)) + return n, nil + + case reflect.Float64: + f, n, err := d.DecodeDouble() + if err != nil { + return n, err + } + ve.SetFloat(f) + return n, nil + + case reflect.String: + s, n, err := d.DecodeString() + if err != nil { + return n, err + } + ve.SetString(s) + return n, nil + + case reflect.Array: + n, err := d.decodeFixedArray(ve, false) + if err != nil { + return n, err + } + return n, nil + + case reflect.Slice: + n, err := d.decodeArray(ve, false) + if err != nil { + return n, err + } + return n, nil + + case reflect.Struct: + n, err := d.decodeStruct(ve) + if err != nil { + return n, err + } + return n, nil + + case reflect.Map: + n, err := d.decodeMap(ve) + if err != nil { + return n, err + } + return n, nil + + case reflect.Interface: + n, err := d.decodeInterface(ve) + if err != nil { + return n, err + } + return n, nil + } + + // The only unhandled types left are unsupported. 
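Because decode special-cases time.Time in the branch above (encoded as an RFC3339 string with nanosecond precision), a round trip through Marshal and Unmarshal behaves roughly as in this sketch (the timestamp is an arbitrary example value):

package main

import (
	"bytes"
	"fmt"
	"time"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	in := time.Date(2018, time.March, 1, 12, 0, 0, 500, time.UTC)

	var buf bytes.Buffer
	// Encoded as the RFC3339Nano string "2018-03-01T12:00:00.0000005Z".
	if _, err := xdr.Marshal(&buf, in); err != nil {
		panic(err)
	}

	var out time.Time
	if _, err := xdr.Unmarshal(&buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Equal(in)) // true
}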
At the time of this + // writing the only remaining unsupported types that exist are + // reflect.Uintptr and reflect.UnsafePointer. + msg := fmt.Sprintf("unsupported Go type '%s'", ve.Kind().String()) + err = unmarshalError("decode", ErrUnsupportedType, msg, nil, nil) + return 0, err +} + +// indirect dereferences pointers allocating them as needed until it reaches +// a non-pointer. This allows transparent decoding through arbitrary levels +// of indirection. +func (d *Decoder) indirect(v reflect.Value) (reflect.Value, error) { + rv := v + for rv.Kind() == reflect.Ptr { + // Allocate pointer if needed. + isNil := rv.IsNil() + if isNil && !rv.CanSet() { + msg := fmt.Sprintf("unable to allocate pointer for '%v'", + rv.Type().String()) + err := unmarshalError("indirect", ErrNotSettable, msg, + nil, nil) + return rv, err + } + if isNil { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + return rv, nil +} + +// Decode operates identically to the Unmarshal function with the exception of +// using the reader associated with the Decoder as the source of XDR-encoded +// data instead of a user-supplied reader. See the Unmarhsal documentation for +// specifics. +func (d *Decoder) Decode(v interface{}) (int, error) { + if v == nil { + msg := "can't unmarshal to nil interface" + return 0, unmarshalError("Unmarshal", ErrNilInterface, msg, nil, + nil) + } + + vv := reflect.ValueOf(v) + if vv.Kind() != reflect.Ptr { + msg := fmt.Sprintf("can't unmarshal to non-pointer '%v' - use "+ + "& operator", vv.Type().String()) + err := unmarshalError("Unmarshal", ErrBadArguments, msg, nil, nil) + return 0, err + } + if vv.IsNil() && !vv.CanSet() { + msg := fmt.Sprintf("can't unmarshal to unsettable '%v' - use "+ + "& operator", vv.Type().String()) + err := unmarshalError("Unmarshal", ErrNotSettable, msg, nil, nil) + return 0, err + } + + return d.decode(vv) +} + +// NewDecoder returns a Decoder that can be used to manually decode XDR data +// from a provided reader. Typically, Unmarshal should be used instead of +// manually creating a Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// NewDecoderLimited is identical to NewDecoder but it sets maxReadSize in +// order to cap reads. +func NewDecoderLimited(r io.Reader, maxSize uint) *Decoder { + return &Decoder{r: r, maxReadSize: maxSize} +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/doc.go new file mode 100644 index 00000000..8823d62f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/doc.go @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2012-2014 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* +Package xdr implements the data representation portion of the External Data +Representation (XDR) standard protocol as specified in RFC 4506 (obsoletes +RFC 1832 and RFC 1014). + +The XDR RFC defines both a data specification language and a data +representation standard. This package implements methods to encode and decode +XDR data per the data representation standard with the exception of 128-bit +quadruple-precision floating points. It does not currently implement parsing of +the data specification language. In other words, the ability to automatically +generate Go code by parsing an XDR data specification file (typically .x +extension) is not supported. In practice, this limitation of the package is +fairly minor since it is largely unnecessary due to the reflection capabilities +of Go as described below. + +This package provides two approaches for encoding and decoding XDR data: + + 1) Marshal/Unmarshal functions which automatically map between XDR and Go types + 2) Individual Encoder/Decoder objects to manually work with XDR primitives + +For the Marshal/Unmarshal functions, Go reflection capabilities are used to +choose the type of the underlying XDR data based upon the Go type to encode or +the target Go type to decode into. A description of how each type is mapped is +provided below, however one important type worth reviewing is Go structs. In +the case of structs, each exported field (first letter capitalized) is reflected +and mapped in order. As a result, this means a Go struct with exported fields +of the appropriate types listed in the expected order can be used to +automatically encode / decode the XDR data thereby eliminating the need to write +a lot of boilerplate code to encode/decode and error check each piece of XDR +data as is typically required with C based XDR libraries. + +Go Type to XDR Type Mappings + +The following chart shows an overview of how Go types are mapped to XDR types +for automatic marshalling and unmarshalling. The documentation for the Marshal +and Unmarshal functions has specific details of how the mapping proceeds. 
+ + Go Type <-> XDR Type + -------------------- + int8, int16, int32, int <-> XDR Integer + uint8, uint16, uint32, uint <-> XDR Unsigned Integer + int64 <-> XDR Hyper Integer + uint64 <-> XDR Unsigned Hyper Integer + bool <-> XDR Boolean + float32 <-> XDR Floating-Point + float64 <-> XDR Double-Precision Floating-Point + string <-> XDR String + byte <-> XDR Integer + []byte <-> XDR Variable-Length Opaque Data + [#]byte <-> XDR Fixed-Length Opaque Data + [] <-> XDR Variable-Length Array + [#] <-> XDR Fixed-Length Array + struct <-> XDR Structure + map <-> XDR Variable-Length Array of two-element XDR Structures + time.Time <-> XDR String encoded with RFC3339 nanosecond precision + +Notes and Limitations: + + * Automatic marshalling and unmarshalling of variable and fixed-length + arrays of uint8s require a special struct tag `xdropaque:"false"` + since byte slices and byte arrays are assumed to be opaque data and + byte is a Go alias for uint8 thus indistinguishable under reflection + * Channel, complex, and function types cannot be encoded + * Interfaces without a concrete value cannot be encoded + * Cyclic data structures are not supported and will result in infinite + loops + * Strings are marshalled and unmarshalled with UTF-8 character encoding + which differs from the XDR specification of ASCII, however UTF-8 is + backwards compatible with ASCII so this should rarely cause issues + + +Encoding + +To encode XDR data, use the Marshal function. + func Marshal(w io.Writer, v interface{}) (int, error) + +For example, given the following code snippet: + + type ImageHeader struct { + Signature [3]byte + Version uint32 + IsGrayscale bool + NumSections uint32 + } + h := ImageHeader{[3]byte{0xAB, 0xCD, 0xEF}, 2, true, 10} + + var w bytes.Buffer + bytesWritten, err := xdr.Marshal(&w, &h) + // Error check elided + +The result, encodedData, will then contain the following XDR encoded byte +sequence: + + 0xAB, 0xCD, 0xEF, 0x00, + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x0A + + +In addition, while the automatic marshalling discussed above will work for the +vast majority of cases, an Encoder object is provided that can be used to +manually encode XDR primitives for complex scenarios where automatic +reflection-based encoding won't work. The included examples provide a sample of +manual usage via an Encoder. + + +Decoding + +To decode XDR data, use the Unmarshal function. + func Unmarshal(r io.Reader, v interface{}) (int, error) + +For example, given the following code snippet: + + type ImageHeader struct { + Signature [3]byte + Version uint32 + IsGrayscale bool + NumSections uint32 + } + + // Using output from the Encoding section above. + encodedData := []byte{ + 0xAB, 0xCD, 0xEF, 0x00, + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x0A, + } + + var h ImageHeader + bytesRead, err := xdr.Unmarshal(bytes.NewReader(encodedData), &h) + // Error check elided + +The struct instance, h, will then contain the following values: + + h.Signature = [3]byte{0xAB, 0xCD, 0xEF} + h.Version = 2 + h.IsGrayscale = true + h.NumSections = 10 + +In addition, while the automatic unmarshalling discussed above will work for the +vast majority of cases, a Decoder object is provided that can be used to +manually decode XDR primitives for complex scenarios where automatic +reflection-based decoding won't work. The included examples provide a sample of +manual usage via a Decoder. + +Errors + +All errors are either of type UnmarshalError or MarshalError. 
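A fragment showing how the ErrorCode might be inspected after a failed call; it assumes err came from a failed xdr.Unmarshal and that the standard log package is imported:

// Distinguish transport problems from data problems after a failed decode.
if uerr, ok := err.(*xdr.UnmarshalError); ok {
	switch uerr.ErrorCode {
	case xdr.ErrIO:
		// The underlying reader failed; uerr.Err holds the io error.
		log.Printf("transport error: %v", uerr.Err)
	case xdr.ErrOverflow:
		log.Printf("value too large for target type: %v", uerr)
	default:
		log.Printf("decode error: %v", uerr)
	}
}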
Both provide +human-readable output as well as an ErrorCode field which can be inspected by +sophisticated callers if necessary. + +See the documentation of UnmarshalError, MarshalError, and ErrorCode for further +details. +*/ +package xdr diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/encode.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/encode.go new file mode 100644 index 00000000..7bac2681 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/encode.go @@ -0,0 +1,669 @@ +/* + * Copyright (c) 2012-2014 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package xdr + +import ( + "fmt" + "io" + "math" + "reflect" + "time" +) + +var errIOEncode = "%s while encoding %d bytes" + +/* +Marshal writes the XDR encoding of v to writer w and returns the number of bytes +written. It traverses v recursively and automatically indirects pointers +through arbitrary depth to encode the actual value pointed to. + +Marshal uses reflection to determine the type of the concrete value contained by +v and performs a mapping of Go types to the underlying XDR types as follows: + + Go Type -> XDR Type + -------------------- + int8, int16, int32, int -> XDR Integer + uint8, uint16, uint32, uint -> XDR Unsigned Integer + int64 -> XDR Hyper Integer + uint64 -> XDR Unsigned Hyper Integer + bool -> XDR Boolean + float32 -> XDR Floating-Point + float64 -> XDR Double-Precision Floating-Point + string -> XDR String + byte -> XDR Integer + []byte -> XDR Variable-Length Opaque Data + [#]byte -> XDR Fixed-Length Opaque Data + [] -> XDR Variable-Length Array + [#] -> XDR Fixed-Length Array + struct -> XDR Structure + map -> XDR Variable-Length Array of two-element XDR Structures + time.Time -> XDR String encoded with RFC3339 nanosecond precision + +Notes and Limitations: + + * Automatic marshalling of variable and fixed-length arrays of uint8s + requires a special struct tag `xdropaque:"false"` since byte slices and + byte arrays are assumed to be opaque data and byte is a Go alias for uint8 + thus indistinguishable under reflection + * Channel, complex, and function types cannot be encoded + * Interfaces without a concrete value cannot be encoded + * Cyclic data structures are not supported and will result in infinite loops + * Strings are marshalled with UTF-8 character encoding which differs from + the XDR specification of ASCII, however UTF-8 is backwards compatible with + ASCII so this should rarely cause issues + +If any issues are encountered during the marshalling process, a MarshalError is +returned with a human readable description as well as an ErrorCode value for +further inspection from sophisticated callers. 
Some potential issues are +unsupported Go types, attempting to encode more opaque data than can be +represented by a single opaque XDR entry, and exceeding max slice limitations. +*/ +func Marshal(w io.Writer, v interface{}) (int, error) { + enc := Encoder{w: w} + return enc.Encode(v) +} + +// An Encoder wraps an io.Writer that will receive the XDR encoded byte stream. +// See NewEncoder. +type Encoder struct { + w io.Writer +} + +// EncodeInt writes the XDR encoded representation of the passed 32-bit signed +// integer to the encapsulated writer and returns the number of bytes written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.1 - Integer +// 32-bit big-endian signed integer in range [-2147483648, 2147483647] +func (enc *Encoder) EncodeInt(v int32) (int, error) { + var b [4]byte + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) + + n, err := enc.w.Write(b[:]) + if err != nil { + msg := fmt.Sprintf(errIOEncode, err.Error(), 4) + err := marshalError("EncodeInt", ErrIO, msg, b[:n], err) + return n, err + } + + return n, nil +} + +// EncodeUint writes the XDR encoded representation of the passed 32-bit +// unsigned integer to the encapsulated writer and returns the number of bytes +// written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.2 - Unsigned Integer +// 32-bit big-endian unsigned integer in range [0, 4294967295] +func (enc *Encoder) EncodeUint(v uint32) (int, error) { + var b [4]byte + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) + + n, err := enc.w.Write(b[:]) + if err != nil { + msg := fmt.Sprintf(errIOEncode, err.Error(), 4) + err := marshalError("EncodeUint", ErrIO, msg, b[:n], err) + return n, err + } + + return n, nil +} + +// EncodeEnum treats the passed 32-bit signed integer as an enumeration value +// and, if it is in the list of passed valid enumeration values, writes the XDR +// encoded representation of it to the encapsulated writer. It returns the +// number of bytes written. +// +// A MarshalError is returned if the enumeration value is not one of the +// provided valid values or if writing the data fails. +// +// Reference: +// RFC Section 4.3 - Enumeration +// Represented as an XDR encoded signed integer +func (enc *Encoder) EncodeEnum(v int32, validEnums map[int32]bool) (int, error) { + if !validEnums[v] { + err := marshalError("EncodeEnum", ErrBadEnumValue, + "invalid enum", v, nil) + return 0, err + } + return enc.EncodeInt(v) +} + +// EncodeBool writes the XDR encoded representation of the passed boolean to the +// encapsulated writer and returns the number of bytes written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.4 - Boolean +// Represented as an XDR encoded enumeration where 0 is false and 1 is true +func (enc *Encoder) EncodeBool(v bool) (int, error) { + i := int32(0) + if v == true { + i = 1 + } + return enc.EncodeInt(i) +} + +// EncodeHyper writes the XDR encoded representation of the passed 64-bit +// signed integer to the encapsulated writer and returns the number of bytes +// written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. 
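A minimal sketch of manual encoding with the Encoder primitives above (values chosen only to show the big-endian layout):

package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	var buf bytes.Buffer
	enc := xdr.NewEncoder(&buf)

	// Manually emit an XDR integer followed by an XDR boolean.
	if _, err := enc.EncodeInt(-2); err != nil {
		panic(err)
	}
	if _, err := enc.EncodeBool(true); err != nil {
		panic(err)
	}
	fmt.Printf("% X\n", buf.Bytes()) // FF FF FF FE 00 00 00 01
}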
+// +// Reference: +// RFC Section 4.5 - Hyper Integer +// 64-bit big-endian signed integer in range [-9223372036854775808, 9223372036854775807] +func (enc *Encoder) EncodeHyper(v int64) (int, error) { + var b [8]byte + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) + + n, err := enc.w.Write(b[:]) + if err != nil { + msg := fmt.Sprintf(errIOEncode, err.Error(), 8) + err := marshalError("EncodeHyper", ErrIO, msg, b[:n], err) + return n, err + } + + return n, nil +} + +// EncodeUhyper writes the XDR encoded representation of the passed 64-bit +// unsigned integer to the encapsulated writer and returns the number of bytes +// written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.5 - Unsigned Hyper Integer +// 64-bit big-endian unsigned integer in range [0, 18446744073709551615] +func (enc *Encoder) EncodeUhyper(v uint64) (int, error) { + var b [8]byte + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) + + n, err := enc.w.Write(b[:]) + if err != nil { + msg := fmt.Sprintf(errIOEncode, err.Error(), 8) + err := marshalError("EncodeUhyper", ErrIO, msg, b[:n], err) + return n, err + } + + return n, nil +} + +// EncodeFloat writes the XDR encoded representation of the passed 32-bit +// (single-precision) floating point to the encapsulated writer and returns the +// number of bytes written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.6 - Floating Point +// 32-bit single-precision IEEE 754 floating point +func (enc *Encoder) EncodeFloat(v float32) (int, error) { + ui := math.Float32bits(v) + return enc.EncodeUint(ui) +} + +// EncodeDouble writes the XDR encoded representation of the passed 64-bit +// (double-precision) floating point to the encapsulated writer and returns the +// number of bytes written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.7 - Double-Precision Floating Point +// 64-bit double-precision IEEE 754 floating point +func (enc *Encoder) EncodeDouble(v float64) (int, error) { + ui := math.Float64bits(v) + return enc.EncodeUhyper(ui) +} + +// RFC Section 4.8 - Quadruple-Precision Floating Point +// 128-bit quadruple-precision floating point +// Not Implemented + +// EncodeFixedOpaque treats the passed byte slice as opaque data of a fixed +// size and writes the XDR encoded representation of it to the encapsulated +// writer. It returns the number of bytes written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.9 - Fixed-Length Opaque Data +// Fixed-length uninterpreted data zero-padded to a multiple of four +func (enc *Encoder) EncodeFixedOpaque(v []byte) (int, error) { + l := len(v) + pad := (4 - (l % 4)) % 4 + + // Write the actual bytes. + n, err := enc.w.Write(v) + if err != nil { + msg := fmt.Sprintf(errIOEncode, err.Error(), len(v)) + err := marshalError("EncodeFixedOpaque", ErrIO, msg, v[:n], err) + return n, err + } + + // Write any padding if needed. 
+ if pad > 0 { + b := make([]byte, pad) + n2, err := enc.w.Write(b) + n += n2 + if err != nil { + written := make([]byte, l+n2) + copy(written, v) + copy(written[l:], b[:n2]) + msg := fmt.Sprintf(errIOEncode, err.Error(), l+pad) + err := marshalError("EncodeFixedOpaque", ErrIO, msg, + written, err) + return n, err + } + } + + return n, nil +} + +// EncodeOpaque treats the passed byte slice as opaque data of a variable +// size and writes the XDR encoded representation of it to the encapsulated +// writer. It returns the number of bytes written. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.10 - Variable-Length Opaque Data +// Unsigned integer length followed by fixed opaque data of that length +func (enc *Encoder) EncodeOpaque(v []byte) (int, error) { + // Length of opaque data. + n, err := enc.EncodeUint(uint32(len(v))) + if err != nil { + return n, err + } + + n2, err := enc.EncodeFixedOpaque(v) + n += n2 + return n, err +} + +// EncodeString writes the XDR encoded representation of the passed string +// to the encapsulated writer and returns the number of bytes written. +// Character encoding is assumed to be UTF-8 and therefore ASCII compatible. If +// the underlying character encoding is not compatible with this assumption, the +// data can instead be written as variable-length opaque data (EncodeOpaque) and +// manually converted as needed. +// +// A MarshalError with an error code of ErrIO is returned if writing the data +// fails. +// +// Reference: +// RFC Section 4.11 - String +// Unsigned integer length followed by bytes zero-padded to a multiple of four +func (enc *Encoder) EncodeString(v string) (int, error) { + // Length of string. + n, err := enc.EncodeUint(uint32(len(v))) + if err != nil { + return n, err + } + + n2, err := enc.EncodeFixedOpaque([]byte(v)) + n += n2 + return n, err +} + +// encodeFixedArray writes the XDR encoded representation of each element +// in the passed array represented by the reflection value to the encapsulated +// writer and returns the number of bytes written. The ignoreOpaque flag +// controls whether or not uint8 (byte) elements should be encoded individually +// or as a fixed sequence of opaque data. +// +// A MarshalError is returned if any issues are encountered while encoding +// the array elements. +// +// Reference: +// RFC Section 4.12 - Fixed-Length Array +// Individually XDR encoded array elements +func (enc *Encoder) encodeFixedArray(v reflect.Value, ignoreOpaque bool) (int, error) { + // Treat [#]byte (byte is alias for uint8) as opaque data unless ignored. + if !ignoreOpaque && v.Type().Elem().Kind() == reflect.Uint8 { + // Create a slice of the underlying array for better efficiency + // when possible. Can't create a slice of an unaddressable + // value. + if v.CanAddr() { + return enc.EncodeFixedOpaque(v.Slice(0, v.Len()).Bytes()) + } + + // When the underlying array isn't addressable fall back to + // copying the array into a new slice. This is rather ugly, but + // the inability to create a constant slice from an + // unaddressable array is a limitation of Go. + slice := make([]byte, v.Len(), v.Len()) + reflect.Copy(reflect.ValueOf(slice), v) + return enc.EncodeFixedOpaque(slice) + } + + // Encode each array element. 
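For the padding rule described above, encoding a short string shows the resulting wire layout; the string "hi" is an arbitrary example:

package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	var buf bytes.Buffer
	enc := xdr.NewEncoder(&buf)

	// "hi" becomes a 4-byte length (2), the two data bytes, and two bytes
	// of zero padding: 8 bytes in total.
	if _, err := enc.EncodeString("hi"); err != nil {
		panic(err)
	}
	fmt.Printf("% X\n", buf.Bytes()) // 00 00 00 02 68 69 00 00
}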
+ var n int + for i := 0; i < v.Len(); i++ { + n2, err := enc.encode(v.Index(i)) + n += n2 + if err != nil { + return n, err + } + } + + return n, nil +} + +// encodeArray writes an XDR encoded integer representing the number of +// elements in the passed slice represented by the reflection value followed by +// the XDR encoded representation of each element in slice to the encapsulated +// writer and returns the number of bytes written. The ignoreOpaque flag +// controls whether or not uint8 (byte) elements should be encoded individually +// or as a variable sequence of opaque data. +// +// A MarshalError is returned if any issues are encountered while encoding +// the array elements. +// +// Reference: +// RFC Section 4.13 - Variable-Length Array +// Unsigned integer length followed by individually XDR encoded array elements +func (enc *Encoder) encodeArray(v reflect.Value, ignoreOpaque bool) (int, error) { + numItems := uint32(v.Len()) + n, err := enc.EncodeUint(numItems) + if err != nil { + return n, err + } + + n2, err := enc.encodeFixedArray(v, ignoreOpaque) + n += n2 + return n, err +} + +// encodeStruct writes an XDR encoded representation of each value in the +// exported fields of the struct represented by the passed reflection value to +// the encapsulated writer and returns the number of bytes written. Pointers +// are automatically indirected through arbitrary depth to encode the actual +// value pointed to. +// +// A MarshalError is returned if any issues are encountered while encoding +// the elements. +// +// Reference: +// RFC Section 4.14 - Structure +// XDR encoded elements in the order of their declaration in the struct +func (enc *Encoder) encodeStruct(v reflect.Value) (int, error) { + var n int + vt := v.Type() + for i := 0; i < v.NumField(); i++ { + // Skip unexported fields and indirect through pointers. + vtf := vt.Field(i) + if vtf.PkgPath != "" { + continue + } + vf := v.Field(i) + vf = enc.indirect(vf) + + // Handle non-opaque data to []uint8 and [#]uint8 based on struct tag. + tag := vtf.Tag.Get("xdropaque") + if tag == "false" { + switch vf.Kind() { + case reflect.Slice: + n2, err := enc.encodeArray(vf, true) + n += n2 + if err != nil { + return n, err + } + continue + + case reflect.Array: + n2, err := enc.encodeFixedArray(vf, true) + n += n2 + if err != nil { + return n, err + } + continue + } + } + + // Encode each struct field. + n2, err := enc.encode(vf) + n += n2 + if err != nil { + return n, err + } + } + + return n, nil +} + +// RFC Section 4.15 - Discriminated Union +// RFC Section 4.16 - Void +// RFC Section 4.17 - Constant +// RFC Section 4.18 - Typedef +// RFC Section 4.19 - Optional data +// RFC Sections 4.15 though 4.19 only apply to the data specification language +// which is not implemented by this package. In the case of discriminated +// unions, struct tags are used to perform a similar function. + +// encodeMap treats the map represented by the passed reflection value as a +// variable-length array of 2-element structures whose fields are of the same +// type as the map keys and elements and writes its XDR encoded representation +// to the encapsulated writer. It returns the number of bytes written. +// +// A MarshalError is returned if any issues are encountered while encoding +// the elements. +func (enc *Encoder) encodeMap(v reflect.Value) (int, error) { + // Number of elements. + n, err := enc.EncodeUint(uint32(v.Len())) + if err != nil { + return n, err + } + + // Encode each key and value according to their type. 
+ for _, key := range v.MapKeys() { + n2, err := enc.encode(key) + n += n2 + if err != nil { + return n, err + } + + n2, err = enc.encode(v.MapIndex(key)) + n += n2 + if err != nil { + return n, err + } + } + + return n, nil +} + +// encodeInterface examines the interface represented by the passed reflection +// value to detect whether it is an interface that can be encoded if it is, +// extracts the underlying value to pass back into the encode function for +// encoding according to its type. +// +// A MarshalError is returned if any issues are encountered while encoding +// the interface. +func (enc *Encoder) encodeInterface(v reflect.Value) (int, error) { + if v.IsNil() || !v.CanInterface() { + msg := fmt.Sprintf("can't encode nil interface") + err := marshalError("encodeInterface", ErrNilInterface, msg, + nil, nil) + return 0, err + } + + // Extract underlying value from the interface and indirect through pointers. + ve := reflect.ValueOf(v.Interface()) + ve = enc.indirect(ve) + return enc.encode(ve) +} + +// encode is the main workhorse for marshalling via reflection. It uses +// the passed reflection value to choose the XDR primitives to encode into +// the encapsulated writer and returns the number of bytes written. It is a +// recursive function, so cyclic data structures are not supported and will +// result in an infinite loop. +func (enc *Encoder) encode(v reflect.Value) (int, error) { + if !v.IsValid() { + msg := fmt.Sprintf("type '%s' is not valid", v.Kind().String()) + err := marshalError("encode", ErrUnsupportedType, msg, nil, nil) + return 0, err + } + + // Indirect through pointers to get at the concrete value. + ve := enc.indirect(v) + + // Handle time.Time values by encoding them as an RFC3339 formatted + // string with nanosecond precision. Check the type string before + // doing a full blown conversion to interface and type assertion since + // checking a string is much quicker. + if ve.Type().String() == "time.Time" && ve.CanInterface() { + viface := ve.Interface() + if tv, ok := viface.(time.Time); ok { + return enc.EncodeString(tv.Format(time.RFC3339Nano)) + } + } + + // Handle native Go types. + switch ve.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int: + return enc.EncodeInt(int32(ve.Int())) + + case reflect.Int64: + return enc.EncodeHyper(ve.Int()) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint: + return enc.EncodeUint(uint32(ve.Uint())) + + case reflect.Uint64: + return enc.EncodeUhyper(ve.Uint()) + + case reflect.Bool: + return enc.EncodeBool(ve.Bool()) + + case reflect.Float32: + return enc.EncodeFloat(float32(ve.Float())) + + case reflect.Float64: + return enc.EncodeDouble(ve.Float()) + + case reflect.String: + return enc.EncodeString(ve.String()) + + case reflect.Array: + return enc.encodeFixedArray(ve, false) + + case reflect.Slice: + return enc.encodeArray(ve, false) + + case reflect.Struct: + return enc.encodeStruct(ve) + + case reflect.Map: + return enc.encodeMap(ve) + + case reflect.Interface: + return enc.encodeInterface(ve) + } + + // The only unhandled types left are unsupported. At the time of this + // writing the only remaining unsupported types that exist are + // reflect.Uintptr and reflect.UnsafePointer. + msg := fmt.Sprintf("unsupported Go type '%s'", ve.Kind().String()) + err := marshalError("encode", ErrUnsupportedType, msg, nil, nil) + return 0, err +} + +// indirect dereferences pointers until it reaches a non-pointer. 
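A sketch of the map handling above from the caller's side: a Go map round-trips as a variable-length array of key/value pairs (the map contents are arbitrary example values):

package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	in := map[string]uint32{"cpu": 4, "mem": 2048}

	var buf bytes.Buffer
	// Written as an element count followed by one key/value pair per entry.
	if _, err := xdr.Marshal(&buf, in); err != nil {
		panic(err)
	}

	out := make(map[string]uint32)
	if _, err := xdr.Unmarshal(&buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["cpu"], out["mem"]) // 4 2048
}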
This allows +// transparent encoding through arbitrary levels of indirection. +func (enc *Encoder) indirect(v reflect.Value) reflect.Value { + rv := v + for rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + return rv +} + +// Encode operates identically to the Marshal function with the exception of +// using the writer associated with the Encoder for the destination of the +// XDR-encoded data instead of a user-supplied writer. See the Marshal +// documentation for specifics. +func (enc *Encoder) Encode(v interface{}) (int, error) { + if v == nil { + msg := "can't marshal nil interface" + err := marshalError("Marshal", ErrNilInterface, msg, nil, nil) + return 0, err + } + + vv := reflect.ValueOf(v) + vve := vv + for vve.Kind() == reflect.Ptr { + if vve.IsNil() { + msg := fmt.Sprintf("can't marshal nil pointer '%v'", + vv.Type().String()) + err := marshalError("Marshal", ErrBadArguments, msg, + nil, nil) + return 0, err + } + vve = vve.Elem() + } + + return enc.encode(vve) +} + +// NewEncoder returns an object that can be used to manually choose fields to +// XDR encode to the passed writer w. Typically, Marshal should be used instead +// of manually creating an Encoder. An Encoder, along with several of its +// methods to encode XDR primitives, is exposed so it is possible to perform +// manual encoding of data without relying on reflection should it be necessary +// in complex scenarios where automatic reflection-based encoding won't work. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/error.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/error.go new file mode 100644 index 00000000..42079ad3 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/error.go @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2012-2014 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package xdr + +import "fmt" + +// ErrorCode identifies a kind of error. +type ErrorCode int + +const ( + // ErrBadArguments indicates arguments passed to the function are not + // what was expected. + ErrBadArguments ErrorCode = iota + + // ErrUnsupportedType indicates the Go type is not a supported type for + // marshalling and unmarshalling XDR data. + ErrUnsupportedType + + // ErrBadEnumValue indicates an enumeration value is not in the list of + // valid values. + ErrBadEnumValue + + // ErrNotSettable indicates an interface value cannot be written to. + // This usually means the interface value was not passed with the & + // operator, but it can also happen if automatic pointer allocation + // fails. 
+	ErrNotSettable
+
+	// ErrOverflow indicates that the data in question is too large to fit
+	// into the corresponding Go or XDR data type. For example, an integer
+	// decoded from XDR that is too large to fit into a target type of int8,
+	// or opaque data that exceeds the max length of a Go slice.
+	ErrOverflow
+
+	// ErrNilInterface indicates an interface with no concrete type
+	// information was encountered. Type information is necessary to
+	// perform mapping between XDR and Go types.
+	ErrNilInterface
+
+	// ErrIO indicates an error was encountered while reading or writing to
+	// an io.Reader or io.Writer, respectively. The actual underlying error
+	// will be available via the Err field of the MarshalError or
+	// UnmarshalError struct.
+	ErrIO
+
+	// ErrParseTime indicates an error was encountered while parsing an
+	// RFC3339 formatted time value. The actual underlying error will be
+	// available via the Err field of the UnmarshalError struct.
+	ErrParseTime
+)
+
+// Map of ErrorCode values back to their constant names for pretty printing.
+var errorCodeStrings = map[ErrorCode]string{
+	ErrBadArguments:    "ErrBadArguments",
+	ErrUnsupportedType: "ErrUnsupportedType",
+	ErrBadEnumValue:    "ErrBadEnumValue",
+	ErrNotSettable:     "ErrNotSettable",
+	ErrOverflow:        "ErrOverflow",
+	ErrNilInterface:    "ErrNilInterface",
+	ErrIO:              "ErrIO",
+	ErrParseTime:       "ErrParseTime",
+}
+
+// String returns the ErrorCode as a human-readable name.
+func (e ErrorCode) String() string {
+	if s := errorCodeStrings[e]; s != "" {
+		return s
+	}
+	return fmt.Sprintf("Unknown ErrorCode (%d)", e)
+}
+
+// UnmarshalError describes a problem encountered while unmarshaling data.
+// Some potential issues are unsupported Go types, attempting to decode a value
+// which is too large to fit into a specified Go type, and exceeding max slice
+// limitations.
+type UnmarshalError struct {
+	ErrorCode   ErrorCode   // Describes the kind of error
+	Func        string      // Function name
+	Value       interface{} // Value actually parsed where appropriate
+	Description string      // Human readable description of the issue
+	Err         error       // The underlying error for IO errors
+}
+
+// Error satisfies the error interface and prints human-readable errors.
+func (e *UnmarshalError) Error() string {
+	switch e.ErrorCode {
+	case ErrBadEnumValue, ErrOverflow, ErrIO, ErrParseTime:
+		return fmt.Sprintf("xdr:%s: %s - read: '%v'", e.Func,
+			e.Description, e.Value)
+	}
+	return fmt.Sprintf("xdr:%s: %s", e.Func, e.Description)
+}
+
+// unmarshalError creates an error given a set of arguments and will copy byte
+// slices into the Value field since they might otherwise be changed from
+// the original value.
+func unmarshalError(f string, c ErrorCode, desc string, v interface{}, err error) *UnmarshalError {
+	e := &UnmarshalError{ErrorCode: c, Func: f, Description: desc, Err: err}
+	switch t := v.(type) {
+	case []byte:
+		slice := make([]byte, len(t))
+		copy(slice, t)
+		e.Value = slice
+	default:
+		e.Value = v
+	}
+
+	return e
+}
+
+// IsIO returns a boolean indicating whether the error is known to report that
+// the underlying reader or writer encountered an ErrIO.
+func IsIO(err error) bool {
+	switch e := err.(type) {
+	case *UnmarshalError:
+		return e.ErrorCode == ErrIO
+	case *MarshalError:
+		return e.ErrorCode == ErrIO
+	}
+	return false
+}
+
+// MarshalError describes a problem encountered while marshaling data.
+// Some potential issues are unsupported Go types, attempting to encode more
+// opaque data than can be represented by a single opaque XDR entry, and
+// exceeding max slice limitations.
+type MarshalError struct {
+	ErrorCode   ErrorCode   // Describes the kind of error
+	Func        string      // Function name
+	Value       interface{} // Value actually parsed where appropriate
+	Description string      // Human readable description of the issue
+	Err         error       // The underlying error for IO errors
+}
+
+// Error satisfies the error interface and prints human-readable errors.
+func (e *MarshalError) Error() string {
+	switch e.ErrorCode {
+	case ErrIO:
+		return fmt.Sprintf("xdr:%s: %s - wrote: '%v'", e.Func,
+			e.Description, e.Value)
+	case ErrBadEnumValue:
+		return fmt.Sprintf("xdr:%s: %s - value: '%v'", e.Func,
+			e.Description, e.Value)
+	}
+	return fmt.Sprintf("xdr:%s: %s", e.Func, e.Description)
+}
+
+// marshalError creates an error given a set of arguments and will copy byte
+// slices into the Value field since they might otherwise be changed from
+// the original value.
+func marshalError(f string, c ErrorCode, desc string, v interface{}, err error) *MarshalError {
+	e := &MarshalError{ErrorCode: c, Func: f, Description: desc, Err: err}
+	switch t := v.(type) {
+	case []byte:
+		slice := make([]byte, len(t))
+		copy(slice, t)
+		e.Value = slice
+	default:
+		e.Value = v
+	}
+
+	return e
+}
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/AUTHORS b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/AUTHORS
new file mode 100644
index 00000000..1db99ced
--- /dev/null
+++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/AUTHORS
@@ -0,0 +1,19 @@
+Maintainer
+----------
+DigitalOcean, Inc
+
+Original Authors
+----------------
+Ben LeMasurier
+Matt Layher
+
+Contributors
+------------
+Justin Kim
+Ricky Medina
+Charlie Drage
+Michael Koppmann
+Simarpreet Singh
+Alexander Polyakov
+Amanda Andrade
+Geoff Hickey
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/CONTRIBUTING.md b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/CONTRIBUTING.md
new file mode 100644
index 00000000..1b5b7410
--- /dev/null
+++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/CONTRIBUTING.md
@@ -0,0 +1,30 @@
+Contributing
+============
+
+The `go-libvirt` project makes use of the [GitHub Flow](https://guides.github.com/introduction/flow/)
+for contributions.
+
+If you'd like to contribute to the project, please
+[open an issue](https://github.com/digitalocean/go-libvirt/issues/new) or find an
+[existing issue](https://github.com/digitalocean/go-libvirt/issues) that you'd like
+to take on. This ensures that efforts are not duplicated, and that a new feature
+aligns with the focus of the rest of the repository.
+ +Once your suggestion has been submitted and discussed, please be sure that your +code meets the following criteria: + - code is completely `gofmt`'d + - new features or codepaths have appropriate test coverage + - `go test ./...` passes + - `go vet ./...` passes + - `golint ./...` returns no warnings, including documentation comment warnings + +In addition, if this is your first time contributing to the `go-libvirt` project, +add your name and email address to the +[AUTHORS](https://github.com/digitalocean/go-libvirt/blob/master/AUTHORS) file +under the "Contributors" section using the format: +`First Last `. + +Finally, submit a pull request for review! + +Questions? Feel free to join us in [`#go-qemu` on freenode](https://webchat.freenode.net/) +if you'd like to discuss the project. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/LICENSE.md b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/LICENSE.md new file mode 100644 index 00000000..f5f4b8b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/LICENSE.md @@ -0,0 +1,195 @@ +Apache License +============== + +_Version 2.0, January 2004_ +_<>_ + +### Terms and Conditions for use, reproduction, and distribution + +#### 1. Definitions + +“License” shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +“Licensor” shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +“Legal Entity” shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, “control” means **(i)** the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the +outstanding shares, or **(iii)** beneficial ownership of such entity. + +“You” (or “Your”) shall mean an individual or Legal Entity exercising +permissions granted by this License. + +“Source” form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +“Object” form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +“Work” shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +“Derivative Works” shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +“Contribution” shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +“submitted” means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as “Not a Contribution.” + +“Contributor” shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +#### 2. Grant of Copyright License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +#### 3. Grant of Patent License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +#### 4. 
Redistribution + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +* **(a)** You must give any other recipients of the Work or Derivative Works a copy of +this License; and +* **(b)** You must cause any modified files to carry prominent notices stating that You +changed the files; and +* **(c)** You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +#### 5. Submission of Contributions + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +#### 6. Trademarks + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +#### 7. Disclaimer of Warranty + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +#### 8. 
Limitation of Liability + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +#### 9. Accepting Warranty or Additional Liability + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +_END OF TERMS AND CONDITIONS_ + +### APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets `[]` replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same “printed page” as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/README.md b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/README.md new file mode 100644 index 00000000..969a6b66 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/README.md @@ -0,0 +1,153 @@ +libvirt [![GoDoc](http://godoc.org/github.com/digitalocean/go-libvirt?status.svg)](http://godoc.org/github.com/digitalocean/go-libvirt) [![Build Status](https://travis-ci.org/digitalocean/go-libvirt.svg?branch=master)](https://travis-ci.org/digitalocean/go-libvirt) [![Report Card](https://goreportcard.com/badge/github.com/digitalocean/go-libvirt)](https://goreportcard.com/report/github.com/digitalocean/go-libvirt) +==== + +Package `go-libvirt` provides a pure Go interface for interacting with libvirt. 
+
+Rather than using Libvirt's C bindings, this package makes use of
+libvirt's RPC interface, as documented [here](https://libvirt.org/internals/rpc.html).
+Connections to the libvirt server may be local or remote. RPC packets are encoded
+using the XDR standard as defined by [RFC 4506](https://tools.ietf.org/html/rfc4506.html).
+
+libvirt's RPC interface is quite extensive, and changes from one version to the next, so
+this project uses a code generator to build the go bindings. The code generator should
+be run whenever you want to build go-libvirt for a new version of libvirt. To do this,
+you'll need to set an environment variable `LIBVIRT_SOURCE` to the directory containing
+the untarred libvirt sources, and then run `go generate ./...` from the go-libvirt directory.
+The code generator consumes [src/remote/remote_protocol.x](https://github.com/libvirt/libvirt/blob/master/src/remote/remote_protocol.x)
+and produces go bindings for all the remote procedures defined there.
+
+[Pull requests are welcome](https://github.com/digitalocean/go-libvirt/blob/master/CONTRIBUTING.md)!
+
+How to Use This Library
+-----------------------
+Once you've vendored go-libvirt into your project, you'll probably want to call
+some libvirt functions. There's some example code below showing how to connect
+to libvirt and make one such call, but once you get past the introduction you'll
+next want to call some other libvirt functions. How do you find them?
+
+Start with the [libvirt API reference](https://libvirt.org/html/index.html).
+Let's say you want to gracefully shut down a VM, and after reading through the
+libvirt docs you determine that virDomainShutdown() is the function you want to
+call to do that. Where's that function in go-libvirt? We transform the names
+slightly when building the go bindings. There's no need for a global prefix like
+"vir" in Go, since all our functions are inside the package namespace, so we
+drop it. That means the Go function for `virDomainShutdown()` is just `DomainShutdown()`,
+and sure enough, you can find the Go function `DomainShutdown()` in libvirt.gen.go,
+with parameters and return values equivalent to those documented in the API
+reference.
+
+Suppose you then decide you need more control over your shutdown, so you switch
+over to `virDomainShutdownFlags()`. As its name suggests, this function takes a
+flag parameter which has possible values specified in an enum called
+`virDomainShutdownFlagValues`. Flag types like this are a little tricky for the
+code generator, because the C functions just take an integer type; only the
+libvirt documentation actually ties the flags to the enum types. In most cases,
+though, we're able to generate a wrapper function with a distinct flag type,
+making it easier for Go tooling to suggest possible flag values while you're
+working.
+Checking the documentation for this function:
+
+`godoc github.com/digitalocean/go-libvirt DomainShutdownFlags`
+
+returns this:
+
+`func (l *Libvirt) DomainShutdownFlags(Dom Domain, Flags DomainShutdownFlagValues) (err error)`
+
+If you want to see the possible flag values, `godoc` can help again:
+
+```
+$ godoc github.com/digitalocean/go-libvirt DomainShutdownFlagValues
+
+type DomainShutdownFlagValues int32
+    DomainShutdownFlagValues as declared in libvirt/libvirt-domain.h:1121
+
+const (
+    DomainShutdownDefault      DomainShutdownFlagValues = iota
+    DomainShutdownAcpiPowerBtn DomainShutdownFlagValues = 1
+    DomainShutdownGuestAgent   DomainShutdownFlagValues = 2
+    DomainShutdownInitctl      DomainShutdownFlagValues = 4
+    DomainShutdownSignal       DomainShutdownFlagValues = 8
+    DomainShutdownParavirt     DomainShutdownFlagValues = 16
+)
+    DomainShutdownFlagValues enumeration from libvirt/libvirt-domain.h:1121
+```
+
+One other suggestion: most of the code in go-libvirt is now generated, but a few
+hand-written routines still exist in libvirt.go, and wrap calls to the generated
+code with slightly different parameters or return values. We suggest avoiding
+these hand-written routines and calling the generated routines in libvirt.gen.go
+instead. Over time these hand-written routines will be removed from go-libvirt.
+
+Warning
+-------
+
+The libvirt project strongly recommends *against* talking to the RPC interface
+directly. They consider it to be a private implementation detail with the
+possibility of being entirely rearchitected in the future.
+
+While these packages are reasonably well-tested and have seen some use inside
+DigitalOcean, there may be subtle bugs which could cause the packages to act
+in unexpected ways. Use at your own risk!
+
+In addition, the API is not considered stable at this time. If you would like
+to include package `libvirt` in a project, we highly recommend vendoring it into
+your project.
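+
+Putting those pieces together, here is a minimal sketch of calling the
+generated `DomainShutdownFlags()` wrapper with the flag values shown above.
+The connection boilerplate mirrors the full example in the next section, while
+the `DomainLookupByName()` lookup and the `"test-vm"` domain name here are
+illustrative assumptions:
+
+```go
+package main
+
+import (
+	"log"
+	"net"
+	"time"
+
+	"github.com/digitalocean/go-libvirt"
+)
+
+func main() {
+	c, err := net.DialTimeout("unix", "/var/run/libvirt/libvirt-sock", 2*time.Second)
+	if err != nil {
+		log.Fatalf("failed to dial libvirt: %v", err)
+	}
+
+	l := libvirt.New(c)
+	if err := l.Connect(); err != nil {
+		log.Fatalf("failed to connect: %v", err)
+	}
+
+	// Resolve the (hypothetical) domain by name, then request a graceful
+	// shutdown via the guest agent or the virtual ACPI power button.
+	dom, err := l.DomainLookupByName("test-vm")
+	if err != nil {
+		log.Fatalf("failed to look up domain: %v", err)
+	}
+
+	flags := libvirt.DomainShutdownGuestAgent | libvirt.DomainShutdownAcpiPowerBtn
+	if err := l.DomainShutdownFlags(dom, flags); err != nil {
+		log.Fatalf("failed to shut down domain: %v", err)
+	}
+
+	if err := l.Disconnect(); err != nil {
+		log.Fatalf("failed to disconnect: %v", err)
+	}
+}
+```
+
+Multiple flags may be combined; the hypervisor attempts whichever of the
+requested shutdown methods it supports.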
+
+Example
+-------
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"time"
+
+	"github.com/digitalocean/go-libvirt"
+)
+
+func main() {
+	//c, err := net.DialTimeout("tcp", "127.0.0.1:16509", 2*time.Second)
+	//c, err := net.DialTimeout("tcp", "192.168.1.12:16509", 2*time.Second)
+	c, err := net.DialTimeout("unix", "/var/run/libvirt/libvirt-sock", 2*time.Second)
+	if err != nil {
+		log.Fatalf("failed to dial libvirt: %v", err)
+	}
+
+	l := libvirt.New(c)
+	if err := l.Connect(); err != nil {
+		log.Fatalf("failed to connect: %v", err)
+	}
+
+	v, err := l.Version()
+	if err != nil {
+		log.Fatalf("failed to retrieve libvirt version: %v", err)
+	}
+	fmt.Println("Version:", v)
+
+	domains, err := l.Domains()
+	if err != nil {
+		log.Fatalf("failed to retrieve domains: %v", err)
+	}
+
+	fmt.Println("ID\tName\t\tUUID")
+	fmt.Printf("--------------------------------------------------------\n")
+	for _, d := range domains {
+		fmt.Printf("%d\t%s\t%x\n", d.ID, d.Name, d.UUID)
+	}
+
+	if err := l.Disconnect(); err != nil {
+		log.Fatalf("failed to disconnect: %v", err)
+	}
+}
+
+```
+
+```
+Version: 1.3.4
+ID	Name		UUID
+--------------------------------------------------------
+1	Test-1		dc329f87d4de47198cfd2e21c6105b01
+2	Test-2		dc229f87d4de47198cfd2e21c6105b01
+```
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/const.gen.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/const.gen.go
new file mode 100644
index 00000000..9f444e8f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/const.gen.go
@@ -0,0 +1,1995 @@
+// Copyright 2017 The go-libvirt Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// WARNING: This file has automatically been generated
+// by https://git.io/c-for-go. DO NOT EDIT.
+ +package libvirt + +const ( + // ExportVar as defined in libvirt/libvirt-common.h:58 + ExportVar = 0 + // TypedParamFieldLength as defined in libvirt/libvirt-common.h:171 + TypedParamFieldLength = 80 + // SecurityLabelBuflen as defined in libvirt/libvirt-host.h:85 + SecurityLabelBuflen = 4097 + // SecurityModelBuflen as defined in libvirt/libvirt-host.h:113 + SecurityModelBuflen = 257 + // SecurityDoiBuflen as defined in libvirt/libvirt-host.h:120 + SecurityDoiBuflen = 257 + // NodeCPUStatsFieldLength as defined in libvirt/libvirt-host.h:177 + NodeCPUStatsFieldLength = 80 + // NodeCPUStatsKernel as defined in libvirt/libvirt-host.h:194 + NodeCPUStatsKernel = "kernel" + // NodeCPUStatsUser as defined in libvirt/libvirt-host.h:202 + NodeCPUStatsUser = "user" + // NodeCPUStatsIdle as defined in libvirt/libvirt-host.h:210 + NodeCPUStatsIdle = "idle" + // NodeCPUStatsIowait as defined in libvirt/libvirt-host.h:218 + NodeCPUStatsIowait = "iowait" + // NodeCPUStatsIntr as defined in libvirt/libvirt-host.h:226 + NodeCPUStatsIntr = "intr" + // NodeCPUStatsUtilization as defined in libvirt/libvirt-host.h:235 + NodeCPUStatsUtilization = "utilization" + // NodeMemoryStatsFieldLength as defined in libvirt/libvirt-host.h:255 + NodeMemoryStatsFieldLength = 80 + // NodeMemoryStatsTotal as defined in libvirt/libvirt-host.h:272 + NodeMemoryStatsTotal = "total" + // NodeMemoryStatsFree as defined in libvirt/libvirt-host.h:281 + NodeMemoryStatsFree = "free" + // NodeMemoryStatsBuffers as defined in libvirt/libvirt-host.h:289 + NodeMemoryStatsBuffers = "buffers" + // NodeMemoryStatsCached as defined in libvirt/libvirt-host.h:297 + NodeMemoryStatsCached = "cached" + // NodeMemorySharedPagesToScan as defined in libvirt/libvirt-host.h:318 + NodeMemorySharedPagesToScan = "shm_pages_to_scan" + // NodeMemorySharedSleepMillisecs as defined in libvirt/libvirt-host.h:326 + NodeMemorySharedSleepMillisecs = "shm_sleep_millisecs" + // NodeMemorySharedPagesShared as defined in libvirt/libvirt-host.h:334 + NodeMemorySharedPagesShared = "shm_pages_shared" + // NodeMemorySharedPagesSharing as defined in libvirt/libvirt-host.h:342 + NodeMemorySharedPagesSharing = "shm_pages_sharing" + // NodeMemorySharedPagesUnshared as defined in libvirt/libvirt-host.h:350 + NodeMemorySharedPagesUnshared = "shm_pages_unshared" + // NodeMemorySharedPagesVolatile as defined in libvirt/libvirt-host.h:358 + NodeMemorySharedPagesVolatile = "shm_pages_volatile" + // NodeMemorySharedFullScans as defined in libvirt/libvirt-host.h:366 + NodeMemorySharedFullScans = "shm_full_scans" + // NodeMemorySharedMergeAcrossNodes as defined in libvirt/libvirt-host.h:378 + NodeMemorySharedMergeAcrossNodes = "shm_merge_across_nodes" + // UUIDBuflen as defined in libvirt/libvirt-host.h:513 + UUIDBuflen = 16 + // UUIDStringBuflen as defined in libvirt/libvirt-host.h:522 + UUIDStringBuflen = 37 + // DomainSchedulerCPUShares as defined in libvirt/libvirt-domain.h:315 + DomainSchedulerCPUShares = "cpu_shares" + // DomainSchedulerGlobalPeriod as defined in libvirt/libvirt-domain.h:323 + DomainSchedulerGlobalPeriod = "global_period" + // DomainSchedulerGlobalQuota as defined in libvirt/libvirt-domain.h:331 + DomainSchedulerGlobalQuota = "global_quota" + // DomainSchedulerVCPUPeriod as defined in libvirt/libvirt-domain.h:339 + DomainSchedulerVCPUPeriod = "vcpu_period" + // DomainSchedulerVCPUQuota as defined in libvirt/libvirt-domain.h:347 + DomainSchedulerVCPUQuota = "vcpu_quota" + // DomainSchedulerEmulatorPeriod as defined in libvirt/libvirt-domain.h:356 + 
DomainSchedulerEmulatorPeriod = "emulator_period" + // DomainSchedulerEmulatorQuota as defined in libvirt/libvirt-domain.h:365 + DomainSchedulerEmulatorQuota = "emulator_quota" + // DomainSchedulerIothreadPeriod as defined in libvirt/libvirt-domain.h:373 + DomainSchedulerIothreadPeriod = "iothread_period" + // DomainSchedulerIothreadQuota as defined in libvirt/libvirt-domain.h:381 + DomainSchedulerIothreadQuota = "iothread_quota" + // DomainSchedulerWeight as defined in libvirt/libvirt-domain.h:389 + DomainSchedulerWeight = "weight" + // DomainSchedulerCap as defined in libvirt/libvirt-domain.h:397 + DomainSchedulerCap = "cap" + // DomainSchedulerReservation as defined in libvirt/libvirt-domain.h:405 + DomainSchedulerReservation = "reservation" + // DomainSchedulerLimit as defined in libvirt/libvirt-domain.h:413 + DomainSchedulerLimit = "limit" + // DomainSchedulerShares as defined in libvirt/libvirt-domain.h:421 + DomainSchedulerShares = "shares" + // DomainBlockStatsFieldLength as defined in libvirt/libvirt-domain.h:479 + DomainBlockStatsFieldLength = 80 + // DomainBlockStatsReadBytes as defined in libvirt/libvirt-domain.h:487 + DomainBlockStatsReadBytes = "rd_bytes" + // DomainBlockStatsReadReq as defined in libvirt/libvirt-domain.h:495 + DomainBlockStatsReadReq = "rd_operations" + // DomainBlockStatsReadTotalTimes as defined in libvirt/libvirt-domain.h:503 + DomainBlockStatsReadTotalTimes = "rd_total_times" + // DomainBlockStatsWriteBytes as defined in libvirt/libvirt-domain.h:511 + DomainBlockStatsWriteBytes = "wr_bytes" + // DomainBlockStatsWriteReq as defined in libvirt/libvirt-domain.h:519 + DomainBlockStatsWriteReq = "wr_operations" + // DomainBlockStatsWriteTotalTimes as defined in libvirt/libvirt-domain.h:527 + DomainBlockStatsWriteTotalTimes = "wr_total_times" + // DomainBlockStatsFlushReq as defined in libvirt/libvirt-domain.h:535 + DomainBlockStatsFlushReq = "flush_operations" + // DomainBlockStatsFlushTotalTimes as defined in libvirt/libvirt-domain.h:543 + DomainBlockStatsFlushTotalTimes = "flush_total_times" + // DomainBlockStatsErrs as defined in libvirt/libvirt-domain.h:550 + DomainBlockStatsErrs = "errs" + // MigrateParamURI as defined in libvirt/libvirt-domain.h:842 + MigrateParamURI = "migrate_uri" + // MigrateParamDestName as defined in libvirt/libvirt-domain.h:852 + MigrateParamDestName = "destination_name" + // MigrateParamDestXML as defined in libvirt/libvirt-domain.h:871 + MigrateParamDestXML = "destination_xml" + // MigrateParamPersistXML as defined in libvirt/libvirt-domain.h:886 + MigrateParamPersistXML = "persistent_xml" + // MigrateParamBandwidth as defined in libvirt/libvirt-domain.h:896 + MigrateParamBandwidth = "bandwidth" + // MigrateParamGraphicsURI as defined in libvirt/libvirt-domain.h:917 + MigrateParamGraphicsURI = "graphics_uri" + // MigrateParamListenAddress as defined in libvirt/libvirt-domain.h:928 + MigrateParamListenAddress = "listen_address" + // MigrateParamMigrateDisks as defined in libvirt/libvirt-domain.h:937 + MigrateParamMigrateDisks = "migrate_disks" + // MigrateParamDisksPort as defined in libvirt/libvirt-domain.h:947 + MigrateParamDisksPort = "disks_port" + // MigrateParamCompression as defined in libvirt/libvirt-domain.h:957 + MigrateParamCompression = "compression" + // MigrateParamCompressionMtLevel as defined in libvirt/libvirt-domain.h:966 + MigrateParamCompressionMtLevel = "compression.mt.level" + // MigrateParamCompressionMtThreads as defined in libvirt/libvirt-domain.h:974 + MigrateParamCompressionMtThreads = 
"compression.mt.threads" + // MigrateParamCompressionMtDthreads as defined in libvirt/libvirt-domain.h:982 + MigrateParamCompressionMtDthreads = "compression.mt.dthreads" + // MigrateParamCompressionXbzrleCache as defined in libvirt/libvirt-domain.h:990 + MigrateParamCompressionXbzrleCache = "compression.xbzrle.cache" + // MigrateParamAutoConvergeInitial as defined in libvirt/libvirt-domain.h:999 + MigrateParamAutoConvergeInitial = "auto_converge.initial" + // MigrateParamAutoConvergeIncrement as defined in libvirt/libvirt-domain.h:1009 + MigrateParamAutoConvergeIncrement = "auto_converge.increment" + // DomainCPUStatsCputime as defined in libvirt/libvirt-domain.h:1252 + DomainCPUStatsCputime = "cpu_time" + // DomainCPUStatsUsertime as defined in libvirt/libvirt-domain.h:1258 + DomainCPUStatsUsertime = "user_time" + // DomainCPUStatsSystemtime as defined in libvirt/libvirt-domain.h:1264 + DomainCPUStatsSystemtime = "system_time" + // DomainCPUStatsVcputime as defined in libvirt/libvirt-domain.h:1271 + DomainCPUStatsVcputime = "vcpu_time" + // DomainBlkioWeight as defined in libvirt/libvirt-domain.h:1300 + DomainBlkioWeight = "weight" + // DomainBlkioDeviceWeight as defined in libvirt/libvirt-domain.h:1310 + DomainBlkioDeviceWeight = "device_weight" + // DomainBlkioDeviceReadIops as defined in libvirt/libvirt-domain.h:1321 + DomainBlkioDeviceReadIops = "device_read_iops_sec" + // DomainBlkioDeviceWriteIops as defined in libvirt/libvirt-domain.h:1332 + DomainBlkioDeviceWriteIops = "device_write_iops_sec" + // DomainBlkioDeviceReadBps as defined in libvirt/libvirt-domain.h:1343 + DomainBlkioDeviceReadBps = "device_read_bytes_sec" + // DomainBlkioDeviceWriteBps as defined in libvirt/libvirt-domain.h:1354 + DomainBlkioDeviceWriteBps = "device_write_bytes_sec" + // DomainMemoryParamUnlimited as defined in libvirt/libvirt-domain.h:1373 + DomainMemoryParamUnlimited = 9007199254740991 + // DomainMemoryHardLimit as defined in libvirt/libvirt-domain.h:1382 + DomainMemoryHardLimit = "hard_limit" + // DomainMemorySoftLimit as defined in libvirt/libvirt-domain.h:1391 + DomainMemorySoftLimit = "soft_limit" + // DomainMemoryMinGuarantee as defined in libvirt/libvirt-domain.h:1400 + DomainMemoryMinGuarantee = "min_guarantee" + // DomainMemorySwapHardLimit as defined in libvirt/libvirt-domain.h:1410 + DomainMemorySwapHardLimit = "swap_hard_limit" + // DomainNumaNodeset as defined in libvirt/libvirt-domain.h:1455 + DomainNumaNodeset = "numa_nodeset" + // DomainNumaMode as defined in libvirt/libvirt-domain.h:1463 + DomainNumaMode = "numa_mode" + // DomainBandwidthInAverage as defined in libvirt/libvirt-domain.h:1575 + DomainBandwidthInAverage = "inbound.average" + // DomainBandwidthInPeak as defined in libvirt/libvirt-domain.h:1582 + DomainBandwidthInPeak = "inbound.peak" + // DomainBandwidthInBurst as defined in libvirt/libvirt-domain.h:1589 + DomainBandwidthInBurst = "inbound.burst" + // DomainBandwidthInFloor as defined in libvirt/libvirt-domain.h:1596 + DomainBandwidthInFloor = "inbound.floor" + // DomainBandwidthOutAverage as defined in libvirt/libvirt-domain.h:1603 + DomainBandwidthOutAverage = "outbound.average" + // DomainBandwidthOutPeak as defined in libvirt/libvirt-domain.h:1610 + DomainBandwidthOutPeak = "outbound.peak" + // DomainBandwidthOutBurst as defined in libvirt/libvirt-domain.h:1617 + DomainBandwidthOutBurst = "outbound.burst" + // PerfParamCmt as defined in libvirt/libvirt-domain.h:2073 + PerfParamCmt = "cmt" + // PerfParamMbmt as defined in libvirt/libvirt-domain.h:2084 + PerfParamMbmt 
= "mbmt" + // PerfParamMbml as defined in libvirt/libvirt-domain.h:2094 + PerfParamMbml = "mbml" + // PerfParamCacheMisses as defined in libvirt/libvirt-domain.h:2104 + PerfParamCacheMisses = "cache_misses" + // PerfParamCacheReferences as defined in libvirt/libvirt-domain.h:2114 + PerfParamCacheReferences = "cache_references" + // PerfParamInstructions as defined in libvirt/libvirt-domain.h:2124 + PerfParamInstructions = "instructions" + // PerfParamCPUCycles as defined in libvirt/libvirt-domain.h:2134 + PerfParamCPUCycles = "cpu_cycles" + // PerfParamBranchInstructions as defined in libvirt/libvirt-domain.h:2144 + PerfParamBranchInstructions = "branch_instructions" + // PerfParamBranchMisses as defined in libvirt/libvirt-domain.h:2154 + PerfParamBranchMisses = "branch_misses" + // PerfParamBusCycles as defined in libvirt/libvirt-domain.h:2164 + PerfParamBusCycles = "bus_cycles" + // PerfParamStalledCyclesFrontend as defined in libvirt/libvirt-domain.h:2175 + PerfParamStalledCyclesFrontend = "stalled_cycles_frontend" + // PerfParamStalledCyclesBackend as defined in libvirt/libvirt-domain.h:2186 + PerfParamStalledCyclesBackend = "stalled_cycles_backend" + // PerfParamRefCPUCycles as defined in libvirt/libvirt-domain.h:2197 + PerfParamRefCPUCycles = "ref_cpu_cycles" + // PerfParamCPUClock as defined in libvirt/libvirt-domain.h:2208 + PerfParamCPUClock = "cpu_clock" + // PerfParamTaskClock as defined in libvirt/libvirt-domain.h:2219 + PerfParamTaskClock = "task_clock" + // PerfParamPageFaults as defined in libvirt/libvirt-domain.h:2229 + PerfParamPageFaults = "page_faults" + // PerfParamContextSwitches as defined in libvirt/libvirt-domain.h:2239 + PerfParamContextSwitches = "context_switches" + // PerfParamCPUMigrations as defined in libvirt/libvirt-domain.h:2249 + PerfParamCPUMigrations = "cpu_migrations" + // PerfParamPageFaultsMin as defined in libvirt/libvirt-domain.h:2259 + PerfParamPageFaultsMin = "page_faults_min" + // PerfParamPageFaultsMaj as defined in libvirt/libvirt-domain.h:2269 + PerfParamPageFaultsMaj = "page_faults_maj" + // PerfParamAlignmentFaults as defined in libvirt/libvirt-domain.h:2279 + PerfParamAlignmentFaults = "alignment_faults" + // PerfParamEmulationFaults as defined in libvirt/libvirt-domain.h:2289 + PerfParamEmulationFaults = "emulation_faults" + // DomainBlockCopyBandwidth as defined in libvirt/libvirt-domain.h:2453 + DomainBlockCopyBandwidth = "bandwidth" + // DomainBlockCopyGranularity as defined in libvirt/libvirt-domain.h:2464 + DomainBlockCopyGranularity = "granularity" + // DomainBlockCopyBufSize as defined in libvirt/libvirt-domain.h:2473 + DomainBlockCopyBufSize = "buf-size" + // DomainBlockIotuneTotalBytesSec as defined in libvirt/libvirt-domain.h:2514 + DomainBlockIotuneTotalBytesSec = "total_bytes_sec" + // DomainBlockIotuneReadBytesSec as defined in libvirt/libvirt-domain.h:2522 + DomainBlockIotuneReadBytesSec = "read_bytes_sec" + // DomainBlockIotuneWriteBytesSec as defined in libvirt/libvirt-domain.h:2530 + DomainBlockIotuneWriteBytesSec = "write_bytes_sec" + // DomainBlockIotuneTotalIopsSec as defined in libvirt/libvirt-domain.h:2538 + DomainBlockIotuneTotalIopsSec = "total_iops_sec" + // DomainBlockIotuneReadIopsSec as defined in libvirt/libvirt-domain.h:2546 + DomainBlockIotuneReadIopsSec = "read_iops_sec" + // DomainBlockIotuneWriteIopsSec as defined in libvirt/libvirt-domain.h:2553 + DomainBlockIotuneWriteIopsSec = "write_iops_sec" + // DomainBlockIotuneTotalBytesSecMax as defined in libvirt/libvirt-domain.h:2561 + 
DomainBlockIotuneTotalBytesSecMax = "total_bytes_sec_max" + // DomainBlockIotuneReadBytesSecMax as defined in libvirt/libvirt-domain.h:2569 + DomainBlockIotuneReadBytesSecMax = "read_bytes_sec_max" + // DomainBlockIotuneWriteBytesSecMax as defined in libvirt/libvirt-domain.h:2577 + DomainBlockIotuneWriteBytesSecMax = "write_bytes_sec_max" + // DomainBlockIotuneTotalIopsSecMax as defined in libvirt/libvirt-domain.h:2585 + DomainBlockIotuneTotalIopsSecMax = "total_iops_sec_max" + // DomainBlockIotuneReadIopsSecMax as defined in libvirt/libvirt-domain.h:2593 + DomainBlockIotuneReadIopsSecMax = "read_iops_sec_max" + // DomainBlockIotuneWriteIopsSecMax as defined in libvirt/libvirt-domain.h:2600 + DomainBlockIotuneWriteIopsSecMax = "write_iops_sec_max" + // DomainBlockIotuneTotalBytesSecMaxLength as defined in libvirt/libvirt-domain.h:2608 + DomainBlockIotuneTotalBytesSecMaxLength = "total_bytes_sec_max_length" + // DomainBlockIotuneReadBytesSecMaxLength as defined in libvirt/libvirt-domain.h:2616 + DomainBlockIotuneReadBytesSecMaxLength = "read_bytes_sec_max_length" + // DomainBlockIotuneWriteBytesSecMaxLength as defined in libvirt/libvirt-domain.h:2624 + DomainBlockIotuneWriteBytesSecMaxLength = "write_bytes_sec_max_length" + // DomainBlockIotuneTotalIopsSecMaxLength as defined in libvirt/libvirt-domain.h:2632 + DomainBlockIotuneTotalIopsSecMaxLength = "total_iops_sec_max_length" + // DomainBlockIotuneReadIopsSecMaxLength as defined in libvirt/libvirt-domain.h:2640 + DomainBlockIotuneReadIopsSecMaxLength = "read_iops_sec_max_length" + // DomainBlockIotuneWriteIopsSecMaxLength as defined in libvirt/libvirt-domain.h:2648 + DomainBlockIotuneWriteIopsSecMaxLength = "write_iops_sec_max_length" + // DomainBlockIotuneSizeIopsSec as defined in libvirt/libvirt-domain.h:2655 + DomainBlockIotuneSizeIopsSec = "size_iops_sec" + // DomainBlockIotuneGroupName as defined in libvirt/libvirt-domain.h:2662 + DomainBlockIotuneGroupName = "group_name" + // DomainSendKeyMaxKeys as defined in libvirt/libvirt-domain.h:2743 + DomainSendKeyMaxKeys = 16 + // DomainJobOperationStr as defined in libvirt/libvirt-domain.h:3155 + DomainJobOperationStr = "operation" + // DomainJobTimeElapsed as defined in libvirt/libvirt-domain.h:3165 + DomainJobTimeElapsed = "time_elapsed" + // DomainJobTimeElapsedNet as defined in libvirt/libvirt-domain.h:3175 + DomainJobTimeElapsedNet = "time_elapsed_net" + // DomainJobTimeRemaining as defined in libvirt/libvirt-domain.h:3185 + DomainJobTimeRemaining = "time_remaining" + // DomainJobDowntime as defined in libvirt/libvirt-domain.h:3195 + DomainJobDowntime = "downtime" + // DomainJobDowntimeNet as defined in libvirt/libvirt-domain.h:3204 + DomainJobDowntimeNet = "downtime_net" + // DomainJobSetupTime as defined in libvirt/libvirt-domain.h:3213 + DomainJobSetupTime = "setup_time" + // DomainJobDataTotal as defined in libvirt/libvirt-domain.h:3228 + DomainJobDataTotal = "data_total" + // DomainJobDataProcessed as defined in libvirt/libvirt-domain.h:3238 + DomainJobDataProcessed = "data_processed" + // DomainJobDataRemaining as defined in libvirt/libvirt-domain.h:3248 + DomainJobDataRemaining = "data_remaining" + // DomainJobMemoryTotal as defined in libvirt/libvirt-domain.h:3258 + DomainJobMemoryTotal = "memory_total" + // DomainJobMemoryProcessed as defined in libvirt/libvirt-domain.h:3268 + DomainJobMemoryProcessed = "memory_processed" + // DomainJobMemoryRemaining as defined in libvirt/libvirt-domain.h:3278 + DomainJobMemoryRemaining = "memory_remaining" + // DomainJobMemoryConstant as 
defined in libvirt/libvirt-domain.h:3290 + DomainJobMemoryConstant = "memory_constant" + // DomainJobMemoryNormal as defined in libvirt/libvirt-domain.h:3300 + DomainJobMemoryNormal = "memory_normal" + // DomainJobMemoryNormalBytes as defined in libvirt/libvirt-domain.h:3310 + DomainJobMemoryNormalBytes = "memory_normal_bytes" + // DomainJobMemoryBps as defined in libvirt/libvirt-domain.h:3318 + DomainJobMemoryBps = "memory_bps" + // DomainJobMemoryDirtyRate as defined in libvirt/libvirt-domain.h:3326 + DomainJobMemoryDirtyRate = "memory_dirty_rate" + // DomainJobMemoryIteration as defined in libvirt/libvirt-domain.h:3337 + DomainJobMemoryIteration = "memory_iteration" + // DomainJobDiskTotal as defined in libvirt/libvirt-domain.h:3347 + DomainJobDiskTotal = "disk_total" + // DomainJobDiskProcessed as defined in libvirt/libvirt-domain.h:3357 + DomainJobDiskProcessed = "disk_processed" + // DomainJobDiskRemaining as defined in libvirt/libvirt-domain.h:3367 + DomainJobDiskRemaining = "disk_remaining" + // DomainJobDiskBps as defined in libvirt/libvirt-domain.h:3375 + DomainJobDiskBps = "disk_bps" + // DomainJobCompressionCache as defined in libvirt/libvirt-domain.h:3384 + DomainJobCompressionCache = "compression_cache" + // DomainJobCompressionBytes as defined in libvirt/libvirt-domain.h:3392 + DomainJobCompressionBytes = "compression_bytes" + // DomainJobCompressionPages as defined in libvirt/libvirt-domain.h:3400 + DomainJobCompressionPages = "compression_pages" + // DomainJobCompressionCacheMisses as defined in libvirt/libvirt-domain.h:3409 + DomainJobCompressionCacheMisses = "compression_cache_misses" + // DomainJobCompressionOverflow as defined in libvirt/libvirt-domain.h:3419 + DomainJobCompressionOverflow = "compression_overflow" + // DomainJobAutoConvergeThrottle as defined in libvirt/libvirt-domain.h:3428 + DomainJobAutoConvergeThrottle = "auto_converge_throttle" + // DomainTunableCPUVcpupin as defined in libvirt/libvirt-domain.h:3981 + DomainTunableCPUVcpupin = "cputune.vcpupin%u" + // DomainTunableCPUEmulatorpin as defined in libvirt/libvirt-domain.h:3989 + DomainTunableCPUEmulatorpin = "cputune.emulatorpin" + // DomainTunableCPUIothreadspin as defined in libvirt/libvirt-domain.h:3998 + DomainTunableCPUIothreadspin = "cputune.iothreadpin%u" + // DomainTunableCPUCpuShares as defined in libvirt/libvirt-domain.h:4006 + DomainTunableCPUCpuShares = "cputune.cpu_shares" + // DomainTunableCPUGlobalPeriod as defined in libvirt/libvirt-domain.h:4014 + DomainTunableCPUGlobalPeriod = "cputune.global_period" + // DomainTunableCPUGlobalQuota as defined in libvirt/libvirt-domain.h:4022 + DomainTunableCPUGlobalQuota = "cputune.global_quota" + // DomainTunableCPUVCPUPeriod as defined in libvirt/libvirt-domain.h:4030 + DomainTunableCPUVCPUPeriod = "cputune.vcpu_period" + // DomainTunableCPUVCPUQuota as defined in libvirt/libvirt-domain.h:4038 + DomainTunableCPUVCPUQuota = "cputune.vcpu_quota" + // DomainTunableCPUEmulatorPeriod as defined in libvirt/libvirt-domain.h:4047 + DomainTunableCPUEmulatorPeriod = "cputune.emulator_period" + // DomainTunableCPUEmulatorQuota as defined in libvirt/libvirt-domain.h:4056 + DomainTunableCPUEmulatorQuota = "cputune.emulator_quota" + // DomainTunableCPUIothreadPeriod as defined in libvirt/libvirt-domain.h:4064 + DomainTunableCPUIothreadPeriod = "cputune.iothread_period" + // DomainTunableCPUIothreadQuota as defined in libvirt/libvirt-domain.h:4072 + DomainTunableCPUIothreadQuota = "cputune.iothread_quota" + // DomainTunableBlkdevDisk as defined in 
libvirt/libvirt-domain.h:4080 + DomainTunableBlkdevDisk = "blkdeviotune.disk" + // DomainTunableBlkdevTotalBytesSec as defined in libvirt/libvirt-domain.h:4088 + DomainTunableBlkdevTotalBytesSec = "blkdeviotune.total_bytes_sec" + // DomainTunableBlkdevReadBytesSec as defined in libvirt/libvirt-domain.h:4096 + DomainTunableBlkdevReadBytesSec = "blkdeviotune.read_bytes_sec" + // DomainTunableBlkdevWriteBytesSec as defined in libvirt/libvirt-domain.h:4104 + DomainTunableBlkdevWriteBytesSec = "blkdeviotune.write_bytes_sec" + // DomainTunableBlkdevTotalIopsSec as defined in libvirt/libvirt-domain.h:4112 + DomainTunableBlkdevTotalIopsSec = "blkdeviotune.total_iops_sec" + // DomainTunableBlkdevReadIopsSec as defined in libvirt/libvirt-domain.h:4120 + DomainTunableBlkdevReadIopsSec = "blkdeviotune.read_iops_sec" + // DomainTunableBlkdevWriteIopsSec as defined in libvirt/libvirt-domain.h:4128 + DomainTunableBlkdevWriteIopsSec = "blkdeviotune.write_iops_sec" + // DomainTunableBlkdevTotalBytesSecMax as defined in libvirt/libvirt-domain.h:4136 + DomainTunableBlkdevTotalBytesSecMax = "blkdeviotune.total_bytes_sec_max" + // DomainTunableBlkdevReadBytesSecMax as defined in libvirt/libvirt-domain.h:4144 + DomainTunableBlkdevReadBytesSecMax = "blkdeviotune.read_bytes_sec_max" + // DomainTunableBlkdevWriteBytesSecMax as defined in libvirt/libvirt-domain.h:4152 + DomainTunableBlkdevWriteBytesSecMax = "blkdeviotune.write_bytes_sec_max" + // DomainTunableBlkdevTotalIopsSecMax as defined in libvirt/libvirt-domain.h:4160 + DomainTunableBlkdevTotalIopsSecMax = "blkdeviotune.total_iops_sec_max" + // DomainTunableBlkdevReadIopsSecMax as defined in libvirt/libvirt-domain.h:4168 + DomainTunableBlkdevReadIopsSecMax = "blkdeviotune.read_iops_sec_max" + // DomainTunableBlkdevWriteIopsSecMax as defined in libvirt/libvirt-domain.h:4176 + DomainTunableBlkdevWriteIopsSecMax = "blkdeviotune.write_iops_sec_max" + // DomainTunableBlkdevSizeIopsSec as defined in libvirt/libvirt-domain.h:4184 + DomainTunableBlkdevSizeIopsSec = "blkdeviotune.size_iops_sec" + // DomainTunableBlkdevGroupName as defined in libvirt/libvirt-domain.h:4192 + DomainTunableBlkdevGroupName = "blkdeviotune.group_name" + // DomainTunableBlkdevTotalBytesSecMaxLength as defined in libvirt/libvirt-domain.h:4201 + DomainTunableBlkdevTotalBytesSecMaxLength = "blkdeviotune.total_bytes_sec_max_length" + // DomainTunableBlkdevReadBytesSecMaxLength as defined in libvirt/libvirt-domain.h:4210 + DomainTunableBlkdevReadBytesSecMaxLength = "blkdeviotune.read_bytes_sec_max_length" + // DomainTunableBlkdevWriteBytesSecMaxLength as defined in libvirt/libvirt-domain.h:4219 + DomainTunableBlkdevWriteBytesSecMaxLength = "blkdeviotune.write_bytes_sec_max_length" + // DomainTunableBlkdevTotalIopsSecMaxLength as defined in libvirt/libvirt-domain.h:4228 + DomainTunableBlkdevTotalIopsSecMaxLength = "blkdeviotune.total_iops_sec_max_length" + // DomainTunableBlkdevReadIopsSecMaxLength as defined in libvirt/libvirt-domain.h:4237 + DomainTunableBlkdevReadIopsSecMaxLength = "blkdeviotune.read_iops_sec_max_length" + // DomainTunableBlkdevWriteIopsSecMaxLength as defined in libvirt/libvirt-domain.h:4246 + DomainTunableBlkdevWriteIopsSecMaxLength = "blkdeviotune.write_iops_sec_max_length" + // DomainSchedFieldLength as defined in libvirt/libvirt-domain.h:4534 + DomainSchedFieldLength = 80 + // DomainBlkioFieldLength as defined in libvirt/libvirt-domain.h:4578 + DomainBlkioFieldLength = 80 + // DomainMemoryFieldLength as defined in libvirt/libvirt-domain.h:4622 + DomainMemoryFieldLength = 
80 +) + +// ConnectCloseReason as declared in libvirt/libvirt-common.h:120 +type ConnectCloseReason int32 + +// ConnectCloseReason enumeration from libvirt/libvirt-common.h:120 +const ( + ConnectCloseReasonError ConnectCloseReason = iota + ConnectCloseReasonEOF ConnectCloseReason = 1 + ConnectCloseReasonKeepalive ConnectCloseReason = 2 + ConnectCloseReasonClient ConnectCloseReason = 3 +) + +// TypedParameterType as declared in libvirt/libvirt-common.h:139 +type TypedParameterType int32 + +// TypedParameterType enumeration from libvirt/libvirt-common.h:139 +const ( + TypedParamInt TypedParameterType = 1 + TypedParamUint TypedParameterType = 2 + TypedParamLlong TypedParameterType = 3 + TypedParamUllong TypedParameterType = 4 + TypedParamDouble TypedParameterType = 5 + TypedParamBoolean TypedParameterType = 6 + TypedParamString TypedParameterType = 7 +) + +// TypedParameterFlags as declared in libvirt/libvirt-common.h:164 +type TypedParameterFlags int32 + +// TypedParameterFlags enumeration from libvirt/libvirt-common.h:164 +const ( + TypedParamStringOkay TypedParameterFlags = 4 +) + +// NodeSuspendTarget as declared in libvirt/libvirt-host.h:62 +type NodeSuspendTarget int32 + +// NodeSuspendTarget enumeration from libvirt/libvirt-host.h:62 +const ( + NodeSuspendTargetMem NodeSuspendTarget = iota + NodeSuspendTargetDisk NodeSuspendTarget = 1 + NodeSuspendTargetHybrid NodeSuspendTarget = 2 +) + +// NodeGetCPUStatsAllCPUs as declared in libvirt/libvirt-host.h:186 +type NodeGetCPUStatsAllCPUs int32 + +// NodeGetCPUStatsAllCPUs enumeration from libvirt/libvirt-host.h:186 +const ( + NodeCPUStatsAllCpus NodeGetCPUStatsAllCPUs = -1 +) + +// NodeGetMemoryStatsAllCells as declared in libvirt/libvirt-host.h:264 +type NodeGetMemoryStatsAllCells int32 + +// NodeGetMemoryStatsAllCells enumeration from libvirt/libvirt-host.h:264 +const ( + NodeMemoryStatsAllCells NodeGetMemoryStatsAllCells = -1 +) + +// ConnectFlags as declared in libvirt/libvirt-host.h:443 +type ConnectFlags int32 + +// ConnectFlags enumeration from libvirt/libvirt-host.h:443 +const ( + ConnectRo ConnectFlags = 1 + ConnectNoAliases ConnectFlags = 2 +) + +// ConnectCredentialType as declared in libvirt/libvirt-host.h:460 +type ConnectCredentialType int32 + +// ConnectCredentialType enumeration from libvirt/libvirt-host.h:460 +const ( + CredUsername ConnectCredentialType = 1 + CredAuthname ConnectCredentialType = 2 + CredLanguage ConnectCredentialType = 3 + CredCnonce ConnectCredentialType = 4 + CredPassphrase ConnectCredentialType = 5 + CredEchoprompt ConnectCredentialType = 6 + CredNoechoprompt ConnectCredentialType = 7 + CredRealm ConnectCredentialType = 8 + CredExternal ConnectCredentialType = 9 +) + +// CPUCompareResult as declared in libvirt/libvirt-host.h:633 +type CPUCompareResult int32 + +// CPUCompareResult enumeration from libvirt/libvirt-host.h:633 +const ( + CPUCompareError CPUCompareResult = -1 + CPUCompareIncompatible CPUCompareResult = 0 + CPUCompareIdentical CPUCompareResult = 1 + CPUCompareSuperset CPUCompareResult = 2 +) + +// ConnectCompareCPUFlags as declared in libvirt/libvirt-host.h:638 +type ConnectCompareCPUFlags int32 + +// ConnectCompareCPUFlags enumeration from libvirt/libvirt-host.h:638 +const ( + ConnectCompareCPUFailIncompatible ConnectCompareCPUFlags = 1 +) + +// ConnectBaselineCPUFlags as declared in libvirt/libvirt-host.h:657 +type ConnectBaselineCPUFlags int32 + +// ConnectBaselineCPUFlags enumeration from libvirt/libvirt-host.h:657 +const ( + ConnectBaselineCPUExpandFeatures ConnectBaselineCPUFlags = 1 + 
ConnectBaselineCPUMigratable ConnectBaselineCPUFlags = 2 +) + +// NodeAllocPagesFlags as declared in libvirt/libvirt-host.h:679 +type NodeAllocPagesFlags int32 + +// NodeAllocPagesFlags enumeration from libvirt/libvirt-host.h:679 +const ( + NodeAllocPagesAdd NodeAllocPagesFlags = iota + NodeAllocPagesSet NodeAllocPagesFlags = 1 +) + +// DomainState as declared in libvirt/libvirt-domain.h:71 +type DomainState int32 + +// DomainState enumeration from libvirt/libvirt-domain.h:71 +const ( + DomainNostate DomainState = iota + DomainRunning DomainState = 1 + DomainBlocked DomainState = 2 + DomainPaused DomainState = 3 + DomainShutdown DomainState = 4 + DomainShutoff DomainState = 5 + DomainCrashed DomainState = 6 + DomainPmsuspended DomainState = 7 +) + +// DomainNostateReason as declared in libvirt/libvirt-domain.h:79 +type DomainNostateReason int32 + +// DomainNostateReason enumeration from libvirt/libvirt-domain.h:79 +const ( + DomainNostateUnknown DomainNostateReason = iota +) + +// DomainRunningReason as declared in libvirt/libvirt-domain.h:98 +type DomainRunningReason int32 + +// DomainRunningReason enumeration from libvirt/libvirt-domain.h:98 +const ( + DomainRunningUnknown DomainRunningReason = iota + DomainRunningBooted DomainRunningReason = 1 + DomainRunningMigrated DomainRunningReason = 2 + DomainRunningRestored DomainRunningReason = 3 + DomainRunningFromSnapshot DomainRunningReason = 4 + DomainRunningUnpaused DomainRunningReason = 5 + DomainRunningMigrationCanceled DomainRunningReason = 6 + DomainRunningSaveCanceled DomainRunningReason = 7 + DomainRunningWakeup DomainRunningReason = 8 + DomainRunningCrashed DomainRunningReason = 9 + DomainRunningPostcopy DomainRunningReason = 10 +) + +// DomainBlockedReason as declared in libvirt/libvirt-domain.h:106 +type DomainBlockedReason int32 + +// DomainBlockedReason enumeration from libvirt/libvirt-domain.h:106 +const ( + DomainBlockedUnknown DomainBlockedReason = iota +) + +// DomainPausedReason as declared in libvirt/libvirt-domain.h:127 +type DomainPausedReason int32 + +// DomainPausedReason enumeration from libvirt/libvirt-domain.h:127 +const ( + DomainPausedUnknown DomainPausedReason = iota + DomainPausedUser DomainPausedReason = 1 + DomainPausedMigration DomainPausedReason = 2 + DomainPausedSave DomainPausedReason = 3 + DomainPausedDump DomainPausedReason = 4 + DomainPausedIoerror DomainPausedReason = 5 + DomainPausedWatchdog DomainPausedReason = 6 + DomainPausedFromSnapshot DomainPausedReason = 7 + DomainPausedShuttingDown DomainPausedReason = 8 + DomainPausedSnapshot DomainPausedReason = 9 + DomainPausedCrashed DomainPausedReason = 10 + DomainPausedStartingUp DomainPausedReason = 11 + DomainPausedPostcopy DomainPausedReason = 12 + DomainPausedPostcopyFailed DomainPausedReason = 13 +) + +// DomainShutdownReason as declared in libvirt/libvirt-domain.h:136 +type DomainShutdownReason int32 + +// DomainShutdownReason enumeration from libvirt/libvirt-domain.h:136 +const ( + DomainShutdownUnknown DomainShutdownReason = iota + DomainShutdownUser DomainShutdownReason = 1 +) + +// DomainShutoffReason as declared in libvirt/libvirt-domain.h:151 +type DomainShutoffReason int32 + +// DomainShutoffReason enumeration from libvirt/libvirt-domain.h:151 +const ( + DomainShutoffUnknown DomainShutoffReason = iota + DomainShutoffShutdown DomainShutoffReason = 1 + DomainShutoffDestroyed DomainShutoffReason = 2 + DomainShutoffCrashed DomainShutoffReason = 3 + DomainShutoffMigrated DomainShutoffReason = 4 + DomainShutoffSaved DomainShutoffReason = 5 + 
DomainShutoffFailed DomainShutoffReason = 6 + DomainShutoffFromSnapshot DomainShutoffReason = 7 +) + +// DomainCrashedReason as declared in libvirt/libvirt-domain.h:160 +type DomainCrashedReason int32 + +// DomainCrashedReason enumeration from libvirt/libvirt-domain.h:160 +const ( + DomainCrashedUnknown DomainCrashedReason = iota + DomainCrashedPanicked DomainCrashedReason = 1 +) + +// DomainPMSuspendedReason as declared in libvirt/libvirt-domain.h:168 +type DomainPMSuspendedReason int32 + +// DomainPMSuspendedReason enumeration from libvirt/libvirt-domain.h:168 +const ( + DomainPmsuspendedUnknown DomainPMSuspendedReason = iota +) + +// DomainPMSuspendedDiskReason as declared in libvirt/libvirt-domain.h:176 +type DomainPMSuspendedDiskReason int32 + +// DomainPMSuspendedDiskReason enumeration from libvirt/libvirt-domain.h:176 +const ( + DomainPmsuspendedDiskUnknown DomainPMSuspendedDiskReason = iota +) + +// DomainControlState as declared in libvirt/libvirt-domain.h:196 +type DomainControlState int32 + +// DomainControlState enumeration from libvirt/libvirt-domain.h:196 +const ( + DomainControlOk DomainControlState = iota + DomainControlJob DomainControlState = 1 + DomainControlOccupied DomainControlState = 2 + DomainControlError DomainControlState = 3 +) + +// DomainControlErrorReason as declared in libvirt/libvirt-domain.h:216 +type DomainControlErrorReason int32 + +// DomainControlErrorReason enumeration from libvirt/libvirt-domain.h:216 +const ( + DomainControlErrorReasonNone DomainControlErrorReason = iota + DomainControlErrorReasonUnknown DomainControlErrorReason = 1 + DomainControlErrorReasonMonitor DomainControlErrorReason = 2 + DomainControlErrorReasonInternal DomainControlErrorReason = 3 +) + +// DomainModificationImpact as declared in libvirt/libvirt-domain.h:264 +type DomainModificationImpact int32 + +// DomainModificationImpact enumeration from libvirt/libvirt-domain.h:264 +const ( + DomainAffectCurrent DomainModificationImpact = iota + DomainAffectLive DomainModificationImpact = 1 + DomainAffectConfig DomainModificationImpact = 2 +) + +// DomainCreateFlags as declared in libvirt/libvirt-domain.h:304 +type DomainCreateFlags int32 + +// DomainCreateFlags enumeration from libvirt/libvirt-domain.h:304 +const ( + DomainNone DomainCreateFlags = iota + DomainStartPaused DomainCreateFlags = 1 + DomainStartAutodestroy DomainCreateFlags = 2 + DomainStartBypassCache DomainCreateFlags = 4 + DomainStartForceBoot DomainCreateFlags = 8 + DomainStartValidate DomainCreateFlags = 16 +) + +// DomainMemoryStatTags as declared in libvirt/libvirt-domain.h:640 +type DomainMemoryStatTags int32 + +// DomainMemoryStatTags enumeration from libvirt/libvirt-domain.h:640 +const ( + DomainMemoryStatSwapIn DomainMemoryStatTags = iota + DomainMemoryStatSwapOut DomainMemoryStatTags = 1 + DomainMemoryStatMajorFault DomainMemoryStatTags = 2 + DomainMemoryStatMinorFault DomainMemoryStatTags = 3 + DomainMemoryStatUnused DomainMemoryStatTags = 4 + DomainMemoryStatAvailable DomainMemoryStatTags = 5 + DomainMemoryStatActualBalloon DomainMemoryStatTags = 6 + DomainMemoryStatRss DomainMemoryStatTags = 7 + DomainMemoryStatUsable DomainMemoryStatTags = 8 + DomainMemoryStatLastUpdate DomainMemoryStatTags = 9 + DomainMemoryStatNr DomainMemoryStatTags = 10 +) + +// DomainCoreDumpFlags as declared in libvirt/libvirt-domain.h:659 +type DomainCoreDumpFlags int32 + +// DomainCoreDumpFlags enumeration from libvirt/libvirt-domain.h:659 +const ( + DumpCrash DomainCoreDumpFlags = 1 + DumpLive DomainCoreDumpFlags = 2 + 
DumpBypassCache DomainCoreDumpFlags = 4 + DumpReset DomainCoreDumpFlags = 8 + DumpMemoryOnly DomainCoreDumpFlags = 16 +) + +// DomainCoreDumpFormat as declared in libvirt/libvirt-domain.h:682 +type DomainCoreDumpFormat int32 + +// DomainCoreDumpFormat enumeration from libvirt/libvirt-domain.h:682 +const ( + DomainCoreDumpFormatRaw DomainCoreDumpFormat = iota + DomainCoreDumpFormatKdumpZlib DomainCoreDumpFormat = 1 + DomainCoreDumpFormatKdumpLzo DomainCoreDumpFormat = 2 + DomainCoreDumpFormatKdumpSnappy DomainCoreDumpFormat = 3 +) + +// DomainMigrateFlags as declared in libvirt/libvirt-domain.h:826 +type DomainMigrateFlags int32 + +// DomainMigrateFlags enumeration from libvirt/libvirt-domain.h:826 +const ( + MigrateLive DomainMigrateFlags = 1 + MigratePeer2peer DomainMigrateFlags = 2 + MigrateTunnelled DomainMigrateFlags = 4 + MigratePersistDest DomainMigrateFlags = 8 + MigrateUndefineSource DomainMigrateFlags = 16 + MigratePaused DomainMigrateFlags = 32 + MigrateNonSharedDisk DomainMigrateFlags = 64 + MigrateNonSharedInc DomainMigrateFlags = 128 + MigrateChangeProtection DomainMigrateFlags = 256 + MigrateUnsafe DomainMigrateFlags = 512 + MigrateOffline DomainMigrateFlags = 1024 + MigrateCompressed DomainMigrateFlags = 2048 + MigrateAbortOnError DomainMigrateFlags = 4096 + MigrateAutoConverge DomainMigrateFlags = 8192 + MigrateRdmaPinAll DomainMigrateFlags = 16384 + MigratePostcopy DomainMigrateFlags = 32768 + MigrateTLS DomainMigrateFlags = 65536 +) + +// DomainShutdownFlagValues as declared in libvirt/libvirt-domain.h:1117 +type DomainShutdownFlagValues int32 + +// DomainShutdownFlagValues enumeration from libvirt/libvirt-domain.h:1117 +const ( + DomainShutdownDefault DomainShutdownFlagValues = iota + DomainShutdownAcpiPowerBtn DomainShutdownFlagValues = 1 + DomainShutdownGuestAgent DomainShutdownFlagValues = 2 + DomainShutdownInitctl DomainShutdownFlagValues = 4 + DomainShutdownSignal DomainShutdownFlagValues = 8 + DomainShutdownParavirt DomainShutdownFlagValues = 16 +) + +// DomainRebootFlagValues as declared in libvirt/libvirt-domain.h:1130 +type DomainRebootFlagValues int32 + +// DomainRebootFlagValues enumeration from libvirt/libvirt-domain.h:1130 +const ( + DomainRebootDefault DomainRebootFlagValues = iota + DomainRebootAcpiPowerBtn DomainRebootFlagValues = 1 + DomainRebootGuestAgent DomainRebootFlagValues = 2 + DomainRebootInitctl DomainRebootFlagValues = 4 + DomainRebootSignal DomainRebootFlagValues = 8 + DomainRebootParavirt DomainRebootFlagValues = 16 +) + +// DomainDestroyFlagsValues as declared in libvirt/libvirt-domain.h:1148 +type DomainDestroyFlagsValues int32 + +// DomainDestroyFlagsValues enumeration from libvirt/libvirt-domain.h:1148 +const ( + DomainDestroyDefault DomainDestroyFlagsValues = iota + DomainDestroyGraceful DomainDestroyFlagsValues = 1 +) + +// DomainSaveRestoreFlags as declared in libvirt/libvirt-domain.h:1180 +type DomainSaveRestoreFlags int32 + +// DomainSaveRestoreFlags enumeration from libvirt/libvirt-domain.h:1180 +const ( + DomainSaveBypassCache DomainSaveRestoreFlags = 1 + DomainSaveRunning DomainSaveRestoreFlags = 2 + DomainSavePaused DomainSaveRestoreFlags = 4 +) + +// DomainMemoryModFlags as declared in libvirt/libvirt-domain.h:1429 +type DomainMemoryModFlags int32 + +// DomainMemoryModFlags enumeration from libvirt/libvirt-domain.h:1429 +const ( + DomainMemCurrent DomainMemoryModFlags = iota + DomainMemLive DomainMemoryModFlags = 1 + DomainMemConfig DomainMemoryModFlags = 2 + DomainMemMaximum DomainMemoryModFlags = 4 +) + +// 
DomainNumatuneMemMode as declared in libvirt/libvirt-domain.h:1447 +type DomainNumatuneMemMode int32 + +// DomainNumatuneMemMode enumeration from libvirt/libvirt-domain.h:1447 +const ( + DomainNumatuneMemStrict DomainNumatuneMemMode = iota + DomainNumatuneMemPreferred DomainNumatuneMemMode = 1 + DomainNumatuneMemInterleave DomainNumatuneMemMode = 2 +) + +// DomainMetadataType as declared in libvirt/libvirt-domain.h:1509 +type DomainMetadataType int32 + +// DomainMetadataType enumeration from libvirt/libvirt-domain.h:1509 +const ( + DomainMetadataDescription DomainMetadataType = iota + DomainMetadataTitle DomainMetadataType = 1 + DomainMetadataElement DomainMetadataType = 2 +) + +// DomainXMLFlags as declared in libvirt/libvirt-domain.h:1539 +type DomainXMLFlags int32 + +// DomainXMLFlags enumeration from libvirt/libvirt-domain.h:1539 +const ( + DomainXMLSecure DomainXMLFlags = 1 + DomainXMLInactive DomainXMLFlags = 2 + DomainXMLUpdateCPU DomainXMLFlags = 4 + DomainXMLMigratable DomainXMLFlags = 8 +) + +// DomainBlockResizeFlags as declared in libvirt/libvirt-domain.h:1644 +type DomainBlockResizeFlags int32 + +// DomainBlockResizeFlags enumeration from libvirt/libvirt-domain.h:1644 +const ( + DomainBlockResizeBytes DomainBlockResizeFlags = 1 +) + +// DomainMemoryFlags as declared in libvirt/libvirt-domain.h:1707 +type DomainMemoryFlags int32 + +// DomainMemoryFlags enumeration from libvirt/libvirt-domain.h:1707 +const ( + MemoryVirtual DomainMemoryFlags = 1 + MemoryPhysical DomainMemoryFlags = 2 +) + +// DomainDefineFlags as declared in libvirt/libvirt-domain.h:1717 +type DomainDefineFlags int32 + +// DomainDefineFlags enumeration from libvirt/libvirt-domain.h:1717 +const ( + DomainDefineValidate DomainDefineFlags = 1 +) + +// DomainUndefineFlagsValues as declared in libvirt/libvirt-domain.h:1741 +type DomainUndefineFlagsValues int32 + +// DomainUndefineFlagsValues enumeration from libvirt/libvirt-domain.h:1741 +const ( + DomainUndefineManagedSave DomainUndefineFlagsValues = 1 + DomainUndefineSnapshotsMetadata DomainUndefineFlagsValues = 2 + DomainUndefineNvram DomainUndefineFlagsValues = 4 + DomainUndefineKeepNvram DomainUndefineFlagsValues = 8 +) + +// ConnectListAllDomainsFlags as declared in libvirt/libvirt-domain.h:1777 +type ConnectListAllDomainsFlags int32 + +// ConnectListAllDomainsFlags enumeration from libvirt/libvirt-domain.h:1777 +const ( + ConnectListDomainsActive ConnectListAllDomainsFlags = 1 + ConnectListDomainsInactive ConnectListAllDomainsFlags = 2 + ConnectListDomainsPersistent ConnectListAllDomainsFlags = 4 + ConnectListDomainsTransient ConnectListAllDomainsFlags = 8 + ConnectListDomainsRunning ConnectListAllDomainsFlags = 16 + ConnectListDomainsPaused ConnectListAllDomainsFlags = 32 + ConnectListDomainsShutoff ConnectListAllDomainsFlags = 64 + ConnectListDomainsOther ConnectListAllDomainsFlags = 128 + ConnectListDomainsManagedsave ConnectListAllDomainsFlags = 256 + ConnectListDomainsNoManagedsave ConnectListAllDomainsFlags = 512 + ConnectListDomainsAutostart ConnectListAllDomainsFlags = 1024 + ConnectListDomainsNoAutostart ConnectListAllDomainsFlags = 2048 + ConnectListDomainsHasSnapshot ConnectListAllDomainsFlags = 4096 + ConnectListDomainsNoSnapshot ConnectListAllDomainsFlags = 8192 +) + +// VCPUState as declared in libvirt/libvirt-domain.h:1808 +type VCPUState int32 + +// VCPUState enumeration from libvirt/libvirt-domain.h:1808 +const ( + VCPUOffline VCPUState = iota + VCPURunning VCPUState = 1 + VCPUBlocked VCPUState = 2 +) + +// DomainVCPUFlags as declared in 
libvirt/libvirt-domain.h:1830 +type DomainVCPUFlags int32 + +// DomainVCPUFlags enumeration from libvirt/libvirt-domain.h:1830 +const ( + DomainVCPUCurrent DomainVCPUFlags = iota + DomainVCPULive DomainVCPUFlags = 1 + DomainVCPUConfig DomainVCPUFlags = 2 + DomainVCPUMaximum DomainVCPUFlags = 4 + DomainVCPUGuest DomainVCPUFlags = 8 + DomainVCPUHotpluggable DomainVCPUFlags = 16 +) + +// DomainDeviceModifyFlags as declared in libvirt/libvirt-domain.h:2003 +type DomainDeviceModifyFlags int32 + +// DomainDeviceModifyFlags enumeration from libvirt/libvirt-domain.h:2003 +const ( + DomainDeviceModifyCurrent DomainDeviceModifyFlags = iota + DomainDeviceModifyLive DomainDeviceModifyFlags = 1 + DomainDeviceModifyConfig DomainDeviceModifyFlags = 2 + DomainDeviceModifyForce DomainDeviceModifyFlags = 4 +) + +// DomainStatsTypes as declared in libvirt/libvirt-domain.h:2031 +type DomainStatsTypes int32 + +// DomainStatsTypes enumeration from libvirt/libvirt-domain.h:2031 +const ( + DomainStatsState DomainStatsTypes = 1 + DomainStatsCPUTotal DomainStatsTypes = 2 + DomainStatsBalloon DomainStatsTypes = 4 + DomainStatsVCPU DomainStatsTypes = 8 + DomainStatsInterface DomainStatsTypes = 16 + DomainStatsBlock DomainStatsTypes = 32 + DomainStatsPerf DomainStatsTypes = 64 +) + +// ConnectGetAllDomainStatsFlags as declared in libvirt/libvirt-domain.h:2047 +type ConnectGetAllDomainStatsFlags int32 + +// ConnectGetAllDomainStatsFlags enumeration from libvirt/libvirt-domain.h:2047 +const ( + ConnectGetAllDomainsStatsActive ConnectGetAllDomainStatsFlags = 1 + ConnectGetAllDomainsStatsInactive ConnectGetAllDomainStatsFlags = 2 + ConnectGetAllDomainsStatsPersistent ConnectGetAllDomainStatsFlags = 4 + ConnectGetAllDomainsStatsTransient ConnectGetAllDomainStatsFlags = 8 + ConnectGetAllDomainsStatsRunning ConnectGetAllDomainStatsFlags = 16 + ConnectGetAllDomainsStatsPaused ConnectGetAllDomainStatsFlags = 32 + ConnectGetAllDomainsStatsShutoff ConnectGetAllDomainStatsFlags = 64 + ConnectGetAllDomainsStatsOther ConnectGetAllDomainStatsFlags = 128 + ConnectGetAllDomainsStatsBacking ConnectGetAllDomainStatsFlags = 1073741824 + ConnectGetAllDomainsStatsEnforceStats ConnectGetAllDomainStatsFlags = -2147483648 +) + +// DomainBlockJobType as declared in libvirt/libvirt-domain.h:2331 +type DomainBlockJobType int32 + +// DomainBlockJobType enumeration from libvirt/libvirt-domain.h:2331 +const ( + DomainBlockJobTypeUnknown DomainBlockJobType = iota + DomainBlockJobTypePull DomainBlockJobType = 1 + DomainBlockJobTypeCopy DomainBlockJobType = 2 + DomainBlockJobTypeCommit DomainBlockJobType = 3 + DomainBlockJobTypeActiveCommit DomainBlockJobType = 4 +) + +// DomainBlockJobAbortFlags as declared in libvirt/libvirt-domain.h:2343 +type DomainBlockJobAbortFlags int32 + +// DomainBlockJobAbortFlags enumeration from libvirt/libvirt-domain.h:2343 +const ( + DomainBlockJobAbortAsync DomainBlockJobAbortFlags = 1 + DomainBlockJobAbortPivot DomainBlockJobAbortFlags = 2 +) + +// DomainBlockJobInfoFlags as declared in libvirt/libvirt-domain.h:2352 +type DomainBlockJobInfoFlags int32 + +// DomainBlockJobInfoFlags enumeration from libvirt/libvirt-domain.h:2352 +const ( + DomainBlockJobInfoBandwidthBytes DomainBlockJobInfoFlags = 1 +) + +// DomainBlockJobSetSpeedFlags as declared in libvirt/libvirt-domain.h:2381 +type DomainBlockJobSetSpeedFlags int32 + +// DomainBlockJobSetSpeedFlags enumeration from libvirt/libvirt-domain.h:2381 +const ( + DomainBlockJobSpeedBandwidthBytes DomainBlockJobSetSpeedFlags = 1 +) + +// DomainBlockPullFlags as declared in 
libvirt/libvirt-domain.h:2391 +type DomainBlockPullFlags int32 + +// DomainBlockPullFlags enumeration from libvirt/libvirt-domain.h:2391 +const ( + DomainBlockPullBandwidthBytes DomainBlockPullFlags = 64 +) + +// DomainBlockRebaseFlags as declared in libvirt/libvirt-domain.h:2415 +type DomainBlockRebaseFlags int32 + +// DomainBlockRebaseFlags enumeration from libvirt/libvirt-domain.h:2415 +const ( + DomainBlockRebaseShallow DomainBlockRebaseFlags = 1 + DomainBlockRebaseReuseExt DomainBlockRebaseFlags = 2 + DomainBlockRebaseCopyRaw DomainBlockRebaseFlags = 4 + DomainBlockRebaseCopy DomainBlockRebaseFlags = 8 + DomainBlockRebaseRelative DomainBlockRebaseFlags = 16 + DomainBlockRebaseCopyDev DomainBlockRebaseFlags = 32 + DomainBlockRebaseBandwidthBytes DomainBlockRebaseFlags = 64 +) + +// DomainBlockCopyFlags as declared in libvirt/libvirt-domain.h:2434 +type DomainBlockCopyFlags int32 + +// DomainBlockCopyFlags enumeration from libvirt/libvirt-domain.h:2434 +const ( + DomainBlockCopyShallow DomainBlockCopyFlags = 1 + DomainBlockCopyReuseExt DomainBlockCopyFlags = 2 + DomainBlockCopyTransientJob DomainBlockCopyFlags = 4 +) + +// DomainBlockCommitFlags as declared in libvirt/libvirt-domain.h:2499 +type DomainBlockCommitFlags int32 + +// DomainBlockCommitFlags enumeration from libvirt/libvirt-domain.h:2499 +const ( + DomainBlockCommitShallow DomainBlockCommitFlags = 1 + DomainBlockCommitDelete DomainBlockCommitFlags = 2 + DomainBlockCommitActive DomainBlockCommitFlags = 4 + DomainBlockCommitRelative DomainBlockCommitFlags = 8 + DomainBlockCommitBandwidthBytes DomainBlockCommitFlags = 16 +) + +// DomainDiskErrorCode as declared in libvirt/libvirt-domain.h:2690 +type DomainDiskErrorCode int32 + +// DomainDiskErrorCode enumeration from libvirt/libvirt-domain.h:2690 +const ( + DomainDiskErrorNone DomainDiskErrorCode = iota + DomainDiskErrorUnspec DomainDiskErrorCode = 1 + DomainDiskErrorNoSpace DomainDiskErrorCode = 2 +) + +// KeycodeSet as declared in libvirt/libvirt-domain.h:2736 +type KeycodeSet int32 + +// KeycodeSet enumeration from libvirt/libvirt-domain.h:2736 +const ( + KeycodeSetLinux KeycodeSet = iota + KeycodeSetXt KeycodeSet = 1 + KeycodeSetAtset1 KeycodeSet = 2 + KeycodeSetAtset2 KeycodeSet = 3 + KeycodeSetAtset3 KeycodeSet = 4 + KeycodeSetOsx KeycodeSet = 5 + KeycodeSetXtKbd KeycodeSet = 6 + KeycodeSetUsb KeycodeSet = 7 + KeycodeSetWin32 KeycodeSet = 8 + KeycodeSetRfb KeycodeSet = 9 +) + +// DomainProcessSignal as declared in libvirt/libvirt-domain.h:2838 +type DomainProcessSignal int32 + +// DomainProcessSignal enumeration from libvirt/libvirt-domain.h:2838 +const ( + DomainProcessSignalNop DomainProcessSignal = iota + DomainProcessSignalHup DomainProcessSignal = 1 + DomainProcessSignalInt DomainProcessSignal = 2 + DomainProcessSignalQuit DomainProcessSignal = 3 + DomainProcessSignalIll DomainProcessSignal = 4 + DomainProcessSignalTrap DomainProcessSignal = 5 + DomainProcessSignalAbrt DomainProcessSignal = 6 + DomainProcessSignalBus DomainProcessSignal = 7 + DomainProcessSignalFpe DomainProcessSignal = 8 + DomainProcessSignalKill DomainProcessSignal = 9 + DomainProcessSignalUsr1 DomainProcessSignal = 10 + DomainProcessSignalSegv DomainProcessSignal = 11 + DomainProcessSignalUsr2 DomainProcessSignal = 12 + DomainProcessSignalPipe DomainProcessSignal = 13 + DomainProcessSignalAlrm DomainProcessSignal = 14 + DomainProcessSignalTerm DomainProcessSignal = 15 + DomainProcessSignalStkflt DomainProcessSignal = 16 + DomainProcessSignalChld DomainProcessSignal = 17 + DomainProcessSignalCont 
DomainProcessSignal = 18 + DomainProcessSignalStop DomainProcessSignal = 19 + DomainProcessSignalTstp DomainProcessSignal = 20 + DomainProcessSignalTtin DomainProcessSignal = 21 + DomainProcessSignalTtou DomainProcessSignal = 22 + DomainProcessSignalUrg DomainProcessSignal = 23 + DomainProcessSignalXcpu DomainProcessSignal = 24 + DomainProcessSignalXfsz DomainProcessSignal = 25 + DomainProcessSignalVtalrm DomainProcessSignal = 26 + DomainProcessSignalProf DomainProcessSignal = 27 + DomainProcessSignalWinch DomainProcessSignal = 28 + DomainProcessSignalPoll DomainProcessSignal = 29 + DomainProcessSignalPwr DomainProcessSignal = 30 + DomainProcessSignalSys DomainProcessSignal = 31 + DomainProcessSignalRt0 DomainProcessSignal = 32 + DomainProcessSignalRt1 DomainProcessSignal = 33 + DomainProcessSignalRt2 DomainProcessSignal = 34 + DomainProcessSignalRt3 DomainProcessSignal = 35 + DomainProcessSignalRt4 DomainProcessSignal = 36 + DomainProcessSignalRt5 DomainProcessSignal = 37 + DomainProcessSignalRt6 DomainProcessSignal = 38 + DomainProcessSignalRt7 DomainProcessSignal = 39 + DomainProcessSignalRt8 DomainProcessSignal = 40 + DomainProcessSignalRt9 DomainProcessSignal = 41 + DomainProcessSignalRt10 DomainProcessSignal = 42 + DomainProcessSignalRt11 DomainProcessSignal = 43 + DomainProcessSignalRt12 DomainProcessSignal = 44 + DomainProcessSignalRt13 DomainProcessSignal = 45 + DomainProcessSignalRt14 DomainProcessSignal = 46 + DomainProcessSignalRt15 DomainProcessSignal = 47 + DomainProcessSignalRt16 DomainProcessSignal = 48 + DomainProcessSignalRt17 DomainProcessSignal = 49 + DomainProcessSignalRt18 DomainProcessSignal = 50 + DomainProcessSignalRt19 DomainProcessSignal = 51 + DomainProcessSignalRt20 DomainProcessSignal = 52 + DomainProcessSignalRt21 DomainProcessSignal = 53 + DomainProcessSignalRt22 DomainProcessSignal = 54 + DomainProcessSignalRt23 DomainProcessSignal = 55 + DomainProcessSignalRt24 DomainProcessSignal = 56 + DomainProcessSignalRt25 DomainProcessSignal = 57 + DomainProcessSignalRt26 DomainProcessSignal = 58 + DomainProcessSignalRt27 DomainProcessSignal = 59 + DomainProcessSignalRt28 DomainProcessSignal = 60 + DomainProcessSignalRt29 DomainProcessSignal = 61 + DomainProcessSignalRt30 DomainProcessSignal = 62 + DomainProcessSignalRt31 DomainProcessSignal = 63 + DomainProcessSignalRt32 DomainProcessSignal = 64 +) + +// DomainEventType as declared in libvirt/libvirt-domain.h:2876 +type DomainEventType int32 + +// DomainEventType enumeration from libvirt/libvirt-domain.h:2876 +const ( + DomainEventDefined DomainEventType = iota + DomainEventUndefined DomainEventType = 1 + DomainEventStarted DomainEventType = 2 + DomainEventSuspended DomainEventType = 3 + DomainEventResumed DomainEventType = 4 + DomainEventStopped DomainEventType = 5 + DomainEventShutdown DomainEventType = 6 + DomainEventPmsuspended DomainEventType = 7 + DomainEventCrashed DomainEventType = 8 +) + +// DomainEventDefinedDetailType as declared in libvirt/libvirt-domain.h:2892 +type DomainEventDefinedDetailType int32 + +// DomainEventDefinedDetailType enumeration from libvirt/libvirt-domain.h:2892 +const ( + DomainEventDefinedAdded DomainEventDefinedDetailType = iota + DomainEventDefinedUpdated DomainEventDefinedDetailType = 1 + DomainEventDefinedRenamed DomainEventDefinedDetailType = 2 + DomainEventDefinedFromSnapshot DomainEventDefinedDetailType = 3 +) + +// DomainEventUndefinedDetailType as declared in libvirt/libvirt-domain.h:2906 +type DomainEventUndefinedDetailType int32 + +// DomainEventUndefinedDetailType 
enumeration from libvirt/libvirt-domain.h:2906 +const ( + DomainEventUndefinedRemoved DomainEventUndefinedDetailType = iota + DomainEventUndefinedRenamed DomainEventUndefinedDetailType = 1 +) + +// DomainEventStartedDetailType as declared in libvirt/libvirt-domain.h:2923 +type DomainEventStartedDetailType int32 + +// DomainEventStartedDetailType enumeration from libvirt/libvirt-domain.h:2923 +const ( + DomainEventStartedBooted DomainEventStartedDetailType = iota + DomainEventStartedMigrated DomainEventStartedDetailType = 1 + DomainEventStartedRestored DomainEventStartedDetailType = 2 + DomainEventStartedFromSnapshot DomainEventStartedDetailType = 3 + DomainEventStartedWakeup DomainEventStartedDetailType = 4 +) + +// DomainEventSuspendedDetailType as declared in libvirt/libvirt-domain.h:2944 +type DomainEventSuspendedDetailType int32 + +// DomainEventSuspendedDetailType enumeration from libvirt/libvirt-domain.h:2944 +const ( + DomainEventSuspendedPaused DomainEventSuspendedDetailType = iota + DomainEventSuspendedMigrated DomainEventSuspendedDetailType = 1 + DomainEventSuspendedIoerror DomainEventSuspendedDetailType = 2 + DomainEventSuspendedWatchdog DomainEventSuspendedDetailType = 3 + DomainEventSuspendedRestored DomainEventSuspendedDetailType = 4 + DomainEventSuspendedFromSnapshot DomainEventSuspendedDetailType = 5 + DomainEventSuspendedAPIError DomainEventSuspendedDetailType = 6 + DomainEventSuspendedPostcopy DomainEventSuspendedDetailType = 7 + DomainEventSuspendedPostcopyFailed DomainEventSuspendedDetailType = 8 +) + +// DomainEventResumedDetailType as declared in libvirt/libvirt-domain.h:2961 +type DomainEventResumedDetailType int32 + +// DomainEventResumedDetailType enumeration from libvirt/libvirt-domain.h:2961 +const ( + DomainEventResumedUnpaused DomainEventResumedDetailType = iota + DomainEventResumedMigrated DomainEventResumedDetailType = 1 + DomainEventResumedFromSnapshot DomainEventResumedDetailType = 2 + DomainEventResumedPostcopy DomainEventResumedDetailType = 3 +) + +// DomainEventStoppedDetailType as declared in libvirt/libvirt-domain.h:2980 +type DomainEventStoppedDetailType int32 + +// DomainEventStoppedDetailType enumeration from libvirt/libvirt-domain.h:2980 +const ( + DomainEventStoppedShutdown DomainEventStoppedDetailType = iota + DomainEventStoppedDestroyed DomainEventStoppedDetailType = 1 + DomainEventStoppedCrashed DomainEventStoppedDetailType = 2 + DomainEventStoppedMigrated DomainEventStoppedDetailType = 3 + DomainEventStoppedSaved DomainEventStoppedDetailType = 4 + DomainEventStoppedFailed DomainEventStoppedDetailType = 5 + DomainEventStoppedFromSnapshot DomainEventStoppedDetailType = 6 +) + +// DomainEventShutdownDetailType as declared in libvirt/libvirt-domain.h:3003 +type DomainEventShutdownDetailType int32 + +// DomainEventShutdownDetailType enumeration from libvirt/libvirt-domain.h:3003 +const ( + DomainEventShutdownFinished DomainEventShutdownDetailType = iota + DomainEventShutdownGuest DomainEventShutdownDetailType = 1 + DomainEventShutdownHost DomainEventShutdownDetailType = 2 +) + +// DomainEventPMSuspendedDetailType as declared in libvirt/libvirt-domain.h:3017 +type DomainEventPMSuspendedDetailType int32 + +// DomainEventPMSuspendedDetailType enumeration from libvirt/libvirt-domain.h:3017 +const ( + DomainEventPmsuspendedMemory DomainEventPMSuspendedDetailType = iota + DomainEventPmsuspendedDisk DomainEventPMSuspendedDetailType = 1 +) + +// DomainEventCrashedDetailType as declared in libvirt/libvirt-domain.h:3030 +type DomainEventCrashedDetailType 
int32 + +// DomainEventCrashedDetailType enumeration from libvirt/libvirt-domain.h:3030 +const ( + DomainEventCrashedPanicked DomainEventCrashedDetailType = iota +) + +// DomainJobType as declared in libvirt/libvirt-domain.h:3074 +type DomainJobType int32 + +// DomainJobType enumeration from libvirt/libvirt-domain.h:3074 +const ( + DomainJobNone DomainJobType = iota + DomainJobBounded DomainJobType = 1 + DomainJobUnbounded DomainJobType = 2 + DomainJobCompleted DomainJobType = 3 + DomainJobFailed DomainJobType = 4 + DomainJobCancelled DomainJobType = 5 +) + +// DomainGetJobStatsFlags as declared in libvirt/libvirt-domain.h:3121 +type DomainGetJobStatsFlags int32 + +// DomainGetJobStatsFlags enumeration from libvirt/libvirt-domain.h:3121 +const ( + DomainJobStatsCompleted DomainGetJobStatsFlags = 1 +) + +// DomainJobOperation as declared in libvirt/libvirt-domain.h:3146 +type DomainJobOperation int32 + +// DomainJobOperation enumeration from libvirt/libvirt-domain.h:3146 +const ( + DomainJobOperationStrUnknown DomainJobOperation = iota + DomainJobOperationStrStart DomainJobOperation = 1 + DomainJobOperationStrSave DomainJobOperation = 2 + DomainJobOperationStrRestore DomainJobOperation = 3 + DomainJobOperationStrMigrationIn DomainJobOperation = 4 + DomainJobOperationStrMigrationOut DomainJobOperation = 5 + DomainJobOperationStrSnapshot DomainJobOperation = 6 + DomainJobOperationStrSnapshotRevert DomainJobOperation = 7 + DomainJobOperationStrDump DomainJobOperation = 8 +) + +// DomainEventWatchdogAction as declared in libvirt/libvirt-domain.h:3479 +type DomainEventWatchdogAction int32 + +// DomainEventWatchdogAction enumeration from libvirt/libvirt-domain.h:3479 +const ( + DomainEventWatchdogNone DomainEventWatchdogAction = iota + DomainEventWatchdogPause DomainEventWatchdogAction = 1 + DomainEventWatchdogReset DomainEventWatchdogAction = 2 + DomainEventWatchdogPoweroff DomainEventWatchdogAction = 3 + DomainEventWatchdogShutdown DomainEventWatchdogAction = 4 + DomainEventWatchdogDebug DomainEventWatchdogAction = 5 + DomainEventWatchdogInjectnmi DomainEventWatchdogAction = 6 +) + +// DomainEventIOErrorAction as declared in libvirt/libvirt-domain.h:3510 +type DomainEventIOErrorAction int32 + +// DomainEventIOErrorAction enumeration from libvirt/libvirt-domain.h:3510 +const ( + DomainEventIoErrorNone DomainEventIOErrorAction = iota + DomainEventIoErrorPause DomainEventIOErrorAction = 1 + DomainEventIoErrorReport DomainEventIOErrorAction = 2 +) + +// DomainEventGraphicsPhase as declared in libvirt/libvirt-domain.h:3573 +type DomainEventGraphicsPhase int32 + +// DomainEventGraphicsPhase enumeration from libvirt/libvirt-domain.h:3573 +const ( + DomainEventGraphicsConnect DomainEventGraphicsPhase = iota + DomainEventGraphicsInitialize DomainEventGraphicsPhase = 1 + DomainEventGraphicsDisconnect DomainEventGraphicsPhase = 2 +) + +// DomainEventGraphicsAddressType as declared in libvirt/libvirt-domain.h:3588 +type DomainEventGraphicsAddressType int32 + +// DomainEventGraphicsAddressType enumeration from libvirt/libvirt-domain.h:3588 +const ( + DomainEventGraphicsAddressIpv4 DomainEventGraphicsAddressType = iota + DomainEventGraphicsAddressIpv6 DomainEventGraphicsAddressType = 1 + DomainEventGraphicsAddressUnix DomainEventGraphicsAddressType = 2 +) + +// ConnectDomainEventBlockJobStatus as declared in libvirt/libvirt-domain.h:3676 +type ConnectDomainEventBlockJobStatus int32 + +// ConnectDomainEventBlockJobStatus enumeration from libvirt/libvirt-domain.h:3676 +const ( + DomainBlockJobCompleted 
ConnectDomainEventBlockJobStatus = iota + DomainBlockJobFailed ConnectDomainEventBlockJobStatus = 1 + DomainBlockJobCanceled ConnectDomainEventBlockJobStatus = 2 + DomainBlockJobReady ConnectDomainEventBlockJobStatus = 3 +) + +// ConnectDomainEventDiskChangeReason as declared in libvirt/libvirt-domain.h:3725 +type ConnectDomainEventDiskChangeReason int32 + +// ConnectDomainEventDiskChangeReason enumeration from libvirt/libvirt-domain.h:3725 +const ( + DomainEventDiskChangeMissingOnStart ConnectDomainEventDiskChangeReason = iota + DomainEventDiskDropMissingOnStart ConnectDomainEventDiskChangeReason = 1 +) + +// DomainEventTrayChangeReason as declared in libvirt/libvirt-domain.h:3766 +type DomainEventTrayChangeReason int32 + +// DomainEventTrayChangeReason enumeration from libvirt/libvirt-domain.h:3766 +const ( + DomainEventTrayChangeOpen DomainEventTrayChangeReason = iota + DomainEventTrayChangeClose DomainEventTrayChangeReason = 1 +) + +// ConnectDomainEventAgentLifecycleState as declared in libvirt/libvirt-domain.h:4281 +type ConnectDomainEventAgentLifecycleState int32 + +// ConnectDomainEventAgentLifecycleState enumeration from libvirt/libvirt-domain.h:4281 +const ( + ConnectDomainEventAgentLifecycleStateConnected ConnectDomainEventAgentLifecycleState = 1 + ConnectDomainEventAgentLifecycleStateDisconnected ConnectDomainEventAgentLifecycleState = 2 +) + +// ConnectDomainEventAgentLifecycleReason as declared in libvirt/libvirt-domain.h:4291 +type ConnectDomainEventAgentLifecycleReason int32 + +// ConnectDomainEventAgentLifecycleReason enumeration from libvirt/libvirt-domain.h:4291 +const ( + ConnectDomainEventAgentLifecycleReasonUnknown ConnectDomainEventAgentLifecycleReason = iota + ConnectDomainEventAgentLifecycleReasonDomainStarted ConnectDomainEventAgentLifecycleReason = 1 + ConnectDomainEventAgentLifecycleReasonChannel ConnectDomainEventAgentLifecycleReason = 2 +) + +// DomainEventID as declared in libvirt/libvirt-domain.h:4395 +type DomainEventID int32 + +// DomainEventID enumeration from libvirt/libvirt-domain.h:4395 +const ( + DomainEventIDLifecycle DomainEventID = iota + DomainEventIDReboot DomainEventID = 1 + DomainEventIDRtcChange DomainEventID = 2 + DomainEventIDWatchdog DomainEventID = 3 + DomainEventIDIoError DomainEventID = 4 + DomainEventIDGraphics DomainEventID = 5 + DomainEventIDIoErrorReason DomainEventID = 6 + DomainEventIDControlError DomainEventID = 7 + DomainEventIDBlockJob DomainEventID = 8 + DomainEventIDDiskChange DomainEventID = 9 + DomainEventIDTrayChange DomainEventID = 10 + DomainEventIDPmwakeup DomainEventID = 11 + DomainEventIDPmsuspend DomainEventID = 12 + DomainEventIDBalloonChange DomainEventID = 13 + DomainEventIDPmsuspendDisk DomainEventID = 14 + DomainEventIDDeviceRemoved DomainEventID = 15 + DomainEventIDBlockJob2 DomainEventID = 16 + DomainEventIDTunable DomainEventID = 17 + DomainEventIDAgentLifecycle DomainEventID = 18 + DomainEventIDDeviceAdded DomainEventID = 19 + DomainEventIDMigrationIteration DomainEventID = 20 + DomainEventIDJobCompleted DomainEventID = 21 + DomainEventIDDeviceRemovalFailed DomainEventID = 22 + DomainEventIDMetadataChange DomainEventID = 23 + DomainEventIDBlockThreshold DomainEventID = 24 +) + +// DomainConsoleFlags as declared in libvirt/libvirt-domain.h:4422 +type DomainConsoleFlags int32 + +// DomainConsoleFlags enumeration from libvirt/libvirt-domain.h:4422 +const ( + DomainConsoleForce DomainConsoleFlags = 1 + DomainConsoleSafe DomainConsoleFlags = 2 +) + +// DomainChannelFlags as declared in 
libvirt/libvirt-domain.h:4438 +type DomainChannelFlags int32 + +// DomainChannelFlags enumeration from libvirt/libvirt-domain.h:4438 +const ( + DomainChannelForce DomainChannelFlags = 1 +) + +// DomainOpenGraphicsFlags as declared in libvirt/libvirt-domain.h:4447 +type DomainOpenGraphicsFlags int32 + +// DomainOpenGraphicsFlags enumeration from libvirt/libvirt-domain.h:4447 +const ( + DomainOpenGraphicsSkipauth DomainOpenGraphicsFlags = 1 +) + +// DomainSetTimeFlags as declared in libvirt/libvirt-domain.h:4504 +type DomainSetTimeFlags int32 + +// DomainSetTimeFlags enumeration from libvirt/libvirt-domain.h:4504 +const ( + DomainTimeSync DomainSetTimeFlags = 1 +) + +// SchedParameterType as declared in libvirt/libvirt-domain.h:4525 +type SchedParameterType int32 + +// SchedParameterType enumeration from libvirt/libvirt-domain.h:4525 +const ( + DomainSchedFieldInt SchedParameterType = 1 + DomainSchedFieldUint SchedParameterType = 2 + DomainSchedFieldLlong SchedParameterType = 3 + DomainSchedFieldUllong SchedParameterType = 4 + DomainSchedFieldDouble SchedParameterType = 5 + DomainSchedFieldBoolean SchedParameterType = 6 +) + +// BlkioParameterType as declared in libvirt/libvirt-domain.h:4569 +type BlkioParameterType int32 + +// BlkioParameterType enumeration from libvirt/libvirt-domain.h:4569 +const ( + DomainBlkioParamInt BlkioParameterType = 1 + DomainBlkioParamUint BlkioParameterType = 2 + DomainBlkioParamLlong BlkioParameterType = 3 + DomainBlkioParamUllong BlkioParameterType = 4 + DomainBlkioParamDouble BlkioParameterType = 5 + DomainBlkioParamBoolean BlkioParameterType = 6 +) + +// MemoryParameterType as declared in libvirt/libvirt-domain.h:4613 +type MemoryParameterType int32 + +// MemoryParameterType enumeration from libvirt/libvirt-domain.h:4613 +const ( + DomainMemoryParamInt MemoryParameterType = 1 + DomainMemoryParamUint MemoryParameterType = 2 + DomainMemoryParamLlong MemoryParameterType = 3 + DomainMemoryParamUllong MemoryParameterType = 4 + DomainMemoryParamDouble MemoryParameterType = 5 + DomainMemoryParamBoolean MemoryParameterType = 6 +) + +// DomainInterfaceAddressesSource as declared in libvirt/libvirt-domain.h:4650 +type DomainInterfaceAddressesSource int32 + +// DomainInterfaceAddressesSource enumeration from libvirt/libvirt-domain.h:4650 +const ( + DomainInterfaceAddressesSrcLease DomainInterfaceAddressesSource = iota + DomainInterfaceAddressesSrcAgent DomainInterfaceAddressesSource = 1 +) + +// DomainSetUserPasswordFlags as declared in libvirt/libvirt-domain.h:4678 +type DomainSetUserPasswordFlags int32 + +// DomainSetUserPasswordFlags enumeration from libvirt/libvirt-domain.h:4678 +const ( + DomainPasswordEncrypted DomainSetUserPasswordFlags = 1 +) + +// DomainSnapshotCreateFlags as declared in libvirt/libvirt-domain-snapshot.h:73 +type DomainSnapshotCreateFlags int32 + +// DomainSnapshotCreateFlags enumeration from libvirt/libvirt-domain-snapshot.h:73 +const ( + DomainSnapshotCreateRedefine DomainSnapshotCreateFlags = 1 + DomainSnapshotCreateCurrent DomainSnapshotCreateFlags = 2 + DomainSnapshotCreateNoMetadata DomainSnapshotCreateFlags = 4 + DomainSnapshotCreateHalt DomainSnapshotCreateFlags = 8 + DomainSnapshotCreateDiskOnly DomainSnapshotCreateFlags = 16 + DomainSnapshotCreateReuseExt DomainSnapshotCreateFlags = 32 + DomainSnapshotCreateQuiesce DomainSnapshotCreateFlags = 64 + DomainSnapshotCreateAtomic DomainSnapshotCreateFlags = 128 + DomainSnapshotCreateLive DomainSnapshotCreateFlags = 256 +) + +// DomainSnapshotListFlags as declared in 
libvirt/libvirt-domain-snapshot.h:133 +type DomainSnapshotListFlags int32 + +// DomainSnapshotListFlags enumeration from libvirt/libvirt-domain-snapshot.h:133 +const ( + DomainSnapshotListRoots DomainSnapshotListFlags = 1 + DomainSnapshotListDescendants DomainSnapshotListFlags = 1 + DomainSnapshotListLeaves DomainSnapshotListFlags = 4 + DomainSnapshotListNoLeaves DomainSnapshotListFlags = 8 + DomainSnapshotListMetadata DomainSnapshotListFlags = 2 + DomainSnapshotListNoMetadata DomainSnapshotListFlags = 16 + DomainSnapshotListInactive DomainSnapshotListFlags = 32 + DomainSnapshotListActive DomainSnapshotListFlags = 64 + DomainSnapshotListDiskOnly DomainSnapshotListFlags = 128 + DomainSnapshotListInternal DomainSnapshotListFlags = 256 + DomainSnapshotListExternal DomainSnapshotListFlags = 512 +) + +// DomainSnapshotRevertFlags as declared in libvirt/libvirt-domain-snapshot.h:190 +type DomainSnapshotRevertFlags int32 + +// DomainSnapshotRevertFlags enumeration from libvirt/libvirt-domain-snapshot.h:190 +const ( + DomainSnapshotRevertRunning DomainSnapshotRevertFlags = 1 + DomainSnapshotRevertPaused DomainSnapshotRevertFlags = 2 + DomainSnapshotRevertForce DomainSnapshotRevertFlags = 4 +) + +// DomainSnapshotDeleteFlags as declared in libvirt/libvirt-domain-snapshot.h:204 +type DomainSnapshotDeleteFlags int32 + +// DomainSnapshotDeleteFlags enumeration from libvirt/libvirt-domain-snapshot.h:204 +const ( + DomainSnapshotDeleteChildren DomainSnapshotDeleteFlags = 1 + DomainSnapshotDeleteMetadataOnly DomainSnapshotDeleteFlags = 2 + DomainSnapshotDeleteChildrenOnly DomainSnapshotDeleteFlags = 4 +) + +// EventHandleType as declared in libvirt/libvirt-event.h:44 +type EventHandleType int32 + +// EventHandleType enumeration from libvirt/libvirt-event.h:44 +const ( + EventHandleReadable EventHandleType = 1 + EventHandleWritable EventHandleType = 2 + EventHandleError EventHandleType = 4 + EventHandleHangup EventHandleType = 8 +) + +// ConnectListAllInterfacesFlags as declared in libvirt/libvirt-interface.h:65 +type ConnectListAllInterfacesFlags int32 + +// ConnectListAllInterfacesFlags enumeration from libvirt/libvirt-interface.h:65 +const ( + ConnectListInterfacesInactive ConnectListAllInterfacesFlags = 1 + ConnectListInterfacesActive ConnectListAllInterfacesFlags = 2 +) + +// InterfaceXMLFlags as declared in libvirt/libvirt-interface.h:81 +type InterfaceXMLFlags int32 + +// InterfaceXMLFlags enumeration from libvirt/libvirt-interface.h:81 +const ( + InterfaceXMLInactive InterfaceXMLFlags = 1 +) + +// NetworkXMLFlags as declared in libvirt/libvirt-network.h:33 +type NetworkXMLFlags int32 + +// NetworkXMLFlags enumeration from libvirt/libvirt-network.h:33 +const ( + NetworkXMLInactive NetworkXMLFlags = 1 +) + +// ConnectListAllNetworksFlags as declared in libvirt/libvirt-network.h:85 +type ConnectListAllNetworksFlags int32 + +// ConnectListAllNetworksFlags enumeration from libvirt/libvirt-network.h:85 +const ( + ConnectListNetworksInactive ConnectListAllNetworksFlags = 1 + ConnectListNetworksActive ConnectListAllNetworksFlags = 2 + ConnectListNetworksPersistent ConnectListAllNetworksFlags = 4 + ConnectListNetworksTransient ConnectListAllNetworksFlags = 8 + ConnectListNetworksAutostart ConnectListAllNetworksFlags = 16 + ConnectListNetworksNoAutostart ConnectListAllNetworksFlags = 32 +) + +// NetworkUpdateCommand as declared in libvirt/libvirt-network.h:134 +type NetworkUpdateCommand int32 + +// NetworkUpdateCommand enumeration from libvirt/libvirt-network.h:134 +const ( + NetworkUpdateCommandNone 
NetworkUpdateCommand = iota + NetworkUpdateCommandModify NetworkUpdateCommand = 1 + NetworkUpdateCommandDelete NetworkUpdateCommand = 2 + NetworkUpdateCommandAddLast NetworkUpdateCommand = 3 + NetworkUpdateCommandAddFirst NetworkUpdateCommand = 4 +) + +// NetworkUpdateSection as declared in libvirt/libvirt-network.h:160 +type NetworkUpdateSection int32 + +// NetworkUpdateSection enumeration from libvirt/libvirt-network.h:160 +const ( + NetworkSectionNone NetworkUpdateSection = iota + NetworkSectionBridge NetworkUpdateSection = 1 + NetworkSectionDomain NetworkUpdateSection = 2 + NetworkSectionIP NetworkUpdateSection = 3 + NetworkSectionIPDhcpHost NetworkUpdateSection = 4 + NetworkSectionIPDhcpRange NetworkUpdateSection = 5 + NetworkSectionForward NetworkUpdateSection = 6 + NetworkSectionForwardInterface NetworkUpdateSection = 7 + NetworkSectionForwardPf NetworkUpdateSection = 8 + NetworkSectionPortgroup NetworkUpdateSection = 9 + NetworkSectionDNSHost NetworkUpdateSection = 10 + NetworkSectionDNSTxt NetworkUpdateSection = 11 + NetworkSectionDNSSrv NetworkUpdateSection = 12 +) + +// NetworkUpdateFlags as declared in libvirt/libvirt-network.h:172 +type NetworkUpdateFlags int32 + +// NetworkUpdateFlags enumeration from libvirt/libvirt-network.h:172 +const ( + NetworkUpdateAffectCurrent NetworkUpdateFlags = iota + NetworkUpdateAffectLive NetworkUpdateFlags = 1 + NetworkUpdateAffectConfig NetworkUpdateFlags = 2 +) + +// NetworkEventLifecycleType as declared in libvirt/libvirt-network.h:230 +type NetworkEventLifecycleType int32 + +// NetworkEventLifecycleType enumeration from libvirt/libvirt-network.h:230 +const ( + NetworkEventDefined NetworkEventLifecycleType = iota + NetworkEventUndefined NetworkEventLifecycleType = 1 + NetworkEventStarted NetworkEventLifecycleType = 2 + NetworkEventStopped NetworkEventLifecycleType = 3 +) + +// NetworkEventID as declared in libvirt/libvirt-network.h:278 +type NetworkEventID int32 + +// NetworkEventID enumeration from libvirt/libvirt-network.h:278 +const ( + NetworkEventIDLifecycle NetworkEventID = iota +) + +// IPAddrType as declared in libvirt/libvirt-network.h:287 +type IPAddrType int32 + +// IPAddrType enumeration from libvirt/libvirt-network.h:287 +const ( + IPAddrTypeIpv4 IPAddrType = iota + IPAddrTypeIpv6 IPAddrType = 1 +) + +// ConnectListAllNodeDeviceFlags as declared in libvirt/libvirt-nodedev.h:85 +type ConnectListAllNodeDeviceFlags int32 + +// ConnectListAllNodeDeviceFlags enumeration from libvirt/libvirt-nodedev.h:85 +const ( + ConnectListNodeDevicesCapSystem ConnectListAllNodeDeviceFlags = 1 + ConnectListNodeDevicesCapPciDev ConnectListAllNodeDeviceFlags = 2 + ConnectListNodeDevicesCapUsbDev ConnectListAllNodeDeviceFlags = 4 + ConnectListNodeDevicesCapUsbInterface ConnectListAllNodeDeviceFlags = 8 + ConnectListNodeDevicesCapNet ConnectListAllNodeDeviceFlags = 16 + ConnectListNodeDevicesCapScsiHost ConnectListAllNodeDeviceFlags = 32 + ConnectListNodeDevicesCapScsiTarget ConnectListAllNodeDeviceFlags = 64 + ConnectListNodeDevicesCapScsi ConnectListAllNodeDeviceFlags = 128 + ConnectListNodeDevicesCapStorage ConnectListAllNodeDeviceFlags = 256 + ConnectListNodeDevicesCapFcHost ConnectListAllNodeDeviceFlags = 512 + ConnectListNodeDevicesCapVports ConnectListAllNodeDeviceFlags = 1024 + ConnectListNodeDevicesCapScsiGeneric ConnectListAllNodeDeviceFlags = 2048 + ConnectListNodeDevicesCapDrm ConnectListAllNodeDeviceFlags = 4096 + ConnectListNodeDevicesCapMdevTypes ConnectListAllNodeDeviceFlags = 8192 + ConnectListNodeDevicesCapMdev 
ConnectListAllNodeDeviceFlags = 16384 + ConnectListNodeDevicesCapCcwDev ConnectListAllNodeDeviceFlags = 32768 +) + +// NodeDeviceEventID as declared in libvirt/libvirt-nodedev.h:155 +type NodeDeviceEventID int32 + +// NodeDeviceEventID enumeration from libvirt/libvirt-nodedev.h:155 +const ( + NodeDeviceEventIDLifecycle NodeDeviceEventID = iota + NodeDeviceEventIDUpdate NodeDeviceEventID = 1 +) + +// NodeDeviceEventLifecycleType as declared in libvirt/libvirt-nodedev.h:197 +type NodeDeviceEventLifecycleType int32 + +// NodeDeviceEventLifecycleType enumeration from libvirt/libvirt-nodedev.h:197 +const ( + NodeDeviceEventCreated NodeDeviceEventLifecycleType = iota + NodeDeviceEventDeleted NodeDeviceEventLifecycleType = 1 +) + +// SecretUsageType as declared in libvirt/libvirt-secret.h:56 +type SecretUsageType int32 + +// SecretUsageType enumeration from libvirt/libvirt-secret.h:56 +const ( + SecretUsageTypeNone SecretUsageType = iota + SecretUsageTypeVolume SecretUsageType = 1 + SecretUsageTypeCeph SecretUsageType = 2 + SecretUsageTypeIscsi SecretUsageType = 3 + SecretUsageTypeTLS SecretUsageType = 4 +) + +// ConnectListAllSecretsFlags as declared in libvirt/libvirt-secret.h:79 +type ConnectListAllSecretsFlags int32 + +// ConnectListAllSecretsFlags enumeration from libvirt/libvirt-secret.h:79 +const ( + ConnectListSecretsEphemeral ConnectListAllSecretsFlags = 1 + ConnectListSecretsNoEphemeral ConnectListAllSecretsFlags = 2 + ConnectListSecretsPrivate ConnectListAllSecretsFlags = 4 + ConnectListSecretsNoPrivate ConnectListAllSecretsFlags = 8 +) + +// SecretEventID as declared in libvirt/libvirt-secret.h:140 +type SecretEventID int32 + +// SecretEventID enumeration from libvirt/libvirt-secret.h:140 +const ( + SecretEventIDLifecycle SecretEventID = iota + SecretEventIDValueChanged SecretEventID = 1 +) + +// SecretEventLifecycleType as declared in libvirt/libvirt-secret.h:182 +type SecretEventLifecycleType int32 + +// SecretEventLifecycleType enumeration from libvirt/libvirt-secret.h:182 +const ( + SecretEventDefined SecretEventLifecycleType = iota + SecretEventUndefined SecretEventLifecycleType = 1 +) + +// StoragePoolState as declared in libvirt/libvirt-storage.h:58 +type StoragePoolState int32 + +// StoragePoolState enumeration from libvirt/libvirt-storage.h:58 +const ( + StoragePoolInactive StoragePoolState = iota + StoragePoolBuilding StoragePoolState = 1 + StoragePoolRunning StoragePoolState = 2 + StoragePoolDegraded StoragePoolState = 3 + StoragePoolInaccessible StoragePoolState = 4 +) + +// StoragePoolBuildFlags as declared in libvirt/libvirt-storage.h:66 +type StoragePoolBuildFlags int32 + +// StoragePoolBuildFlags enumeration from libvirt/libvirt-storage.h:66 +const ( + StoragePoolBuildNew StoragePoolBuildFlags = iota + StoragePoolBuildRepair StoragePoolBuildFlags = 1 + StoragePoolBuildResize StoragePoolBuildFlags = 2 + StoragePoolBuildNoOverwrite StoragePoolBuildFlags = 4 + StoragePoolBuildOverwrite StoragePoolBuildFlags = 8 +) + +// StoragePoolDeleteFlags as declared in libvirt/libvirt-storage.h:71 +type StoragePoolDeleteFlags int32 + +// StoragePoolDeleteFlags enumeration from libvirt/libvirt-storage.h:71 +const ( + StoragePoolDeleteNormal StoragePoolDeleteFlags = iota + StoragePoolDeleteZeroed StoragePoolDeleteFlags = 1 +) + +// StoragePoolCreateFlags as declared in libvirt/libvirt-storage.h:88 +type StoragePoolCreateFlags int32 + +// StoragePoolCreateFlags enumeration from libvirt/libvirt-storage.h:88 +const ( + StoragePoolCreateNormal StoragePoolCreateFlags = iota + 
StoragePoolCreateWithBuild StoragePoolCreateFlags = 1 + StoragePoolCreateWithBuildOverwrite StoragePoolCreateFlags = 2 + StoragePoolCreateWithBuildNoOverwrite StoragePoolCreateFlags = 4 +) + +// StorageVolType as declared in libvirt/libvirt-storage.h:130 +type StorageVolType int32 + +// StorageVolType enumeration from libvirt/libvirt-storage.h:130 +const ( + StorageVolFile StorageVolType = iota + StorageVolBlock StorageVolType = 1 + StorageVolDir StorageVolType = 2 + StorageVolNetwork StorageVolType = 3 + StorageVolNetdir StorageVolType = 4 + StorageVolPloop StorageVolType = 5 +) + +// StorageVolDeleteFlags as declared in libvirt/libvirt-storage.h:136 +type StorageVolDeleteFlags int32 + +// StorageVolDeleteFlags enumeration from libvirt/libvirt-storage.h:136 +const ( + StorageVolDeleteNormal StorageVolDeleteFlags = iota + StorageVolDeleteZeroed StorageVolDeleteFlags = 1 + StorageVolDeleteWithSnapshots StorageVolDeleteFlags = 2 +) + +// StorageVolWipeAlgorithm as declared in libvirt/libvirt-storage.h:168 +type StorageVolWipeAlgorithm int32 + +// StorageVolWipeAlgorithm enumeration from libvirt/libvirt-storage.h:168 +const ( + StorageVolWipeAlgZero StorageVolWipeAlgorithm = iota + StorageVolWipeAlgNnsa StorageVolWipeAlgorithm = 1 + StorageVolWipeAlgDod StorageVolWipeAlgorithm = 2 + StorageVolWipeAlgBsi StorageVolWipeAlgorithm = 3 + StorageVolWipeAlgGutmann StorageVolWipeAlgorithm = 4 + StorageVolWipeAlgSchneier StorageVolWipeAlgorithm = 5 + StorageVolWipeAlgPfitzner7 StorageVolWipeAlgorithm = 6 + StorageVolWipeAlgPfitzner33 StorageVolWipeAlgorithm = 7 + StorageVolWipeAlgRandom StorageVolWipeAlgorithm = 8 + StorageVolWipeAlgTrim StorageVolWipeAlgorithm = 9 +) + +// StorageVolInfoFlags as declared in libvirt/libvirt-storage.h:176 +type StorageVolInfoFlags int32 + +// StorageVolInfoFlags enumeration from libvirt/libvirt-storage.h:176 +const ( + StorageVolUseAllocation StorageVolInfoFlags = iota + StorageVolGetPhysical StorageVolInfoFlags = 1 +) + +// StorageXMLFlags as declared in libvirt/libvirt-storage.h:190 +type StorageXMLFlags int32 + +// StorageXMLFlags enumeration from libvirt/libvirt-storage.h:190 +const ( + StorageXMLInactive StorageXMLFlags = 1 +) + +// ConnectListAllStoragePoolsFlags as declared in libvirt/libvirt-storage.h:244 +type ConnectListAllStoragePoolsFlags int32 + +// ConnectListAllStoragePoolsFlags enumeration from libvirt/libvirt-storage.h:244 +const ( + ConnectListStoragePoolsInactive ConnectListAllStoragePoolsFlags = 1 + ConnectListStoragePoolsActive ConnectListAllStoragePoolsFlags = 2 + ConnectListStoragePoolsPersistent ConnectListAllStoragePoolsFlags = 4 + ConnectListStoragePoolsTransient ConnectListAllStoragePoolsFlags = 8 + ConnectListStoragePoolsAutostart ConnectListAllStoragePoolsFlags = 16 + ConnectListStoragePoolsNoAutostart ConnectListAllStoragePoolsFlags = 32 + ConnectListStoragePoolsDir ConnectListAllStoragePoolsFlags = 64 + ConnectListStoragePoolsFs ConnectListAllStoragePoolsFlags = 128 + ConnectListStoragePoolsNetfs ConnectListAllStoragePoolsFlags = 256 + ConnectListStoragePoolsLogical ConnectListAllStoragePoolsFlags = 512 + ConnectListStoragePoolsDisk ConnectListAllStoragePoolsFlags = 1024 + ConnectListStoragePoolsIscsi ConnectListAllStoragePoolsFlags = 2048 + ConnectListStoragePoolsScsi ConnectListAllStoragePoolsFlags = 4096 + ConnectListStoragePoolsMpath ConnectListAllStoragePoolsFlags = 8192 + ConnectListStoragePoolsRbd ConnectListAllStoragePoolsFlags = 16384 + ConnectListStoragePoolsSheepdog ConnectListAllStoragePoolsFlags = 32768 + 
ConnectListStoragePoolsGluster ConnectListAllStoragePoolsFlags = 65536 + ConnectListStoragePoolsZfs ConnectListAllStoragePoolsFlags = 131072 + ConnectListStoragePoolsVstorage ConnectListAllStoragePoolsFlags = 262144 +) + +// StorageVolCreateFlags as declared in libvirt/libvirt-storage.h:340 +type StorageVolCreateFlags int32 + +// StorageVolCreateFlags enumeration from libvirt/libvirt-storage.h:340 +const ( + StorageVolCreatePreallocMetadata StorageVolCreateFlags = 1 + StorageVolCreateReflink StorageVolCreateFlags = 2 +) + +// StorageVolDownloadFlags as declared in libvirt/libvirt-storage.h:352 +type StorageVolDownloadFlags int32 + +// StorageVolDownloadFlags enumeration from libvirt/libvirt-storage.h:352 +const ( + StorageVolDownloadSparseStream StorageVolDownloadFlags = 1 +) + +// StorageVolUploadFlags as declared in libvirt/libvirt-storage.h:361 +type StorageVolUploadFlags int32 + +// StorageVolUploadFlags enumeration from libvirt/libvirt-storage.h:361 +const ( + StorageVolUploadSparseStream StorageVolUploadFlags = 1 +) + +// StorageVolResizeFlags as declared in libvirt/libvirt-storage.h:392 +type StorageVolResizeFlags int32 + +// StorageVolResizeFlags enumeration from libvirt/libvirt-storage.h:392 +const ( + StorageVolResizeAllocate StorageVolResizeFlags = 1 + StorageVolResizeDelta StorageVolResizeFlags = 2 + StorageVolResizeShrink StorageVolResizeFlags = 4 +) + +// StoragePoolEventID as declared in libvirt/libvirt-storage.h:428 +type StoragePoolEventID int32 + +// StoragePoolEventID enumeration from libvirt/libvirt-storage.h:428 +const ( + StoragePoolEventIDLifecycle StoragePoolEventID = iota + StoragePoolEventIDRefresh StoragePoolEventID = 1 +) + +// StoragePoolEventLifecycleType as declared in libvirt/libvirt-storage.h:472 +type StoragePoolEventLifecycleType int32 + +// StoragePoolEventLifecycleType enumeration from libvirt/libvirt-storage.h:472 +const ( + StoragePoolEventDefined StoragePoolEventLifecycleType = iota + StoragePoolEventUndefined StoragePoolEventLifecycleType = 1 + StoragePoolEventStarted StoragePoolEventLifecycleType = 2 + StoragePoolEventStopped StoragePoolEventLifecycleType = 3 +) + +// StreamFlags as declared in libvirt/libvirt-stream.h:34 +type StreamFlags int32 + +// StreamFlags enumeration from libvirt/libvirt-stream.h:34 +const ( + StreamNonblock StreamFlags = 1 +) + +// StreamRecvFlagsValues as declared in libvirt/libvirt-stream.h:50 +type StreamRecvFlagsValues int32 + +// StreamRecvFlagsValues enumeration from libvirt/libvirt-stream.h:50 +const ( + StreamRecvStopAtHole StreamRecvFlagsValues = 1 +) + +// StreamEventType as declared in libvirt/libvirt-stream.h:223 +type StreamEventType int32 + +// StreamEventType enumeration from libvirt/libvirt-stream.h:223 +const ( + StreamEventReadable StreamEventType = 1 + StreamEventWritable StreamEventType = 2 + StreamEventError StreamEventType = 4 + StreamEventHangup StreamEventType = 8 +) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/doc.go new file mode 100644 index 00000000..1229f30c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/doc.go @@ -0,0 +1,76 @@ +// Copyright 2016 The go-libvirt Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + Package libvirt provides a pure Go interface for Libvirt. + + Rather than using Libvirt's C bindings, this package makes use of + Libvirt's RPC interface, as documented here: https://libvirt.org/internals/rpc.html. + Connections to the libvirt server may be local or remote. RPC packets are encoded + using the XDR standard as defined by RFC 4506. + + This should be considered a work in progress. Most functionality provided by the C + bindings has not yet made its way into this library. Pull requests are welcome! + The definition of the RPC protocol is in the libvirt source tree under src/rpc/virnetprotocol.x. + + Example usage: + + package main + + import ( + "fmt" + "log" + "net" + "time" + + "github.com/digitalocean/go-libvirt" + ) + + func main() { + //c, err := net.DialTimeout("tcp", "127.0.0.1:16509", 2*time.Second) + //c, err := net.DialTimeout("tcp", "192.168.1.12:16509", 2*time.Second) + c, err := net.DialTimeout("unix", "/var/run/libvirt/libvirt-sock", 2*time.Second) + if err != nil { + log.Fatalf("failed to dial libvirt: %v", err) + } + + l := libvirt.New(c) + if err := l.Connect(); err != nil { + log.Fatalf("failed to connect: %v", err) + } + + v, err := l.Version() + if err != nil { + log.Fatalf("failed to retrieve libvirt version: %v", err) + } + fmt.Println("Version:", v) + + domains, err := l.Domains() + if err != nil { + log.Fatalf("failed to retrieve domains: %v", err) + } + + fmt.Println("ID\tName\t\tUUID") + fmt.Printf("--------------------------------------------------------\n") + for _, d := range domains { + fmt.Printf("%d\t%s\t%x\n", d.ID, d.Name, d.UUID) + } + + if err := l.Disconnect(); err != nil { + log.Fatalf("failed to disconnect: %v", err) + } + } +*/ + +package libvirt diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/internal/constants/constants.gen.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/internal/constants/constants.gen.go new file mode 100644 index 00000000..afbab13c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/internal/constants/constants.gen.go @@ -0,0 +1,937 @@ +// Copyright 2017 The go-libvirt Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// Code generated by internal/lvgen/generate.go. DO NOT EDIT. +// +// To regenerate, run 'go generate' in internal/lvgen. +// + +// Package constants contains libvirt procedure identifiers and other enums and +// constants.
+package constants + +// These are libvirt procedure numbers which correspond to each respective +// API call between remote_internal driver and libvirtd. Each procedure is +// identified by a unique number which *may change in any future libvirt +// update*. +// +// Examples: +// REMOTE_PROC_CONNECT_OPEN = 1 +// REMOTE_PROC_DOMAIN_DEFINE_XML = 11 +// REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_SPEED = 207, +const ( + // From enums: + // AuthNone is libvirt's REMOTE_AUTH_NONE + AuthNone = 0 + // AuthSasl is libvirt's REMOTE_AUTH_SASL + AuthSasl = 1 + // AuthPolkit is libvirt's REMOTE_AUTH_POLKIT + AuthPolkit = 2 + // ProcConnectOpen is libvirt's REMOTE_PROC_CONNECT_OPEN + ProcConnectOpen = 1 + // ProcConnectClose is libvirt's REMOTE_PROC_CONNECT_CLOSE + ProcConnectClose = 2 + // ProcConnectGetType is libvirt's REMOTE_PROC_CONNECT_GET_TYPE + ProcConnectGetType = 3 + // ProcConnectGetVersion is libvirt's REMOTE_PROC_CONNECT_GET_VERSION + ProcConnectGetVersion = 4 + // ProcConnectGetMaxVcpus is libvirt's REMOTE_PROC_CONNECT_GET_MAX_VCPUS + ProcConnectGetMaxVcpus = 5 + // ProcNodeGetInfo is libvirt's REMOTE_PROC_NODE_GET_INFO + ProcNodeGetInfo = 6 + // ProcConnectGetCapabilities is libvirt's REMOTE_PROC_CONNECT_GET_CAPABILITIES + ProcConnectGetCapabilities = 7 + // ProcDomainAttachDevice is libvirt's REMOTE_PROC_DOMAIN_ATTACH_DEVICE + ProcDomainAttachDevice = 8 + // ProcDomainCreate is libvirt's REMOTE_PROC_DOMAIN_CREATE + ProcDomainCreate = 9 + // ProcDomainCreateXML is libvirt's REMOTE_PROC_DOMAIN_CREATE_XML + ProcDomainCreateXML = 10 + // ProcDomainDefineXML is libvirt's REMOTE_PROC_DOMAIN_DEFINE_XML + ProcDomainDefineXML = 11 + // ProcDomainDestroy is libvirt's REMOTE_PROC_DOMAIN_DESTROY + ProcDomainDestroy = 12 + // ProcDomainDetachDevice is libvirt's REMOTE_PROC_DOMAIN_DETACH_DEVICE + ProcDomainDetachDevice = 13 + // ProcDomainGetXMLDesc is libvirt's REMOTE_PROC_DOMAIN_GET_XML_DESC + ProcDomainGetXMLDesc = 14 + // ProcDomainGetAutostart is libvirt's REMOTE_PROC_DOMAIN_GET_AUTOSTART + ProcDomainGetAutostart = 15 + // ProcDomainGetInfo is libvirt's REMOTE_PROC_DOMAIN_GET_INFO + ProcDomainGetInfo = 16 + // ProcDomainGetMaxMemory is libvirt's REMOTE_PROC_DOMAIN_GET_MAX_MEMORY + ProcDomainGetMaxMemory = 17 + // ProcDomainGetMaxVcpus is libvirt's REMOTE_PROC_DOMAIN_GET_MAX_VCPUS + ProcDomainGetMaxVcpus = 18 + // ProcDomainGetOsType is libvirt's REMOTE_PROC_DOMAIN_GET_OS_TYPE + ProcDomainGetOsType = 19 + // ProcDomainGetVcpus is libvirt's REMOTE_PROC_DOMAIN_GET_VCPUS + ProcDomainGetVcpus = 20 + // ProcConnectListDefinedDomains is libvirt's REMOTE_PROC_CONNECT_LIST_DEFINED_DOMAINS + ProcConnectListDefinedDomains = 21 + // ProcDomainLookupByID is libvirt's REMOTE_PROC_DOMAIN_LOOKUP_BY_ID + ProcDomainLookupByID = 22 + // ProcDomainLookupByName is libvirt's REMOTE_PROC_DOMAIN_LOOKUP_BY_NAME + ProcDomainLookupByName = 23 + // ProcDomainLookupByUUID is libvirt's REMOTE_PROC_DOMAIN_LOOKUP_BY_UUID + ProcDomainLookupByUUID = 24 + // ProcConnectNumOfDefinedDomains is libvirt's REMOTE_PROC_CONNECT_NUM_OF_DEFINED_DOMAINS + ProcConnectNumOfDefinedDomains = 25 + // ProcDomainPinVcpu is libvirt's REMOTE_PROC_DOMAIN_PIN_VCPU + ProcDomainPinVcpu = 26 + // ProcDomainReboot is libvirt's REMOTE_PROC_DOMAIN_REBOOT + ProcDomainReboot = 27 + // ProcDomainResume is libvirt's REMOTE_PROC_DOMAIN_RESUME + ProcDomainResume = 28 + // ProcDomainSetAutostart is libvirt's REMOTE_PROC_DOMAIN_SET_AUTOSTART + ProcDomainSetAutostart = 29 + // ProcDomainSetMaxMemory is libvirt's REMOTE_PROC_DOMAIN_SET_MAX_MEMORY + ProcDomainSetMaxMemory = 
30 + // ProcDomainSetMemory is libvirt's REMOTE_PROC_DOMAIN_SET_MEMORY + ProcDomainSetMemory = 31 + // ProcDomainSetVcpus is libvirt's REMOTE_PROC_DOMAIN_SET_VCPUS + ProcDomainSetVcpus = 32 + // ProcDomainShutdown is libvirt's REMOTE_PROC_DOMAIN_SHUTDOWN + ProcDomainShutdown = 33 + // ProcDomainSuspend is libvirt's REMOTE_PROC_DOMAIN_SUSPEND + ProcDomainSuspend = 34 + // ProcDomainUndefine is libvirt's REMOTE_PROC_DOMAIN_UNDEFINE + ProcDomainUndefine = 35 + // ProcConnectListDefinedNetworks is libvirt's REMOTE_PROC_CONNECT_LIST_DEFINED_NETWORKS + ProcConnectListDefinedNetworks = 36 + // ProcConnectListDomains is libvirt's REMOTE_PROC_CONNECT_LIST_DOMAINS + ProcConnectListDomains = 37 + // ProcConnectListNetworks is libvirt's REMOTE_PROC_CONNECT_LIST_NETWORKS + ProcConnectListNetworks = 38 + // ProcNetworkCreate is libvirt's REMOTE_PROC_NETWORK_CREATE + ProcNetworkCreate = 39 + // ProcNetworkCreateXML is libvirt's REMOTE_PROC_NETWORK_CREATE_XML + ProcNetworkCreateXML = 40 + // ProcNetworkDefineXML is libvirt's REMOTE_PROC_NETWORK_DEFINE_XML + ProcNetworkDefineXML = 41 + // ProcNetworkDestroy is libvirt's REMOTE_PROC_NETWORK_DESTROY + ProcNetworkDestroy = 42 + // ProcNetworkGetXMLDesc is libvirt's REMOTE_PROC_NETWORK_GET_XML_DESC + ProcNetworkGetXMLDesc = 43 + // ProcNetworkGetAutostart is libvirt's REMOTE_PROC_NETWORK_GET_AUTOSTART + ProcNetworkGetAutostart = 44 + // ProcNetworkGetBridgeName is libvirt's REMOTE_PROC_NETWORK_GET_BRIDGE_NAME + ProcNetworkGetBridgeName = 45 + // ProcNetworkLookupByName is libvirt's REMOTE_PROC_NETWORK_LOOKUP_BY_NAME + ProcNetworkLookupByName = 46 + // ProcNetworkLookupByUUID is libvirt's REMOTE_PROC_NETWORK_LOOKUP_BY_UUID + ProcNetworkLookupByUUID = 47 + // ProcNetworkSetAutostart is libvirt's REMOTE_PROC_NETWORK_SET_AUTOSTART + ProcNetworkSetAutostart = 48 + // ProcNetworkUndefine is libvirt's REMOTE_PROC_NETWORK_UNDEFINE + ProcNetworkUndefine = 49 + // ProcConnectNumOfDefinedNetworks is libvirt's REMOTE_PROC_CONNECT_NUM_OF_DEFINED_NETWORKS + ProcConnectNumOfDefinedNetworks = 50 + // ProcConnectNumOfDomains is libvirt's REMOTE_PROC_CONNECT_NUM_OF_DOMAINS + ProcConnectNumOfDomains = 51 + // ProcConnectNumOfNetworks is libvirt's REMOTE_PROC_CONNECT_NUM_OF_NETWORKS + ProcConnectNumOfNetworks = 52 + // ProcDomainCoreDump is libvirt's REMOTE_PROC_DOMAIN_CORE_DUMP + ProcDomainCoreDump = 53 + // ProcDomainRestore is libvirt's REMOTE_PROC_DOMAIN_RESTORE + ProcDomainRestore = 54 + // ProcDomainSave is libvirt's REMOTE_PROC_DOMAIN_SAVE + ProcDomainSave = 55 + // ProcDomainGetSchedulerType is libvirt's REMOTE_PROC_DOMAIN_GET_SCHEDULER_TYPE + ProcDomainGetSchedulerType = 56 + // ProcDomainGetSchedulerParameters is libvirt's REMOTE_PROC_DOMAIN_GET_SCHEDULER_PARAMETERS + ProcDomainGetSchedulerParameters = 57 + // ProcDomainSetSchedulerParameters is libvirt's REMOTE_PROC_DOMAIN_SET_SCHEDULER_PARAMETERS + ProcDomainSetSchedulerParameters = 58 + // ProcConnectGetHostname is libvirt's REMOTE_PROC_CONNECT_GET_HOSTNAME + ProcConnectGetHostname = 59 + // ProcConnectSupportsFeature is libvirt's REMOTE_PROC_CONNECT_SUPPORTS_FEATURE + ProcConnectSupportsFeature = 60 + // ProcDomainMigratePrepare is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PREPARE + ProcDomainMigratePrepare = 61 + // ProcDomainMigratePerform is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PERFORM + ProcDomainMigratePerform = 62 + // ProcDomainMigrateFinish is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_FINISH + ProcDomainMigrateFinish = 63 + // ProcDomainBlockStats is libvirt's REMOTE_PROC_DOMAIN_BLOCK_STATS + ProcDomainBlockStats = 64 
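These procedure numbers only have meaning inside libvirt's RPC framing: every packet begins with a 4-byte length word followed by a 24-byte header carrying the program, protocol version, procedure, message type, serial, and status fields, which is exactly the layout the PacketLengthSize and HeaderSize constants further down describe. The following is a minimal, self-contained sketch of that framing; it deliberately redefines the relevant values locally (mirroring Program = 0x20008086, ProtocolVersion = 1, and ProcConnectGetLibVersion = 157 from this file) rather than importing go-libvirt's internal constants package, and it only frames an argument-less call without any XDR-encoded payload.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	program           = 0x20008086 // mirrors REMOTE_PROGRAM
	protocolVersion   = 1          // mirrors REMOTE_PROTOCOL_VERSION
	procGetLibVersion = 157        // mirrors REMOTE_PROC_CONNECT_GET_LIB_VERSION
	typeCall          = 0          // message type: call (as opposed to reply/stream)
	statusOK          = 0          // status field for outgoing calls
)

// buildPacket frames a call that carries no arguments: a big-endian
// 4-byte length word followed by six 32-bit header fields (24 bytes,
// matching HeaderSize in the vendored constants).
func buildPacket(proc, serial uint32) []byte {
	var header bytes.Buffer
	for _, field := range []uint32{program, protocolVersion, proc, typeCall, serial, statusOK} {
		binary.Write(&header, binary.BigEndian, field)
	}

	packet := new(bytes.Buffer)
	// The length word counts the entire packet, including its own 4 bytes.
	binary.Write(packet, binary.BigEndian, uint32(4+header.Len()))
	packet.Write(header.Bytes())
	return packet.Bytes()
}

func main() {
	pkt := buildPacket(procGetLibVersion, 1)
	fmt.Printf("% x\n", pkt) // 28 bytes: 4-byte length + 24-byte header
}

A real client would append the XDR-encoded argument struct for the chosen procedure after the header and increment the serial for each outstanding call; the generated args/ret structs later in this diff are what get marshalled into that payload.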
+ // ProcDomainInterfaceStats is libvirt's REMOTE_PROC_DOMAIN_INTERFACE_STATS + ProcDomainInterfaceStats = 65 + // ProcAuthList is libvirt's REMOTE_PROC_AUTH_LIST + ProcAuthList = 66 + // ProcAuthSaslInit is libvirt's REMOTE_PROC_AUTH_SASL_INIT + ProcAuthSaslInit = 67 + // ProcAuthSaslStart is libvirt's REMOTE_PROC_AUTH_SASL_START + ProcAuthSaslStart = 68 + // ProcAuthSaslStep is libvirt's REMOTE_PROC_AUTH_SASL_STEP + ProcAuthSaslStep = 69 + // ProcAuthPolkit is libvirt's REMOTE_PROC_AUTH_POLKIT + ProcAuthPolkit = 70 + // ProcConnectNumOfStoragePools is libvirt's REMOTE_PROC_CONNECT_NUM_OF_STORAGE_POOLS + ProcConnectNumOfStoragePools = 71 + // ProcConnectListStoragePools is libvirt's REMOTE_PROC_CONNECT_LIST_STORAGE_POOLS + ProcConnectListStoragePools = 72 + // ProcConnectNumOfDefinedStoragePools is libvirt's REMOTE_PROC_CONNECT_NUM_OF_DEFINED_STORAGE_POOLS + ProcConnectNumOfDefinedStoragePools = 73 + // ProcConnectListDefinedStoragePools is libvirt's REMOTE_PROC_CONNECT_LIST_DEFINED_STORAGE_POOLS + ProcConnectListDefinedStoragePools = 74 + // ProcConnectFindStoragePoolSources is libvirt's REMOTE_PROC_CONNECT_FIND_STORAGE_POOL_SOURCES + ProcConnectFindStoragePoolSources = 75 + // ProcStoragePoolCreateXML is libvirt's REMOTE_PROC_STORAGE_POOL_CREATE_XML + ProcStoragePoolCreateXML = 76 + // ProcStoragePoolDefineXML is libvirt's REMOTE_PROC_STORAGE_POOL_DEFINE_XML + ProcStoragePoolDefineXML = 77 + // ProcStoragePoolCreate is libvirt's REMOTE_PROC_STORAGE_POOL_CREATE + ProcStoragePoolCreate = 78 + // ProcStoragePoolBuild is libvirt's REMOTE_PROC_STORAGE_POOL_BUILD + ProcStoragePoolBuild = 79 + // ProcStoragePoolDestroy is libvirt's REMOTE_PROC_STORAGE_POOL_DESTROY + ProcStoragePoolDestroy = 80 + // ProcStoragePoolDelete is libvirt's REMOTE_PROC_STORAGE_POOL_DELETE + ProcStoragePoolDelete = 81 + // ProcStoragePoolUndefine is libvirt's REMOTE_PROC_STORAGE_POOL_UNDEFINE + ProcStoragePoolUndefine = 82 + // ProcStoragePoolRefresh is libvirt's REMOTE_PROC_STORAGE_POOL_REFRESH + ProcStoragePoolRefresh = 83 + // ProcStoragePoolLookupByName is libvirt's REMOTE_PROC_STORAGE_POOL_LOOKUP_BY_NAME + ProcStoragePoolLookupByName = 84 + // ProcStoragePoolLookupByUUID is libvirt's REMOTE_PROC_STORAGE_POOL_LOOKUP_BY_UUID + ProcStoragePoolLookupByUUID = 85 + // ProcStoragePoolLookupByVolume is libvirt's REMOTE_PROC_STORAGE_POOL_LOOKUP_BY_VOLUME + ProcStoragePoolLookupByVolume = 86 + // ProcStoragePoolGetInfo is libvirt's REMOTE_PROC_STORAGE_POOL_GET_INFO + ProcStoragePoolGetInfo = 87 + // ProcStoragePoolGetXMLDesc is libvirt's REMOTE_PROC_STORAGE_POOL_GET_XML_DESC + ProcStoragePoolGetXMLDesc = 88 + // ProcStoragePoolGetAutostart is libvirt's REMOTE_PROC_STORAGE_POOL_GET_AUTOSTART + ProcStoragePoolGetAutostart = 89 + // ProcStoragePoolSetAutostart is libvirt's REMOTE_PROC_STORAGE_POOL_SET_AUTOSTART + ProcStoragePoolSetAutostart = 90 + // ProcStoragePoolNumOfVolumes is libvirt's REMOTE_PROC_STORAGE_POOL_NUM_OF_VOLUMES + ProcStoragePoolNumOfVolumes = 91 + // ProcStoragePoolListVolumes is libvirt's REMOTE_PROC_STORAGE_POOL_LIST_VOLUMES + ProcStoragePoolListVolumes = 92 + // ProcStorageVolCreateXML is libvirt's REMOTE_PROC_STORAGE_VOL_CREATE_XML + ProcStorageVolCreateXML = 93 + // ProcStorageVolDelete is libvirt's REMOTE_PROC_STORAGE_VOL_DELETE + ProcStorageVolDelete = 94 + // ProcStorageVolLookupByName is libvirt's REMOTE_PROC_STORAGE_VOL_LOOKUP_BY_NAME + ProcStorageVolLookupByName = 95 + // ProcStorageVolLookupByKey is libvirt's REMOTE_PROC_STORAGE_VOL_LOOKUP_BY_KEY + ProcStorageVolLookupByKey = 96 + // 
ProcStorageVolLookupByPath is libvirt's REMOTE_PROC_STORAGE_VOL_LOOKUP_BY_PATH + ProcStorageVolLookupByPath = 97 + // ProcStorageVolGetInfo is libvirt's REMOTE_PROC_STORAGE_VOL_GET_INFO + ProcStorageVolGetInfo = 98 + // ProcStorageVolGetXMLDesc is libvirt's REMOTE_PROC_STORAGE_VOL_GET_XML_DESC + ProcStorageVolGetXMLDesc = 99 + // ProcStorageVolGetPath is libvirt's REMOTE_PROC_STORAGE_VOL_GET_PATH + ProcStorageVolGetPath = 100 + // ProcNodeGetCellsFreeMemory is libvirt's REMOTE_PROC_NODE_GET_CELLS_FREE_MEMORY + ProcNodeGetCellsFreeMemory = 101 + // ProcNodeGetFreeMemory is libvirt's REMOTE_PROC_NODE_GET_FREE_MEMORY + ProcNodeGetFreeMemory = 102 + // ProcDomainBlockPeek is libvirt's REMOTE_PROC_DOMAIN_BLOCK_PEEK + ProcDomainBlockPeek = 103 + // ProcDomainMemoryPeek is libvirt's REMOTE_PROC_DOMAIN_MEMORY_PEEK + ProcDomainMemoryPeek = 104 + // ProcConnectDomainEventRegister is libvirt's REMOTE_PROC_CONNECT_DOMAIN_EVENT_REGISTER + ProcConnectDomainEventRegister = 105 + // ProcConnectDomainEventDeregister is libvirt's REMOTE_PROC_CONNECT_DOMAIN_EVENT_DEREGISTER + ProcConnectDomainEventDeregister = 106 + // ProcDomainEventLifecycle is libvirt's REMOTE_PROC_DOMAIN_EVENT_LIFECYCLE + ProcDomainEventLifecycle = 107 + // ProcDomainMigratePrepare2 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PREPARE2 + ProcDomainMigratePrepare2 = 108 + // ProcDomainMigrateFinish2 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_FINISH2 + ProcDomainMigrateFinish2 = 109 + // ProcConnectGetUri is libvirt's REMOTE_PROC_CONNECT_GET_URI + ProcConnectGetUri = 110 + // ProcNodeNumOfDevices is libvirt's REMOTE_PROC_NODE_NUM_OF_DEVICES + ProcNodeNumOfDevices = 111 + // ProcNodeListDevices is libvirt's REMOTE_PROC_NODE_LIST_DEVICES + ProcNodeListDevices = 112 + // ProcNodeDeviceLookupByName is libvirt's REMOTE_PROC_NODE_DEVICE_LOOKUP_BY_NAME + ProcNodeDeviceLookupByName = 113 + // ProcNodeDeviceGetXMLDesc is libvirt's REMOTE_PROC_NODE_DEVICE_GET_XML_DESC + ProcNodeDeviceGetXMLDesc = 114 + // ProcNodeDeviceGetParent is libvirt's REMOTE_PROC_NODE_DEVICE_GET_PARENT + ProcNodeDeviceGetParent = 115 + // ProcNodeDeviceNumOfCaps is libvirt's REMOTE_PROC_NODE_DEVICE_NUM_OF_CAPS + ProcNodeDeviceNumOfCaps = 116 + // ProcNodeDeviceListCaps is libvirt's REMOTE_PROC_NODE_DEVICE_LIST_CAPS + ProcNodeDeviceListCaps = 117 + // ProcNodeDeviceDettach is libvirt's REMOTE_PROC_NODE_DEVICE_DETTACH + ProcNodeDeviceDettach = 118 + // ProcNodeDeviceReAttach is libvirt's REMOTE_PROC_NODE_DEVICE_RE_ATTACH + ProcNodeDeviceReAttach = 119 + // ProcNodeDeviceReset is libvirt's REMOTE_PROC_NODE_DEVICE_RESET + ProcNodeDeviceReset = 120 + // ProcDomainGetSecurityLabel is libvirt's REMOTE_PROC_DOMAIN_GET_SECURITY_LABEL + ProcDomainGetSecurityLabel = 121 + // ProcNodeGetSecurityModel is libvirt's REMOTE_PROC_NODE_GET_SECURITY_MODEL + ProcNodeGetSecurityModel = 122 + // ProcNodeDeviceCreateXML is libvirt's REMOTE_PROC_NODE_DEVICE_CREATE_XML + ProcNodeDeviceCreateXML = 123 + // ProcNodeDeviceDestroy is libvirt's REMOTE_PROC_NODE_DEVICE_DESTROY + ProcNodeDeviceDestroy = 124 + // ProcStorageVolCreateXMLFrom is libvirt's REMOTE_PROC_STORAGE_VOL_CREATE_XML_FROM + ProcStorageVolCreateXMLFrom = 125 + // ProcConnectNumOfInterfaces is libvirt's REMOTE_PROC_CONNECT_NUM_OF_INTERFACES + ProcConnectNumOfInterfaces = 126 + // ProcConnectListInterfaces is libvirt's REMOTE_PROC_CONNECT_LIST_INTERFACES + ProcConnectListInterfaces = 127 + // ProcInterfaceLookupByName is libvirt's REMOTE_PROC_INTERFACE_LOOKUP_BY_NAME + ProcInterfaceLookupByName = 128 + // ProcInterfaceLookupByMacString is libvirt's 
REMOTE_PROC_INTERFACE_LOOKUP_BY_MAC_STRING + ProcInterfaceLookupByMacString = 129 + // ProcInterfaceGetXMLDesc is libvirt's REMOTE_PROC_INTERFACE_GET_XML_DESC + ProcInterfaceGetXMLDesc = 130 + // ProcInterfaceDefineXML is libvirt's REMOTE_PROC_INTERFACE_DEFINE_XML + ProcInterfaceDefineXML = 131 + // ProcInterfaceUndefine is libvirt's REMOTE_PROC_INTERFACE_UNDEFINE + ProcInterfaceUndefine = 132 + // ProcInterfaceCreate is libvirt's REMOTE_PROC_INTERFACE_CREATE + ProcInterfaceCreate = 133 + // ProcInterfaceDestroy is libvirt's REMOTE_PROC_INTERFACE_DESTROY + ProcInterfaceDestroy = 134 + // ProcConnectDomainXMLFromNative is libvirt's REMOTE_PROC_CONNECT_DOMAIN_XML_FROM_NATIVE + ProcConnectDomainXMLFromNative = 135 + // ProcConnectDomainXMLToNative is libvirt's REMOTE_PROC_CONNECT_DOMAIN_XML_TO_NATIVE + ProcConnectDomainXMLToNative = 136 + // ProcConnectNumOfDefinedInterfaces is libvirt's REMOTE_PROC_CONNECT_NUM_OF_DEFINED_INTERFACES + ProcConnectNumOfDefinedInterfaces = 137 + // ProcConnectListDefinedInterfaces is libvirt's REMOTE_PROC_CONNECT_LIST_DEFINED_INTERFACES + ProcConnectListDefinedInterfaces = 138 + // ProcConnectNumOfSecrets is libvirt's REMOTE_PROC_CONNECT_NUM_OF_SECRETS + ProcConnectNumOfSecrets = 139 + // ProcConnectListSecrets is libvirt's REMOTE_PROC_CONNECT_LIST_SECRETS + ProcConnectListSecrets = 140 + // ProcSecretLookupByUUID is libvirt's REMOTE_PROC_SECRET_LOOKUP_BY_UUID + ProcSecretLookupByUUID = 141 + // ProcSecretDefineXML is libvirt's REMOTE_PROC_SECRET_DEFINE_XML + ProcSecretDefineXML = 142 + // ProcSecretGetXMLDesc is libvirt's REMOTE_PROC_SECRET_GET_XML_DESC + ProcSecretGetXMLDesc = 143 + // ProcSecretSetValue is libvirt's REMOTE_PROC_SECRET_SET_VALUE + ProcSecretSetValue = 144 + // ProcSecretGetValue is libvirt's REMOTE_PROC_SECRET_GET_VALUE + ProcSecretGetValue = 145 + // ProcSecretUndefine is libvirt's REMOTE_PROC_SECRET_UNDEFINE + ProcSecretUndefine = 146 + // ProcSecretLookupByUsage is libvirt's REMOTE_PROC_SECRET_LOOKUP_BY_USAGE + ProcSecretLookupByUsage = 147 + // ProcDomainMigratePrepareTunnel is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL + ProcDomainMigratePrepareTunnel = 148 + // ProcConnectIsSecure is libvirt's REMOTE_PROC_CONNECT_IS_SECURE + ProcConnectIsSecure = 149 + // ProcDomainIsActive is libvirt's REMOTE_PROC_DOMAIN_IS_ACTIVE + ProcDomainIsActive = 150 + // ProcDomainIsPersistent is libvirt's REMOTE_PROC_DOMAIN_IS_PERSISTENT + ProcDomainIsPersistent = 151 + // ProcNetworkIsActive is libvirt's REMOTE_PROC_NETWORK_IS_ACTIVE + ProcNetworkIsActive = 152 + // ProcNetworkIsPersistent is libvirt's REMOTE_PROC_NETWORK_IS_PERSISTENT + ProcNetworkIsPersistent = 153 + // ProcStoragePoolIsActive is libvirt's REMOTE_PROC_STORAGE_POOL_IS_ACTIVE + ProcStoragePoolIsActive = 154 + // ProcStoragePoolIsPersistent is libvirt's REMOTE_PROC_STORAGE_POOL_IS_PERSISTENT + ProcStoragePoolIsPersistent = 155 + // ProcInterfaceIsActive is libvirt's REMOTE_PROC_INTERFACE_IS_ACTIVE + ProcInterfaceIsActive = 156 + // ProcConnectGetLibVersion is libvirt's REMOTE_PROC_CONNECT_GET_LIB_VERSION + ProcConnectGetLibVersion = 157 + // ProcConnectCompareCPU is libvirt's REMOTE_PROC_CONNECT_COMPARE_CPU + ProcConnectCompareCPU = 158 + // ProcDomainMemoryStats is libvirt's REMOTE_PROC_DOMAIN_MEMORY_STATS + ProcDomainMemoryStats = 159 + // ProcDomainAttachDeviceFlags is libvirt's REMOTE_PROC_DOMAIN_ATTACH_DEVICE_FLAGS + ProcDomainAttachDeviceFlags = 160 + // ProcDomainDetachDeviceFlags is libvirt's REMOTE_PROC_DOMAIN_DETACH_DEVICE_FLAGS + ProcDomainDetachDeviceFlags = 161 + // 
ProcConnectBaselineCPU is libvirt's REMOTE_PROC_CONNECT_BASELINE_CPU + ProcConnectBaselineCPU = 162 + // ProcDomainGetJobInfo is libvirt's REMOTE_PROC_DOMAIN_GET_JOB_INFO + ProcDomainGetJobInfo = 163 + // ProcDomainAbortJob is libvirt's REMOTE_PROC_DOMAIN_ABORT_JOB + ProcDomainAbortJob = 164 + // ProcStorageVolWipe is libvirt's REMOTE_PROC_STORAGE_VOL_WIPE + ProcStorageVolWipe = 165 + // ProcDomainMigrateSetMaxDowntime is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_DOWNTIME + ProcDomainMigrateSetMaxDowntime = 166 + // ProcConnectDomainEventRegisterAny is libvirt's REMOTE_PROC_CONNECT_DOMAIN_EVENT_REGISTER_ANY + ProcConnectDomainEventRegisterAny = 167 + // ProcConnectDomainEventDeregisterAny is libvirt's REMOTE_PROC_CONNECT_DOMAIN_EVENT_DEREGISTER_ANY + ProcConnectDomainEventDeregisterAny = 168 + // ProcDomainEventReboot is libvirt's REMOTE_PROC_DOMAIN_EVENT_REBOOT + ProcDomainEventReboot = 169 + // ProcDomainEventRtcChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_RTC_CHANGE + ProcDomainEventRtcChange = 170 + // ProcDomainEventWatchdog is libvirt's REMOTE_PROC_DOMAIN_EVENT_WATCHDOG + ProcDomainEventWatchdog = 171 + // ProcDomainEventIOError is libvirt's REMOTE_PROC_DOMAIN_EVENT_IO_ERROR + ProcDomainEventIOError = 172 + // ProcDomainEventGraphics is libvirt's REMOTE_PROC_DOMAIN_EVENT_GRAPHICS + ProcDomainEventGraphics = 173 + // ProcDomainUpdateDeviceFlags is libvirt's REMOTE_PROC_DOMAIN_UPDATE_DEVICE_FLAGS + ProcDomainUpdateDeviceFlags = 174 + // ProcNwfilterLookupByName is libvirt's REMOTE_PROC_NWFILTER_LOOKUP_BY_NAME + ProcNwfilterLookupByName = 175 + // ProcNwfilterLookupByUUID is libvirt's REMOTE_PROC_NWFILTER_LOOKUP_BY_UUID + ProcNwfilterLookupByUUID = 176 + // ProcNwfilterGetXMLDesc is libvirt's REMOTE_PROC_NWFILTER_GET_XML_DESC + ProcNwfilterGetXMLDesc = 177 + // ProcConnectNumOfNwfilters is libvirt's REMOTE_PROC_CONNECT_NUM_OF_NWFILTERS + ProcConnectNumOfNwfilters = 178 + // ProcConnectListNwfilters is libvirt's REMOTE_PROC_CONNECT_LIST_NWFILTERS + ProcConnectListNwfilters = 179 + // ProcNwfilterDefineXML is libvirt's REMOTE_PROC_NWFILTER_DEFINE_XML + ProcNwfilterDefineXML = 180 + // ProcNwfilterUndefine is libvirt's REMOTE_PROC_NWFILTER_UNDEFINE + ProcNwfilterUndefine = 181 + // ProcDomainManagedSave is libvirt's REMOTE_PROC_DOMAIN_MANAGED_SAVE + ProcDomainManagedSave = 182 + // ProcDomainHasManagedSaveImage is libvirt's REMOTE_PROC_DOMAIN_HAS_MANAGED_SAVE_IMAGE + ProcDomainHasManagedSaveImage = 183 + // ProcDomainManagedSaveRemove is libvirt's REMOTE_PROC_DOMAIN_MANAGED_SAVE_REMOVE + ProcDomainManagedSaveRemove = 184 + // ProcDomainSnapshotCreateXML is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_CREATE_XML + ProcDomainSnapshotCreateXML = 185 + // ProcDomainSnapshotGetXMLDesc is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_GET_XML_DESC + ProcDomainSnapshotGetXMLDesc = 186 + // ProcDomainSnapshotNum is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_NUM + ProcDomainSnapshotNum = 187 + // ProcDomainSnapshotListNames is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_LIST_NAMES + ProcDomainSnapshotListNames = 188 + // ProcDomainSnapshotLookupByName is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_LOOKUP_BY_NAME + ProcDomainSnapshotLookupByName = 189 + // ProcDomainHasCurrentSnapshot is libvirt's REMOTE_PROC_DOMAIN_HAS_CURRENT_SNAPSHOT + ProcDomainHasCurrentSnapshot = 190 + // ProcDomainSnapshotCurrent is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_CURRENT + ProcDomainSnapshotCurrent = 191 + // ProcDomainRevertToSnapshot is libvirt's REMOTE_PROC_DOMAIN_REVERT_TO_SNAPSHOT + ProcDomainRevertToSnapshot = 192 + // 
ProcDomainSnapshotDelete is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_DELETE + ProcDomainSnapshotDelete = 193 + // ProcDomainGetBlockInfo is libvirt's REMOTE_PROC_DOMAIN_GET_BLOCK_INFO + ProcDomainGetBlockInfo = 194 + // ProcDomainEventIOErrorReason is libvirt's REMOTE_PROC_DOMAIN_EVENT_IO_ERROR_REASON + ProcDomainEventIOErrorReason = 195 + // ProcDomainCreateWithFlags is libvirt's REMOTE_PROC_DOMAIN_CREATE_WITH_FLAGS + ProcDomainCreateWithFlags = 196 + // ProcDomainSetMemoryParameters is libvirt's REMOTE_PROC_DOMAIN_SET_MEMORY_PARAMETERS + ProcDomainSetMemoryParameters = 197 + // ProcDomainGetMemoryParameters is libvirt's REMOTE_PROC_DOMAIN_GET_MEMORY_PARAMETERS + ProcDomainGetMemoryParameters = 198 + // ProcDomainSetVcpusFlags is libvirt's REMOTE_PROC_DOMAIN_SET_VCPUS_FLAGS + ProcDomainSetVcpusFlags = 199 + // ProcDomainGetVcpusFlags is libvirt's REMOTE_PROC_DOMAIN_GET_VCPUS_FLAGS + ProcDomainGetVcpusFlags = 200 + // ProcDomainOpenConsole is libvirt's REMOTE_PROC_DOMAIN_OPEN_CONSOLE + ProcDomainOpenConsole = 201 + // ProcDomainIsUpdated is libvirt's REMOTE_PROC_DOMAIN_IS_UPDATED + ProcDomainIsUpdated = 202 + // ProcConnectGetSysinfo is libvirt's REMOTE_PROC_CONNECT_GET_SYSINFO + ProcConnectGetSysinfo = 203 + // ProcDomainSetMemoryFlags is libvirt's REMOTE_PROC_DOMAIN_SET_MEMORY_FLAGS + ProcDomainSetMemoryFlags = 204 + // ProcDomainSetBlkioParameters is libvirt's REMOTE_PROC_DOMAIN_SET_BLKIO_PARAMETERS + ProcDomainSetBlkioParameters = 205 + // ProcDomainGetBlkioParameters is libvirt's REMOTE_PROC_DOMAIN_GET_BLKIO_PARAMETERS + ProcDomainGetBlkioParameters = 206 + // ProcDomainMigrateSetMaxSpeed is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_SPEED + ProcDomainMigrateSetMaxSpeed = 207 + // ProcStorageVolUpload is libvirt's REMOTE_PROC_STORAGE_VOL_UPLOAD + ProcStorageVolUpload = 208 + // ProcStorageVolDownload is libvirt's REMOTE_PROC_STORAGE_VOL_DOWNLOAD + ProcStorageVolDownload = 209 + // ProcDomainInjectNmi is libvirt's REMOTE_PROC_DOMAIN_INJECT_NMI + ProcDomainInjectNmi = 210 + // ProcDomainScreenshot is libvirt's REMOTE_PROC_DOMAIN_SCREENSHOT + ProcDomainScreenshot = 211 + // ProcDomainGetState is libvirt's REMOTE_PROC_DOMAIN_GET_STATE + ProcDomainGetState = 212 + // ProcDomainMigrateBegin3 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_BEGIN3 + ProcDomainMigrateBegin3 = 213 + // ProcDomainMigratePrepare3 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PREPARE3 + ProcDomainMigratePrepare3 = 214 + // ProcDomainMigratePrepareTunnel3 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL3 + ProcDomainMigratePrepareTunnel3 = 215 + // ProcDomainMigratePerform3 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PERFORM3 + ProcDomainMigratePerform3 = 216 + // ProcDomainMigrateFinish3 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_FINISH3 + ProcDomainMigrateFinish3 = 217 + // ProcDomainMigrateConfirm3 is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_CONFIRM3 + ProcDomainMigrateConfirm3 = 218 + // ProcDomainSetSchedulerParametersFlags is libvirt's REMOTE_PROC_DOMAIN_SET_SCHEDULER_PARAMETERS_FLAGS + ProcDomainSetSchedulerParametersFlags = 219 + // ProcInterfaceChangeBegin is libvirt's REMOTE_PROC_INTERFACE_CHANGE_BEGIN + ProcInterfaceChangeBegin = 220 + // ProcInterfaceChangeCommit is libvirt's REMOTE_PROC_INTERFACE_CHANGE_COMMIT + ProcInterfaceChangeCommit = 221 + // ProcInterfaceChangeRollback is libvirt's REMOTE_PROC_INTERFACE_CHANGE_ROLLBACK + ProcInterfaceChangeRollback = 222 + // ProcDomainGetSchedulerParametersFlags is libvirt's REMOTE_PROC_DOMAIN_GET_SCHEDULER_PARAMETERS_FLAGS + ProcDomainGetSchedulerParametersFlags = 223 + // 
ProcDomainEventControlError is libvirt's REMOTE_PROC_DOMAIN_EVENT_CONTROL_ERROR + ProcDomainEventControlError = 224 + // ProcDomainPinVcpuFlags is libvirt's REMOTE_PROC_DOMAIN_PIN_VCPU_FLAGS + ProcDomainPinVcpuFlags = 225 + // ProcDomainSendKey is libvirt's REMOTE_PROC_DOMAIN_SEND_KEY + ProcDomainSendKey = 226 + // ProcNodeGetCPUStats is libvirt's REMOTE_PROC_NODE_GET_CPU_STATS + ProcNodeGetCPUStats = 227 + // ProcNodeGetMemoryStats is libvirt's REMOTE_PROC_NODE_GET_MEMORY_STATS + ProcNodeGetMemoryStats = 228 + // ProcDomainGetControlInfo is libvirt's REMOTE_PROC_DOMAIN_GET_CONTROL_INFO + ProcDomainGetControlInfo = 229 + // ProcDomainGetVcpuPinInfo is libvirt's REMOTE_PROC_DOMAIN_GET_VCPU_PIN_INFO + ProcDomainGetVcpuPinInfo = 230 + // ProcDomainUndefineFlags is libvirt's REMOTE_PROC_DOMAIN_UNDEFINE_FLAGS + ProcDomainUndefineFlags = 231 + // ProcDomainSaveFlags is libvirt's REMOTE_PROC_DOMAIN_SAVE_FLAGS + ProcDomainSaveFlags = 232 + // ProcDomainRestoreFlags is libvirt's REMOTE_PROC_DOMAIN_RESTORE_FLAGS + ProcDomainRestoreFlags = 233 + // ProcDomainDestroyFlags is libvirt's REMOTE_PROC_DOMAIN_DESTROY_FLAGS + ProcDomainDestroyFlags = 234 + // ProcDomainSaveImageGetXMLDesc is libvirt's REMOTE_PROC_DOMAIN_SAVE_IMAGE_GET_XML_DESC + ProcDomainSaveImageGetXMLDesc = 235 + // ProcDomainSaveImageDefineXML is libvirt's REMOTE_PROC_DOMAIN_SAVE_IMAGE_DEFINE_XML + ProcDomainSaveImageDefineXML = 236 + // ProcDomainBlockJobAbort is libvirt's REMOTE_PROC_DOMAIN_BLOCK_JOB_ABORT + ProcDomainBlockJobAbort = 237 + // ProcDomainGetBlockJobInfo is libvirt's REMOTE_PROC_DOMAIN_GET_BLOCK_JOB_INFO + ProcDomainGetBlockJobInfo = 238 + // ProcDomainBlockJobSetSpeed is libvirt's REMOTE_PROC_DOMAIN_BLOCK_JOB_SET_SPEED + ProcDomainBlockJobSetSpeed = 239 + // ProcDomainBlockPull is libvirt's REMOTE_PROC_DOMAIN_BLOCK_PULL + ProcDomainBlockPull = 240 + // ProcDomainEventBlockJob is libvirt's REMOTE_PROC_DOMAIN_EVENT_BLOCK_JOB + ProcDomainEventBlockJob = 241 + // ProcDomainMigrateGetMaxSpeed is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_SPEED + ProcDomainMigrateGetMaxSpeed = 242 + // ProcDomainBlockStatsFlags is libvirt's REMOTE_PROC_DOMAIN_BLOCK_STATS_FLAGS + ProcDomainBlockStatsFlags = 243 + // ProcDomainSnapshotGetParent is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_GET_PARENT + ProcDomainSnapshotGetParent = 244 + // ProcDomainReset is libvirt's REMOTE_PROC_DOMAIN_RESET + ProcDomainReset = 245 + // ProcDomainSnapshotNumChildren is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_NUM_CHILDREN + ProcDomainSnapshotNumChildren = 246 + // ProcDomainSnapshotListChildrenNames is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_LIST_CHILDREN_NAMES + ProcDomainSnapshotListChildrenNames = 247 + // ProcDomainEventDiskChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_DISK_CHANGE + ProcDomainEventDiskChange = 248 + // ProcDomainOpenGraphics is libvirt's REMOTE_PROC_DOMAIN_OPEN_GRAPHICS + ProcDomainOpenGraphics = 249 + // ProcNodeSuspendForDuration is libvirt's REMOTE_PROC_NODE_SUSPEND_FOR_DURATION + ProcNodeSuspendForDuration = 250 + // ProcDomainBlockResize is libvirt's REMOTE_PROC_DOMAIN_BLOCK_RESIZE + ProcDomainBlockResize = 251 + // ProcDomainSetBlockIOTune is libvirt's REMOTE_PROC_DOMAIN_SET_BLOCK_IO_TUNE + ProcDomainSetBlockIOTune = 252 + // ProcDomainGetBlockIOTune is libvirt's REMOTE_PROC_DOMAIN_GET_BLOCK_IO_TUNE + ProcDomainGetBlockIOTune = 253 + // ProcDomainSetNumaParameters is libvirt's REMOTE_PROC_DOMAIN_SET_NUMA_PARAMETERS + ProcDomainSetNumaParameters = 254 + // ProcDomainGetNumaParameters is libvirt's REMOTE_PROC_DOMAIN_GET_NUMA_PARAMETERS + 
ProcDomainGetNumaParameters = 255 + // ProcDomainSetInterfaceParameters is libvirt's REMOTE_PROC_DOMAIN_SET_INTERFACE_PARAMETERS + ProcDomainSetInterfaceParameters = 256 + // ProcDomainGetInterfaceParameters is libvirt's REMOTE_PROC_DOMAIN_GET_INTERFACE_PARAMETERS + ProcDomainGetInterfaceParameters = 257 + // ProcDomainShutdownFlags is libvirt's REMOTE_PROC_DOMAIN_SHUTDOWN_FLAGS + ProcDomainShutdownFlags = 258 + // ProcStorageVolWipePattern is libvirt's REMOTE_PROC_STORAGE_VOL_WIPE_PATTERN + ProcStorageVolWipePattern = 259 + // ProcStorageVolResize is libvirt's REMOTE_PROC_STORAGE_VOL_RESIZE + ProcStorageVolResize = 260 + // ProcDomainPmSuspendForDuration is libvirt's REMOTE_PROC_DOMAIN_PM_SUSPEND_FOR_DURATION + ProcDomainPmSuspendForDuration = 261 + // ProcDomainGetCPUStats is libvirt's REMOTE_PROC_DOMAIN_GET_CPU_STATS + ProcDomainGetCPUStats = 262 + // ProcDomainGetDiskErrors is libvirt's REMOTE_PROC_DOMAIN_GET_DISK_ERRORS + ProcDomainGetDiskErrors = 263 + // ProcDomainSetMetadata is libvirt's REMOTE_PROC_DOMAIN_SET_METADATA + ProcDomainSetMetadata = 264 + // ProcDomainGetMetadata is libvirt's REMOTE_PROC_DOMAIN_GET_METADATA + ProcDomainGetMetadata = 265 + // ProcDomainBlockRebase is libvirt's REMOTE_PROC_DOMAIN_BLOCK_REBASE + ProcDomainBlockRebase = 266 + // ProcDomainPmWakeup is libvirt's REMOTE_PROC_DOMAIN_PM_WAKEUP + ProcDomainPmWakeup = 267 + // ProcDomainEventTrayChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_TRAY_CHANGE + ProcDomainEventTrayChange = 268 + // ProcDomainEventPmwakeup is libvirt's REMOTE_PROC_DOMAIN_EVENT_PMWAKEUP + ProcDomainEventPmwakeup = 269 + // ProcDomainEventPmsuspend is libvirt's REMOTE_PROC_DOMAIN_EVENT_PMSUSPEND + ProcDomainEventPmsuspend = 270 + // ProcDomainSnapshotIsCurrent is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_IS_CURRENT + ProcDomainSnapshotIsCurrent = 271 + // ProcDomainSnapshotHasMetadata is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_HAS_METADATA + ProcDomainSnapshotHasMetadata = 272 + // ProcConnectListAllDomains is libvirt's REMOTE_PROC_CONNECT_LIST_ALL_DOMAINS + ProcConnectListAllDomains = 273 + // ProcDomainListAllSnapshots is libvirt's REMOTE_PROC_DOMAIN_LIST_ALL_SNAPSHOTS + ProcDomainListAllSnapshots = 274 + // ProcDomainSnapshotListAllChildren is libvirt's REMOTE_PROC_DOMAIN_SNAPSHOT_LIST_ALL_CHILDREN + ProcDomainSnapshotListAllChildren = 275 + // ProcDomainEventBalloonChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_BALLOON_CHANGE + ProcDomainEventBalloonChange = 276 + // ProcDomainGetHostname is libvirt's REMOTE_PROC_DOMAIN_GET_HOSTNAME + ProcDomainGetHostname = 277 + // ProcDomainGetSecurityLabelList is libvirt's REMOTE_PROC_DOMAIN_GET_SECURITY_LABEL_LIST + ProcDomainGetSecurityLabelList = 278 + // ProcDomainPinEmulator is libvirt's REMOTE_PROC_DOMAIN_PIN_EMULATOR + ProcDomainPinEmulator = 279 + // ProcDomainGetEmulatorPinInfo is libvirt's REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO + ProcDomainGetEmulatorPinInfo = 280 + // ProcConnectListAllStoragePools is libvirt's REMOTE_PROC_CONNECT_LIST_ALL_STORAGE_POOLS + ProcConnectListAllStoragePools = 281 + // ProcStoragePoolListAllVolumes is libvirt's REMOTE_PROC_STORAGE_POOL_LIST_ALL_VOLUMES + ProcStoragePoolListAllVolumes = 282 + // ProcConnectListAllNetworks is libvirt's REMOTE_PROC_CONNECT_LIST_ALL_NETWORKS + ProcConnectListAllNetworks = 283 + // ProcConnectListAllInterfaces is libvirt's REMOTE_PROC_CONNECT_LIST_ALL_INTERFACES + ProcConnectListAllInterfaces = 284 + // ProcConnectListAllNodeDevices is libvirt's REMOTE_PROC_CONNECT_LIST_ALL_NODE_DEVICES + ProcConnectListAllNodeDevices = 285 + // 
ProcConnectListAllNwfilters is libvirt's REMOTE_PROC_CONNECT_LIST_ALL_NWFILTERS + ProcConnectListAllNwfilters = 286 + // ProcConnectListAllSecrets is libvirt's REMOTE_PROC_CONNECT_LIST_ALL_SECRETS + ProcConnectListAllSecrets = 287 + // ProcNodeSetMemoryParameters is libvirt's REMOTE_PROC_NODE_SET_MEMORY_PARAMETERS + ProcNodeSetMemoryParameters = 288 + // ProcNodeGetMemoryParameters is libvirt's REMOTE_PROC_NODE_GET_MEMORY_PARAMETERS + ProcNodeGetMemoryParameters = 289 + // ProcDomainBlockCommit is libvirt's REMOTE_PROC_DOMAIN_BLOCK_COMMIT + ProcDomainBlockCommit = 290 + // ProcNetworkUpdate is libvirt's REMOTE_PROC_NETWORK_UPDATE + ProcNetworkUpdate = 291 + // ProcDomainEventPmsuspendDisk is libvirt's REMOTE_PROC_DOMAIN_EVENT_PMSUSPEND_DISK + ProcDomainEventPmsuspendDisk = 292 + // ProcNodeGetCPUMap is libvirt's REMOTE_PROC_NODE_GET_CPU_MAP + ProcNodeGetCPUMap = 293 + // ProcDomainFstrim is libvirt's REMOTE_PROC_DOMAIN_FSTRIM + ProcDomainFstrim = 294 + // ProcDomainSendProcessSignal is libvirt's REMOTE_PROC_DOMAIN_SEND_PROCESS_SIGNAL + ProcDomainSendProcessSignal = 295 + // ProcDomainOpenChannel is libvirt's REMOTE_PROC_DOMAIN_OPEN_CHANNEL + ProcDomainOpenChannel = 296 + // ProcNodeDeviceLookupScsiHostByWwn is libvirt's REMOTE_PROC_NODE_DEVICE_LOOKUP_SCSI_HOST_BY_WWN + ProcNodeDeviceLookupScsiHostByWwn = 297 + // ProcDomainGetJobStats is libvirt's REMOTE_PROC_DOMAIN_GET_JOB_STATS + ProcDomainGetJobStats = 298 + // ProcDomainMigrateGetCompressionCache is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_GET_COMPRESSION_CACHE + ProcDomainMigrateGetCompressionCache = 299 + // ProcDomainMigrateSetCompressionCache is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_SET_COMPRESSION_CACHE + ProcDomainMigrateSetCompressionCache = 300 + // ProcNodeDeviceDetachFlags is libvirt's REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS + ProcNodeDeviceDetachFlags = 301 + // ProcDomainMigrateBegin3Params is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_BEGIN3_PARAMS + ProcDomainMigrateBegin3Params = 302 + // ProcDomainMigratePrepare3Params is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PREPARE3_PARAMS + ProcDomainMigratePrepare3Params = 303 + // ProcDomainMigratePrepareTunnel3Params is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL3_PARAMS + ProcDomainMigratePrepareTunnel3Params = 304 + // ProcDomainMigratePerform3Params is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_PERFORM3_PARAMS + ProcDomainMigratePerform3Params = 305 + // ProcDomainMigrateFinish3Params is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_FINISH3_PARAMS + ProcDomainMigrateFinish3Params = 306 + // ProcDomainMigrateConfirm3Params is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_CONFIRM3_PARAMS + ProcDomainMigrateConfirm3Params = 307 + // ProcDomainSetMemoryStatsPeriod is libvirt's REMOTE_PROC_DOMAIN_SET_MEMORY_STATS_PERIOD + ProcDomainSetMemoryStatsPeriod = 308 + // ProcDomainCreateXMLWithFiles is libvirt's REMOTE_PROC_DOMAIN_CREATE_XML_WITH_FILES + ProcDomainCreateXMLWithFiles = 309 + // ProcDomainCreateWithFiles is libvirt's REMOTE_PROC_DOMAIN_CREATE_WITH_FILES + ProcDomainCreateWithFiles = 310 + // ProcDomainEventDeviceRemoved is libvirt's REMOTE_PROC_DOMAIN_EVENT_DEVICE_REMOVED + ProcDomainEventDeviceRemoved = 311 + // ProcConnectGetCPUModelNames is libvirt's REMOTE_PROC_CONNECT_GET_CPU_MODEL_NAMES + ProcConnectGetCPUModelNames = 312 + // ProcConnectNetworkEventRegisterAny is libvirt's REMOTE_PROC_CONNECT_NETWORK_EVENT_REGISTER_ANY + ProcConnectNetworkEventRegisterAny = 313 + // ProcConnectNetworkEventDeregisterAny is libvirt's REMOTE_PROC_CONNECT_NETWORK_EVENT_DEREGISTER_ANY + 
ProcConnectNetworkEventDeregisterAny = 314 + // ProcNetworkEventLifecycle is libvirt's REMOTE_PROC_NETWORK_EVENT_LIFECYCLE + ProcNetworkEventLifecycle = 315 + // ProcConnectDomainEventCallbackRegisterAny is libvirt's REMOTE_PROC_CONNECT_DOMAIN_EVENT_CALLBACK_REGISTER_ANY + ProcConnectDomainEventCallbackRegisterAny = 316 + // ProcConnectDomainEventCallbackDeregisterAny is libvirt's REMOTE_PROC_CONNECT_DOMAIN_EVENT_CALLBACK_DEREGISTER_ANY + ProcConnectDomainEventCallbackDeregisterAny = 317 + // ProcDomainEventCallbackLifecycle is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_LIFECYCLE + ProcDomainEventCallbackLifecycle = 318 + // ProcDomainEventCallbackReboot is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_REBOOT + ProcDomainEventCallbackReboot = 319 + // ProcDomainEventCallbackRtcChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_RTC_CHANGE + ProcDomainEventCallbackRtcChange = 320 + // ProcDomainEventCallbackWatchdog is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_WATCHDOG + ProcDomainEventCallbackWatchdog = 321 + // ProcDomainEventCallbackIOError is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_IO_ERROR + ProcDomainEventCallbackIOError = 322 + // ProcDomainEventCallbackGraphics is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_GRAPHICS + ProcDomainEventCallbackGraphics = 323 + // ProcDomainEventCallbackIOErrorReason is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_IO_ERROR_REASON + ProcDomainEventCallbackIOErrorReason = 324 + // ProcDomainEventCallbackControlError is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_CONTROL_ERROR + ProcDomainEventCallbackControlError = 325 + // ProcDomainEventCallbackBlockJob is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_BLOCK_JOB + ProcDomainEventCallbackBlockJob = 326 + // ProcDomainEventCallbackDiskChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DISK_CHANGE + ProcDomainEventCallbackDiskChange = 327 + // ProcDomainEventCallbackTrayChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TRAY_CHANGE + ProcDomainEventCallbackTrayChange = 328 + // ProcDomainEventCallbackPmwakeup is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_PMWAKEUP + ProcDomainEventCallbackPmwakeup = 329 + // ProcDomainEventCallbackPmsuspend is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_PMSUSPEND + ProcDomainEventCallbackPmsuspend = 330 + // ProcDomainEventCallbackBalloonChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_BALLOON_CHANGE + ProcDomainEventCallbackBalloonChange = 331 + // ProcDomainEventCallbackPmsuspendDisk is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_PMSUSPEND_DISK + ProcDomainEventCallbackPmsuspendDisk = 332 + // ProcDomainEventCallbackDeviceRemoved is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DEVICE_REMOVED + ProcDomainEventCallbackDeviceRemoved = 333 + // ProcDomainCoreDumpWithFormat is libvirt's REMOTE_PROC_DOMAIN_CORE_DUMP_WITH_FORMAT + ProcDomainCoreDumpWithFormat = 334 + // ProcDomainFsfreeze is libvirt's REMOTE_PROC_DOMAIN_FSFREEZE + ProcDomainFsfreeze = 335 + // ProcDomainFsthaw is libvirt's REMOTE_PROC_DOMAIN_FSTHAW + ProcDomainFsthaw = 336 + // ProcDomainGetTime is libvirt's REMOTE_PROC_DOMAIN_GET_TIME + ProcDomainGetTime = 337 + // ProcDomainSetTime is libvirt's REMOTE_PROC_DOMAIN_SET_TIME + ProcDomainSetTime = 338 + // ProcDomainEventBlockJob2 is libvirt's REMOTE_PROC_DOMAIN_EVENT_BLOCK_JOB_2 + ProcDomainEventBlockJob2 = 339 + // ProcNodeGetFreePages is libvirt's REMOTE_PROC_NODE_GET_FREE_PAGES + ProcNodeGetFreePages = 340 + // ProcNetworkGetDhcpLeases is libvirt's REMOTE_PROC_NETWORK_GET_DHCP_LEASES + ProcNetworkGetDhcpLeases = 341 + // 
ProcConnectGetDomainCapabilities is libvirt's REMOTE_PROC_CONNECT_GET_DOMAIN_CAPABILITIES + ProcConnectGetDomainCapabilities = 342 + // ProcDomainOpenGraphicsFd is libvirt's REMOTE_PROC_DOMAIN_OPEN_GRAPHICS_FD + ProcDomainOpenGraphicsFd = 343 + // ProcConnectGetAllDomainStats is libvirt's REMOTE_PROC_CONNECT_GET_ALL_DOMAIN_STATS + ProcConnectGetAllDomainStats = 344 + // ProcDomainBlockCopy is libvirt's REMOTE_PROC_DOMAIN_BLOCK_COPY + ProcDomainBlockCopy = 345 + // ProcDomainEventCallbackTunable is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TUNABLE + ProcDomainEventCallbackTunable = 346 + // ProcNodeAllocPages is libvirt's REMOTE_PROC_NODE_ALLOC_PAGES + ProcNodeAllocPages = 347 + // ProcDomainEventCallbackAgentLifecycle is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_AGENT_LIFECYCLE + ProcDomainEventCallbackAgentLifecycle = 348 + // ProcDomainGetFsinfo is libvirt's REMOTE_PROC_DOMAIN_GET_FSINFO + ProcDomainGetFsinfo = 349 + // ProcDomainDefineXMLFlags is libvirt's REMOTE_PROC_DOMAIN_DEFINE_XML_FLAGS + ProcDomainDefineXMLFlags = 350 + // ProcDomainGetIothreadInfo is libvirt's REMOTE_PROC_DOMAIN_GET_IOTHREAD_INFO + ProcDomainGetIothreadInfo = 351 + // ProcDomainPinIothread is libvirt's REMOTE_PROC_DOMAIN_PIN_IOTHREAD + ProcDomainPinIothread = 352 + // ProcDomainInterfaceAddresses is libvirt's REMOTE_PROC_DOMAIN_INTERFACE_ADDRESSES + ProcDomainInterfaceAddresses = 353 + // ProcDomainEventCallbackDeviceAdded is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DEVICE_ADDED + ProcDomainEventCallbackDeviceAdded = 354 + // ProcDomainAddIothread is libvirt's REMOTE_PROC_DOMAIN_ADD_IOTHREAD + ProcDomainAddIothread = 355 + // ProcDomainDelIothread is libvirt's REMOTE_PROC_DOMAIN_DEL_IOTHREAD + ProcDomainDelIothread = 356 + // ProcDomainSetUserPassword is libvirt's REMOTE_PROC_DOMAIN_SET_USER_PASSWORD + ProcDomainSetUserPassword = 357 + // ProcDomainRename is libvirt's REMOTE_PROC_DOMAIN_RENAME + ProcDomainRename = 358 + // ProcDomainEventCallbackMigrationIteration is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_MIGRATION_ITERATION + ProcDomainEventCallbackMigrationIteration = 359 + // ProcConnectRegisterCloseCallback is libvirt's REMOTE_PROC_CONNECT_REGISTER_CLOSE_CALLBACK + ProcConnectRegisterCloseCallback = 360 + // ProcConnectUnregisterCloseCallback is libvirt's REMOTE_PROC_CONNECT_UNREGISTER_CLOSE_CALLBACK + ProcConnectUnregisterCloseCallback = 361 + // ProcConnectEventConnectionClosed is libvirt's REMOTE_PROC_CONNECT_EVENT_CONNECTION_CLOSED + ProcConnectEventConnectionClosed = 362 + // ProcDomainEventCallbackJobCompleted is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_JOB_COMPLETED + ProcDomainEventCallbackJobCompleted = 363 + // ProcDomainMigrateStartPostCopy is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_START_POST_COPY + ProcDomainMigrateStartPostCopy = 364 + // ProcDomainGetPerfEvents is libvirt's REMOTE_PROC_DOMAIN_GET_PERF_EVENTS + ProcDomainGetPerfEvents = 365 + // ProcDomainSetPerfEvents is libvirt's REMOTE_PROC_DOMAIN_SET_PERF_EVENTS + ProcDomainSetPerfEvents = 366 + // ProcDomainEventCallbackDeviceRemovalFailed is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DEVICE_REMOVAL_FAILED + ProcDomainEventCallbackDeviceRemovalFailed = 367 + // ProcConnectStoragePoolEventRegisterAny is libvirt's REMOTE_PROC_CONNECT_STORAGE_POOL_EVENT_REGISTER_ANY + ProcConnectStoragePoolEventRegisterAny = 368 + // ProcConnectStoragePoolEventDeregisterAny is libvirt's REMOTE_PROC_CONNECT_STORAGE_POOL_EVENT_DEREGISTER_ANY + ProcConnectStoragePoolEventDeregisterAny = 369 + // ProcStoragePoolEventLifecycle is libvirt's 
REMOTE_PROC_STORAGE_POOL_EVENT_LIFECYCLE + ProcStoragePoolEventLifecycle = 370 + // ProcDomainGetGuestVcpus is libvirt's REMOTE_PROC_DOMAIN_GET_GUEST_VCPUS + ProcDomainGetGuestVcpus = 371 + // ProcDomainSetGuestVcpus is libvirt's REMOTE_PROC_DOMAIN_SET_GUEST_VCPUS + ProcDomainSetGuestVcpus = 372 + // ProcStoragePoolEventRefresh is libvirt's REMOTE_PROC_STORAGE_POOL_EVENT_REFRESH + ProcStoragePoolEventRefresh = 373 + // ProcConnectNodeDeviceEventRegisterAny is libvirt's REMOTE_PROC_CONNECT_NODE_DEVICE_EVENT_REGISTER_ANY + ProcConnectNodeDeviceEventRegisterAny = 374 + // ProcConnectNodeDeviceEventDeregisterAny is libvirt's REMOTE_PROC_CONNECT_NODE_DEVICE_EVENT_DEREGISTER_ANY + ProcConnectNodeDeviceEventDeregisterAny = 375 + // ProcNodeDeviceEventLifecycle is libvirt's REMOTE_PROC_NODE_DEVICE_EVENT_LIFECYCLE + ProcNodeDeviceEventLifecycle = 376 + // ProcNodeDeviceEventUpdate is libvirt's REMOTE_PROC_NODE_DEVICE_EVENT_UPDATE + ProcNodeDeviceEventUpdate = 377 + // ProcStorageVolGetInfoFlags is libvirt's REMOTE_PROC_STORAGE_VOL_GET_INFO_FLAGS + ProcStorageVolGetInfoFlags = 378 + // ProcDomainEventCallbackMetadataChange is libvirt's REMOTE_PROC_DOMAIN_EVENT_CALLBACK_METADATA_CHANGE + ProcDomainEventCallbackMetadataChange = 379 + // ProcConnectSecretEventRegisterAny is libvirt's REMOTE_PROC_CONNECT_SECRET_EVENT_REGISTER_ANY + ProcConnectSecretEventRegisterAny = 380 + // ProcConnectSecretEventDeregisterAny is libvirt's REMOTE_PROC_CONNECT_SECRET_EVENT_DEREGISTER_ANY + ProcConnectSecretEventDeregisterAny = 381 + // ProcSecretEventLifecycle is libvirt's REMOTE_PROC_SECRET_EVENT_LIFECYCLE + ProcSecretEventLifecycle = 382 + // ProcSecretEventValueChanged is libvirt's REMOTE_PROC_SECRET_EVENT_VALUE_CHANGED + ProcSecretEventValueChanged = 383 + // ProcDomainSetVcpu is libvirt's REMOTE_PROC_DOMAIN_SET_VCPU + ProcDomainSetVcpu = 384 + // ProcDomainEventBlockThreshold is libvirt's REMOTE_PROC_DOMAIN_EVENT_BLOCK_THRESHOLD + ProcDomainEventBlockThreshold = 385 + // ProcDomainSetBlockThreshold is libvirt's REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD + ProcDomainSetBlockThreshold = 386 + // ProcDomainMigrateGetMaxDowntime is libvirt's REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_DOWNTIME + ProcDomainMigrateGetMaxDowntime = 387 + // ProcDomainManagedSaveGetXMLDesc is libvirt's REMOTE_PROC_DOMAIN_MANAGED_SAVE_GET_XML_DESC + ProcDomainManagedSaveGetXMLDesc = 388 + // ProcDomainManagedSaveDefineXML is libvirt's REMOTE_PROC_DOMAIN_MANAGED_SAVE_DEFINE_XML + ProcDomainManagedSaveDefineXML = 389 + // ProcDomainSetLifecycleAction is libvirt's REMOTE_PROC_DOMAIN_SET_LIFECYCLE_ACTION + ProcDomainSetLifecycleAction = 390 + + + // From consts: + // StringMax is libvirt's REMOTE_STRING_MAX + StringMax = 4194304 + // DomainListMax is libvirt's REMOTE_DOMAIN_LIST_MAX + DomainListMax = 16384 + // CpumapMax is libvirt's REMOTE_CPUMAP_MAX + CpumapMax = 2048 + // VcpuinfoMax is libvirt's REMOTE_VCPUINFO_MAX + VcpuinfoMax = 16384 + // CpumapsMax is libvirt's REMOTE_CPUMAPS_MAX + CpumapsMax = 8388608 + // IothreadInfoMax is libvirt's REMOTE_IOTHREAD_INFO_MAX + IothreadInfoMax = 16384 + // MigrateCookieMax is libvirt's REMOTE_MIGRATE_COOKIE_MAX + MigrateCookieMax = 4194304 + // NetworkListMax is libvirt's REMOTE_NETWORK_LIST_MAX + NetworkListMax = 16384 + // InterfaceListMax is libvirt's REMOTE_INTERFACE_LIST_MAX + InterfaceListMax = 16384 + // StoragePoolListMax is libvirt's REMOTE_STORAGE_POOL_LIST_MAX + StoragePoolListMax = 16384 + // StorageVolListMax is libvirt's REMOTE_STORAGE_VOL_LIST_MAX + StorageVolListMax = 16384 + // NodeDeviceListMax is 
libvirt's REMOTE_NODE_DEVICE_LIST_MAX + NodeDeviceListMax = 65536 + // NodeDeviceCapsListMax is libvirt's REMOTE_NODE_DEVICE_CAPS_LIST_MAX + NodeDeviceCapsListMax = 65536 + // NwfilterListMax is libvirt's REMOTE_NWFILTER_LIST_MAX + NwfilterListMax = 16384 + // DomainSchedulerParametersMax is libvirt's REMOTE_DOMAIN_SCHEDULER_PARAMETERS_MAX + DomainSchedulerParametersMax = 16 + // DomainBlkioParametersMax is libvirt's REMOTE_DOMAIN_BLKIO_PARAMETERS_MAX + DomainBlkioParametersMax = 16 + // DomainMemoryParametersMax is libvirt's REMOTE_DOMAIN_MEMORY_PARAMETERS_MAX + DomainMemoryParametersMax = 16 + // DomainBlockIOTuneParametersMax is libvirt's REMOTE_DOMAIN_BLOCK_IO_TUNE_PARAMETERS_MAX + DomainBlockIOTuneParametersMax = 32 + // DomainNumaParametersMax is libvirt's REMOTE_DOMAIN_NUMA_PARAMETERS_MAX + DomainNumaParametersMax = 16 + // DomainPerfEventsMax is libvirt's REMOTE_DOMAIN_PERF_EVENTS_MAX + DomainPerfEventsMax = 64 + // DomainBlockCopyParametersMax is libvirt's REMOTE_DOMAIN_BLOCK_COPY_PARAMETERS_MAX + DomainBlockCopyParametersMax = 16 + // NodeCPUStatsMax is libvirt's REMOTE_NODE_CPU_STATS_MAX + NodeCPUStatsMax = 16 + // NodeMemoryStatsMax is libvirt's REMOTE_NODE_MEMORY_STATS_MAX + NodeMemoryStatsMax = 16 + // DomainBlockStatsParametersMax is libvirt's REMOTE_DOMAIN_BLOCK_STATS_PARAMETERS_MAX + DomainBlockStatsParametersMax = 16 + // NodeMaxCells is libvirt's REMOTE_NODE_MAX_CELLS + NodeMaxCells = 1024 + // AuthSaslDataMax is libvirt's REMOTE_AUTH_SASL_DATA_MAX + AuthSaslDataMax = 65536 + // AuthTypeListMax is libvirt's REMOTE_AUTH_TYPE_LIST_MAX + AuthTypeListMax = 20 + // DomainMemoryStatsMax is libvirt's REMOTE_DOMAIN_MEMORY_STATS_MAX + DomainMemoryStatsMax = 1024 + // DomainSnapshotListMax is libvirt's REMOTE_DOMAIN_SNAPSHOT_LIST_MAX + DomainSnapshotListMax = 16384 + // DomainBlockPeekBufferMax is libvirt's REMOTE_DOMAIN_BLOCK_PEEK_BUFFER_MAX + DomainBlockPeekBufferMax = 4194304 + // DomainMemoryPeekBufferMax is libvirt's REMOTE_DOMAIN_MEMORY_PEEK_BUFFER_MAX + DomainMemoryPeekBufferMax = 4194304 + // SecurityLabelListMax is libvirt's REMOTE_SECURITY_LABEL_LIST_MAX + SecurityLabelListMax = 64 + // SecretValueMax is libvirt's REMOTE_SECRET_VALUE_MAX + SecretValueMax = 65536 + // SecretListMax is libvirt's REMOTE_SECRET_LIST_MAX + SecretListMax = 16384 + // CPUBaselineMax is libvirt's REMOTE_CPU_BASELINE_MAX + CPUBaselineMax = 256 + // DomainSendKeyMax is libvirt's REMOTE_DOMAIN_SEND_KEY_MAX + DomainSendKeyMax = 16 + // DomainInterfaceParametersMax is libvirt's REMOTE_DOMAIN_INTERFACE_PARAMETERS_MAX + DomainInterfaceParametersMax = 16 + // DomainGetCPUStatsNcpusMax is libvirt's REMOTE_DOMAIN_GET_CPU_STATS_NCPUS_MAX + DomainGetCPUStatsNcpusMax = 128 + // DomainGetCPUStatsMax is libvirt's REMOTE_DOMAIN_GET_CPU_STATS_MAX + DomainGetCPUStatsMax = 2048 + // DomainDiskErrorsMax is libvirt's REMOTE_DOMAIN_DISK_ERRORS_MAX + DomainDiskErrorsMax = 256 + // NodeMemoryParametersMax is libvirt's REMOTE_NODE_MEMORY_PARAMETERS_MAX + NodeMemoryParametersMax = 64 + // DomainMigrateParamListMax is libvirt's REMOTE_DOMAIN_MIGRATE_PARAM_LIST_MAX + DomainMigrateParamListMax = 64 + // DomainJobStatsMax is libvirt's REMOTE_DOMAIN_JOB_STATS_MAX + DomainJobStatsMax = 64 + // ConnectCPUModelsMax is libvirt's REMOTE_CONNECT_CPU_MODELS_MAX + ConnectCPUModelsMax = 8192 + // DomainFsfreezeMountpointsMax is libvirt's REMOTE_DOMAIN_FSFREEZE_MOUNTPOINTS_MAX + DomainFsfreezeMountpointsMax = 256 + // NetworkDhcpLeasesMax is libvirt's REMOTE_NETWORK_DHCP_LEASES_MAX + NetworkDhcpLeasesMax = 65536 + // 
ConnectGetAllDomainStatsMax is libvirt's REMOTE_CONNECT_GET_ALL_DOMAIN_STATS_MAX + ConnectGetAllDomainStatsMax = 262144 + // DomainEventTunableMax is libvirt's REMOTE_DOMAIN_EVENT_TUNABLE_MAX + DomainEventTunableMax = 2048 + // DomainFsinfoMax is libvirt's REMOTE_DOMAIN_FSINFO_MAX + DomainFsinfoMax = 256 + // DomainFsinfoDisksMax is libvirt's REMOTE_DOMAIN_FSINFO_DISKS_MAX + DomainFsinfoDisksMax = 256 + // DomainInterfaceMax is libvirt's REMOTE_DOMAIN_INTERFACE_MAX + DomainInterfaceMax = 2048 + // DomainIPAddrMax is libvirt's REMOTE_DOMAIN_IP_ADDR_MAX + DomainIPAddrMax = 2048 + // DomainGuestVcpuParamsMax is libvirt's REMOTE_DOMAIN_GUEST_VCPU_PARAMS_MAX + DomainGuestVcpuParamsMax = 64 + // DomainEventGraphicsIdentityMax is libvirt's REMOTE_DOMAIN_EVENT_GRAPHICS_IDENTITY_MAX + DomainEventGraphicsIdentityMax = 20 + // Program is libvirt's REMOTE_PROGRAM + Program = 0x20008086 + // ProtocolVersion is libvirt's REMOTE_PROTOCOL_VERSION + ProtocolVersion = 1 +) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/internal/constants/constants.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/internal/constants/constants.go new file mode 100644 index 00000000..da94c250 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/internal/constants/constants.go @@ -0,0 +1,48 @@ +// Copyright 2016 The go-libvirt Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package constants provides shared data for the libvirt package. This file +// includes only things not generated automatically by the parser that runs on +// libvirt's remote_protocol.x file - see constants.gen.go for the generated +// definitions. +package constants + +// qemu constants +const ( + ProgramQEMU = 0x20008087 + ProgramKeepAlive = 0x6b656570 +) + +// qemu procedure identifiers +const ( + QEMUDomainMonitor = 1 + QEMUConnectDomainMonitorEventRegister = 4 + QEMUConnectDomainMonitorEventDeregister = 5 + QEMUDomainMonitorEvent = 6 +) + +const ( + // PacketLengthSize is the packet length, in bytes. + PacketLengthSize = 4 + + // HeaderSize is the packet header size, in bytes. + HeaderSize = 24 + + // UUIDSize is the length of a UUID, in bytes. + UUIDSize = 16 + + // TypedParamFieldLength is VIR_TYPED_PARAM_FIELD_LENGTH, and is the maximum + // length of the Field string in virTypedParameter structs. + TypedParamFieldLength = 80 +) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.gen.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.gen.go new file mode 100644 index 00000000..b6aabaf1 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.gen.go @@ -0,0 +1,17020 @@ +// Copyright 2017 The go-libvirt Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// Code generated by internal/lvgen/generate.go. DO NOT EDIT. +// +// To regenerate, run 'go generate' in internal/lvgen. +// + +package libvirt + +import ( + "bytes" + "fmt" + + "github.com/davecgh/go-xdr/xdr2" + "github.com/digitalocean/go-libvirt/internal/constants" +) + +const ( + VirUUIDBuflen = 16 +) + +// +// Typedefs: +// +// OptString is libvirt's remote_string +type OptString []string +// UUID is libvirt's remote_uuid +type UUID [VirUUIDBuflen]byte +// OptDomain is libvirt's remote_domain +type OptDomain []Domain +// OptNetwork is libvirt's remote_network +type OptNetwork []Network +// OptNwfilter is libvirt's remote_nwfilter +type OptNwfilter []Nwfilter +// OptStoragePool is libvirt's remote_storage_pool +type OptStoragePool []StoragePool +// OptStorageVol is libvirt's remote_storage_vol +type OptStorageVol []StorageVol +// OptNodeDevice is libvirt's remote_node_device +type OptNodeDevice []NodeDevice +// OptSecret is libvirt's remote_secret +type OptSecret []Secret + +// +// Enums: +// +// AuthType is libvirt's remote_auth_type +type AuthType int32 +// Procedure is libvirt's remote_procedure +type Procedure int32 + +// +// Structs: +// +// Domain is libvirt's remote_nonnull_domain +type Domain struct { + Name string + UUID UUID + ID int32 +} + +// Network is libvirt's remote_nonnull_network +type Network struct { + Name string + UUID UUID +} + +// Nwfilter is libvirt's remote_nonnull_nwfilter +type Nwfilter struct { + Name string + UUID UUID +} + +// Interface is libvirt's remote_nonnull_interface +type Interface struct { + Name string + Mac string +} + +// StoragePool is libvirt's remote_nonnull_storage_pool +type StoragePool struct { + Name string + UUID UUID +} + +// StorageVol is libvirt's remote_nonnull_storage_vol +type StorageVol struct { + Pool string + Name string + Key string +} + +// NodeDevice is libvirt's remote_nonnull_node_device +type NodeDevice struct { + Name string +} + +// Secret is libvirt's remote_nonnull_secret +type Secret struct { + UUID UUID + UsageType int32 + UsageID string +} + +// DomainSnapshot is libvirt's remote_nonnull_domain_snapshot +type DomainSnapshot struct { + Name string + Dom Domain +} + +// Error is libvirt's remote_error +type Error struct { + Code int32 + OptDomain int32 + Message OptString + Level int32 + Dom OptDomain + Str1 OptString + Str2 OptString + Str3 OptString + Int1 int32 + Int2 int32 + Net OptNetwork +} + +// VcpuInfo is libvirt's remote_vcpu_info +type VcpuInfo struct { + Number uint32 + State int32 + CPUTime uint64 + CPU int32 +} + +// TypedParam is libvirt's remote_typed_param +type TypedParam struct { + Field string + Value TypedParamValue +} + +// NodeGetCPUStats is libvirt's remote_node_get_cpu_stats +type NodeGetCPUStats struct { + Field string + Value uint64 +} + +// NodeGetMemoryStats is libvirt's remote_node_get_memory_stats +type NodeGetMemoryStats struct { + Field string + Value uint64 +} + +// DomainDiskError is libvirt's remote_domain_disk_error 
+type DomainDiskError struct { + Disk string + Error int32 +} + +// ConnectOpenArgs is libvirt's remote_connect_open_args +type ConnectOpenArgs struct { + Name OptString + Flags ConnectFlags +} + +// ConnectSupportsFeatureArgs is libvirt's remote_connect_supports_feature_args +type ConnectSupportsFeatureArgs struct { + Feature int32 +} + +// ConnectSupportsFeatureRet is libvirt's remote_connect_supports_feature_ret +type ConnectSupportsFeatureRet struct { + Supported int32 +} + +// ConnectGetTypeRet is libvirt's remote_connect_get_type_ret +type ConnectGetTypeRet struct { + Type string +} + +// ConnectGetVersionRet is libvirt's remote_connect_get_version_ret +type ConnectGetVersionRet struct { + HvVer uint64 +} + +// ConnectGetLibVersionRet is libvirt's remote_connect_get_lib_version_ret +type ConnectGetLibVersionRet struct { + LibVer uint64 +} + +// ConnectGetHostnameRet is libvirt's remote_connect_get_hostname_ret +type ConnectGetHostnameRet struct { + Hostname string +} + +// ConnectGetSysinfoArgs is libvirt's remote_connect_get_sysinfo_args +type ConnectGetSysinfoArgs struct { + Flags uint32 +} + +// ConnectGetSysinfoRet is libvirt's remote_connect_get_sysinfo_ret +type ConnectGetSysinfoRet struct { + Sysinfo string +} + +// ConnectGetUriRet is libvirt's remote_connect_get_uri_ret +type ConnectGetUriRet struct { + Uri string +} + +// ConnectGetMaxVcpusArgs is libvirt's remote_connect_get_max_vcpus_args +type ConnectGetMaxVcpusArgs struct { + Type OptString +} + +// ConnectGetMaxVcpusRet is libvirt's remote_connect_get_max_vcpus_ret +type ConnectGetMaxVcpusRet struct { + MaxVcpus int32 +} + +// NodeGetInfoRet is libvirt's remote_node_get_info_ret +type NodeGetInfoRet struct { + Model [32]int8 + Memory uint64 + Cpus int32 + Mhz int32 + Nodes int32 + Sockets int32 + Cores int32 + Threads int32 +} + +// ConnectGetCapabilitiesRet is libvirt's remote_connect_get_capabilities_ret +type ConnectGetCapabilitiesRet struct { + Capabilities string +} + +// ConnectGetDomainCapabilitiesArgs is libvirt's remote_connect_get_domain_capabilities_args +type ConnectGetDomainCapabilitiesArgs struct { + Emulatorbin OptString + Arch OptString + Machine OptString + Virttype OptString + Flags uint32 +} + +// ConnectGetDomainCapabilitiesRet is libvirt's remote_connect_get_domain_capabilities_ret +type ConnectGetDomainCapabilitiesRet struct { + Capabilities string +} + +// NodeGetCPUStatsArgs is libvirt's remote_node_get_cpu_stats_args +type NodeGetCPUStatsArgs struct { + CPUNum int32 + Nparams int32 + Flags uint32 +} + +// NodeGetCPUStatsRet is libvirt's remote_node_get_cpu_stats_ret +type NodeGetCPUStatsRet struct { + Params []NodeGetCPUStats + Nparams int32 +} + +// NodeGetMemoryStatsArgs is libvirt's remote_node_get_memory_stats_args +type NodeGetMemoryStatsArgs struct { + Nparams int32 + CellNum int32 + Flags uint32 +} + +// NodeGetMemoryStatsRet is libvirt's remote_node_get_memory_stats_ret +type NodeGetMemoryStatsRet struct { + Params []NodeGetMemoryStats + Nparams int32 +} + +// NodeGetCellsFreeMemoryArgs is libvirt's remote_node_get_cells_free_memory_args +type NodeGetCellsFreeMemoryArgs struct { + StartCell int32 + Maxcells int32 +} + +// NodeGetCellsFreeMemoryRet is libvirt's remote_node_get_cells_free_memory_ret +type NodeGetCellsFreeMemoryRet struct { + Cells []uint64 +} + +// NodeGetFreeMemoryRet is libvirt's remote_node_get_free_memory_ret +type NodeGetFreeMemoryRet struct { + FreeMem uint64 +} + +// DomainGetSchedulerTypeArgs is libvirt's remote_domain_get_scheduler_type_args +type 
DomainGetSchedulerTypeArgs struct { + Dom Domain +} + +// DomainGetSchedulerTypeRet is libvirt's remote_domain_get_scheduler_type_ret +type DomainGetSchedulerTypeRet struct { + Type string + Nparams int32 +} + +// DomainGetSchedulerParametersArgs is libvirt's remote_domain_get_scheduler_parameters_args +type DomainGetSchedulerParametersArgs struct { + Dom Domain + Nparams int32 +} + +// DomainGetSchedulerParametersRet is libvirt's remote_domain_get_scheduler_parameters_ret +type DomainGetSchedulerParametersRet struct { + Params []TypedParam +} + +// DomainGetSchedulerParametersFlagsArgs is libvirt's remote_domain_get_scheduler_parameters_flags_args +type DomainGetSchedulerParametersFlagsArgs struct { + Dom Domain + Nparams int32 + Flags uint32 +} + +// DomainGetSchedulerParametersFlagsRet is libvirt's remote_domain_get_scheduler_parameters_flags_ret +type DomainGetSchedulerParametersFlagsRet struct { + Params []TypedParam +} + +// DomainSetSchedulerParametersArgs is libvirt's remote_domain_set_scheduler_parameters_args +type DomainSetSchedulerParametersArgs struct { + Dom Domain + Params []TypedParam +} + +// DomainSetSchedulerParametersFlagsArgs is libvirt's remote_domain_set_scheduler_parameters_flags_args +type DomainSetSchedulerParametersFlagsArgs struct { + Dom Domain + Params []TypedParam + Flags uint32 +} + +// DomainSetBlkioParametersArgs is libvirt's remote_domain_set_blkio_parameters_args +type DomainSetBlkioParametersArgs struct { + Dom Domain + Params []TypedParam + Flags uint32 +} + +// DomainGetBlkioParametersArgs is libvirt's remote_domain_get_blkio_parameters_args +type DomainGetBlkioParametersArgs struct { + Dom Domain + Nparams int32 + Flags uint32 +} + +// DomainGetBlkioParametersRet is libvirt's remote_domain_get_blkio_parameters_ret +type DomainGetBlkioParametersRet struct { + Params []TypedParam + Nparams int32 +} + +// DomainSetMemoryParametersArgs is libvirt's remote_domain_set_memory_parameters_args +type DomainSetMemoryParametersArgs struct { + Dom Domain + Params []TypedParam + Flags uint32 +} + +// DomainGetMemoryParametersArgs is libvirt's remote_domain_get_memory_parameters_args +type DomainGetMemoryParametersArgs struct { + Dom Domain + Nparams int32 + Flags uint32 +} + +// DomainGetMemoryParametersRet is libvirt's remote_domain_get_memory_parameters_ret +type DomainGetMemoryParametersRet struct { + Params []TypedParam + Nparams int32 +} + +// DomainBlockResizeArgs is libvirt's remote_domain_block_resize_args +type DomainBlockResizeArgs struct { + Dom Domain + Disk string + Size uint64 + Flags DomainBlockResizeFlags +} + +// DomainSetNumaParametersArgs is libvirt's remote_domain_set_numa_parameters_args +type DomainSetNumaParametersArgs struct { + Dom Domain + Params []TypedParam + Flags uint32 +} + +// DomainGetNumaParametersArgs is libvirt's remote_domain_get_numa_parameters_args +type DomainGetNumaParametersArgs struct { + Dom Domain + Nparams int32 + Flags uint32 +} + +// DomainGetNumaParametersRet is libvirt's remote_domain_get_numa_parameters_ret +type DomainGetNumaParametersRet struct { + Params []TypedParam + Nparams int32 +} + +// DomainSetPerfEventsArgs is libvirt's remote_domain_set_perf_events_args +type DomainSetPerfEventsArgs struct { + Dom Domain + Params []TypedParam + Flags DomainModificationImpact +} + +// DomainGetPerfEventsArgs is libvirt's remote_domain_get_perf_events_args +type DomainGetPerfEventsArgs struct { + Dom Domain + Flags DomainModificationImpact +} + +// DomainGetPerfEventsRet is libvirt's remote_domain_get_perf_events_ret 
+type DomainGetPerfEventsRet struct { + Params []TypedParam +} + +// DomainBlockStatsArgs is libvirt's remote_domain_block_stats_args +type DomainBlockStatsArgs struct { + Dom Domain + Path string +} + +// DomainBlockStatsRet is libvirt's remote_domain_block_stats_ret +type DomainBlockStatsRet struct { + RdReq int64 + RdBytes int64 + WrReq int64 + WrBytes int64 + Errs int64 +} + +// DomainBlockStatsFlagsArgs is libvirt's remote_domain_block_stats_flags_args +type DomainBlockStatsFlagsArgs struct { + Dom Domain + Path string + Nparams int32 + Flags uint32 +} + +// DomainBlockStatsFlagsRet is libvirt's remote_domain_block_stats_flags_ret +type DomainBlockStatsFlagsRet struct { + Params []TypedParam + Nparams int32 +} + +// DomainInterfaceStatsArgs is libvirt's remote_domain_interface_stats_args +type DomainInterfaceStatsArgs struct { + Dom Domain + Device string +} + +// DomainInterfaceStatsRet is libvirt's remote_domain_interface_stats_ret +type DomainInterfaceStatsRet struct { + RxBytes int64 + RxPackets int64 + RxErrs int64 + RxDrop int64 + TxBytes int64 + TxPackets int64 + TxErrs int64 + TxDrop int64 +} + +// DomainSetInterfaceParametersArgs is libvirt's remote_domain_set_interface_parameters_args +type DomainSetInterfaceParametersArgs struct { + Dom Domain + Device string + Params []TypedParam + Flags uint32 +} + +// DomainGetInterfaceParametersArgs is libvirt's remote_domain_get_interface_parameters_args +type DomainGetInterfaceParametersArgs struct { + Dom Domain + Device string + Nparams int32 + Flags DomainModificationImpact +} + +// DomainGetInterfaceParametersRet is libvirt's remote_domain_get_interface_parameters_ret +type DomainGetInterfaceParametersRet struct { + Params []TypedParam + Nparams int32 +} + +// DomainMemoryStatsArgs is libvirt's remote_domain_memory_stats_args +type DomainMemoryStatsArgs struct { + Dom Domain + MaxStats uint32 + Flags uint32 +} + +// DomainMemoryStat is libvirt's remote_domain_memory_stat +type DomainMemoryStat struct { + Tag int32 + Val uint64 +} + +// DomainMemoryStatsRet is libvirt's remote_domain_memory_stats_ret +type DomainMemoryStatsRet struct { + Stats []DomainMemoryStat +} + +// DomainBlockPeekArgs is libvirt's remote_domain_block_peek_args +type DomainBlockPeekArgs struct { + Dom Domain + Path string + Offset uint64 + Size uint32 + Flags uint32 +} + +// DomainBlockPeekRet is libvirt's remote_domain_block_peek_ret +type DomainBlockPeekRet struct { + Buffer []byte +} + +// DomainMemoryPeekArgs is libvirt's remote_domain_memory_peek_args +type DomainMemoryPeekArgs struct { + Dom Domain + Offset uint64 + Size uint32 + Flags DomainMemoryFlags +} + +// DomainMemoryPeekRet is libvirt's remote_domain_memory_peek_ret +type DomainMemoryPeekRet struct { + Buffer []byte +} + +// DomainGetBlockInfoArgs is libvirt's remote_domain_get_block_info_args +type DomainGetBlockInfoArgs struct { + Dom Domain + Path string + Flags uint32 +} + +// DomainGetBlockInfoRet is libvirt's remote_domain_get_block_info_ret +type DomainGetBlockInfoRet struct { + Allocation uint64 + Capacity uint64 + Physical uint64 +} + +// ConnectListDomainsArgs is libvirt's remote_connect_list_domains_args +type ConnectListDomainsArgs struct { + Maxids int32 +} + +// ConnectListDomainsRet is libvirt's remote_connect_list_domains_ret +type ConnectListDomainsRet struct { + Ids []int32 +} + +// ConnectNumOfDomainsRet is libvirt's remote_connect_num_of_domains_ret +type ConnectNumOfDomainsRet struct { + Num int32 +} + +// DomainCreateXMLArgs is libvirt's remote_domain_create_xml_args +type 
DomainCreateXMLArgs struct { + XMLDesc string + Flags DomainCreateFlags +} + +// DomainCreateXMLRet is libvirt's remote_domain_create_xml_ret +type DomainCreateXMLRet struct { + Dom Domain +} + +// DomainCreateXMLWithFilesArgs is libvirt's remote_domain_create_xml_with_files_args +type DomainCreateXMLWithFilesArgs struct { + XMLDesc string + Flags DomainCreateFlags +} + +// DomainCreateXMLWithFilesRet is libvirt's remote_domain_create_xml_with_files_ret +type DomainCreateXMLWithFilesRet struct { + Dom Domain +} + +// DomainLookupByIDArgs is libvirt's remote_domain_lookup_by_id_args +type DomainLookupByIDArgs struct { + ID int32 +} + +// DomainLookupByIDRet is libvirt's remote_domain_lookup_by_id_ret +type DomainLookupByIDRet struct { + Dom Domain +} + +// DomainLookupByUUIDArgs is libvirt's remote_domain_lookup_by_uuid_args +type DomainLookupByUUIDArgs struct { + UUID UUID +} + +// DomainLookupByUUIDRet is libvirt's remote_domain_lookup_by_uuid_ret +type DomainLookupByUUIDRet struct { + Dom Domain +} + +// DomainLookupByNameArgs is libvirt's remote_domain_lookup_by_name_args +type DomainLookupByNameArgs struct { + Name string +} + +// DomainLookupByNameRet is libvirt's remote_domain_lookup_by_name_ret +type DomainLookupByNameRet struct { + Dom Domain +} + +// DomainSuspendArgs is libvirt's remote_domain_suspend_args +type DomainSuspendArgs struct { + Dom Domain +} + +// DomainResumeArgs is libvirt's remote_domain_resume_args +type DomainResumeArgs struct { + Dom Domain +} + +// DomainPmSuspendForDurationArgs is libvirt's remote_domain_pm_suspend_for_duration_args +type DomainPmSuspendForDurationArgs struct { + Dom Domain + Target uint32 + Duration uint64 + Flags uint32 +} + +// DomainPmWakeupArgs is libvirt's remote_domain_pm_wakeup_args +type DomainPmWakeupArgs struct { + Dom Domain + Flags uint32 +} + +// DomainShutdownArgs is libvirt's remote_domain_shutdown_args +type DomainShutdownArgs struct { + Dom Domain +} + +// DomainRebootArgs is libvirt's remote_domain_reboot_args +type DomainRebootArgs struct { + Dom Domain + Flags DomainRebootFlagValues +} + +// DomainResetArgs is libvirt's remote_domain_reset_args +type DomainResetArgs struct { + Dom Domain + Flags uint32 +} + +// DomainDestroyArgs is libvirt's remote_domain_destroy_args +type DomainDestroyArgs struct { + Dom Domain +} + +// DomainDestroyFlagsArgs is libvirt's remote_domain_destroy_flags_args +type DomainDestroyFlagsArgs struct { + Dom Domain + Flags DomainDestroyFlagsValues +} + +// DomainGetOsTypeArgs is libvirt's remote_domain_get_os_type_args +type DomainGetOsTypeArgs struct { + Dom Domain +} + +// DomainGetOsTypeRet is libvirt's remote_domain_get_os_type_ret +type DomainGetOsTypeRet struct { + Type string +} + +// DomainGetMaxMemoryArgs is libvirt's remote_domain_get_max_memory_args +type DomainGetMaxMemoryArgs struct { + Dom Domain +} + +// DomainGetMaxMemoryRet is libvirt's remote_domain_get_max_memory_ret +type DomainGetMaxMemoryRet struct { + Memory uint64 +} + +// DomainSetMaxMemoryArgs is libvirt's remote_domain_set_max_memory_args +type DomainSetMaxMemoryArgs struct { + Dom Domain + Memory uint64 +} + +// DomainSetMemoryArgs is libvirt's remote_domain_set_memory_args +type DomainSetMemoryArgs struct { + Dom Domain + Memory uint64 +} + +// DomainSetMemoryFlagsArgs is libvirt's remote_domain_set_memory_flags_args +type DomainSetMemoryFlagsArgs struct { + Dom Domain + Memory uint64 + Flags uint32 +} + +// DomainSetMemoryStatsPeriodArgs is libvirt's remote_domain_set_memory_stats_period_args +type 
DomainSetMemoryStatsPeriodArgs struct { + Dom Domain + Period int32 + Flags DomainMemoryModFlags +} + +// DomainGetInfoArgs is libvirt's remote_domain_get_info_args +type DomainGetInfoArgs struct { + Dom Domain +} + +// DomainGetInfoRet is libvirt's remote_domain_get_info_ret +type DomainGetInfoRet struct { + State uint8 + MaxMem uint64 + Memory uint64 + NrVirtCPU uint16 + CPUTime uint64 +} + +// DomainSaveArgs is libvirt's remote_domain_save_args +type DomainSaveArgs struct { + Dom Domain + To string +} + +// DomainSaveFlagsArgs is libvirt's remote_domain_save_flags_args +type DomainSaveFlagsArgs struct { + Dom Domain + To string + Dxml OptString + Flags uint32 +} + +// DomainRestoreArgs is libvirt's remote_domain_restore_args +type DomainRestoreArgs struct { + From string +} + +// DomainRestoreFlagsArgs is libvirt's remote_domain_restore_flags_args +type DomainRestoreFlagsArgs struct { + From string + Dxml OptString + Flags uint32 +} + +// DomainSaveImageGetXMLDescArgs is libvirt's remote_domain_save_image_get_xml_desc_args +type DomainSaveImageGetXMLDescArgs struct { + File string + Flags uint32 +} + +// DomainSaveImageGetXMLDescRet is libvirt's remote_domain_save_image_get_xml_desc_ret +type DomainSaveImageGetXMLDescRet struct { + XML string +} + +// DomainSaveImageDefineXMLArgs is libvirt's remote_domain_save_image_define_xml_args +type DomainSaveImageDefineXMLArgs struct { + File string + Dxml string + Flags uint32 +} + +// DomainCoreDumpArgs is libvirt's remote_domain_core_dump_args +type DomainCoreDumpArgs struct { + Dom Domain + To string + Flags DomainCoreDumpFlags +} + +// DomainCoreDumpWithFormatArgs is libvirt's remote_domain_core_dump_with_format_args +type DomainCoreDumpWithFormatArgs struct { + Dom Domain + To string + Dumpformat uint32 + Flags DomainCoreDumpFlags +} + +// DomainScreenshotArgs is libvirt's remote_domain_screenshot_args +type DomainScreenshotArgs struct { + Dom Domain + Screen uint32 + Flags uint32 +} + +// DomainScreenshotRet is libvirt's remote_domain_screenshot_ret +type DomainScreenshotRet struct { + Mime OptString +} + +// DomainGetXMLDescArgs is libvirt's remote_domain_get_xml_desc_args +type DomainGetXMLDescArgs struct { + Dom Domain + Flags DomainXMLFlags +} + +// DomainGetXMLDescRet is libvirt's remote_domain_get_xml_desc_ret +type DomainGetXMLDescRet struct { + XML string +} + +// DomainMigratePrepareArgs is libvirt's remote_domain_migrate_prepare_args +type DomainMigratePrepareArgs struct { + UriIn OptString + Flags uint64 + Dname OptString + Resource uint64 +} + +// DomainMigratePrepareRet is libvirt's remote_domain_migrate_prepare_ret +type DomainMigratePrepareRet struct { + Cookie []byte + UriOut OptString +} + +// DomainMigratePerformArgs is libvirt's remote_domain_migrate_perform_args +type DomainMigratePerformArgs struct { + Dom Domain + Cookie []byte + Uri string + Flags uint64 + Dname OptString + Resource uint64 +} + +// DomainMigrateFinishArgs is libvirt's remote_domain_migrate_finish_args +type DomainMigrateFinishArgs struct { + Dname string + Cookie []byte + Uri string + Flags uint64 +} + +// DomainMigrateFinishRet is libvirt's remote_domain_migrate_finish_ret +type DomainMigrateFinishRet struct { + Ddom Domain +} + +// DomainMigratePrepare2Args is libvirt's remote_domain_migrate_prepare2_args +type DomainMigratePrepare2Args struct { + UriIn OptString + Flags uint64 + Dname OptString + Resource uint64 + DomXML string +} + +// DomainMigratePrepare2Ret is libvirt's remote_domain_migrate_prepare2_ret +type DomainMigratePrepare2Ret struct { 
+ Cookie []byte + UriOut OptString +} + +// DomainMigrateFinish2Args is libvirt's remote_domain_migrate_finish2_args +type DomainMigrateFinish2Args struct { + Dname string + Cookie []byte + Uri string + Flags uint64 + Retcode int32 +} + +// DomainMigrateFinish2Ret is libvirt's remote_domain_migrate_finish2_ret +type DomainMigrateFinish2Ret struct { + Ddom Domain +} + +// ConnectListDefinedDomainsArgs is libvirt's remote_connect_list_defined_domains_args +type ConnectListDefinedDomainsArgs struct { + Maxnames int32 +} + +// ConnectListDefinedDomainsRet is libvirt's remote_connect_list_defined_domains_ret +type ConnectListDefinedDomainsRet struct { + Names []string +} + +// ConnectNumOfDefinedDomainsRet is libvirt's remote_connect_num_of_defined_domains_ret +type ConnectNumOfDefinedDomainsRet struct { + Num int32 +} + +// DomainCreateArgs is libvirt's remote_domain_create_args +type DomainCreateArgs struct { + Dom Domain +} + +// DomainCreateWithFlagsArgs is libvirt's remote_domain_create_with_flags_args +type DomainCreateWithFlagsArgs struct { + Dom Domain + Flags uint32 +} + +// DomainCreateWithFlagsRet is libvirt's remote_domain_create_with_flags_ret +type DomainCreateWithFlagsRet struct { + Dom Domain +} + +// DomainCreateWithFilesArgs is libvirt's remote_domain_create_with_files_args +type DomainCreateWithFilesArgs struct { + Dom Domain + Flags DomainCreateFlags +} + +// DomainCreateWithFilesRet is libvirt's remote_domain_create_with_files_ret +type DomainCreateWithFilesRet struct { + Dom Domain +} + +// DomainDefineXMLArgs is libvirt's remote_domain_define_xml_args +type DomainDefineXMLArgs struct { + XML string +} + +// DomainDefineXMLRet is libvirt's remote_domain_define_xml_ret +type DomainDefineXMLRet struct { + Dom Domain +} + +// DomainDefineXMLFlagsArgs is libvirt's remote_domain_define_xml_flags_args +type DomainDefineXMLFlagsArgs struct { + XML string + Flags DomainDefineFlags +} + +// DomainDefineXMLFlagsRet is libvirt's remote_domain_define_xml_flags_ret +type DomainDefineXMLFlagsRet struct { + Dom Domain +} + +// DomainUndefineArgs is libvirt's remote_domain_undefine_args +type DomainUndefineArgs struct { + Dom Domain +} + +// DomainUndefineFlagsArgs is libvirt's remote_domain_undefine_flags_args +type DomainUndefineFlagsArgs struct { + Dom Domain + Flags DomainUndefineFlagsValues +} + +// DomainInjectNmiArgs is libvirt's remote_domain_inject_nmi_args +type DomainInjectNmiArgs struct { + Dom Domain + Flags uint32 +} + +// DomainSendKeyArgs is libvirt's remote_domain_send_key_args +type DomainSendKeyArgs struct { + Dom Domain + Codeset uint32 + Holdtime uint32 + Keycodes []uint32 + Flags uint32 +} + +// DomainSendProcessSignalArgs is libvirt's remote_domain_send_process_signal_args +type DomainSendProcessSignalArgs struct { + Dom Domain + PidValue int64 + Signum uint32 + Flags uint32 +} + +// DomainSetVcpusArgs is libvirt's remote_domain_set_vcpus_args +type DomainSetVcpusArgs struct { + Dom Domain + Nvcpus uint32 +} + +// DomainSetVcpusFlagsArgs is libvirt's remote_domain_set_vcpus_flags_args +type DomainSetVcpusFlagsArgs struct { + Dom Domain + Nvcpus uint32 + Flags uint32 +} + +// DomainGetVcpusFlagsArgs is libvirt's remote_domain_get_vcpus_flags_args +type DomainGetVcpusFlagsArgs struct { + Dom Domain + Flags uint32 +} + +// DomainGetVcpusFlagsRet is libvirt's remote_domain_get_vcpus_flags_ret +type DomainGetVcpusFlagsRet struct { + Num int32 +} + +// DomainPinVcpuArgs is libvirt's remote_domain_pin_vcpu_args +type DomainPinVcpuArgs struct { + Dom Domain + Vcpu uint32 + 
Cpumap []byte +} + +// DomainPinVcpuFlagsArgs is libvirt's remote_domain_pin_vcpu_flags_args +type DomainPinVcpuFlagsArgs struct { + Dom Domain + Vcpu uint32 + Cpumap []byte + Flags uint32 +} + +// DomainGetVcpuPinInfoArgs is libvirt's remote_domain_get_vcpu_pin_info_args +type DomainGetVcpuPinInfoArgs struct { + Dom Domain + Ncpumaps int32 + Maplen int32 + Flags uint32 +} + +// DomainGetVcpuPinInfoRet is libvirt's remote_domain_get_vcpu_pin_info_ret +type DomainGetVcpuPinInfoRet struct { + Cpumaps []byte + Num int32 +} + +// DomainPinEmulatorArgs is libvirt's remote_domain_pin_emulator_args +type DomainPinEmulatorArgs struct { + Dom Domain + Cpumap []byte + Flags DomainModificationImpact +} + +// DomainGetEmulatorPinInfoArgs is libvirt's remote_domain_get_emulator_pin_info_args +type DomainGetEmulatorPinInfoArgs struct { + Dom Domain + Maplen int32 + Flags DomainModificationImpact +} + +// DomainGetEmulatorPinInfoRet is libvirt's remote_domain_get_emulator_pin_info_ret +type DomainGetEmulatorPinInfoRet struct { + Cpumaps []byte + Ret int32 +} + +// DomainGetVcpusArgs is libvirt's remote_domain_get_vcpus_args +type DomainGetVcpusArgs struct { + Dom Domain + Maxinfo int32 + Maplen int32 +} + +// DomainGetVcpusRet is libvirt's remote_domain_get_vcpus_ret +type DomainGetVcpusRet struct { + Info []VcpuInfo + Cpumaps []byte +} + +// DomainGetMaxVcpusArgs is libvirt's remote_domain_get_max_vcpus_args +type DomainGetMaxVcpusArgs struct { + Dom Domain +} + +// DomainGetMaxVcpusRet is libvirt's remote_domain_get_max_vcpus_ret +type DomainGetMaxVcpusRet struct { + Num int32 +} + +// DomainIothreadInfo is libvirt's remote_domain_iothread_info +type DomainIothreadInfo struct { + IothreadID uint32 + Cpumap []byte +} + +// DomainGetIothreadInfoArgs is libvirt's remote_domain_get_iothread_info_args +type DomainGetIothreadInfoArgs struct { + Dom Domain + Flags DomainModificationImpact +} + +// DomainGetIothreadInfoRet is libvirt's remote_domain_get_iothread_info_ret +type DomainGetIothreadInfoRet struct { + Info []DomainIothreadInfo + Ret uint32 +} + +// DomainPinIothreadArgs is libvirt's remote_domain_pin_iothread_args +type DomainPinIothreadArgs struct { + Dom Domain + IothreadsID uint32 + Cpumap []byte + Flags DomainModificationImpact +} + +// DomainAddIothreadArgs is libvirt's remote_domain_add_iothread_args +type DomainAddIothreadArgs struct { + Dom Domain + IothreadID uint32 + Flags DomainModificationImpact +} + +// DomainDelIothreadArgs is libvirt's remote_domain_del_iothread_args +type DomainDelIothreadArgs struct { + Dom Domain + IothreadID uint32 + Flags DomainModificationImpact +} + +// DomainGetSecurityLabelArgs is libvirt's remote_domain_get_security_label_args +type DomainGetSecurityLabelArgs struct { + Dom Domain +} + +// DomainGetSecurityLabelRet is libvirt's remote_domain_get_security_label_ret +type DomainGetSecurityLabelRet struct { + Label []int8 + Enforcing int32 +} + +// DomainGetSecurityLabelListArgs is libvirt's remote_domain_get_security_label_list_args +type DomainGetSecurityLabelListArgs struct { + Dom Domain +} + +// DomainGetSecurityLabelListRet is libvirt's remote_domain_get_security_label_list_ret +type DomainGetSecurityLabelListRet struct { + Labels []DomainGetSecurityLabelRet + Ret int32 +} + +// NodeGetSecurityModelRet is libvirt's remote_node_get_security_model_ret +type NodeGetSecurityModelRet struct { + Model []int8 + Doi []int8 +} + +// DomainAttachDeviceArgs is libvirt's remote_domain_attach_device_args +type DomainAttachDeviceArgs struct { + Dom Domain + XML string 
+} + +// DomainAttachDeviceFlagsArgs is libvirt's remote_domain_attach_device_flags_args +type DomainAttachDeviceFlagsArgs struct { + Dom Domain + XML string + Flags uint32 +} + +// DomainDetachDeviceArgs is libvirt's remote_domain_detach_device_args +type DomainDetachDeviceArgs struct { + Dom Domain + XML string +} + +// DomainDetachDeviceFlagsArgs is libvirt's remote_domain_detach_device_flags_args +type DomainDetachDeviceFlagsArgs struct { + Dom Domain + XML string + Flags uint32 +} + +// DomainUpdateDeviceFlagsArgs is libvirt's remote_domain_update_device_flags_args +type DomainUpdateDeviceFlagsArgs struct { + Dom Domain + XML string + Flags uint32 +} + +// DomainGetAutostartArgs is libvirt's remote_domain_get_autostart_args +type DomainGetAutostartArgs struct { + Dom Domain +} + +// DomainGetAutostartRet is libvirt's remote_domain_get_autostart_ret +type DomainGetAutostartRet struct { + Autostart int32 +} + +// DomainSetAutostartArgs is libvirt's remote_domain_set_autostart_args +type DomainSetAutostartArgs struct { + Dom Domain + Autostart int32 +} + +// DomainSetMetadataArgs is libvirt's remote_domain_set_metadata_args +type DomainSetMetadataArgs struct { + Dom Domain + Type int32 + Metadata OptString + Key OptString + Uri OptString + Flags DomainModificationImpact +} + +// DomainGetMetadataArgs is libvirt's remote_domain_get_metadata_args +type DomainGetMetadataArgs struct { + Dom Domain + Type int32 + Uri OptString + Flags DomainModificationImpact +} + +// DomainGetMetadataRet is libvirt's remote_domain_get_metadata_ret +type DomainGetMetadataRet struct { + Metadata string +} + +// DomainBlockJobAbortArgs is libvirt's remote_domain_block_job_abort_args +type DomainBlockJobAbortArgs struct { + Dom Domain + Path string + Flags DomainBlockJobAbortFlags +} + +// DomainGetBlockJobInfoArgs is libvirt's remote_domain_get_block_job_info_args +type DomainGetBlockJobInfoArgs struct { + Dom Domain + Path string + Flags uint32 +} + +// DomainGetBlockJobInfoRet is libvirt's remote_domain_get_block_job_info_ret +type DomainGetBlockJobInfoRet struct { + Found int32 + Type int32 + Bandwidth uint64 + Cur uint64 + End uint64 +} + +// DomainBlockJobSetSpeedArgs is libvirt's remote_domain_block_job_set_speed_args +type DomainBlockJobSetSpeedArgs struct { + Dom Domain + Path string + Bandwidth uint64 + Flags DomainBlockJobSetSpeedFlags +} + +// DomainBlockPullArgs is libvirt's remote_domain_block_pull_args +type DomainBlockPullArgs struct { + Dom Domain + Path string + Bandwidth uint64 + Flags DomainBlockPullFlags +} + +// DomainBlockRebaseArgs is libvirt's remote_domain_block_rebase_args +type DomainBlockRebaseArgs struct { + Dom Domain + Path string + Base OptString + Bandwidth uint64 + Flags DomainBlockRebaseFlags +} + +// DomainBlockCopyArgs is libvirt's remote_domain_block_copy_args +type DomainBlockCopyArgs struct { + Dom Domain + Path string + Destxml string + Params []TypedParam + Flags DomainBlockCopyFlags +} + +// DomainBlockCommitArgs is libvirt's remote_domain_block_commit_args +type DomainBlockCommitArgs struct { + Dom Domain + Disk string + Base OptString + Top OptString + Bandwidth uint64 + Flags DomainBlockCommitFlags +} + +// DomainSetBlockIOTuneArgs is libvirt's remote_domain_set_block_io_tune_args +type DomainSetBlockIOTuneArgs struct { + Dom Domain + Disk string + Params []TypedParam + Flags uint32 +} + +// DomainGetBlockIOTuneArgs is libvirt's remote_domain_get_block_io_tune_args +type DomainGetBlockIOTuneArgs struct { + Dom Domain + Disk OptString + Nparams int32 + Flags uint32 
+} + +// DomainGetBlockIOTuneRet is libvirt's remote_domain_get_block_io_tune_ret +type DomainGetBlockIOTuneRet struct { + Params []TypedParam + Nparams int32 +} + +// DomainGetCPUStatsArgs is libvirt's remote_domain_get_cpu_stats_args +type DomainGetCPUStatsArgs struct { + Dom Domain + Nparams uint32 + StartCPU int32 + Ncpus uint32 + Flags TypedParameterFlags +} + +// DomainGetCPUStatsRet is libvirt's remote_domain_get_cpu_stats_ret +type DomainGetCPUStatsRet struct { + Params []TypedParam + Nparams int32 +} + +// DomainGetHostnameArgs is libvirt's remote_domain_get_hostname_args +type DomainGetHostnameArgs struct { + Dom Domain + Flags uint32 +} + +// DomainGetHostnameRet is libvirt's remote_domain_get_hostname_ret +type DomainGetHostnameRet struct { + Hostname string +} + +// ConnectNumOfNetworksRet is libvirt's remote_connect_num_of_networks_ret +type ConnectNumOfNetworksRet struct { + Num int32 +} + +// ConnectListNetworksArgs is libvirt's remote_connect_list_networks_args +type ConnectListNetworksArgs struct { + Maxnames int32 +} + +// ConnectListNetworksRet is libvirt's remote_connect_list_networks_ret +type ConnectListNetworksRet struct { + Names []string +} + +// ConnectNumOfDefinedNetworksRet is libvirt's remote_connect_num_of_defined_networks_ret +type ConnectNumOfDefinedNetworksRet struct { + Num int32 +} + +// ConnectListDefinedNetworksArgs is libvirt's remote_connect_list_defined_networks_args +type ConnectListDefinedNetworksArgs struct { + Maxnames int32 +} + +// ConnectListDefinedNetworksRet is libvirt's remote_connect_list_defined_networks_ret +type ConnectListDefinedNetworksRet struct { + Names []string +} + +// NetworkLookupByUUIDArgs is libvirt's remote_network_lookup_by_uuid_args +type NetworkLookupByUUIDArgs struct { + UUID UUID +} + +// NetworkLookupByUUIDRet is libvirt's remote_network_lookup_by_uuid_ret +type NetworkLookupByUUIDRet struct { + Net Network +} + +// NetworkLookupByNameArgs is libvirt's remote_network_lookup_by_name_args +type NetworkLookupByNameArgs struct { + Name string +} + +// NetworkLookupByNameRet is libvirt's remote_network_lookup_by_name_ret +type NetworkLookupByNameRet struct { + Net Network +} + +// NetworkCreateXMLArgs is libvirt's remote_network_create_xml_args +type NetworkCreateXMLArgs struct { + XML string +} + +// NetworkCreateXMLRet is libvirt's remote_network_create_xml_ret +type NetworkCreateXMLRet struct { + Net Network +} + +// NetworkDefineXMLArgs is libvirt's remote_network_define_xml_args +type NetworkDefineXMLArgs struct { + XML string +} + +// NetworkDefineXMLRet is libvirt's remote_network_define_xml_ret +type NetworkDefineXMLRet struct { + Net Network +} + +// NetworkUndefineArgs is libvirt's remote_network_undefine_args +type NetworkUndefineArgs struct { + Net Network +} + +// NetworkUpdateArgs is libvirt's remote_network_update_args +type NetworkUpdateArgs struct { + Net Network + Command uint32 + Section uint32 + ParentIndex int32 + XML string + Flags NetworkUpdateFlags +} + +// NetworkCreateArgs is libvirt's remote_network_create_args +type NetworkCreateArgs struct { + Net Network +} + +// NetworkDestroyArgs is libvirt's remote_network_destroy_args +type NetworkDestroyArgs struct { + Net Network +} + +// NetworkGetXMLDescArgs is libvirt's remote_network_get_xml_desc_args +type NetworkGetXMLDescArgs struct { + Net Network + Flags uint32 +} + +// NetworkGetXMLDescRet is libvirt's remote_network_get_xml_desc_ret +type NetworkGetXMLDescRet struct { + XML string +} + +// NetworkGetBridgeNameArgs is libvirt's 
remote_network_get_bridge_name_args +type NetworkGetBridgeNameArgs struct { + Net Network +} + +// NetworkGetBridgeNameRet is libvirt's remote_network_get_bridge_name_ret +type NetworkGetBridgeNameRet struct { + Name string +} + +// NetworkGetAutostartArgs is libvirt's remote_network_get_autostart_args +type NetworkGetAutostartArgs struct { + Net Network +} + +// NetworkGetAutostartRet is libvirt's remote_network_get_autostart_ret +type NetworkGetAutostartRet struct { + Autostart int32 +} + +// NetworkSetAutostartArgs is libvirt's remote_network_set_autostart_args +type NetworkSetAutostartArgs struct { + Net Network + Autostart int32 +} + +// ConnectNumOfNwfiltersRet is libvirt's remote_connect_num_of_nwfilters_ret +type ConnectNumOfNwfiltersRet struct { + Num int32 +} + +// ConnectListNwfiltersArgs is libvirt's remote_connect_list_nwfilters_args +type ConnectListNwfiltersArgs struct { + Maxnames int32 +} + +// ConnectListNwfiltersRet is libvirt's remote_connect_list_nwfilters_ret +type ConnectListNwfiltersRet struct { + Names []string +} + +// NwfilterLookupByUUIDArgs is libvirt's remote_nwfilter_lookup_by_uuid_args +type NwfilterLookupByUUIDArgs struct { + UUID UUID +} + +// NwfilterLookupByUUIDRet is libvirt's remote_nwfilter_lookup_by_uuid_ret +type NwfilterLookupByUUIDRet struct { + OptNwfilter Nwfilter +} + +// NwfilterLookupByNameArgs is libvirt's remote_nwfilter_lookup_by_name_args +type NwfilterLookupByNameArgs struct { + Name string +} + +// NwfilterLookupByNameRet is libvirt's remote_nwfilter_lookup_by_name_ret +type NwfilterLookupByNameRet struct { + OptNwfilter Nwfilter +} + +// NwfilterDefineXMLArgs is libvirt's remote_nwfilter_define_xml_args +type NwfilterDefineXMLArgs struct { + XML string +} + +// NwfilterDefineXMLRet is libvirt's remote_nwfilter_define_xml_ret +type NwfilterDefineXMLRet struct { + OptNwfilter Nwfilter +} + +// NwfilterUndefineArgs is libvirt's remote_nwfilter_undefine_args +type NwfilterUndefineArgs struct { + OptNwfilter Nwfilter +} + +// NwfilterGetXMLDescArgs is libvirt's remote_nwfilter_get_xml_desc_args +type NwfilterGetXMLDescArgs struct { + OptNwfilter Nwfilter + Flags uint32 +} + +// NwfilterGetXMLDescRet is libvirt's remote_nwfilter_get_xml_desc_ret +type NwfilterGetXMLDescRet struct { + XML string +} + +// ConnectNumOfInterfacesRet is libvirt's remote_connect_num_of_interfaces_ret +type ConnectNumOfInterfacesRet struct { + Num int32 +} + +// ConnectListInterfacesArgs is libvirt's remote_connect_list_interfaces_args +type ConnectListInterfacesArgs struct { + Maxnames int32 +} + +// ConnectListInterfacesRet is libvirt's remote_connect_list_interfaces_ret +type ConnectListInterfacesRet struct { + Names []string +} + +// ConnectNumOfDefinedInterfacesRet is libvirt's remote_connect_num_of_defined_interfaces_ret +type ConnectNumOfDefinedInterfacesRet struct { + Num int32 +} + +// ConnectListDefinedInterfacesArgs is libvirt's remote_connect_list_defined_interfaces_args +type ConnectListDefinedInterfacesArgs struct { + Maxnames int32 +} + +// ConnectListDefinedInterfacesRet is libvirt's remote_connect_list_defined_interfaces_ret +type ConnectListDefinedInterfacesRet struct { + Names []string +} + +// InterfaceLookupByNameArgs is libvirt's remote_interface_lookup_by_name_args +type InterfaceLookupByNameArgs struct { + Name string +} + +// InterfaceLookupByNameRet is libvirt's remote_interface_lookup_by_name_ret +type InterfaceLookupByNameRet struct { + Iface Interface +} + +// InterfaceLookupByMacStringArgs is libvirt's 
remote_interface_lookup_by_mac_string_args +type InterfaceLookupByMacStringArgs struct { + Mac string +} + +// InterfaceLookupByMacStringRet is libvirt's remote_interface_lookup_by_mac_string_ret +type InterfaceLookupByMacStringRet struct { + Iface Interface +} + +// InterfaceGetXMLDescArgs is libvirt's remote_interface_get_xml_desc_args +type InterfaceGetXMLDescArgs struct { + Iface Interface + Flags uint32 +} + +// InterfaceGetXMLDescRet is libvirt's remote_interface_get_xml_desc_ret +type InterfaceGetXMLDescRet struct { + XML string +} + +// InterfaceDefineXMLArgs is libvirt's remote_interface_define_xml_args +type InterfaceDefineXMLArgs struct { + XML string + Flags uint32 +} + +// InterfaceDefineXMLRet is libvirt's remote_interface_define_xml_ret +type InterfaceDefineXMLRet struct { + Iface Interface +} + +// InterfaceUndefineArgs is libvirt's remote_interface_undefine_args +type InterfaceUndefineArgs struct { + Iface Interface +} + +// InterfaceCreateArgs is libvirt's remote_interface_create_args +type InterfaceCreateArgs struct { + Iface Interface + Flags uint32 +} + +// InterfaceDestroyArgs is libvirt's remote_interface_destroy_args +type InterfaceDestroyArgs struct { + Iface Interface + Flags uint32 +} + +// InterfaceChangeBeginArgs is libvirt's remote_interface_change_begin_args +type InterfaceChangeBeginArgs struct { + Flags uint32 +} + +// InterfaceChangeCommitArgs is libvirt's remote_interface_change_commit_args +type InterfaceChangeCommitArgs struct { + Flags uint32 +} + +// InterfaceChangeRollbackArgs is libvirt's remote_interface_change_rollback_args +type InterfaceChangeRollbackArgs struct { + Flags uint32 +} + +// AuthListRet is libvirt's remote_auth_list_ret +type AuthListRet struct { + Types []AuthType +} + +// AuthSaslInitRet is libvirt's remote_auth_sasl_init_ret +type AuthSaslInitRet struct { + Mechlist string +} + +// AuthSaslStartArgs is libvirt's remote_auth_sasl_start_args +type AuthSaslStartArgs struct { + Mech string + Nil int32 + Data []int8 +} + +// AuthSaslStartRet is libvirt's remote_auth_sasl_start_ret +type AuthSaslStartRet struct { + Complete int32 + Nil int32 + Data []int8 +} + +// AuthSaslStepArgs is libvirt's remote_auth_sasl_step_args +type AuthSaslStepArgs struct { + Nil int32 + Data []int8 +} + +// AuthSaslStepRet is libvirt's remote_auth_sasl_step_ret +type AuthSaslStepRet struct { + Complete int32 + Nil int32 + Data []int8 +} + +// AuthPolkitRet is libvirt's remote_auth_polkit_ret +type AuthPolkitRet struct { + Complete int32 +} + +// ConnectNumOfStoragePoolsRet is libvirt's remote_connect_num_of_storage_pools_ret +type ConnectNumOfStoragePoolsRet struct { + Num int32 +} + +// ConnectListStoragePoolsArgs is libvirt's remote_connect_list_storage_pools_args +type ConnectListStoragePoolsArgs struct { + Maxnames int32 +} + +// ConnectListStoragePoolsRet is libvirt's remote_connect_list_storage_pools_ret +type ConnectListStoragePoolsRet struct { + Names []string +} + +// ConnectNumOfDefinedStoragePoolsRet is libvirt's remote_connect_num_of_defined_storage_pools_ret +type ConnectNumOfDefinedStoragePoolsRet struct { + Num int32 +} + +// ConnectListDefinedStoragePoolsArgs is libvirt's remote_connect_list_defined_storage_pools_args +type ConnectListDefinedStoragePoolsArgs struct { + Maxnames int32 +} + +// ConnectListDefinedStoragePoolsRet is libvirt's remote_connect_list_defined_storage_pools_ret +type ConnectListDefinedStoragePoolsRet struct { + Names []string +} + +// ConnectFindStoragePoolSourcesArgs is libvirt's 
remote_connect_find_storage_pool_sources_args +type ConnectFindStoragePoolSourcesArgs struct { + Type string + SrcSpec OptString + Flags uint32 +} + +// ConnectFindStoragePoolSourcesRet is libvirt's remote_connect_find_storage_pool_sources_ret +type ConnectFindStoragePoolSourcesRet struct { + XML string +} + +// StoragePoolLookupByUUIDArgs is libvirt's remote_storage_pool_lookup_by_uuid_args +type StoragePoolLookupByUUIDArgs struct { + UUID UUID +} + +// StoragePoolLookupByUUIDRet is libvirt's remote_storage_pool_lookup_by_uuid_ret +type StoragePoolLookupByUUIDRet struct { + Pool StoragePool +} + +// StoragePoolLookupByNameArgs is libvirt's remote_storage_pool_lookup_by_name_args +type StoragePoolLookupByNameArgs struct { + Name string +} + +// StoragePoolLookupByNameRet is libvirt's remote_storage_pool_lookup_by_name_ret +type StoragePoolLookupByNameRet struct { + Pool StoragePool +} + +// StoragePoolLookupByVolumeArgs is libvirt's remote_storage_pool_lookup_by_volume_args +type StoragePoolLookupByVolumeArgs struct { + Vol StorageVol +} + +// StoragePoolLookupByVolumeRet is libvirt's remote_storage_pool_lookup_by_volume_ret +type StoragePoolLookupByVolumeRet struct { + Pool StoragePool +} + +// StoragePoolCreateXMLArgs is libvirt's remote_storage_pool_create_xml_args +type StoragePoolCreateXMLArgs struct { + XML string + Flags StoragePoolCreateFlags +} + +// StoragePoolCreateXMLRet is libvirt's remote_storage_pool_create_xml_ret +type StoragePoolCreateXMLRet struct { + Pool StoragePool +} + +// StoragePoolDefineXMLArgs is libvirt's remote_storage_pool_define_xml_args +type StoragePoolDefineXMLArgs struct { + XML string + Flags uint32 +} + +// StoragePoolDefineXMLRet is libvirt's remote_storage_pool_define_xml_ret +type StoragePoolDefineXMLRet struct { + Pool StoragePool +} + +// StoragePoolBuildArgs is libvirt's remote_storage_pool_build_args +type StoragePoolBuildArgs struct { + Pool StoragePool + Flags StoragePoolBuildFlags +} + +// StoragePoolUndefineArgs is libvirt's remote_storage_pool_undefine_args +type StoragePoolUndefineArgs struct { + Pool StoragePool +} + +// StoragePoolCreateArgs is libvirt's remote_storage_pool_create_args +type StoragePoolCreateArgs struct { + Pool StoragePool + Flags StoragePoolCreateFlags +} + +// StoragePoolDestroyArgs is libvirt's remote_storage_pool_destroy_args +type StoragePoolDestroyArgs struct { + Pool StoragePool +} + +// StoragePoolDeleteArgs is libvirt's remote_storage_pool_delete_args +type StoragePoolDeleteArgs struct { + Pool StoragePool + Flags StoragePoolDeleteFlags +} + +// StoragePoolRefreshArgs is libvirt's remote_storage_pool_refresh_args +type StoragePoolRefreshArgs struct { + Pool StoragePool + Flags uint32 +} + +// StoragePoolGetXMLDescArgs is libvirt's remote_storage_pool_get_xml_desc_args +type StoragePoolGetXMLDescArgs struct { + Pool StoragePool + Flags StorageXMLFlags +} + +// StoragePoolGetXMLDescRet is libvirt's remote_storage_pool_get_xml_desc_ret +type StoragePoolGetXMLDescRet struct { + XML string +} + +// StoragePoolGetInfoArgs is libvirt's remote_storage_pool_get_info_args +type StoragePoolGetInfoArgs struct { + Pool StoragePool +} + +// StoragePoolGetInfoRet is libvirt's remote_storage_pool_get_info_ret +type StoragePoolGetInfoRet struct { + State uint8 + Capacity uint64 + Allocation uint64 + Available uint64 +} + +// StoragePoolGetAutostartArgs is libvirt's remote_storage_pool_get_autostart_args +type StoragePoolGetAutostartArgs struct { + Pool StoragePool +} + +// StoragePoolGetAutostartRet is libvirt's 
remote_storage_pool_get_autostart_ret +type StoragePoolGetAutostartRet struct { + Autostart int32 +} + +// StoragePoolSetAutostartArgs is libvirt's remote_storage_pool_set_autostart_args +type StoragePoolSetAutostartArgs struct { + Pool StoragePool + Autostart int32 +} + +// StoragePoolNumOfVolumesArgs is libvirt's remote_storage_pool_num_of_volumes_args +type StoragePoolNumOfVolumesArgs struct { + Pool StoragePool +} + +// StoragePoolNumOfVolumesRet is libvirt's remote_storage_pool_num_of_volumes_ret +type StoragePoolNumOfVolumesRet struct { + Num int32 +} + +// StoragePoolListVolumesArgs is libvirt's remote_storage_pool_list_volumes_args +type StoragePoolListVolumesArgs struct { + Pool StoragePool + Maxnames int32 +} + +// StoragePoolListVolumesRet is libvirt's remote_storage_pool_list_volumes_ret +type StoragePoolListVolumesRet struct { + Names []string +} + +// StorageVolLookupByNameArgs is libvirt's remote_storage_vol_lookup_by_name_args +type StorageVolLookupByNameArgs struct { + Pool StoragePool + Name string +} + +// StorageVolLookupByNameRet is libvirt's remote_storage_vol_lookup_by_name_ret +type StorageVolLookupByNameRet struct { + Vol StorageVol +} + +// StorageVolLookupByKeyArgs is libvirt's remote_storage_vol_lookup_by_key_args +type StorageVolLookupByKeyArgs struct { + Key string +} + +// StorageVolLookupByKeyRet is libvirt's remote_storage_vol_lookup_by_key_ret +type StorageVolLookupByKeyRet struct { + Vol StorageVol +} + +// StorageVolLookupByPathArgs is libvirt's remote_storage_vol_lookup_by_path_args +type StorageVolLookupByPathArgs struct { + Path string +} + +// StorageVolLookupByPathRet is libvirt's remote_storage_vol_lookup_by_path_ret +type StorageVolLookupByPathRet struct { + Vol StorageVol +} + +// StorageVolCreateXMLArgs is libvirt's remote_storage_vol_create_xml_args +type StorageVolCreateXMLArgs struct { + Pool StoragePool + XML string + Flags StorageVolCreateFlags +} + +// StorageVolCreateXMLRet is libvirt's remote_storage_vol_create_xml_ret +type StorageVolCreateXMLRet struct { + Vol StorageVol +} + +// StorageVolCreateXMLFromArgs is libvirt's remote_storage_vol_create_xml_from_args +type StorageVolCreateXMLFromArgs struct { + Pool StoragePool + XML string + Clonevol StorageVol + Flags StorageVolCreateFlags +} + +// StorageVolCreateXMLFromRet is libvirt's remote_storage_vol_create_xml_from_ret +type StorageVolCreateXMLFromRet struct { + Vol StorageVol +} + +// StorageVolDeleteArgs is libvirt's remote_storage_vol_delete_args +type StorageVolDeleteArgs struct { + Vol StorageVol + Flags StorageVolDeleteFlags +} + +// StorageVolWipeArgs is libvirt's remote_storage_vol_wipe_args +type StorageVolWipeArgs struct { + Vol StorageVol + Flags uint32 +} + +// StorageVolWipePatternArgs is libvirt's remote_storage_vol_wipe_pattern_args +type StorageVolWipePatternArgs struct { + Vol StorageVol + Algorithm uint32 + Flags uint32 +} + +// StorageVolGetXMLDescArgs is libvirt's remote_storage_vol_get_xml_desc_args +type StorageVolGetXMLDescArgs struct { + Vol StorageVol + Flags uint32 +} + +// StorageVolGetXMLDescRet is libvirt's remote_storage_vol_get_xml_desc_ret +type StorageVolGetXMLDescRet struct { + XML string +} + +// StorageVolGetInfoArgs is libvirt's remote_storage_vol_get_info_args +type StorageVolGetInfoArgs struct { + Vol StorageVol +} + +// StorageVolGetInfoRet is libvirt's remote_storage_vol_get_info_ret +type StorageVolGetInfoRet struct { + Type int8 + Capacity uint64 + Allocation uint64 +} + +// StorageVolGetInfoFlagsArgs is libvirt's 
remote_storage_vol_get_info_flags_args +type StorageVolGetInfoFlagsArgs struct { + Vol StorageVol + Flags uint32 +} + +// StorageVolGetInfoFlagsRet is libvirt's remote_storage_vol_get_info_flags_ret +type StorageVolGetInfoFlagsRet struct { + Type int8 + Capacity uint64 + Allocation uint64 +} + +// StorageVolGetPathArgs is libvirt's remote_storage_vol_get_path_args +type StorageVolGetPathArgs struct { + Vol StorageVol +} + +// StorageVolGetPathRet is libvirt's remote_storage_vol_get_path_ret +type StorageVolGetPathRet struct { + Name string +} + +// StorageVolResizeArgs is libvirt's remote_storage_vol_resize_args +type StorageVolResizeArgs struct { + Vol StorageVol + Capacity uint64 + Flags StorageVolResizeFlags +} + +// NodeNumOfDevicesArgs is libvirt's remote_node_num_of_devices_args +type NodeNumOfDevicesArgs struct { + Cap OptString + Flags uint32 +} + +// NodeNumOfDevicesRet is libvirt's remote_node_num_of_devices_ret +type NodeNumOfDevicesRet struct { + Num int32 +} + +// NodeListDevicesArgs is libvirt's remote_node_list_devices_args +type NodeListDevicesArgs struct { + Cap OptString + Maxnames int32 + Flags uint32 +} + +// NodeListDevicesRet is libvirt's remote_node_list_devices_ret +type NodeListDevicesRet struct { + Names []string +} + +// NodeDeviceLookupByNameArgs is libvirt's remote_node_device_lookup_by_name_args +type NodeDeviceLookupByNameArgs struct { + Name string +} + +// NodeDeviceLookupByNameRet is libvirt's remote_node_device_lookup_by_name_ret +type NodeDeviceLookupByNameRet struct { + Dev NodeDevice +} + +// NodeDeviceLookupScsiHostByWwnArgs is libvirt's remote_node_device_lookup_scsi_host_by_wwn_args +type NodeDeviceLookupScsiHostByWwnArgs struct { + Wwnn string + Wwpn string + Flags uint32 +} + +// NodeDeviceLookupScsiHostByWwnRet is libvirt's remote_node_device_lookup_scsi_host_by_wwn_ret +type NodeDeviceLookupScsiHostByWwnRet struct { + Dev NodeDevice +} + +// NodeDeviceGetXMLDescArgs is libvirt's remote_node_device_get_xml_desc_args +type NodeDeviceGetXMLDescArgs struct { + Name string + Flags uint32 +} + +// NodeDeviceGetXMLDescRet is libvirt's remote_node_device_get_xml_desc_ret +type NodeDeviceGetXMLDescRet struct { + XML string +} + +// NodeDeviceGetParentArgs is libvirt's remote_node_device_get_parent_args +type NodeDeviceGetParentArgs struct { + Name string +} + +// NodeDeviceGetParentRet is libvirt's remote_node_device_get_parent_ret +type NodeDeviceGetParentRet struct { + Parent OptString +} + +// NodeDeviceNumOfCapsArgs is libvirt's remote_node_device_num_of_caps_args +type NodeDeviceNumOfCapsArgs struct { + Name string +} + +// NodeDeviceNumOfCapsRet is libvirt's remote_node_device_num_of_caps_ret +type NodeDeviceNumOfCapsRet struct { + Num int32 +} + +// NodeDeviceListCapsArgs is libvirt's remote_node_device_list_caps_args +type NodeDeviceListCapsArgs struct { + Name string + Maxnames int32 +} + +// NodeDeviceListCapsRet is libvirt's remote_node_device_list_caps_ret +type NodeDeviceListCapsRet struct { + Names []string +} + +// NodeDeviceDettachArgs is libvirt's remote_node_device_dettach_args +type NodeDeviceDettachArgs struct { + Name string +} + +// NodeDeviceDetachFlagsArgs is libvirt's remote_node_device_detach_flags_args +type NodeDeviceDetachFlagsArgs struct { + Name string + DriverName OptString + Flags uint32 +} + +// NodeDeviceReAttachArgs is libvirt's remote_node_device_re_attach_args +type NodeDeviceReAttachArgs struct { + Name string +} + +// NodeDeviceResetArgs is libvirt's remote_node_device_reset_args +type NodeDeviceResetArgs struct { 
+ Name string +} + +// NodeDeviceCreateXMLArgs is libvirt's remote_node_device_create_xml_args +type NodeDeviceCreateXMLArgs struct { + XMLDesc string + Flags uint32 +} + +// NodeDeviceCreateXMLRet is libvirt's remote_node_device_create_xml_ret +type NodeDeviceCreateXMLRet struct { + Dev NodeDevice +} + +// NodeDeviceDestroyArgs is libvirt's remote_node_device_destroy_args +type NodeDeviceDestroyArgs struct { + Name string +} + +// ConnectDomainEventRegisterRet is libvirt's remote_connect_domain_event_register_ret +type ConnectDomainEventRegisterRet struct { + CbRegistered int32 +} + +// ConnectDomainEventDeregisterRet is libvirt's remote_connect_domain_event_deregister_ret +type ConnectDomainEventDeregisterRet struct { + CbRegistered int32 +} + +// DomainEventLifecycleMsg is libvirt's remote_domain_event_lifecycle_msg +type DomainEventLifecycleMsg struct { + Dom Domain + Event int32 + Detail int32 +} + +// DomainEventCallbackLifecycleMsg is libvirt's remote_domain_event_callback_lifecycle_msg +type DomainEventCallbackLifecycleMsg struct { + CallbackID int32 + Msg DomainEventLifecycleMsg +} + +// ConnectDomainXMLFromNativeArgs is libvirt's remote_connect_domain_xml_from_native_args +type ConnectDomainXMLFromNativeArgs struct { + NativeFormat string + NativeConfig string + Flags uint32 +} + +// ConnectDomainXMLFromNativeRet is libvirt's remote_connect_domain_xml_from_native_ret +type ConnectDomainXMLFromNativeRet struct { + DomainXML string +} + +// ConnectDomainXMLToNativeArgs is libvirt's remote_connect_domain_xml_to_native_args +type ConnectDomainXMLToNativeArgs struct { + NativeFormat string + DomainXML string + Flags uint32 +} + +// ConnectDomainXMLToNativeRet is libvirt's remote_connect_domain_xml_to_native_ret +type ConnectDomainXMLToNativeRet struct { + NativeConfig string +} + +// ConnectNumOfSecretsRet is libvirt's remote_connect_num_of_secrets_ret +type ConnectNumOfSecretsRet struct { + Num int32 +} + +// ConnectListSecretsArgs is libvirt's remote_connect_list_secrets_args +type ConnectListSecretsArgs struct { + Maxuuids int32 +} + +// ConnectListSecretsRet is libvirt's remote_connect_list_secrets_ret +type ConnectListSecretsRet struct { + Uuids []string +} + +// SecretLookupByUUIDArgs is libvirt's remote_secret_lookup_by_uuid_args +type SecretLookupByUUIDArgs struct { + UUID UUID +} + +// SecretLookupByUUIDRet is libvirt's remote_secret_lookup_by_uuid_ret +type SecretLookupByUUIDRet struct { + OptSecret Secret +} + +// SecretDefineXMLArgs is libvirt's remote_secret_define_xml_args +type SecretDefineXMLArgs struct { + XML string + Flags uint32 +} + +// SecretDefineXMLRet is libvirt's remote_secret_define_xml_ret +type SecretDefineXMLRet struct { + OptSecret Secret +} + +// SecretGetXMLDescArgs is libvirt's remote_secret_get_xml_desc_args +type SecretGetXMLDescArgs struct { + OptSecret Secret + Flags uint32 +} + +// SecretGetXMLDescRet is libvirt's remote_secret_get_xml_desc_ret +type SecretGetXMLDescRet struct { + XML string +} + +// SecretSetValueArgs is libvirt's remote_secret_set_value_args +type SecretSetValueArgs struct { + OptSecret Secret + Value []byte + Flags uint32 +} + +// SecretGetValueArgs is libvirt's remote_secret_get_value_args +type SecretGetValueArgs struct { + OptSecret Secret + Flags uint32 +} + +// SecretGetValueRet is libvirt's remote_secret_get_value_ret +type SecretGetValueRet struct { + Value []byte +} + +// SecretUndefineArgs is libvirt's remote_secret_undefine_args +type SecretUndefineArgs struct { + OptSecret Secret +} + +// SecretLookupByUsageArgs is 
libvirt's remote_secret_lookup_by_usage_args +type SecretLookupByUsageArgs struct { + UsageType int32 + UsageID string +} + +// SecretLookupByUsageRet is libvirt's remote_secret_lookup_by_usage_ret +type SecretLookupByUsageRet struct { + OptSecret Secret +} + +// DomainMigratePrepareTunnelArgs is libvirt's remote_domain_migrate_prepare_tunnel_args +type DomainMigratePrepareTunnelArgs struct { + Flags uint64 + Dname OptString + Resource uint64 + DomXML string +} + +// ConnectIsSecureRet is libvirt's remote_connect_is_secure_ret +type ConnectIsSecureRet struct { + Secure int32 +} + +// DomainIsActiveArgs is libvirt's remote_domain_is_active_args +type DomainIsActiveArgs struct { + Dom Domain +} + +// DomainIsActiveRet is libvirt's remote_domain_is_active_ret +type DomainIsActiveRet struct { + Active int32 +} + +// DomainIsPersistentArgs is libvirt's remote_domain_is_persistent_args +type DomainIsPersistentArgs struct { + Dom Domain +} + +// DomainIsPersistentRet is libvirt's remote_domain_is_persistent_ret +type DomainIsPersistentRet struct { + Persistent int32 +} + +// DomainIsUpdatedArgs is libvirt's remote_domain_is_updated_args +type DomainIsUpdatedArgs struct { + Dom Domain +} + +// DomainIsUpdatedRet is libvirt's remote_domain_is_updated_ret +type DomainIsUpdatedRet struct { + Updated int32 +} + +// NetworkIsActiveArgs is libvirt's remote_network_is_active_args +type NetworkIsActiveArgs struct { + Net Network +} + +// NetworkIsActiveRet is libvirt's remote_network_is_active_ret +type NetworkIsActiveRet struct { + Active int32 +} + +// NetworkIsPersistentArgs is libvirt's remote_network_is_persistent_args +type NetworkIsPersistentArgs struct { + Net Network +} + +// NetworkIsPersistentRet is libvirt's remote_network_is_persistent_ret +type NetworkIsPersistentRet struct { + Persistent int32 +} + +// StoragePoolIsActiveArgs is libvirt's remote_storage_pool_is_active_args +type StoragePoolIsActiveArgs struct { + Pool StoragePool +} + +// StoragePoolIsActiveRet is libvirt's remote_storage_pool_is_active_ret +type StoragePoolIsActiveRet struct { + Active int32 +} + +// StoragePoolIsPersistentArgs is libvirt's remote_storage_pool_is_persistent_args +type StoragePoolIsPersistentArgs struct { + Pool StoragePool +} + +// StoragePoolIsPersistentRet is libvirt's remote_storage_pool_is_persistent_ret +type StoragePoolIsPersistentRet struct { + Persistent int32 +} + +// InterfaceIsActiveArgs is libvirt's remote_interface_is_active_args +type InterfaceIsActiveArgs struct { + Iface Interface +} + +// InterfaceIsActiveRet is libvirt's remote_interface_is_active_ret +type InterfaceIsActiveRet struct { + Active int32 +} + +// ConnectCompareCPUArgs is libvirt's remote_connect_compare_cpu_args +type ConnectCompareCPUArgs struct { + XML string + Flags ConnectCompareCPUFlags +} + +// ConnectCompareCPURet is libvirt's remote_connect_compare_cpu_ret +type ConnectCompareCPURet struct { + Result int32 +} + +// ConnectBaselineCPUArgs is libvirt's remote_connect_baseline_cpu_args +type ConnectBaselineCPUArgs struct { + XMLCPUs []string + Flags ConnectBaselineCPUFlags +} + +// ConnectBaselineCPURet is libvirt's remote_connect_baseline_cpu_ret +type ConnectBaselineCPURet struct { + CPU string +} + +// DomainGetJobInfoArgs is libvirt's remote_domain_get_job_info_args +type DomainGetJobInfoArgs struct { + Dom Domain +} + +// DomainGetJobInfoRet is libvirt's remote_domain_get_job_info_ret +type DomainGetJobInfoRet struct { + Type int32 + TimeElapsed uint64 + TimeRemaining uint64 + DataTotal uint64 + DataProcessed 
uint64 + DataRemaining uint64 + MemTotal uint64 + MemProcessed uint64 + MemRemaining uint64 + FileTotal uint64 + FileProcessed uint64 + FileRemaining uint64 +} + +// DomainGetJobStatsArgs is libvirt's remote_domain_get_job_stats_args +type DomainGetJobStatsArgs struct { + Dom Domain + Flags DomainGetJobStatsFlags +} + +// DomainGetJobStatsRet is libvirt's remote_domain_get_job_stats_ret +type DomainGetJobStatsRet struct { + Type int32 + Params []TypedParam +} + +// DomainAbortJobArgs is libvirt's remote_domain_abort_job_args +type DomainAbortJobArgs struct { + Dom Domain +} + +// DomainMigrateGetMaxDowntimeArgs is libvirt's remote_domain_migrate_get_max_downtime_args +type DomainMigrateGetMaxDowntimeArgs struct { + Dom Domain + Flags uint32 +} + +// DomainMigrateGetMaxDowntimeRet is libvirt's remote_domain_migrate_get_max_downtime_ret +type DomainMigrateGetMaxDowntimeRet struct { + Downtime uint64 +} + +// DomainMigrateSetMaxDowntimeArgs is libvirt's remote_domain_migrate_set_max_downtime_args +type DomainMigrateSetMaxDowntimeArgs struct { + Dom Domain + Downtime uint64 + Flags uint32 +} + +// DomainMigrateGetCompressionCacheArgs is libvirt's remote_domain_migrate_get_compression_cache_args +type DomainMigrateGetCompressionCacheArgs struct { + Dom Domain + Flags uint32 +} + +// DomainMigrateGetCompressionCacheRet is libvirt's remote_domain_migrate_get_compression_cache_ret +type DomainMigrateGetCompressionCacheRet struct { + CacheSize uint64 +} + +// DomainMigrateSetCompressionCacheArgs is libvirt's remote_domain_migrate_set_compression_cache_args +type DomainMigrateSetCompressionCacheArgs struct { + Dom Domain + CacheSize uint64 + Flags uint32 +} + +// DomainMigrateSetMaxSpeedArgs is libvirt's remote_domain_migrate_set_max_speed_args +type DomainMigrateSetMaxSpeedArgs struct { + Dom Domain + Bandwidth uint64 + Flags uint32 +} + +// DomainMigrateGetMaxSpeedArgs is libvirt's remote_domain_migrate_get_max_speed_args +type DomainMigrateGetMaxSpeedArgs struct { + Dom Domain + Flags uint32 +} + +// DomainMigrateGetMaxSpeedRet is libvirt's remote_domain_migrate_get_max_speed_ret +type DomainMigrateGetMaxSpeedRet struct { + Bandwidth uint64 +} + +// ConnectDomainEventRegisterAnyArgs is libvirt's remote_connect_domain_event_register_any_args +type ConnectDomainEventRegisterAnyArgs struct { + EventID int32 +} + +// ConnectDomainEventDeregisterAnyArgs is libvirt's remote_connect_domain_event_deregister_any_args +type ConnectDomainEventDeregisterAnyArgs struct { + EventID int32 +} + +// ConnectDomainEventCallbackRegisterAnyArgs is libvirt's remote_connect_domain_event_callback_register_any_args +type ConnectDomainEventCallbackRegisterAnyArgs struct { + EventID int32 + Dom OptDomain +} + +// ConnectDomainEventCallbackRegisterAnyRet is libvirt's remote_connect_domain_event_callback_register_any_ret +type ConnectDomainEventCallbackRegisterAnyRet struct { + CallbackID int32 +} + +// ConnectDomainEventCallbackDeregisterAnyArgs is libvirt's remote_connect_domain_event_callback_deregister_any_args +type ConnectDomainEventCallbackDeregisterAnyArgs struct { + CallbackID int32 +} + +// DomainEventRebootMsg is libvirt's remote_domain_event_reboot_msg +type DomainEventRebootMsg struct { + Dom Domain +} + +// DomainEventCallbackRebootMsg is libvirt's remote_domain_event_callback_reboot_msg +type DomainEventCallbackRebootMsg struct { + CallbackID int32 + Msg DomainEventRebootMsg +} + +// DomainEventRtcChangeMsg is libvirt's remote_domain_event_rtc_change_msg +type DomainEventRtcChangeMsg struct { + Dom Domain + 
Offset int64 +} + +// DomainEventCallbackRtcChangeMsg is libvirt's remote_domain_event_callback_rtc_change_msg +type DomainEventCallbackRtcChangeMsg struct { + CallbackID int32 + Msg DomainEventRtcChangeMsg +} + +// DomainEventWatchdogMsg is libvirt's remote_domain_event_watchdog_msg +type DomainEventWatchdogMsg struct { + Dom Domain + Action int32 +} + +// DomainEventCallbackWatchdogMsg is libvirt's remote_domain_event_callback_watchdog_msg +type DomainEventCallbackWatchdogMsg struct { + CallbackID int32 + Msg DomainEventWatchdogMsg +} + +// DomainEventIOErrorMsg is libvirt's remote_domain_event_io_error_msg +type DomainEventIOErrorMsg struct { + Dom Domain + SrcPath string + DevAlias string + Action int32 +} + +// DomainEventCallbackIOErrorMsg is libvirt's remote_domain_event_callback_io_error_msg +type DomainEventCallbackIOErrorMsg struct { + CallbackID int32 + Msg DomainEventIOErrorMsg +} + +// DomainEventIOErrorReasonMsg is libvirt's remote_domain_event_io_error_reason_msg +type DomainEventIOErrorReasonMsg struct { + Dom Domain + SrcPath string + DevAlias string + Action int32 + Reason string +} + +// DomainEventCallbackIOErrorReasonMsg is libvirt's remote_domain_event_callback_io_error_reason_msg +type DomainEventCallbackIOErrorReasonMsg struct { + CallbackID int32 + Msg DomainEventIOErrorReasonMsg +} + +// DomainEventGraphicsAddress is libvirt's remote_domain_event_graphics_address +type DomainEventGraphicsAddress struct { + Family int32 + Node string + Service string +} + +// DomainEventGraphicsIdentity is libvirt's remote_domain_event_graphics_identity +type DomainEventGraphicsIdentity struct { + Type string + Name string +} + +// DomainEventGraphicsMsg is libvirt's remote_domain_event_graphics_msg +type DomainEventGraphicsMsg struct { + Dom Domain + Phase int32 + Local DomainEventGraphicsAddress + Remote DomainEventGraphicsAddress + AuthScheme string + Subject []DomainEventGraphicsIdentity +} + +// DomainEventCallbackGraphicsMsg is libvirt's remote_domain_event_callback_graphics_msg +type DomainEventCallbackGraphicsMsg struct { + CallbackID int32 + Msg DomainEventGraphicsMsg +} + +// DomainEventBlockJobMsg is libvirt's remote_domain_event_block_job_msg +type DomainEventBlockJobMsg struct { + Dom Domain + Path string + Type int32 + Status int32 +} + +// DomainEventCallbackBlockJobMsg is libvirt's remote_domain_event_callback_block_job_msg +type DomainEventCallbackBlockJobMsg struct { + CallbackID int32 + Msg DomainEventBlockJobMsg +} + +// DomainEventDiskChangeMsg is libvirt's remote_domain_event_disk_change_msg +type DomainEventDiskChangeMsg struct { + Dom Domain + OldSrcPath OptString + NewSrcPath OptString + DevAlias string + Reason int32 +} + +// DomainEventCallbackDiskChangeMsg is libvirt's remote_domain_event_callback_disk_change_msg +type DomainEventCallbackDiskChangeMsg struct { + CallbackID int32 + Msg DomainEventDiskChangeMsg +} + +// DomainEventTrayChangeMsg is libvirt's remote_domain_event_tray_change_msg +type DomainEventTrayChangeMsg struct { + Dom Domain + DevAlias string + Reason int32 +} + +// DomainEventCallbackTrayChangeMsg is libvirt's remote_domain_event_callback_tray_change_msg +type DomainEventCallbackTrayChangeMsg struct { + CallbackID int32 + Msg DomainEventTrayChangeMsg +} + +// DomainEventPmwakeupMsg is libvirt's remote_domain_event_pmwakeup_msg +type DomainEventPmwakeupMsg struct { + Dom Domain +} + +// DomainEventCallbackPmwakeupMsg is libvirt's remote_domain_event_callback_pmwakeup_msg +type DomainEventCallbackPmwakeupMsg struct { + CallbackID int32 + 
Reason int32 + Msg DomainEventPmwakeupMsg +} + +// DomainEventPmsuspendMsg is libvirt's remote_domain_event_pmsuspend_msg +type DomainEventPmsuspendMsg struct { + Dom Domain +} + +// DomainEventCallbackPmsuspendMsg is libvirt's remote_domain_event_callback_pmsuspend_msg +type DomainEventCallbackPmsuspendMsg struct { + CallbackID int32 + Reason int32 + Msg DomainEventPmsuspendMsg +} + +// DomainEventBalloonChangeMsg is libvirt's remote_domain_event_balloon_change_msg +type DomainEventBalloonChangeMsg struct { + Dom Domain + Actual uint64 +} + +// DomainEventCallbackBalloonChangeMsg is libvirt's remote_domain_event_callback_balloon_change_msg +type DomainEventCallbackBalloonChangeMsg struct { + CallbackID int32 + Msg DomainEventBalloonChangeMsg +} + +// DomainEventPmsuspendDiskMsg is libvirt's remote_domain_event_pmsuspend_disk_msg +type DomainEventPmsuspendDiskMsg struct { + Dom Domain +} + +// DomainEventCallbackPmsuspendDiskMsg is libvirt's remote_domain_event_callback_pmsuspend_disk_msg +type DomainEventCallbackPmsuspendDiskMsg struct { + CallbackID int32 + Reason int32 + Msg DomainEventPmsuspendDiskMsg +} + +// DomainManagedSaveArgs is libvirt's remote_domain_managed_save_args +type DomainManagedSaveArgs struct { + Dom Domain + Flags uint32 +} + +// DomainHasManagedSaveImageArgs is libvirt's remote_domain_has_managed_save_image_args +type DomainHasManagedSaveImageArgs struct { + Dom Domain + Flags uint32 +} + +// DomainHasManagedSaveImageRet is libvirt's remote_domain_has_managed_save_image_ret +type DomainHasManagedSaveImageRet struct { + Result int32 +} + +// DomainManagedSaveRemoveArgs is libvirt's remote_domain_managed_save_remove_args +type DomainManagedSaveRemoveArgs struct { + Dom Domain + Flags uint32 +} + +// DomainManagedSaveGetXMLDescArgs is libvirt's remote_domain_managed_save_get_xml_desc_args +type DomainManagedSaveGetXMLDescArgs struct { + Dom Domain + Flags DomainXMLFlags +} + +// DomainManagedSaveGetXMLDescRet is libvirt's remote_domain_managed_save_get_xml_desc_ret +type DomainManagedSaveGetXMLDescRet struct { + XML string +} + +// DomainManagedSaveDefineXMLArgs is libvirt's remote_domain_managed_save_define_xml_args +type DomainManagedSaveDefineXMLArgs struct { + Dom Domain + Dxml OptString + Flags DomainSaveRestoreFlags +} + +// DomainSnapshotCreateXMLArgs is libvirt's remote_domain_snapshot_create_xml_args +type DomainSnapshotCreateXMLArgs struct { + Dom Domain + XMLDesc string + Flags uint32 +} + +// DomainSnapshotCreateXMLRet is libvirt's remote_domain_snapshot_create_xml_ret +type DomainSnapshotCreateXMLRet struct { + Snap DomainSnapshot +} + +// DomainSnapshotGetXMLDescArgs is libvirt's remote_domain_snapshot_get_xml_desc_args +type DomainSnapshotGetXMLDescArgs struct { + Snap DomainSnapshot + Flags uint32 +} + +// DomainSnapshotGetXMLDescRet is libvirt's remote_domain_snapshot_get_xml_desc_ret +type DomainSnapshotGetXMLDescRet struct { + XML string +} + +// DomainSnapshotNumArgs is libvirt's remote_domain_snapshot_num_args +type DomainSnapshotNumArgs struct { + Dom Domain + Flags uint32 +} + +// DomainSnapshotNumRet is libvirt's remote_domain_snapshot_num_ret +type DomainSnapshotNumRet struct { + Num int32 +} + +// DomainSnapshotListNamesArgs is libvirt's remote_domain_snapshot_list_names_args +type DomainSnapshotListNamesArgs struct { + Dom Domain + Maxnames int32 + Flags uint32 +} + +// DomainSnapshotListNamesRet is libvirt's remote_domain_snapshot_list_names_ret +type DomainSnapshotListNamesRet struct { + Names []string +} + +// DomainListAllSnapshotsArgs is 
libvirt's remote_domain_list_all_snapshots_args +type DomainListAllSnapshotsArgs struct { + Dom Domain + NeedResults int32 + Flags uint32 +} + +// DomainListAllSnapshotsRet is libvirt's remote_domain_list_all_snapshots_ret +type DomainListAllSnapshotsRet struct { + Snapshots []DomainSnapshot + Ret int32 +} + +// DomainSnapshotNumChildrenArgs is libvirt's remote_domain_snapshot_num_children_args +type DomainSnapshotNumChildrenArgs struct { + Snap DomainSnapshot + Flags uint32 +} + +// DomainSnapshotNumChildrenRet is libvirt's remote_domain_snapshot_num_children_ret +type DomainSnapshotNumChildrenRet struct { + Num int32 +} + +// DomainSnapshotListChildrenNamesArgs is libvirt's remote_domain_snapshot_list_children_names_args +type DomainSnapshotListChildrenNamesArgs struct { + Snap DomainSnapshot + Maxnames int32 + Flags uint32 +} + +// DomainSnapshotListChildrenNamesRet is libvirt's remote_domain_snapshot_list_children_names_ret +type DomainSnapshotListChildrenNamesRet struct { + Names []string +} + +// DomainSnapshotListAllChildrenArgs is libvirt's remote_domain_snapshot_list_all_children_args +type DomainSnapshotListAllChildrenArgs struct { + Snapshot DomainSnapshot + NeedResults int32 + Flags uint32 +} + +// DomainSnapshotListAllChildrenRet is libvirt's remote_domain_snapshot_list_all_children_ret +type DomainSnapshotListAllChildrenRet struct { + Snapshots []DomainSnapshot + Ret int32 +} + +// DomainSnapshotLookupByNameArgs is libvirt's remote_domain_snapshot_lookup_by_name_args +type DomainSnapshotLookupByNameArgs struct { + Dom Domain + Name string + Flags uint32 +} + +// DomainSnapshotLookupByNameRet is libvirt's remote_domain_snapshot_lookup_by_name_ret +type DomainSnapshotLookupByNameRet struct { + Snap DomainSnapshot +} + +// DomainHasCurrentSnapshotArgs is libvirt's remote_domain_has_current_snapshot_args +type DomainHasCurrentSnapshotArgs struct { + Dom Domain + Flags uint32 +} + +// DomainHasCurrentSnapshotRet is libvirt's remote_domain_has_current_snapshot_ret +type DomainHasCurrentSnapshotRet struct { + Result int32 +} + +// DomainSnapshotGetParentArgs is libvirt's remote_domain_snapshot_get_parent_args +type DomainSnapshotGetParentArgs struct { + Snap DomainSnapshot + Flags uint32 +} + +// DomainSnapshotGetParentRet is libvirt's remote_domain_snapshot_get_parent_ret +type DomainSnapshotGetParentRet struct { + Snap DomainSnapshot +} + +// DomainSnapshotCurrentArgs is libvirt's remote_domain_snapshot_current_args +type DomainSnapshotCurrentArgs struct { + Dom Domain + Flags uint32 +} + +// DomainSnapshotCurrentRet is libvirt's remote_domain_snapshot_current_ret +type DomainSnapshotCurrentRet struct { + Snap DomainSnapshot +} + +// DomainSnapshotIsCurrentArgs is libvirt's remote_domain_snapshot_is_current_args +type DomainSnapshotIsCurrentArgs struct { + Snap DomainSnapshot + Flags uint32 +} + +// DomainSnapshotIsCurrentRet is libvirt's remote_domain_snapshot_is_current_ret +type DomainSnapshotIsCurrentRet struct { + Current int32 +} + +// DomainSnapshotHasMetadataArgs is libvirt's remote_domain_snapshot_has_metadata_args +type DomainSnapshotHasMetadataArgs struct { + Snap DomainSnapshot + Flags uint32 +} + +// DomainSnapshotHasMetadataRet is libvirt's remote_domain_snapshot_has_metadata_ret +type DomainSnapshotHasMetadataRet struct { + Metadata int32 +} + +// DomainRevertToSnapshotArgs is libvirt's remote_domain_revert_to_snapshot_args +type DomainRevertToSnapshotArgs struct { + Snap DomainSnapshot + Flags uint32 +} + +// DomainSnapshotDeleteArgs is libvirt's 
remote_domain_snapshot_delete_args +type DomainSnapshotDeleteArgs struct { + Snap DomainSnapshot + Flags DomainSnapshotDeleteFlags +} + +// DomainOpenConsoleArgs is libvirt's remote_domain_open_console_args +type DomainOpenConsoleArgs struct { + Dom Domain + DevName OptString + Flags uint32 +} + +// DomainOpenChannelArgs is libvirt's remote_domain_open_channel_args +type DomainOpenChannelArgs struct { + Dom Domain + Name OptString + Flags DomainChannelFlags +} + +// StorageVolUploadArgs is libvirt's remote_storage_vol_upload_args +type StorageVolUploadArgs struct { + Vol StorageVol + Offset uint64 + Length uint64 + Flags StorageVolUploadFlags +} + +// StorageVolDownloadArgs is libvirt's remote_storage_vol_download_args +type StorageVolDownloadArgs struct { + Vol StorageVol + Offset uint64 + Length uint64 + Flags StorageVolDownloadFlags +} + +// DomainGetStateArgs is libvirt's remote_domain_get_state_args +type DomainGetStateArgs struct { + Dom Domain + Flags uint32 +} + +// DomainGetStateRet is libvirt's remote_domain_get_state_ret +type DomainGetStateRet struct { + State int32 + Reason int32 +} + +// DomainMigrateBegin3Args is libvirt's remote_domain_migrate_begin3_args +type DomainMigrateBegin3Args struct { + Dom Domain + Xmlin OptString + Flags uint64 + Dname OptString + Resource uint64 +} + +// DomainMigrateBegin3Ret is libvirt's remote_domain_migrate_begin3_ret +type DomainMigrateBegin3Ret struct { + CookieOut []byte + XML string +} + +// DomainMigratePrepare3Args is libvirt's remote_domain_migrate_prepare3_args +type DomainMigratePrepare3Args struct { + CookieIn []byte + UriIn OptString + Flags uint64 + Dname OptString + Resource uint64 + DomXML string +} + +// DomainMigratePrepare3Ret is libvirt's remote_domain_migrate_prepare3_ret +type DomainMigratePrepare3Ret struct { + CookieOut []byte + UriOut OptString +} + +// DomainMigratePrepareTunnel3Args is libvirt's remote_domain_migrate_prepare_tunnel3_args +type DomainMigratePrepareTunnel3Args struct { + CookieIn []byte + Flags uint64 + Dname OptString + Resource uint64 + DomXML string +} + +// DomainMigratePrepareTunnel3Ret is libvirt's remote_domain_migrate_prepare_tunnel3_ret +type DomainMigratePrepareTunnel3Ret struct { + CookieOut []byte +} + +// DomainMigratePerform3Args is libvirt's remote_domain_migrate_perform3_args +type DomainMigratePerform3Args struct { + Dom Domain + Xmlin OptString + CookieIn []byte + Dconnuri OptString + Uri OptString + Flags uint64 + Dname OptString + Resource uint64 +} + +// DomainMigratePerform3Ret is libvirt's remote_domain_migrate_perform3_ret +type DomainMigratePerform3Ret struct { + CookieOut []byte +} + +// DomainMigrateFinish3Args is libvirt's remote_domain_migrate_finish3_args +type DomainMigrateFinish3Args struct { + Dname string + CookieIn []byte + Dconnuri OptString + Uri OptString + Flags uint64 + Cancelled int32 +} + +// DomainMigrateFinish3Ret is libvirt's remote_domain_migrate_finish3_ret +type DomainMigrateFinish3Ret struct { + Dom Domain + CookieOut []byte +} + +// DomainMigrateConfirm3Args is libvirt's remote_domain_migrate_confirm3_args +type DomainMigrateConfirm3Args struct { + Dom Domain + CookieIn []byte + Flags uint64 + Cancelled int32 +} + +// DomainEventControlErrorMsg is libvirt's remote_domain_event_control_error_msg +type DomainEventControlErrorMsg struct { + Dom Domain +} + +// DomainEventCallbackControlErrorMsg is libvirt's remote_domain_event_callback_control_error_msg +type DomainEventCallbackControlErrorMsg struct { + CallbackID int32 + Msg DomainEventControlErrorMsg +} 
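The structs above are generated one-to-one from libvirt's XDR protocol definitions, so a request payload is just the struct's fields marshalled in declaration order. A minimal sketch of that idea, assuming the davecgh go-xdr/xdr2 encoder (consistent with the xdr.NewDecoder calls later in this file) and simplified stand-in definitions for the package's Domain, UUID and OptString types, which are declared elsewhere in the generated code:

```go
// Illustrative sketch only; Domain, UUID and OptString are simplified
// stand-ins for the definitions elsewhere in this package.
package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

type UUID [16]byte

type Domain struct {
	Name string
	UUID UUID
	ID   int32
}

// OptString models libvirt's optional remote_string: zero or one element.
type OptString []string

type DomainOpenConsoleArgs struct {
	Dom     Domain
	DevName OptString
	Flags   uint32
}

func main() {
	args := DomainOpenConsoleArgs{
		Dom:     Domain{Name: "test-vm", ID: 1},
		DevName: OptString{"serial0"},
	}

	// XDR encodes struct fields in declaration order: the Domain header,
	// the optional device name, then the flags word.
	var buf bytes.Buffer
	if _, err := xdr.Marshal(&buf, &args); err != nil {
		panic(err)
	}
	fmt.Printf("encoded request payload: %d bytes\n", buf.Len())
}
```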
+ +// DomainGetControlInfoArgs is libvirt's remote_domain_get_control_info_args +type DomainGetControlInfoArgs struct { + Dom Domain + Flags uint32 +} + +// DomainGetControlInfoRet is libvirt's remote_domain_get_control_info_ret +type DomainGetControlInfoRet struct { + State uint32 + Details uint32 + StateTime uint64 +} + +// DomainOpenGraphicsArgs is libvirt's remote_domain_open_graphics_args +type DomainOpenGraphicsArgs struct { + Dom Domain + Idx uint32 + Flags DomainOpenGraphicsFlags +} + +// DomainOpenGraphicsFdArgs is libvirt's remote_domain_open_graphics_fd_args +type DomainOpenGraphicsFdArgs struct { + Dom Domain + Idx uint32 + Flags DomainOpenGraphicsFlags +} + +// NodeSuspendForDurationArgs is libvirt's remote_node_suspend_for_duration_args +type NodeSuspendForDurationArgs struct { + Target uint32 + Duration uint64 + Flags uint32 +} + +// DomainShutdownFlagsArgs is libvirt's remote_domain_shutdown_flags_args +type DomainShutdownFlagsArgs struct { + Dom Domain + Flags DomainShutdownFlagValues +} + +// DomainGetDiskErrorsArgs is libvirt's remote_domain_get_disk_errors_args +type DomainGetDiskErrorsArgs struct { + Dom Domain + Maxerrors uint32 + Flags uint32 +} + +// DomainGetDiskErrorsRet is libvirt's remote_domain_get_disk_errors_ret +type DomainGetDiskErrorsRet struct { + Errors []DomainDiskError + Nerrors int32 +} + +// ConnectListAllDomainsArgs is libvirt's remote_connect_list_all_domains_args +type ConnectListAllDomainsArgs struct { + NeedResults int32 + Flags ConnectListAllDomainsFlags +} + +// ConnectListAllDomainsRet is libvirt's remote_connect_list_all_domains_ret +type ConnectListAllDomainsRet struct { + Domains []Domain + Ret uint32 +} + +// ConnectListAllStoragePoolsArgs is libvirt's remote_connect_list_all_storage_pools_args +type ConnectListAllStoragePoolsArgs struct { + NeedResults int32 + Flags ConnectListAllStoragePoolsFlags +} + +// ConnectListAllStoragePoolsRet is libvirt's remote_connect_list_all_storage_pools_ret +type ConnectListAllStoragePoolsRet struct { + Pools []StoragePool + Ret uint32 +} + +// StoragePoolListAllVolumesArgs is libvirt's remote_storage_pool_list_all_volumes_args +type StoragePoolListAllVolumesArgs struct { + Pool StoragePool + NeedResults int32 + Flags uint32 +} + +// StoragePoolListAllVolumesRet is libvirt's remote_storage_pool_list_all_volumes_ret +type StoragePoolListAllVolumesRet struct { + Vols []StorageVol + Ret uint32 +} + +// ConnectListAllNetworksArgs is libvirt's remote_connect_list_all_networks_args +type ConnectListAllNetworksArgs struct { + NeedResults int32 + Flags ConnectListAllNetworksFlags +} + +// ConnectListAllNetworksRet is libvirt's remote_connect_list_all_networks_ret +type ConnectListAllNetworksRet struct { + Nets []Network + Ret uint32 +} + +// ConnectListAllInterfacesArgs is libvirt's remote_connect_list_all_interfaces_args +type ConnectListAllInterfacesArgs struct { + NeedResults int32 + Flags ConnectListAllInterfacesFlags +} + +// ConnectListAllInterfacesRet is libvirt's remote_connect_list_all_interfaces_ret +type ConnectListAllInterfacesRet struct { + Ifaces []Interface + Ret uint32 +} + +// ConnectListAllNodeDevicesArgs is libvirt's remote_connect_list_all_node_devices_args +type ConnectListAllNodeDevicesArgs struct { + NeedResults int32 + Flags uint32 +} + +// ConnectListAllNodeDevicesRet is libvirt's remote_connect_list_all_node_devices_ret +type ConnectListAllNodeDevicesRet struct { + Devices []NodeDevice + Ret uint32 +} + +// ConnectListAllNwfiltersArgs is libvirt's remote_connect_list_all_nwfilters_args 
+type ConnectListAllNwfiltersArgs struct { + NeedResults int32 + Flags uint32 +} + +// ConnectListAllNwfiltersRet is libvirt's remote_connect_list_all_nwfilters_ret +type ConnectListAllNwfiltersRet struct { + Filters []Nwfilter + Ret uint32 +} + +// ConnectListAllSecretsArgs is libvirt's remote_connect_list_all_secrets_args +type ConnectListAllSecretsArgs struct { + NeedResults int32 + Flags ConnectListAllSecretsFlags +} + +// ConnectListAllSecretsRet is libvirt's remote_connect_list_all_secrets_ret +type ConnectListAllSecretsRet struct { + Secrets []Secret + Ret uint32 +} + +// NodeSetMemoryParametersArgs is libvirt's remote_node_set_memory_parameters_args +type NodeSetMemoryParametersArgs struct { + Params []TypedParam + Flags uint32 +} + +// NodeGetMemoryParametersArgs is libvirt's remote_node_get_memory_parameters_args +type NodeGetMemoryParametersArgs struct { + Nparams int32 + Flags uint32 +} + +// NodeGetMemoryParametersRet is libvirt's remote_node_get_memory_parameters_ret +type NodeGetMemoryParametersRet struct { + Params []TypedParam + Nparams int32 +} + +// NodeGetCPUMapArgs is libvirt's remote_node_get_cpu_map_args +type NodeGetCPUMapArgs struct { + NeedMap int32 + NeedOnline int32 + Flags uint32 +} + +// NodeGetCPUMapRet is libvirt's remote_node_get_cpu_map_ret +type NodeGetCPUMapRet struct { + Cpumap []byte + Online uint32 + Ret int32 +} + +// DomainFstrimArgs is libvirt's remote_domain_fstrim_args +type DomainFstrimArgs struct { + Dom Domain + MountPoint OptString + Minimum uint64 + Flags uint32 +} + +// DomainGetTimeArgs is libvirt's remote_domain_get_time_args +type DomainGetTimeArgs struct { + Dom Domain + Flags uint32 +} + +// DomainGetTimeRet is libvirt's remote_domain_get_time_ret +type DomainGetTimeRet struct { + Seconds int64 + Nseconds uint32 +} + +// DomainSetTimeArgs is libvirt's remote_domain_set_time_args +type DomainSetTimeArgs struct { + Dom Domain + Seconds int64 + Nseconds uint32 + Flags DomainSetTimeFlags +} + +// DomainMigrateBegin3ParamsArgs is libvirt's remote_domain_migrate_begin3_params_args +type DomainMigrateBegin3ParamsArgs struct { + Dom Domain + Params []TypedParam + Flags uint32 +} + +// DomainMigrateBegin3ParamsRet is libvirt's remote_domain_migrate_begin3_params_ret +type DomainMigrateBegin3ParamsRet struct { + CookieOut []byte + XML string +} + +// DomainMigratePrepare3ParamsArgs is libvirt's remote_domain_migrate_prepare3_params_args +type DomainMigratePrepare3ParamsArgs struct { + Params []TypedParam + CookieIn []byte + Flags uint32 +} + +// DomainMigratePrepare3ParamsRet is libvirt's remote_domain_migrate_prepare3_params_ret +type DomainMigratePrepare3ParamsRet struct { + CookieOut []byte + UriOut OptString +} + +// DomainMigratePrepareTunnel3ParamsArgs is libvirt's remote_domain_migrate_prepare_tunnel3_params_args +type DomainMigratePrepareTunnel3ParamsArgs struct { + Params []TypedParam + CookieIn []byte + Flags uint32 +} + +// DomainMigratePrepareTunnel3ParamsRet is libvirt's remote_domain_migrate_prepare_tunnel3_params_ret +type DomainMigratePrepareTunnel3ParamsRet struct { + CookieOut []byte +} + +// DomainMigratePerform3ParamsArgs is libvirt's remote_domain_migrate_perform3_params_args +type DomainMigratePerform3ParamsArgs struct { + Dom Domain + Dconnuri OptString + Params []TypedParam + CookieIn []byte + Flags DomainMigrateFlags +} + +// DomainMigratePerform3ParamsRet is libvirt's remote_domain_migrate_perform3_params_ret +type DomainMigratePerform3ParamsRet struct { + CookieOut []byte +} + +// DomainMigrateFinish3ParamsArgs is 
libvirt's remote_domain_migrate_finish3_params_args +type DomainMigrateFinish3ParamsArgs struct { + Params []TypedParam + CookieIn []byte + Flags uint32 + Cancelled int32 +} + +// DomainMigrateFinish3ParamsRet is libvirt's remote_domain_migrate_finish3_params_ret +type DomainMigrateFinish3ParamsRet struct { + Dom Domain + CookieOut []byte +} + +// DomainMigrateConfirm3ParamsArgs is libvirt's remote_domain_migrate_confirm3_params_args +type DomainMigrateConfirm3ParamsArgs struct { + Dom Domain + Params []TypedParam + CookieIn []byte + Flags uint32 + Cancelled int32 +} + +// DomainEventDeviceRemovedMsg is libvirt's remote_domain_event_device_removed_msg +type DomainEventDeviceRemovedMsg struct { + Dom Domain + DevAlias string +} + +// DomainEventCallbackDeviceRemovedMsg is libvirt's remote_domain_event_callback_device_removed_msg +type DomainEventCallbackDeviceRemovedMsg struct { + CallbackID int32 + Msg DomainEventDeviceRemovedMsg +} + +// DomainEventBlockJob2Msg is libvirt's remote_domain_event_block_job_2_msg +type DomainEventBlockJob2Msg struct { + CallbackID int32 + Dom Domain + Dst string + Type int32 + Status int32 +} + +// DomainEventBlockThresholdMsg is libvirt's remote_domain_event_block_threshold_msg +type DomainEventBlockThresholdMsg struct { + CallbackID int32 + Dom Domain + Dev string + Path OptString + Threshold uint64 + Excess uint64 +} + +// DomainEventCallbackTunableMsg is libvirt's remote_domain_event_callback_tunable_msg +type DomainEventCallbackTunableMsg struct { + CallbackID int32 + Dom Domain + Params []TypedParam +} + +// DomainEventCallbackDeviceAddedMsg is libvirt's remote_domain_event_callback_device_added_msg +type DomainEventCallbackDeviceAddedMsg struct { + CallbackID int32 + Dom Domain + DevAlias string +} + +// ConnectEventConnectionClosedMsg is libvirt's remote_connect_event_connection_closed_msg +type ConnectEventConnectionClosedMsg struct { + Reason int32 +} + +// ConnectGetCPUModelNamesArgs is libvirt's remote_connect_get_cpu_model_names_args +type ConnectGetCPUModelNamesArgs struct { + Arch string + NeedResults int32 + Flags uint32 +} + +// ConnectGetCPUModelNamesRet is libvirt's remote_connect_get_cpu_model_names_ret +type ConnectGetCPUModelNamesRet struct { + Models []string + Ret int32 +} + +// ConnectNetworkEventRegisterAnyArgs is libvirt's remote_connect_network_event_register_any_args +type ConnectNetworkEventRegisterAnyArgs struct { + EventID int32 + Net OptNetwork +} + +// ConnectNetworkEventRegisterAnyRet is libvirt's remote_connect_network_event_register_any_ret +type ConnectNetworkEventRegisterAnyRet struct { + CallbackID int32 +} + +// ConnectNetworkEventDeregisterAnyArgs is libvirt's remote_connect_network_event_deregister_any_args +type ConnectNetworkEventDeregisterAnyArgs struct { + CallbackID int32 +} + +// NetworkEventLifecycleMsg is libvirt's remote_network_event_lifecycle_msg +type NetworkEventLifecycleMsg struct { + CallbackID int32 + Net Network + Event int32 + Detail int32 +} + +// ConnectStoragePoolEventRegisterAnyArgs is libvirt's remote_connect_storage_pool_event_register_any_args +type ConnectStoragePoolEventRegisterAnyArgs struct { + EventID int32 + Pool OptStoragePool +} + +// ConnectStoragePoolEventRegisterAnyRet is libvirt's remote_connect_storage_pool_event_register_any_ret +type ConnectStoragePoolEventRegisterAnyRet struct { + CallbackID int32 +} + +// ConnectStoragePoolEventDeregisterAnyArgs is libvirt's remote_connect_storage_pool_event_deregister_any_args +type ConnectStoragePoolEventDeregisterAnyArgs struct { + 
CallbackID int32 +} + +// StoragePoolEventLifecycleMsg is libvirt's remote_storage_pool_event_lifecycle_msg +type StoragePoolEventLifecycleMsg struct { + CallbackID int32 + Pool StoragePool + Event int32 + Detail int32 +} + +// StoragePoolEventRefreshMsg is libvirt's remote_storage_pool_event_refresh_msg +type StoragePoolEventRefreshMsg struct { + CallbackID int32 + Pool StoragePool +} + +// ConnectNodeDeviceEventRegisterAnyArgs is libvirt's remote_connect_node_device_event_register_any_args +type ConnectNodeDeviceEventRegisterAnyArgs struct { + EventID int32 + Dev OptNodeDevice +} + +// ConnectNodeDeviceEventRegisterAnyRet is libvirt's remote_connect_node_device_event_register_any_ret +type ConnectNodeDeviceEventRegisterAnyRet struct { + CallbackID int32 +} + +// ConnectNodeDeviceEventDeregisterAnyArgs is libvirt's remote_connect_node_device_event_deregister_any_args +type ConnectNodeDeviceEventDeregisterAnyArgs struct { + CallbackID int32 +} + +// NodeDeviceEventLifecycleMsg is libvirt's remote_node_device_event_lifecycle_msg +type NodeDeviceEventLifecycleMsg struct { + CallbackID int32 + Dev NodeDevice + Event int32 + Detail int32 +} + +// NodeDeviceEventUpdateMsg is libvirt's remote_node_device_event_update_msg +type NodeDeviceEventUpdateMsg struct { + CallbackID int32 + Dev NodeDevice +} + +// DomainFsfreezeArgs is libvirt's remote_domain_fsfreeze_args +type DomainFsfreezeArgs struct { + Dom Domain + Mountpoints []string + Flags uint32 +} + +// DomainFsfreezeRet is libvirt's remote_domain_fsfreeze_ret +type DomainFsfreezeRet struct { + Filesystems int32 +} + +// DomainFsthawArgs is libvirt's remote_domain_fsthaw_args +type DomainFsthawArgs struct { + Dom Domain + Mountpoints []string + Flags uint32 +} + +// DomainFsthawRet is libvirt's remote_domain_fsthaw_ret +type DomainFsthawRet struct { + Filesystems int32 +} + +// NodeGetFreePagesArgs is libvirt's remote_node_get_free_pages_args +type NodeGetFreePagesArgs struct { + Pages []uint32 + StartCell int32 + CellCount uint32 + Flags uint32 +} + +// NodeGetFreePagesRet is libvirt's remote_node_get_free_pages_ret +type NodeGetFreePagesRet struct { + Counts []uint64 +} + +// NodeAllocPagesArgs is libvirt's remote_node_alloc_pages_args +type NodeAllocPagesArgs struct { + PageSizes []uint32 + PageCounts []uint64 + StartCell int32 + CellCount uint32 + Flags NodeAllocPagesFlags +} + +// NodeAllocPagesRet is libvirt's remote_node_alloc_pages_ret +type NodeAllocPagesRet struct { + Ret int32 +} + +// NetworkDhcpLease is libvirt's remote_network_dhcp_lease +type NetworkDhcpLease struct { + Iface string + Expirytime int64 + Type int32 + Mac OptString + Iaid OptString + Ipaddr string + Prefix uint32 + Hostname OptString + Clientid OptString +} + +// NetworkGetDhcpLeasesArgs is libvirt's remote_network_get_dhcp_leases_args +type NetworkGetDhcpLeasesArgs struct { + Net Network + Mac OptString + NeedResults int32 + Flags uint32 +} + +// NetworkGetDhcpLeasesRet is libvirt's remote_network_get_dhcp_leases_ret +type NetworkGetDhcpLeasesRet struct { + Leases []NetworkDhcpLease + Ret uint32 +} + +// DomainStatsRecord is libvirt's remote_domain_stats_record +type DomainStatsRecord struct { + Dom Domain + Params []TypedParam +} + +// ConnectGetAllDomainStatsArgs is libvirt's remote_connect_get_all_domain_stats_args +type ConnectGetAllDomainStatsArgs struct { + Doms []Domain + Stats uint32 + Flags ConnectGetAllDomainStatsFlags +} + +// DomainEventCallbackAgentLifecycleMsg is libvirt's remote_domain_event_callback_agent_lifecycle_msg +type 
DomainEventCallbackAgentLifecycleMsg struct { + CallbackID int32 + Dom Domain + State int32 + Reason int32 +} + +// ConnectGetAllDomainStatsRet is libvirt's remote_connect_get_all_domain_stats_ret +type ConnectGetAllDomainStatsRet struct { + RetStats []DomainStatsRecord +} + +// DomainFsinfo is libvirt's remote_domain_fsinfo +type DomainFsinfo struct { + Mountpoint string + Name string + Fstype string + DevAliases []string +} + +// DomainGetFsinfoArgs is libvirt's remote_domain_get_fsinfo_args +type DomainGetFsinfoArgs struct { + Dom Domain + Flags uint32 +} + +// DomainGetFsinfoRet is libvirt's remote_domain_get_fsinfo_ret +type DomainGetFsinfoRet struct { + Info []DomainFsinfo + Ret uint32 +} + +// DomainIPAddr is libvirt's remote_domain_ip_addr +type DomainIPAddr struct { + Type int32 + Addr string + Prefix uint32 +} + +// DomainInterface is libvirt's remote_domain_interface +type DomainInterface struct { + Name string + Hwaddr OptString + Addrs []DomainIPAddr +} + +// DomainInterfaceAddressesArgs is libvirt's remote_domain_interface_addresses_args +type DomainInterfaceAddressesArgs struct { + Dom Domain + Source uint32 + Flags uint32 +} + +// DomainInterfaceAddressesRet is libvirt's remote_domain_interface_addresses_ret +type DomainInterfaceAddressesRet struct { + Ifaces []DomainInterface +} + +// DomainSetUserPasswordArgs is libvirt's remote_domain_set_user_password_args +type DomainSetUserPasswordArgs struct { + Dom Domain + User OptString + Password OptString + Flags DomainSetUserPasswordFlags +} + +// DomainRenameArgs is libvirt's remote_domain_rename_args +type DomainRenameArgs struct { + Dom Domain + NewName OptString + Flags uint32 +} + +// DomainRenameRet is libvirt's remote_domain_rename_ret +type DomainRenameRet struct { + Retcode int32 +} + +// DomainEventCallbackMigrationIterationMsg is libvirt's remote_domain_event_callback_migration_iteration_msg +type DomainEventCallbackMigrationIterationMsg struct { + CallbackID int32 + Dom Domain + Iteration int32 +} + +// DomainEventCallbackJobCompletedMsg is libvirt's remote_domain_event_callback_job_completed_msg +type DomainEventCallbackJobCompletedMsg struct { + CallbackID int32 + Dom Domain + Params []TypedParam +} + +// DomainMigrateStartPostCopyArgs is libvirt's remote_domain_migrate_start_post_copy_args +type DomainMigrateStartPostCopyArgs struct { + Dom Domain + Flags uint32 +} + +// DomainEventCallbackDeviceRemovalFailedMsg is libvirt's remote_domain_event_callback_device_removal_failed_msg +type DomainEventCallbackDeviceRemovalFailedMsg struct { + CallbackID int32 + Dom Domain + DevAlias string +} + +// DomainGetGuestVcpusArgs is libvirt's remote_domain_get_guest_vcpus_args +type DomainGetGuestVcpusArgs struct { + Dom Domain + Flags uint32 +} + +// DomainGetGuestVcpusRet is libvirt's remote_domain_get_guest_vcpus_ret +type DomainGetGuestVcpusRet struct { + Params []TypedParam +} + +// DomainSetGuestVcpusArgs is libvirt's remote_domain_set_guest_vcpus_args +type DomainSetGuestVcpusArgs struct { + Dom Domain + Cpumap string + State int32 + Flags uint32 +} + +// DomainSetVcpuArgs is libvirt's remote_domain_set_vcpu_args +type DomainSetVcpuArgs struct { + Dom Domain + Cpumap string + State int32 + Flags DomainModificationImpact +} + +// DomainEventCallbackMetadataChangeMsg is libvirt's remote_domain_event_callback_metadata_change_msg +type DomainEventCallbackMetadataChangeMsg struct { + CallbackID int32 + Dom Domain + Type int32 + Nsuri OptString +} + +// ConnectSecretEventRegisterAnyArgs is libvirt's 
remote_connect_secret_event_register_any_args +type ConnectSecretEventRegisterAnyArgs struct { + EventID int32 + OptSecret OptSecret +} + +// ConnectSecretEventRegisterAnyRet is libvirt's remote_connect_secret_event_register_any_ret +type ConnectSecretEventRegisterAnyRet struct { + CallbackID int32 +} + +// ConnectSecretEventDeregisterAnyArgs is libvirt's remote_connect_secret_event_deregister_any_args +type ConnectSecretEventDeregisterAnyArgs struct { + CallbackID int32 +} + +// SecretEventLifecycleMsg is libvirt's remote_secret_event_lifecycle_msg +type SecretEventLifecycleMsg struct { + CallbackID int32 + OptSecret Secret + Event int32 + Detail int32 +} + +// SecretEventValueChangedMsg is libvirt's remote_secret_event_value_changed_msg +type SecretEventValueChangedMsg struct { + CallbackID int32 + OptSecret Secret +} + +// DomainSetBlockThresholdArgs is libvirt's remote_domain_set_block_threshold_args +type DomainSetBlockThresholdArgs struct { + Dom Domain + Dev string + Threshold uint64 + Flags uint32 +} + +// DomainSetLifecycleActionArgs is libvirt's remote_domain_set_lifecycle_action_args +type DomainSetLifecycleActionArgs struct { + Dom Domain + Type uint32 + Action uint32 + Flags DomainModificationImpact +} + + +// TypedParamValue is a discriminated union. +type TypedParamValue interface { + Get() interface{} +} + +// TypedParamValueInt is one of the possible values of the TypedParamValue union. +type TypedParamValueInt struct { + DVal uint32 + I int32 +} +// NewTypedParamValueInt creates a discriminated union value satisfying +// the TypedParamValue interface. +func NewTypedParamValueInt(v int32) *TypedParamValueInt { + return &TypedParamValueInt{DVal: 1, I: v} +} +func decodeTypedParamValueInt(dec *xdr.Decoder) (*TypedParamValueInt, error) { + var v int32 + _, err := dec.Decode(&v) + if err != nil { + return nil, err + } + return NewTypedParamValueInt(v), nil +} +// Get satisfies the TypedParamValue interface. +func (c *TypedParamValueInt) Get() interface{} { return c.I } + +// TypedParamValueUint is one of the possible values of the TypedParamValue union. +type TypedParamValueUint struct { + DVal uint32 + Ui uint32 +} +// NewTypedParamValueUint creates a discriminated union value satisfying +// the TypedParamValue interface. +func NewTypedParamValueUint(v uint32) *TypedParamValueUint { + return &TypedParamValueUint{DVal: 2, Ui: v} +} +func decodeTypedParamValueUint(dec *xdr.Decoder) (*TypedParamValueUint, error) { + var v uint32 + _, err := dec.Decode(&v) + if err != nil { + return nil, err + } + return NewTypedParamValueUint(v), nil +} +// Get satisfies the TypedParamValue interface. +func (c *TypedParamValueUint) Get() interface{} { return c.Ui } + +// TypedParamValueLlong is one of the possible values of the TypedParamValue union. +type TypedParamValueLlong struct { + DVal uint32 + L int64 +} +// NewTypedParamValueLlong creates a discriminated union value satisfying +// the TypedParamValue interface. +func NewTypedParamValueLlong(v int64) *TypedParamValueLlong { + return &TypedParamValueLlong{DVal: 3, L: v} +} +func decodeTypedParamValueLlong(dec *xdr.Decoder) (*TypedParamValueLlong, error) { + var v int64 + _, err := dec.Decode(&v) + if err != nil { + return nil, err + } + return NewTypedParamValueLlong(v), nil +} +// Get satisfies the TypedParamValue interface. +func (c *TypedParamValueLlong) Get() interface{} { return c.L } + +// TypedParamValueUllong is one of the possible values of the TypedParamValue union. 
+type TypedParamValueUllong struct { + DVal uint32 + Ul uint64 +} +// NewTypedParamValueUllong creates a discriminated union value satisfying +// the TypedParamValue interface. +func NewTypedParamValueUllong(v uint64) *TypedParamValueUllong { + return &TypedParamValueUllong{DVal: 4, Ul: v} +} +func decodeTypedParamValueUllong(dec *xdr.Decoder) (*TypedParamValueUllong, error) { + var v uint64 + _, err := dec.Decode(&v) + if err != nil { + return nil, err + } + return NewTypedParamValueUllong(v), nil +} +// Get satisfies the TypedParamValue interface. +func (c *TypedParamValueUllong) Get() interface{} { return c.Ul } + +// TypedParamValueDouble is one of the possible values of the TypedParamValue union. +type TypedParamValueDouble struct { + DVal uint32 + D float64 +} +// NewTypedParamValueDouble creates a discriminated union value satisfying +// the TypedParamValue interface. +func NewTypedParamValueDouble(v float64) *TypedParamValueDouble { + return &TypedParamValueDouble{DVal: 5, D: v} +} +func decodeTypedParamValueDouble(dec *xdr.Decoder) (*TypedParamValueDouble, error) { + var v float64 + _, err := dec.Decode(&v) + if err != nil { + return nil, err + } + return NewTypedParamValueDouble(v), nil +} +// Get satisfies the TypedParamValue interface. +func (c *TypedParamValueDouble) Get() interface{} { return c.D } + +// TypedParamValueBoolean is one of the possible values of the TypedParamValue union. +type TypedParamValueBoolean struct { + DVal uint32 + B int32 +} +// NewTypedParamValueBoolean creates a discriminated union value satisfying +// the TypedParamValue interface. +func NewTypedParamValueBoolean(v int32) *TypedParamValueBoolean { + return &TypedParamValueBoolean{DVal: 6, B: v} +} +func decodeTypedParamValueBoolean(dec *xdr.Decoder) (*TypedParamValueBoolean, error) { + var v int32 + _, err := dec.Decode(&v) + if err != nil { + return nil, err + } + return NewTypedParamValueBoolean(v), nil +} +// Get satisfies the TypedParamValue interface. +func (c *TypedParamValueBoolean) Get() interface{} { return c.B } + +// TypedParamValueString is one of the possible values of the TypedParamValue union. +type TypedParamValueString struct { + DVal uint32 + S string +} +// NewTypedParamValueString creates a discriminated union value satisfying +// the TypedParamValue interface. +func NewTypedParamValueString(v string) *TypedParamValueString { + return &TypedParamValueString{DVal: 7, S: v} +} +func decodeTypedParamValueString(dec *xdr.Decoder) (*TypedParamValueString, error) { + var v string + _, err := dec.Decode(&v) + if err != nil { + return nil, err + } + return NewTypedParamValueString(v), nil +} +// Get satisfies the TypedParamValue interface. +func (c *TypedParamValueString) Get() interface{} { return c.S } + +func decodeTypedParamValue(dec *xdr.Decoder) (TypedParamValue, error) { + discriminant, _, err := dec.DecodeInt() + if err != nil { + return nil, err + } + var caseval TypedParamValue + switch discriminant { + case 1: + caseval, err = decodeTypedParamValueInt(dec) + case 2: + caseval, err = decodeTypedParamValueUint(dec) + case 3: + caseval, err = decodeTypedParamValueLlong(dec) + case 4: + caseval, err = decodeTypedParamValueUllong(dec) + case 5: + caseval, err = decodeTypedParamValueDouble(dec) + case 6: + caseval, err = decodeTypedParamValueBoolean(dec) + case 7: + caseval, err = decodeTypedParamValueString(dec) + + default: + err = fmt.Errorf("invalid parameter type %v", discriminant) + } + + return caseval, err +} + +// TODO: Generate these. 
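TypedParamValue is an XDR discriminated union: a 4-byte discriminant followed by the value, with decodeTypedParamValue switching on that discriminant to pick the concrete type. A small standalone sketch of the same wire layout, assuming the davecgh go-xdr/xdr2 decoder used throughout this file (discriminant 4 corresponds to the Ullong case, DVal 4 above):

```go
// Standalone illustration of the TypedParamValue wire layout; not part of
// the generated bindings.
package main

import (
	"bytes"
	"fmt"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

func main() {
	// Hand-build a union value: discriminant 4 (the Ullong case)
	// followed by the 64-bit value 42.
	var buf bytes.Buffer
	if _, err := xdr.Marshal(&buf, int32(4)); err != nil {
		panic(err)
	}
	if _, err := xdr.Marshal(&buf, uint64(42)); err != nil {
		panic(err)
	}

	// Decode it the same way decodeTypedParamValue does: read the
	// discriminant, then dispatch to the matching value decoder.
	dec := xdr.NewDecoder(&buf)
	discriminant, _, err := dec.DecodeInt()
	if err != nil {
		panic(err)
	}
	switch discriminant {
	case 4:
		v, _, err := dec.DecodeUhyper()
		if err != nil {
			panic(err)
		}
		fmt.Println("ullong value:", v) // 42
	default:
		panic(fmt.Errorf("unhandled discriminant %d", discriminant))
	}
}
```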
+func decodeTypedParam(dec *xdr.Decoder) (*TypedParam, error) { + name, _, err := dec.DecodeString() + if err != nil { + return nil, err + } + val, err := decodeTypedParamValue(dec) + return &TypedParam{name, val}, nil +} + +func decodeTypedParams(dec *xdr.Decoder) ([]TypedParam, error) { + count, _, err := dec.DecodeInt() + if err != nil { + return nil, err + } + params := make([]TypedParam, count) + for ix := int32(0); ix < count; ix++ { + p, err := decodeTypedParam(dec) + if err != nil { + return nil, err + } + params[ix] = *p + } + + return params, nil +} + + +// ConnectOpen is the go wrapper for REMOTE_PROC_CONNECT_OPEN. +func (l *Libvirt) ConnectOpen(Name OptString, Flags ConnectFlags) (err error) { + var buf bytes.Buffer + + args := ConnectOpenArgs { + Name: Name, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(1, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectClose is the go wrapper for REMOTE_PROC_CONNECT_CLOSE. +func (l *Libvirt) ConnectClose() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(2, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectGetType is the go wrapper for REMOTE_PROC_CONNECT_GET_TYPE. +func (l *Libvirt) ConnectGetType() (rType string, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(3, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Type: string + _, err = dec.Decode(&rType) + if err != nil { + return + } + + return +} + +// ConnectGetVersion is the go wrapper for REMOTE_PROC_CONNECT_GET_VERSION. +func (l *Libvirt) ConnectGetVersion() (rHvVer uint64, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(4, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // HvVer: uint64 + _, err = dec.Decode(&rHvVer) + if err != nil { + return + } + + return +} + +// ConnectGetMaxVcpus is the go wrapper for REMOTE_PROC_CONNECT_GET_MAX_VCPUS. +func (l *Libvirt) ConnectGetMaxVcpus(Type OptString) (rMaxVcpus int32, err error) { + var buf bytes.Buffer + + args := ConnectGetMaxVcpusArgs { + Type: Type, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(5, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // MaxVcpus: int32 + _, err = dec.Decode(&rMaxVcpus) + if err != nil { + return + } + + return +} + +// NodeGetInfo is the go wrapper for REMOTE_PROC_NODE_GET_INFO. 
+func (l *Libvirt) NodeGetInfo() (rModel [32]int8, rMemory uint64, rCpus int32, rMhz int32, rNodes int32, rSockets int32, rCores int32, rThreads int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(6, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Model: [32]int8 + _, err = dec.Decode(&rModel) + if err != nil { + return + } + // Memory: uint64 + _, err = dec.Decode(&rMemory) + if err != nil { + return + } + // Cpus: int32 + _, err = dec.Decode(&rCpus) + if err != nil { + return + } + // Mhz: int32 + _, err = dec.Decode(&rMhz) + if err != nil { + return + } + // Nodes: int32 + _, err = dec.Decode(&rNodes) + if err != nil { + return + } + // Sockets: int32 + _, err = dec.Decode(&rSockets) + if err != nil { + return + } + // Cores: int32 + _, err = dec.Decode(&rCores) + if err != nil { + return + } + // Threads: int32 + _, err = dec.Decode(&rThreads) + if err != nil { + return + } + + return +} + +// ConnectGetCapabilities is the go wrapper for REMOTE_PROC_CONNECT_GET_CAPABILITIES. +func (l *Libvirt) ConnectGetCapabilities() (rCapabilities string, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(7, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Capabilities: string + _, err = dec.Decode(&rCapabilities) + if err != nil { + return + } + + return +} + +// DomainAttachDevice is the go wrapper for REMOTE_PROC_DOMAIN_ATTACH_DEVICE. +func (l *Libvirt) DomainAttachDevice(Dom Domain, XML string) (err error) { + var buf bytes.Buffer + + args := DomainAttachDeviceArgs { + Dom: Dom, + XML: XML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(8, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainCreate is the go wrapper for REMOTE_PROC_DOMAIN_CREATE. +func (l *Libvirt) DomainCreate(Dom Domain) (err error) { + var buf bytes.Buffer + + args := DomainCreateArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(9, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainCreateXML is the go wrapper for REMOTE_PROC_DOMAIN_CREATE_XML. +func (l *Libvirt) DomainCreateXML(XMLDesc string, Flags DomainCreateFlags) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainCreateXMLArgs { + XMLDesc: XMLDesc, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(10, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainDefineXML is the go wrapper for REMOTE_PROC_DOMAIN_DEFINE_XML. 
+func (l *Libvirt) DomainDefineXML(XML string) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainDefineXMLArgs { + XML: XML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(11, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainDestroy is the go wrapper for REMOTE_PROC_DOMAIN_DESTROY. +func (l *Libvirt) DomainDestroy(Dom Domain) (err error) { + var buf bytes.Buffer + + args := DomainDestroyArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(12, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainDetachDevice is the go wrapper for REMOTE_PROC_DOMAIN_DETACH_DEVICE. +func (l *Libvirt) DomainDetachDevice(Dom Domain, XML string) (err error) { + var buf bytes.Buffer + + args := DomainDetachDeviceArgs { + Dom: Dom, + XML: XML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(13, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetXMLDesc is the go wrapper for REMOTE_PROC_DOMAIN_GET_XML_DESC. +func (l *Libvirt) DomainGetXMLDesc(Dom Domain, Flags DomainXMLFlags) (rXML string, err error) { + var buf bytes.Buffer + + args := DomainGetXMLDescArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(14, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// DomainGetAutostart is the go wrapper for REMOTE_PROC_DOMAIN_GET_AUTOSTART. +func (l *Libvirt) DomainGetAutostart(Dom Domain) (rAutostart int32, err error) { + var buf bytes.Buffer + + args := DomainGetAutostartArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(15, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Autostart: int32 + _, err = dec.Decode(&rAutostart) + if err != nil { + return + } + + return +} + +// DomainGetInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_INFO. 
+func (l *Libvirt) DomainGetInfo(Dom Domain) (rState uint8, rMaxMem uint64, rMemory uint64, rNrVirtCPU uint16, rCPUTime uint64, err error) { + var buf bytes.Buffer + + args := DomainGetInfoArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(16, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // State: uint8 + _, err = dec.Decode(&rState) + if err != nil { + return + } + // MaxMem: uint64 + _, err = dec.Decode(&rMaxMem) + if err != nil { + return + } + // Memory: uint64 + _, err = dec.Decode(&rMemory) + if err != nil { + return + } + // NrVirtCPU: uint16 + _, err = dec.Decode(&rNrVirtCPU) + if err != nil { + return + } + // CPUTime: uint64 + _, err = dec.Decode(&rCPUTime) + if err != nil { + return + } + + return +} + +// DomainGetMaxMemory is the go wrapper for REMOTE_PROC_DOMAIN_GET_MAX_MEMORY. +func (l *Libvirt) DomainGetMaxMemory(Dom Domain) (rMemory uint64, err error) { + var buf bytes.Buffer + + args := DomainGetMaxMemoryArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(17, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Memory: uint64 + _, err = dec.Decode(&rMemory) + if err != nil { + return + } + + return +} + +// DomainGetMaxVcpus is the go wrapper for REMOTE_PROC_DOMAIN_GET_MAX_VCPUS. +func (l *Libvirt) DomainGetMaxVcpus(Dom Domain) (rNum int32, err error) { + var buf bytes.Buffer + + args := DomainGetMaxVcpusArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(18, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// DomainGetOsType is the go wrapper for REMOTE_PROC_DOMAIN_GET_OS_TYPE. +func (l *Libvirt) DomainGetOsType(Dom Domain) (rType string, err error) { + var buf bytes.Buffer + + args := DomainGetOsTypeArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(19, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Type: string + _, err = dec.Decode(&rType) + if err != nil { + return + } + + return +} + +// DomainGetVcpus is the go wrapper for REMOTE_PROC_DOMAIN_GET_VCPUS. 
+func (l *Libvirt) DomainGetVcpus(Dom Domain, Maxinfo int32, Maplen int32) (rInfo []VcpuInfo, rCpumaps []byte, err error) { + var buf bytes.Buffer + + args := DomainGetVcpusArgs { + Dom: Dom, + Maxinfo: Maxinfo, + Maplen: Maplen, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(20, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Info: []VcpuInfo + _, err = dec.Decode(&rInfo) + if err != nil { + return + } + // Cpumaps: []byte + _, err = dec.Decode(&rCpumaps) + if err != nil { + return + } + + return +} + +// ConnectListDefinedDomains is the go wrapper for REMOTE_PROC_CONNECT_LIST_DEFINED_DOMAINS. +func (l *Libvirt) ConnectListDefinedDomains(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListDefinedDomainsArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(21, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// DomainLookupByID is the go wrapper for REMOTE_PROC_DOMAIN_LOOKUP_BY_ID. +func (l *Libvirt) DomainLookupByID(ID int32) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainLookupByIDArgs { + ID: ID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(22, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainLookupByName is the go wrapper for REMOTE_PROC_DOMAIN_LOOKUP_BY_NAME. +func (l *Libvirt) DomainLookupByName(Name string) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainLookupByNameArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(23, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainLookupByUUID is the go wrapper for REMOTE_PROC_DOMAIN_LOOKUP_BY_UUID. 
+func (l *Libvirt) DomainLookupByUUID(UUID UUID) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainLookupByUUIDArgs { + UUID: UUID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(24, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// ConnectNumOfDefinedDomains is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_DEFINED_DOMAINS. +func (l *Libvirt) ConnectNumOfDefinedDomains() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(25, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// DomainPinVcpu is the go wrapper for REMOTE_PROC_DOMAIN_PIN_VCPU. +func (l *Libvirt) DomainPinVcpu(Dom Domain, Vcpu uint32, Cpumap []byte) (err error) { + var buf bytes.Buffer + + args := DomainPinVcpuArgs { + Dom: Dom, + Vcpu: Vcpu, + Cpumap: Cpumap, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(26, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainReboot is the go wrapper for REMOTE_PROC_DOMAIN_REBOOT. +func (l *Libvirt) DomainReboot(Dom Domain, Flags DomainRebootFlagValues) (err error) { + var buf bytes.Buffer + + args := DomainRebootArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(27, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainResume is the go wrapper for REMOTE_PROC_DOMAIN_RESUME. +func (l *Libvirt) DomainResume(Dom Domain) (err error) { + var buf bytes.Buffer + + args := DomainResumeArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(28, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetAutostart is the go wrapper for REMOTE_PROC_DOMAIN_SET_AUTOSTART. +func (l *Libvirt) DomainSetAutostart(Dom Domain, Autostart int32) (err error) { + var buf bytes.Buffer + + args := DomainSetAutostartArgs { + Dom: Dom, + Autostart: Autostart, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(29, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetMaxMemory is the go wrapper for REMOTE_PROC_DOMAIN_SET_MAX_MEMORY. 
+func (l *Libvirt) DomainSetMaxMemory(Dom Domain, Memory uint64) (err error) { + var buf bytes.Buffer + + args := DomainSetMaxMemoryArgs { + Dom: Dom, + Memory: Memory, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(30, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetMemory is the go wrapper for REMOTE_PROC_DOMAIN_SET_MEMORY. +func (l *Libvirt) DomainSetMemory(Dom Domain, Memory uint64) (err error) { + var buf bytes.Buffer + + args := DomainSetMemoryArgs { + Dom: Dom, + Memory: Memory, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(31, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetVcpus is the go wrapper for REMOTE_PROC_DOMAIN_SET_VCPUS. +func (l *Libvirt) DomainSetVcpus(Dom Domain, Nvcpus uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetVcpusArgs { + Dom: Dom, + Nvcpus: Nvcpus, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(32, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainShutdown is the go wrapper for REMOTE_PROC_DOMAIN_SHUTDOWN. +func (l *Libvirt) DomainShutdown(Dom Domain) (err error) { + var buf bytes.Buffer + + args := DomainShutdownArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(33, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSuspend is the go wrapper for REMOTE_PROC_DOMAIN_SUSPEND. +func (l *Libvirt) DomainSuspend(Dom Domain) (err error) { + var buf bytes.Buffer + + args := DomainSuspendArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(34, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainUndefine is the go wrapper for REMOTE_PROC_DOMAIN_UNDEFINE. +func (l *Libvirt) DomainUndefine(Dom Domain) (err error) { + var buf bytes.Buffer + + args := DomainUndefineArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(35, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectListDefinedNetworks is the go wrapper for REMOTE_PROC_CONNECT_LIST_DEFINED_NETWORKS. 
+func (l *Libvirt) ConnectListDefinedNetworks(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListDefinedNetworksArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(36, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// ConnectListDomains is the go wrapper for REMOTE_PROC_CONNECT_LIST_DOMAINS. +func (l *Libvirt) ConnectListDomains(Maxids int32) (rIds []int32, err error) { + var buf bytes.Buffer + + args := ConnectListDomainsArgs { + Maxids: Maxids, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(37, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Ids: []int32 + _, err = dec.Decode(&rIds) + if err != nil { + return + } + + return +} + +// ConnectListNetworks is the go wrapper for REMOTE_PROC_CONNECT_LIST_NETWORKS. +func (l *Libvirt) ConnectListNetworks(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListNetworksArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(38, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// NetworkCreate is the go wrapper for REMOTE_PROC_NETWORK_CREATE. +func (l *Libvirt) NetworkCreate(Net Network) (err error) { + var buf bytes.Buffer + + args := NetworkCreateArgs { + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(39, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NetworkCreateXML is the go wrapper for REMOTE_PROC_NETWORK_CREATE_XML. +func (l *Libvirt) NetworkCreateXML(XML string) (rNet Network, err error) { + var buf bytes.Buffer + + args := NetworkCreateXMLArgs { + XML: XML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(40, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Net: Network + _, err = dec.Decode(&rNet) + if err != nil { + return + } + + return +} + +// NetworkDefineXML is the go wrapper for REMOTE_PROC_NETWORK_DEFINE_XML. 
+func (l *Libvirt) NetworkDefineXML(XML string) (rNet Network, err error) { + var buf bytes.Buffer + + args := NetworkDefineXMLArgs { + XML: XML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(41, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Net: Network + _, err = dec.Decode(&rNet) + if err != nil { + return + } + + return +} + +// NetworkDestroy is the go wrapper for REMOTE_PROC_NETWORK_DESTROY. +func (l *Libvirt) NetworkDestroy(Net Network) (err error) { + var buf bytes.Buffer + + args := NetworkDestroyArgs { + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(42, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NetworkGetXMLDesc is the go wrapper for REMOTE_PROC_NETWORK_GET_XML_DESC. +func (l *Libvirt) NetworkGetXMLDesc(Net Network, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := NetworkGetXMLDescArgs { + Net: Net, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(43, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// NetworkGetAutostart is the go wrapper for REMOTE_PROC_NETWORK_GET_AUTOSTART. +func (l *Libvirt) NetworkGetAutostart(Net Network) (rAutostart int32, err error) { + var buf bytes.Buffer + + args := NetworkGetAutostartArgs { + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(44, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Autostart: int32 + _, err = dec.Decode(&rAutostart) + if err != nil { + return + } + + return +} + +// NetworkGetBridgeName is the go wrapper for REMOTE_PROC_NETWORK_GET_BRIDGE_NAME. +func (l *Libvirt) NetworkGetBridgeName(Net Network) (rName string, err error) { + var buf bytes.Buffer + + args := NetworkGetBridgeNameArgs { + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(45, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Name: string + _, err = dec.Decode(&rName) + if err != nil { + return + } + + return +} + +// NetworkLookupByName is the go wrapper for REMOTE_PROC_NETWORK_LOOKUP_BY_NAME. 
+func (l *Libvirt) NetworkLookupByName(Name string) (rNet Network, err error) { + var buf bytes.Buffer + + args := NetworkLookupByNameArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(46, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Net: Network + _, err = dec.Decode(&rNet) + if err != nil { + return + } + + return +} + +// NetworkLookupByUUID is the go wrapper for REMOTE_PROC_NETWORK_LOOKUP_BY_UUID. +func (l *Libvirt) NetworkLookupByUUID(UUID UUID) (rNet Network, err error) { + var buf bytes.Buffer + + args := NetworkLookupByUUIDArgs { + UUID: UUID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(47, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Net: Network + _, err = dec.Decode(&rNet) + if err != nil { + return + } + + return +} + +// NetworkSetAutostart is the go wrapper for REMOTE_PROC_NETWORK_SET_AUTOSTART. +func (l *Libvirt) NetworkSetAutostart(Net Network, Autostart int32) (err error) { + var buf bytes.Buffer + + args := NetworkSetAutostartArgs { + Net: Net, + Autostart: Autostart, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(48, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NetworkUndefine is the go wrapper for REMOTE_PROC_NETWORK_UNDEFINE. +func (l *Libvirt) NetworkUndefine(Net Network) (err error) { + var buf bytes.Buffer + + args := NetworkUndefineArgs { + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(49, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectNumOfDefinedNetworks is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_DEFINED_NETWORKS. +func (l *Libvirt) ConnectNumOfDefinedNetworks() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(50, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectNumOfDomains is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_DOMAINS. 
+func (l *Libvirt) ConnectNumOfDomains() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(51, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectNumOfNetworks is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_NETWORKS. +func (l *Libvirt) ConnectNumOfNetworks() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(52, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// DomainCoreDump is the go wrapper for REMOTE_PROC_DOMAIN_CORE_DUMP. +func (l *Libvirt) DomainCoreDump(Dom Domain, To string, Flags DomainCoreDumpFlags) (err error) { + var buf bytes.Buffer + + args := DomainCoreDumpArgs { + Dom: Dom, + To: To, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(53, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainRestore is the go wrapper for REMOTE_PROC_DOMAIN_RESTORE. +func (l *Libvirt) DomainRestore(From string) (err error) { + var buf bytes.Buffer + + args := DomainRestoreArgs { + From: From, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(54, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSave is the go wrapper for REMOTE_PROC_DOMAIN_SAVE. +func (l *Libvirt) DomainSave(Dom Domain, To string) (err error) { + var buf bytes.Buffer + + args := DomainSaveArgs { + Dom: Dom, + To: To, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(55, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetSchedulerType is the go wrapper for REMOTE_PROC_DOMAIN_GET_SCHEDULER_TYPE. +func (l *Libvirt) DomainGetSchedulerType(Dom Domain) (rType string, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainGetSchedulerTypeArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(56, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Type: string + _, err = dec.Decode(&rType) + if err != nil { + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainGetSchedulerParameters is the go wrapper for REMOTE_PROC_DOMAIN_GET_SCHEDULER_PARAMETERS. 
+func (l *Libvirt) DomainGetSchedulerParameters(Dom Domain, Nparams int32) (rParams []TypedParam, err error) { + var buf bytes.Buffer + + args := DomainGetSchedulerParametersArgs { + Dom: Dom, + Nparams: Nparams, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(57, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + + return +} + +// DomainSetSchedulerParameters is the go wrapper for REMOTE_PROC_DOMAIN_SET_SCHEDULER_PARAMETERS. +func (l *Libvirt) DomainSetSchedulerParameters(Dom Domain, Params []TypedParam) (err error) { + var buf bytes.Buffer + + args := DomainSetSchedulerParametersArgs { + Dom: Dom, + Params: Params, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(58, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectGetHostname is the go wrapper for REMOTE_PROC_CONNECT_GET_HOSTNAME. +func (l *Libvirt) ConnectGetHostname() (rHostname string, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(59, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Hostname: string + _, err = dec.Decode(&rHostname) + if err != nil { + return + } + + return +} + +// ConnectSupportsFeature is the go wrapper for REMOTE_PROC_CONNECT_SUPPORTS_FEATURE. +func (l *Libvirt) ConnectSupportsFeature(Feature int32) (rSupported int32, err error) { + var buf bytes.Buffer + + args := ConnectSupportsFeatureArgs { + Feature: Feature, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(60, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Supported: int32 + _, err = dec.Decode(&rSupported) + if err != nil { + return + } + + return +} + +// DomainMigratePrepare is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE. 
+func (l *Libvirt) DomainMigratePrepare(UriIn OptString, Flags uint64, Dname OptString, Resource uint64) (rCookie []byte, rUriOut OptString, err error) { + var buf bytes.Buffer + + args := DomainMigratePrepareArgs { + UriIn: UriIn, + Flags: Flags, + Dname: Dname, + Resource: Resource, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(61, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Cookie: []byte + _, err = dec.Decode(&rCookie) + if err != nil { + return + } + // UriOut: OptString + _, err = dec.Decode(&rUriOut) + if err != nil { + return + } + + return +} + +// DomainMigratePerform is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PERFORM. +func (l *Libvirt) DomainMigratePerform(Dom Domain, Cookie []byte, Uri string, Flags uint64, Dname OptString, Resource uint64) (err error) { + var buf bytes.Buffer + + args := DomainMigratePerformArgs { + Dom: Dom, + Cookie: Cookie, + Uri: Uri, + Flags: Flags, + Dname: Dname, + Resource: Resource, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(62, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainMigrateFinish is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_FINISH. +func (l *Libvirt) DomainMigrateFinish(Dname string, Cookie []byte, Uri string, Flags uint64) (rDdom Domain, err error) { + var buf bytes.Buffer + + args := DomainMigrateFinishArgs { + Dname: Dname, + Cookie: Cookie, + Uri: Uri, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(63, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Ddom: Domain + _, err = dec.Decode(&rDdom) + if err != nil { + return + } + + return +} + +// DomainBlockStats is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_STATS. +func (l *Libvirt) DomainBlockStats(Dom Domain, Path string) (rRdReq int64, rRdBytes int64, rWrReq int64, rWrBytes int64, rErrs int64, err error) { + var buf bytes.Buffer + + args := DomainBlockStatsArgs { + Dom: Dom, + Path: Path, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(64, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // RdReq: int64 + _, err = dec.Decode(&rRdReq) + if err != nil { + return + } + // RdBytes: int64 + _, err = dec.Decode(&rRdBytes) + if err != nil { + return + } + // WrReq: int64 + _, err = dec.Decode(&rWrReq) + if err != nil { + return + } + // WrBytes: int64 + _, err = dec.Decode(&rWrBytes) + if err != nil { + return + } + // Errs: int64 + _, err = dec.Decode(&rErrs) + if err != nil { + return + } + + return +} + +// DomainInterfaceStats is the go wrapper for REMOTE_PROC_DOMAIN_INTERFACE_STATS. 
+func (l *Libvirt) DomainInterfaceStats(Dom Domain, Device string) (rRxBytes int64, rRxPackets int64, rRxErrs int64, rRxDrop int64, rTxBytes int64, rTxPackets int64, rTxErrs int64, rTxDrop int64, err error) { + var buf bytes.Buffer + + args := DomainInterfaceStatsArgs { + Dom: Dom, + Device: Device, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(65, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // RxBytes: int64 + _, err = dec.Decode(&rRxBytes) + if err != nil { + return + } + // RxPackets: int64 + _, err = dec.Decode(&rRxPackets) + if err != nil { + return + } + // RxErrs: int64 + _, err = dec.Decode(&rRxErrs) + if err != nil { + return + } + // RxDrop: int64 + _, err = dec.Decode(&rRxDrop) + if err != nil { + return + } + // TxBytes: int64 + _, err = dec.Decode(&rTxBytes) + if err != nil { + return + } + // TxPackets: int64 + _, err = dec.Decode(&rTxPackets) + if err != nil { + return + } + // TxErrs: int64 + _, err = dec.Decode(&rTxErrs) + if err != nil { + return + } + // TxDrop: int64 + _, err = dec.Decode(&rTxDrop) + if err != nil { + return + } + + return +} + +// AuthList is the go wrapper for REMOTE_PROC_AUTH_LIST. +func (l *Libvirt) AuthList() (rTypes []AuthType, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(66, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Types: []AuthType + _, err = dec.Decode(&rTypes) + if err != nil { + return + } + + return +} + +// AuthSaslInit is the go wrapper for REMOTE_PROC_AUTH_SASL_INIT. +func (l *Libvirt) AuthSaslInit() (rMechlist string, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(67, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Mechlist: string + _, err = dec.Decode(&rMechlist) + if err != nil { + return + } + + return +} + +// AuthSaslStart is the go wrapper for REMOTE_PROC_AUTH_SASL_START. +func (l *Libvirt) AuthSaslStart(Mech string, Nil int32, Data []int8) (rComplete int32, rNil int32, rData []int8, err error) { + var buf bytes.Buffer + + args := AuthSaslStartArgs { + Mech: Mech, + Nil: Nil, + Data: Data, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(68, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Complete: int32 + _, err = dec.Decode(&rComplete) + if err != nil { + return + } + // Nil: int32 + _, err = dec.Decode(&rNil) + if err != nil { + return + } + // Data: []int8 + _, err = dec.Decode(&rData) + if err != nil { + return + } + + return +} + +// AuthSaslStep is the go wrapper for REMOTE_PROC_AUTH_SASL_STEP. 
+func (l *Libvirt) AuthSaslStep(Nil int32, Data []int8) (rComplete int32, rNil int32, rData []int8, err error) { + var buf bytes.Buffer + + args := AuthSaslStepArgs { + Nil: Nil, + Data: Data, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(69, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Complete: int32 + _, err = dec.Decode(&rComplete) + if err != nil { + return + } + // Nil: int32 + _, err = dec.Decode(&rNil) + if err != nil { + return + } + // Data: []int8 + _, err = dec.Decode(&rData) + if err != nil { + return + } + + return +} + +// AuthPolkit is the go wrapper for REMOTE_PROC_AUTH_POLKIT. +func (l *Libvirt) AuthPolkit() (rComplete int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(70, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Complete: int32 + _, err = dec.Decode(&rComplete) + if err != nil { + return + } + + return +} + +// ConnectNumOfStoragePools is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_STORAGE_POOLS. +func (l *Libvirt) ConnectNumOfStoragePools() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(71, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectListStoragePools is the go wrapper for REMOTE_PROC_CONNECT_LIST_STORAGE_POOLS. +func (l *Libvirt) ConnectListStoragePools(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListStoragePoolsArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(72, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// ConnectNumOfDefinedStoragePools is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_DEFINED_STORAGE_POOLS. +func (l *Libvirt) ConnectNumOfDefinedStoragePools() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(73, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectListDefinedStoragePools is the go wrapper for REMOTE_PROC_CONNECT_LIST_DEFINED_STORAGE_POOLS. 
+func (l *Libvirt) ConnectListDefinedStoragePools(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListDefinedStoragePoolsArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(74, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// ConnectFindStoragePoolSources is the go wrapper for REMOTE_PROC_CONNECT_FIND_STORAGE_POOL_SOURCES. +func (l *Libvirt) ConnectFindStoragePoolSources(Type string, SrcSpec OptString, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := ConnectFindStoragePoolSourcesArgs { + Type: Type, + SrcSpec: SrcSpec, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(75, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// StoragePoolCreateXML is the go wrapper for REMOTE_PROC_STORAGE_POOL_CREATE_XML. +func (l *Libvirt) StoragePoolCreateXML(XML string, Flags StoragePoolCreateFlags) (rPool StoragePool, err error) { + var buf bytes.Buffer + + args := StoragePoolCreateXMLArgs { + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(76, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Pool: StoragePool + _, err = dec.Decode(&rPool) + if err != nil { + return + } + + return +} + +// StoragePoolDefineXML is the go wrapper for REMOTE_PROC_STORAGE_POOL_DEFINE_XML. +func (l *Libvirt) StoragePoolDefineXML(XML string, Flags uint32) (rPool StoragePool, err error) { + var buf bytes.Buffer + + args := StoragePoolDefineXMLArgs { + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(77, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Pool: StoragePool + _, err = dec.Decode(&rPool) + if err != nil { + return + } + + return +} + +// StoragePoolCreate is the go wrapper for REMOTE_PROC_STORAGE_POOL_CREATE. 
+func (l *Libvirt) StoragePoolCreate(Pool StoragePool, Flags StoragePoolCreateFlags) (err error) { + var buf bytes.Buffer + + args := StoragePoolCreateArgs { + Pool: Pool, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(78, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolBuild is the go wrapper for REMOTE_PROC_STORAGE_POOL_BUILD. +func (l *Libvirt) StoragePoolBuild(Pool StoragePool, Flags StoragePoolBuildFlags) (err error) { + var buf bytes.Buffer + + args := StoragePoolBuildArgs { + Pool: Pool, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(79, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolDestroy is the go wrapper for REMOTE_PROC_STORAGE_POOL_DESTROY. +func (l *Libvirt) StoragePoolDestroy(Pool StoragePool) (err error) { + var buf bytes.Buffer + + args := StoragePoolDestroyArgs { + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(80, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolDelete is the go wrapper for REMOTE_PROC_STORAGE_POOL_DELETE. +func (l *Libvirt) StoragePoolDelete(Pool StoragePool, Flags StoragePoolDeleteFlags) (err error) { + var buf bytes.Buffer + + args := StoragePoolDeleteArgs { + Pool: Pool, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(81, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolUndefine is the go wrapper for REMOTE_PROC_STORAGE_POOL_UNDEFINE. +func (l *Libvirt) StoragePoolUndefine(Pool StoragePool) (err error) { + var buf bytes.Buffer + + args := StoragePoolUndefineArgs { + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(82, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolRefresh is the go wrapper for REMOTE_PROC_STORAGE_POOL_REFRESH. +func (l *Libvirt) StoragePoolRefresh(Pool StoragePool, Flags uint32) (err error) { + var buf bytes.Buffer + + args := StoragePoolRefreshArgs { + Pool: Pool, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(83, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolLookupByName is the go wrapper for REMOTE_PROC_STORAGE_POOL_LOOKUP_BY_NAME. 
+func (l *Libvirt) StoragePoolLookupByName(Name string) (rPool StoragePool, err error) { + var buf bytes.Buffer + + args := StoragePoolLookupByNameArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(84, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Pool: StoragePool + _, err = dec.Decode(&rPool) + if err != nil { + return + } + + return +} + +// StoragePoolLookupByUUID is the go wrapper for REMOTE_PROC_STORAGE_POOL_LOOKUP_BY_UUID. +func (l *Libvirt) StoragePoolLookupByUUID(UUID UUID) (rPool StoragePool, err error) { + var buf bytes.Buffer + + args := StoragePoolLookupByUUIDArgs { + UUID: UUID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(85, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Pool: StoragePool + _, err = dec.Decode(&rPool) + if err != nil { + return + } + + return +} + +// StoragePoolLookupByVolume is the go wrapper for REMOTE_PROC_STORAGE_POOL_LOOKUP_BY_VOLUME. +func (l *Libvirt) StoragePoolLookupByVolume(Vol StorageVol) (rPool StoragePool, err error) { + var buf bytes.Buffer + + args := StoragePoolLookupByVolumeArgs { + Vol: Vol, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(86, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Pool: StoragePool + _, err = dec.Decode(&rPool) + if err != nil { + return + } + + return +} + +// StoragePoolGetInfo is the go wrapper for REMOTE_PROC_STORAGE_POOL_GET_INFO. +func (l *Libvirt) StoragePoolGetInfo(Pool StoragePool) (rState uint8, rCapacity uint64, rAllocation uint64, rAvailable uint64, err error) { + var buf bytes.Buffer + + args := StoragePoolGetInfoArgs { + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(87, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // State: uint8 + _, err = dec.Decode(&rState) + if err != nil { + return + } + // Capacity: uint64 + _, err = dec.Decode(&rCapacity) + if err != nil { + return + } + // Allocation: uint64 + _, err = dec.Decode(&rAllocation) + if err != nil { + return + } + // Available: uint64 + _, err = dec.Decode(&rAvailable) + if err != nil { + return + } + + return +} + +// StoragePoolGetXMLDesc is the go wrapper for REMOTE_PROC_STORAGE_POOL_GET_XML_DESC. 
+func (l *Libvirt) StoragePoolGetXMLDesc(Pool StoragePool, Flags StorageXMLFlags) (rXML string, err error) { + var buf bytes.Buffer + + args := StoragePoolGetXMLDescArgs { + Pool: Pool, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(88, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// StoragePoolGetAutostart is the go wrapper for REMOTE_PROC_STORAGE_POOL_GET_AUTOSTART. +func (l *Libvirt) StoragePoolGetAutostart(Pool StoragePool) (rAutostart int32, err error) { + var buf bytes.Buffer + + args := StoragePoolGetAutostartArgs { + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(89, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Autostart: int32 + _, err = dec.Decode(&rAutostart) + if err != nil { + return + } + + return +} + +// StoragePoolSetAutostart is the go wrapper for REMOTE_PROC_STORAGE_POOL_SET_AUTOSTART. +func (l *Libvirt) StoragePoolSetAutostart(Pool StoragePool, Autostart int32) (err error) { + var buf bytes.Buffer + + args := StoragePoolSetAutostartArgs { + Pool: Pool, + Autostart: Autostart, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(90, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolNumOfVolumes is the go wrapper for REMOTE_PROC_STORAGE_POOL_NUM_OF_VOLUMES. +func (l *Libvirt) StoragePoolNumOfVolumes(Pool StoragePool) (rNum int32, err error) { + var buf bytes.Buffer + + args := StoragePoolNumOfVolumesArgs { + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(91, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// StoragePoolListVolumes is the go wrapper for REMOTE_PROC_STORAGE_POOL_LIST_VOLUMES. +func (l *Libvirt) StoragePoolListVolumes(Pool StoragePool, Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := StoragePoolListVolumesArgs { + Pool: Pool, + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(92, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// StorageVolCreateXML is the go wrapper for REMOTE_PROC_STORAGE_VOL_CREATE_XML. 
+func (l *Libvirt) StorageVolCreateXML(Pool StoragePool, XML string, Flags StorageVolCreateFlags) (rVol StorageVol, err error) { + var buf bytes.Buffer + + args := StorageVolCreateXMLArgs { + Pool: Pool, + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(93, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Vol: StorageVol + _, err = dec.Decode(&rVol) + if err != nil { + return + } + + return +} + +// StorageVolDelete is the go wrapper for REMOTE_PROC_STORAGE_VOL_DELETE. +func (l *Libvirt) StorageVolDelete(Vol StorageVol, Flags StorageVolDeleteFlags) (err error) { + var buf bytes.Buffer + + args := StorageVolDeleteArgs { + Vol: Vol, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(94, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolLookupByName is the go wrapper for REMOTE_PROC_STORAGE_VOL_LOOKUP_BY_NAME. +func (l *Libvirt) StorageVolLookupByName(Pool StoragePool, Name string) (rVol StorageVol, err error) { + var buf bytes.Buffer + + args := StorageVolLookupByNameArgs { + Pool: Pool, + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(95, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Vol: StorageVol + _, err = dec.Decode(&rVol) + if err != nil { + return + } + + return +} + +// StorageVolLookupByKey is the go wrapper for REMOTE_PROC_STORAGE_VOL_LOOKUP_BY_KEY. +func (l *Libvirt) StorageVolLookupByKey(Key string) (rVol StorageVol, err error) { + var buf bytes.Buffer + + args := StorageVolLookupByKeyArgs { + Key: Key, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(96, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Vol: StorageVol + _, err = dec.Decode(&rVol) + if err != nil { + return + } + + return +} + +// StorageVolLookupByPath is the go wrapper for REMOTE_PROC_STORAGE_VOL_LOOKUP_BY_PATH. +func (l *Libvirt) StorageVolLookupByPath(Path string) (rVol StorageVol, err error) { + var buf bytes.Buffer + + args := StorageVolLookupByPathArgs { + Path: Path, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(97, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Vol: StorageVol + _, err = dec.Decode(&rVol) + if err != nil { + return + } + + return +} + +// StorageVolGetInfo is the go wrapper for REMOTE_PROC_STORAGE_VOL_GET_INFO. 
+func (l *Libvirt) StorageVolGetInfo(Vol StorageVol) (rType int8, rCapacity uint64, rAllocation uint64, err error) { + var buf bytes.Buffer + + args := StorageVolGetInfoArgs { + Vol: Vol, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(98, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Type: int8 + _, err = dec.Decode(&rType) + if err != nil { + return + } + // Capacity: uint64 + _, err = dec.Decode(&rCapacity) + if err != nil { + return + } + // Allocation: uint64 + _, err = dec.Decode(&rAllocation) + if err != nil { + return + } + + return +} + +// StorageVolGetXMLDesc is the go wrapper for REMOTE_PROC_STORAGE_VOL_GET_XML_DESC. +func (l *Libvirt) StorageVolGetXMLDesc(Vol StorageVol, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := StorageVolGetXMLDescArgs { + Vol: Vol, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(99, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// StorageVolGetPath is the go wrapper for REMOTE_PROC_STORAGE_VOL_GET_PATH. +func (l *Libvirt) StorageVolGetPath(Vol StorageVol) (rName string, err error) { + var buf bytes.Buffer + + args := StorageVolGetPathArgs { + Vol: Vol, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(100, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Name: string + _, err = dec.Decode(&rName) + if err != nil { + return + } + + return +} + +// NodeGetCellsFreeMemory is the go wrapper for REMOTE_PROC_NODE_GET_CELLS_FREE_MEMORY. +func (l *Libvirt) NodeGetCellsFreeMemory(StartCell int32, Maxcells int32) (rCells []uint64, err error) { + var buf bytes.Buffer + + args := NodeGetCellsFreeMemoryArgs { + StartCell: StartCell, + Maxcells: Maxcells, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(101, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Cells: []uint64 + _, err = dec.Decode(&rCells) + if err != nil { + return + } + + return +} + +// NodeGetFreeMemory is the go wrapper for REMOTE_PROC_NODE_GET_FREE_MEMORY. 
+func (l *Libvirt) NodeGetFreeMemory() (rFreeMem uint64, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(102, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // FreeMem: uint64 + _, err = dec.Decode(&rFreeMem) + if err != nil { + return + } + + return +} + +// DomainBlockPeek is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_PEEK. +func (l *Libvirt) DomainBlockPeek(Dom Domain, Path string, Offset uint64, Size uint32, Flags uint32) (rBuffer []byte, err error) { + var buf bytes.Buffer + + args := DomainBlockPeekArgs { + Dom: Dom, + Path: Path, + Offset: Offset, + Size: Size, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(103, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Buffer: []byte + _, err = dec.Decode(&rBuffer) + if err != nil { + return + } + + return +} + +// DomainMemoryPeek is the go wrapper for REMOTE_PROC_DOMAIN_MEMORY_PEEK. +func (l *Libvirt) DomainMemoryPeek(Dom Domain, Offset uint64, Size uint32, Flags DomainMemoryFlags) (rBuffer []byte, err error) { + var buf bytes.Buffer + + args := DomainMemoryPeekArgs { + Dom: Dom, + Offset: Offset, + Size: Size, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(104, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Buffer: []byte + _, err = dec.Decode(&rBuffer) + if err != nil { + return + } + + return +} + +// ConnectDomainEventRegister is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_EVENT_REGISTER. +func (l *Libvirt) ConnectDomainEventRegister() (rCbRegistered int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(105, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CbRegistered: int32 + _, err = dec.Decode(&rCbRegistered) + if err != nil { + return + } + + return +} + +// ConnectDomainEventDeregister is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_EVENT_DEREGISTER. +func (l *Libvirt) ConnectDomainEventDeregister() (rCbRegistered int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(106, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CbRegistered: int32 + _, err = dec.Decode(&rCbRegistered) + if err != nil { + return + } + + return +} + +// DomainEventLifecycle is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_LIFECYCLE. 
+func (l *Libvirt) DomainEventLifecycle() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(107, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainMigratePrepare2 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE2. +func (l *Libvirt) DomainMigratePrepare2(UriIn OptString, Flags uint64, Dname OptString, Resource uint64, DomXML string) (rCookie []byte, rUriOut OptString, err error) { + var buf bytes.Buffer + + args := DomainMigratePrepare2Args { + UriIn: UriIn, + Flags: Flags, + Dname: Dname, + Resource: Resource, + DomXML: DomXML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(108, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Cookie: []byte + _, err = dec.Decode(&rCookie) + if err != nil { + return + } + // UriOut: OptString + _, err = dec.Decode(&rUriOut) + if err != nil { + return + } + + return +} + +// DomainMigrateFinish2 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_FINISH2. +func (l *Libvirt) DomainMigrateFinish2(Dname string, Cookie []byte, Uri string, Flags uint64, Retcode int32) (rDdom Domain, err error) { + var buf bytes.Buffer + + args := DomainMigrateFinish2Args { + Dname: Dname, + Cookie: Cookie, + Uri: Uri, + Flags: Flags, + Retcode: Retcode, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(109, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Ddom: Domain + _, err = dec.Decode(&rDdom) + if err != nil { + return + } + + return +} + +// ConnectGetUri is the go wrapper for REMOTE_PROC_CONNECT_GET_URI. +func (l *Libvirt) ConnectGetUri() (rUri string, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(110, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Uri: string + _, err = dec.Decode(&rUri) + if err != nil { + return + } + + return +} + +// NodeNumOfDevices is the go wrapper for REMOTE_PROC_NODE_NUM_OF_DEVICES. +func (l *Libvirt) NodeNumOfDevices(Cap OptString, Flags uint32) (rNum int32, err error) { + var buf bytes.Buffer + + args := NodeNumOfDevicesArgs { + Cap: Cap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(111, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// NodeListDevices is the go wrapper for REMOTE_PROC_NODE_LIST_DEVICES. 
+func (l *Libvirt) NodeListDevices(Cap OptString, Maxnames int32, Flags uint32) (rNames []string, err error) { + var buf bytes.Buffer + + args := NodeListDevicesArgs { + Cap: Cap, + Maxnames: Maxnames, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(112, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// NodeDeviceLookupByName is the go wrapper for REMOTE_PROC_NODE_DEVICE_LOOKUP_BY_NAME. +func (l *Libvirt) NodeDeviceLookupByName(Name string) (rDev NodeDevice, err error) { + var buf bytes.Buffer + + args := NodeDeviceLookupByNameArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(113, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dev: NodeDevice + _, err = dec.Decode(&rDev) + if err != nil { + return + } + + return +} + +// NodeDeviceGetXMLDesc is the go wrapper for REMOTE_PROC_NODE_DEVICE_GET_XML_DESC. +func (l *Libvirt) NodeDeviceGetXMLDesc(Name string, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := NodeDeviceGetXMLDescArgs { + Name: Name, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(114, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// NodeDeviceGetParent is the go wrapper for REMOTE_PROC_NODE_DEVICE_GET_PARENT. +func (l *Libvirt) NodeDeviceGetParent(Name string) (rParent OptString, err error) { + var buf bytes.Buffer + + args := NodeDeviceGetParentArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(115, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Parent: OptString + _, err = dec.Decode(&rParent) + if err != nil { + return + } + + return +} + +// NodeDeviceNumOfCaps is the go wrapper for REMOTE_PROC_NODE_DEVICE_NUM_OF_CAPS. 
+func (l *Libvirt) NodeDeviceNumOfCaps(Name string) (rNum int32, err error) { + var buf bytes.Buffer + + args := NodeDeviceNumOfCapsArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(116, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// NodeDeviceListCaps is the go wrapper for REMOTE_PROC_NODE_DEVICE_LIST_CAPS. +func (l *Libvirt) NodeDeviceListCaps(Name string, Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := NodeDeviceListCapsArgs { + Name: Name, + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(117, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// NodeDeviceDettach is the go wrapper for REMOTE_PROC_NODE_DEVICE_DETTACH. +func (l *Libvirt) NodeDeviceDettach(Name string) (err error) { + var buf bytes.Buffer + + args := NodeDeviceDettachArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(118, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeDeviceReAttach is the go wrapper for REMOTE_PROC_NODE_DEVICE_RE_ATTACH. +func (l *Libvirt) NodeDeviceReAttach(Name string) (err error) { + var buf bytes.Buffer + + args := NodeDeviceReAttachArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(119, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeDeviceReset is the go wrapper for REMOTE_PROC_NODE_DEVICE_RESET. +func (l *Libvirt) NodeDeviceReset(Name string) (err error) { + var buf bytes.Buffer + + args := NodeDeviceResetArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(120, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetSecurityLabel is the go wrapper for REMOTE_PROC_DOMAIN_GET_SECURITY_LABEL. 
+func (l *Libvirt) DomainGetSecurityLabel(Dom Domain) (rLabel []int8, rEnforcing int32, err error) { + var buf bytes.Buffer + + args := DomainGetSecurityLabelArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(121, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Label: []int8 + _, err = dec.Decode(&rLabel) + if err != nil { + return + } + // Enforcing: int32 + _, err = dec.Decode(&rEnforcing) + if err != nil { + return + } + + return +} + +// NodeGetSecurityModel is the go wrapper for REMOTE_PROC_NODE_GET_SECURITY_MODEL. +func (l *Libvirt) NodeGetSecurityModel() (rModel []int8, rDoi []int8, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(122, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Model: []int8 + _, err = dec.Decode(&rModel) + if err != nil { + return + } + // Doi: []int8 + _, err = dec.Decode(&rDoi) + if err != nil { + return + } + + return +} + +// NodeDeviceCreateXML is the go wrapper for REMOTE_PROC_NODE_DEVICE_CREATE_XML. +func (l *Libvirt) NodeDeviceCreateXML(XMLDesc string, Flags uint32) (rDev NodeDevice, err error) { + var buf bytes.Buffer + + args := NodeDeviceCreateXMLArgs { + XMLDesc: XMLDesc, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(123, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dev: NodeDevice + _, err = dec.Decode(&rDev) + if err != nil { + return + } + + return +} + +// NodeDeviceDestroy is the go wrapper for REMOTE_PROC_NODE_DEVICE_DESTROY. +func (l *Libvirt) NodeDeviceDestroy(Name string) (err error) { + var buf bytes.Buffer + + args := NodeDeviceDestroyArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(124, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolCreateXMLFrom is the go wrapper for REMOTE_PROC_STORAGE_VOL_CREATE_XML_FROM. +func (l *Libvirt) StorageVolCreateXMLFrom(Pool StoragePool, XML string, Clonevol StorageVol, Flags StorageVolCreateFlags) (rVol StorageVol, err error) { + var buf bytes.Buffer + + args := StorageVolCreateXMLFromArgs { + Pool: Pool, + XML: XML, + Clonevol: Clonevol, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(125, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Vol: StorageVol + _, err = dec.Decode(&rVol) + if err != nil { + return + } + + return +} + +// ConnectNumOfInterfaces is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_INTERFACES. 
+func (l *Libvirt) ConnectNumOfInterfaces() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(126, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectListInterfaces is the go wrapper for REMOTE_PROC_CONNECT_LIST_INTERFACES. +func (l *Libvirt) ConnectListInterfaces(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListInterfacesArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(127, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// InterfaceLookupByName is the go wrapper for REMOTE_PROC_INTERFACE_LOOKUP_BY_NAME. +func (l *Libvirt) InterfaceLookupByName(Name string) (rIface Interface, err error) { + var buf bytes.Buffer + + args := InterfaceLookupByNameArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(128, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Iface: Interface + _, err = dec.Decode(&rIface) + if err != nil { + return + } + + return +} + +// InterfaceLookupByMacString is the go wrapper for REMOTE_PROC_INTERFACE_LOOKUP_BY_MAC_STRING. +func (l *Libvirt) InterfaceLookupByMacString(Mac string) (rIface Interface, err error) { + var buf bytes.Buffer + + args := InterfaceLookupByMacStringArgs { + Mac: Mac, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(129, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Iface: Interface + _, err = dec.Decode(&rIface) + if err != nil { + return + } + + return +} + +// InterfaceGetXMLDesc is the go wrapper for REMOTE_PROC_INTERFACE_GET_XML_DESC. +func (l *Libvirt) InterfaceGetXMLDesc(Iface Interface, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := InterfaceGetXMLDescArgs { + Iface: Iface, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(130, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// InterfaceDefineXML is the go wrapper for REMOTE_PROC_INTERFACE_DEFINE_XML. 
+func (l *Libvirt) InterfaceDefineXML(XML string, Flags uint32) (rIface Interface, err error) { + var buf bytes.Buffer + + args := InterfaceDefineXMLArgs { + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(131, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Iface: Interface + _, err = dec.Decode(&rIface) + if err != nil { + return + } + + return +} + +// InterfaceUndefine is the go wrapper for REMOTE_PROC_INTERFACE_UNDEFINE. +func (l *Libvirt) InterfaceUndefine(Iface Interface) (err error) { + var buf bytes.Buffer + + args := InterfaceUndefineArgs { + Iface: Iface, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(132, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// InterfaceCreate is the go wrapper for REMOTE_PROC_INTERFACE_CREATE. +func (l *Libvirt) InterfaceCreate(Iface Interface, Flags uint32) (err error) { + var buf bytes.Buffer + + args := InterfaceCreateArgs { + Iface: Iface, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(133, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// InterfaceDestroy is the go wrapper for REMOTE_PROC_INTERFACE_DESTROY. +func (l *Libvirt) InterfaceDestroy(Iface Interface, Flags uint32) (err error) { + var buf bytes.Buffer + + args := InterfaceDestroyArgs { + Iface: Iface, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(134, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectDomainXMLFromNative is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_XML_FROM_NATIVE. +func (l *Libvirt) ConnectDomainXMLFromNative(NativeFormat string, NativeConfig string, Flags uint32) (rDomainXML string, err error) { + var buf bytes.Buffer + + args := ConnectDomainXMLFromNativeArgs { + NativeFormat: NativeFormat, + NativeConfig: NativeConfig, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(135, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // DomainXML: string + _, err = dec.Decode(&rDomainXML) + if err != nil { + return + } + + return +} + +// ConnectDomainXMLToNative is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_XML_TO_NATIVE. 
+func (l *Libvirt) ConnectDomainXMLToNative(NativeFormat string, DomainXML string, Flags uint32) (rNativeConfig string, err error) { + var buf bytes.Buffer + + args := ConnectDomainXMLToNativeArgs { + NativeFormat: NativeFormat, + DomainXML: DomainXML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(136, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // NativeConfig: string + _, err = dec.Decode(&rNativeConfig) + if err != nil { + return + } + + return +} + +// ConnectNumOfDefinedInterfaces is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_DEFINED_INTERFACES. +func (l *Libvirt) ConnectNumOfDefinedInterfaces() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(137, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectListDefinedInterfaces is the go wrapper for REMOTE_PROC_CONNECT_LIST_DEFINED_INTERFACES. +func (l *Libvirt) ConnectListDefinedInterfaces(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListDefinedInterfacesArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(138, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// ConnectNumOfSecrets is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_SECRETS. +func (l *Libvirt) ConnectNumOfSecrets() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(139, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectListSecrets is the go wrapper for REMOTE_PROC_CONNECT_LIST_SECRETS. +func (l *Libvirt) ConnectListSecrets(Maxuuids int32) (rUuids []string, err error) { + var buf bytes.Buffer + + args := ConnectListSecretsArgs { + Maxuuids: Maxuuids, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(140, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Uuids: []string + _, err = dec.Decode(&rUuids) + if err != nil { + return + } + + return +} + +// SecretLookupByUUID is the go wrapper for REMOTE_PROC_SECRET_LOOKUP_BY_UUID. 
+func (l *Libvirt) SecretLookupByUUID(UUID UUID) (rOptSecret Secret, err error) { + var buf bytes.Buffer + + args := SecretLookupByUUIDArgs { + UUID: UUID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(141, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // OptSecret: Secret + _, err = dec.Decode(&rOptSecret) + if err != nil { + return + } + + return +} + +// SecretDefineXML is the go wrapper for REMOTE_PROC_SECRET_DEFINE_XML. +func (l *Libvirt) SecretDefineXML(XML string, Flags uint32) (rOptSecret Secret, err error) { + var buf bytes.Buffer + + args := SecretDefineXMLArgs { + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(142, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // OptSecret: Secret + _, err = dec.Decode(&rOptSecret) + if err != nil { + return + } + + return +} + +// SecretGetXMLDesc is the go wrapper for REMOTE_PROC_SECRET_GET_XML_DESC. +func (l *Libvirt) SecretGetXMLDesc(OptSecret Secret, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := SecretGetXMLDescArgs { + OptSecret: OptSecret, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(143, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// SecretSetValue is the go wrapper for REMOTE_PROC_SECRET_SET_VALUE. +func (l *Libvirt) SecretSetValue(OptSecret Secret, Value []byte, Flags uint32) (err error) { + var buf bytes.Buffer + + args := SecretSetValueArgs { + OptSecret: OptSecret, + Value: Value, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(144, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// SecretGetValue is the go wrapper for REMOTE_PROC_SECRET_GET_VALUE. +func (l *Libvirt) SecretGetValue(OptSecret Secret, Flags uint32) (rValue []byte, err error) { + var buf bytes.Buffer + + args := SecretGetValueArgs { + OptSecret: OptSecret, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(145, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Value: []byte + _, err = dec.Decode(&rValue) + if err != nil { + return + } + + return +} + +// SecretUndefine is the go wrapper for REMOTE_PROC_SECRET_UNDEFINE. 
+func (l *Libvirt) SecretUndefine(OptSecret Secret) (err error) { + var buf bytes.Buffer + + args := SecretUndefineArgs { + OptSecret: OptSecret, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(146, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// SecretLookupByUsage is the go wrapper for REMOTE_PROC_SECRET_LOOKUP_BY_USAGE. +func (l *Libvirt) SecretLookupByUsage(UsageType int32, UsageID string) (rOptSecret Secret, err error) { + var buf bytes.Buffer + + args := SecretLookupByUsageArgs { + UsageType: UsageType, + UsageID: UsageID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(147, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // OptSecret: Secret + _, err = dec.Decode(&rOptSecret) + if err != nil { + return + } + + return +} + +// DomainMigratePrepareTunnel is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL. +func (l *Libvirt) DomainMigratePrepareTunnel(Flags uint64, Dname OptString, Resource uint64, DomXML string) (err error) { + var buf bytes.Buffer + + args := DomainMigratePrepareTunnelArgs { + Flags: Flags, + Dname: Dname, + Resource: Resource, + DomXML: DomXML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(148, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectIsSecure is the go wrapper for REMOTE_PROC_CONNECT_IS_SECURE. +func (l *Libvirt) ConnectIsSecure() (rSecure int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(149, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Secure: int32 + _, err = dec.Decode(&rSecure) + if err != nil { + return + } + + return +} + +// DomainIsActive is the go wrapper for REMOTE_PROC_DOMAIN_IS_ACTIVE. +func (l *Libvirt) DomainIsActive(Dom Domain) (rActive int32, err error) { + var buf bytes.Buffer + + args := DomainIsActiveArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(150, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Active: int32 + _, err = dec.Decode(&rActive) + if err != nil { + return + } + + return +} + +// DomainIsPersistent is the go wrapper for REMOTE_PROC_DOMAIN_IS_PERSISTENT. 
+func (l *Libvirt) DomainIsPersistent(Dom Domain) (rPersistent int32, err error) { + var buf bytes.Buffer + + args := DomainIsPersistentArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(151, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Persistent: int32 + _, err = dec.Decode(&rPersistent) + if err != nil { + return + } + + return +} + +// NetworkIsActive is the go wrapper for REMOTE_PROC_NETWORK_IS_ACTIVE. +func (l *Libvirt) NetworkIsActive(Net Network) (rActive int32, err error) { + var buf bytes.Buffer + + args := NetworkIsActiveArgs { + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(152, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Active: int32 + _, err = dec.Decode(&rActive) + if err != nil { + return + } + + return +} + +// NetworkIsPersistent is the go wrapper for REMOTE_PROC_NETWORK_IS_PERSISTENT. +func (l *Libvirt) NetworkIsPersistent(Net Network) (rPersistent int32, err error) { + var buf bytes.Buffer + + args := NetworkIsPersistentArgs { + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(153, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Persistent: int32 + _, err = dec.Decode(&rPersistent) + if err != nil { + return + } + + return +} + +// StoragePoolIsActive is the go wrapper for REMOTE_PROC_STORAGE_POOL_IS_ACTIVE. +func (l *Libvirt) StoragePoolIsActive(Pool StoragePool) (rActive int32, err error) { + var buf bytes.Buffer + + args := StoragePoolIsActiveArgs { + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(154, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Active: int32 + _, err = dec.Decode(&rActive) + if err != nil { + return + } + + return +} + +// StoragePoolIsPersistent is the go wrapper for REMOTE_PROC_STORAGE_POOL_IS_PERSISTENT. +func (l *Libvirt) StoragePoolIsPersistent(Pool StoragePool) (rPersistent int32, err error) { + var buf bytes.Buffer + + args := StoragePoolIsPersistentArgs { + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(155, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Persistent: int32 + _, err = dec.Decode(&rPersistent) + if err != nil { + return + } + + return +} + +// InterfaceIsActive is the go wrapper for REMOTE_PROC_INTERFACE_IS_ACTIVE. 
+func (l *Libvirt) InterfaceIsActive(Iface Interface) (rActive int32, err error) { + var buf bytes.Buffer + + args := InterfaceIsActiveArgs { + Iface: Iface, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(156, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Active: int32 + _, err = dec.Decode(&rActive) + if err != nil { + return + } + + return +} + +// ConnectGetLibVersion is the go wrapper for REMOTE_PROC_CONNECT_GET_LIB_VERSION. +func (l *Libvirt) ConnectGetLibVersion() (rLibVer uint64, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(157, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // LibVer: uint64 + _, err = dec.Decode(&rLibVer) + if err != nil { + return + } + + return +} + +// ConnectCompareCPU is the go wrapper for REMOTE_PROC_CONNECT_COMPARE_CPU. +func (l *Libvirt) ConnectCompareCPU(XML string, Flags ConnectCompareCPUFlags) (rResult int32, err error) { + var buf bytes.Buffer + + args := ConnectCompareCPUArgs { + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(158, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Result: int32 + _, err = dec.Decode(&rResult) + if err != nil { + return + } + + return +} + +// DomainMemoryStats is the go wrapper for REMOTE_PROC_DOMAIN_MEMORY_STATS. +func (l *Libvirt) DomainMemoryStats(Dom Domain, MaxStats uint32, Flags uint32) (rStats []DomainMemoryStat, err error) { + var buf bytes.Buffer + + args := DomainMemoryStatsArgs { + Dom: Dom, + MaxStats: MaxStats, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(159, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Stats: []DomainMemoryStat + _, err = dec.Decode(&rStats) + if err != nil { + return + } + + return +} + +// DomainAttachDeviceFlags is the go wrapper for REMOTE_PROC_DOMAIN_ATTACH_DEVICE_FLAGS. +func (l *Libvirt) DomainAttachDeviceFlags(Dom Domain, XML string, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainAttachDeviceFlagsArgs { + Dom: Dom, + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(160, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainDetachDeviceFlags is the go wrapper for REMOTE_PROC_DOMAIN_DETACH_DEVICE_FLAGS. 
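A small sketch of reading memory statistics with the DomainMemoryStats wrapper above (editorial; `dom` is a Domain looked up elsewhere, the MaxStats value of 16 is an arbitrary upper bound, and the fmt call relies on the import this generated file already uses):

func printMemoryStats(l *Libvirt, dom Domain) error {
	// Ask for up to 16 statistics; the daemon returns only what it tracks.
	stats, err := l.DomainMemoryStats(dom, 16, 0)
	if err != nil {
		return err
	}
	for _, s := range stats {
		fmt.Printf("%+v\n", s)
	}
	return nil
}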
+func (l *Libvirt) DomainDetachDeviceFlags(Dom Domain, XML string, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainDetachDeviceFlagsArgs { + Dom: Dom, + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(161, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectBaselineCPU is the go wrapper for REMOTE_PROC_CONNECT_BASELINE_CPU. +func (l *Libvirt) ConnectBaselineCPU(XMLCPUs []string, Flags ConnectBaselineCPUFlags) (rCPU string, err error) { + var buf bytes.Buffer + + args := ConnectBaselineCPUArgs { + XMLCPUs: XMLCPUs, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(162, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CPU: string + _, err = dec.Decode(&rCPU) + if err != nil { + return + } + + return +} + +// DomainGetJobInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_JOB_INFO. +func (l *Libvirt) DomainGetJobInfo(Dom Domain) (rType int32, rTimeElapsed uint64, rTimeRemaining uint64, rDataTotal uint64, rDataProcessed uint64, rDataRemaining uint64, rMemTotal uint64, rMemProcessed uint64, rMemRemaining uint64, rFileTotal uint64, rFileProcessed uint64, rFileRemaining uint64, err error) { + var buf bytes.Buffer + + args := DomainGetJobInfoArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(163, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Type: int32 + _, err = dec.Decode(&rType) + if err != nil { + return + } + // TimeElapsed: uint64 + _, err = dec.Decode(&rTimeElapsed) + if err != nil { + return + } + // TimeRemaining: uint64 + _, err = dec.Decode(&rTimeRemaining) + if err != nil { + return + } + // DataTotal: uint64 + _, err = dec.Decode(&rDataTotal) + if err != nil { + return + } + // DataProcessed: uint64 + _, err = dec.Decode(&rDataProcessed) + if err != nil { + return + } + // DataRemaining: uint64 + _, err = dec.Decode(&rDataRemaining) + if err != nil { + return + } + // MemTotal: uint64 + _, err = dec.Decode(&rMemTotal) + if err != nil { + return + } + // MemProcessed: uint64 + _, err = dec.Decode(&rMemProcessed) + if err != nil { + return + } + // MemRemaining: uint64 + _, err = dec.Decode(&rMemRemaining) + if err != nil { + return + } + // FileTotal: uint64 + _, err = dec.Decode(&rFileTotal) + if err != nil { + return + } + // FileProcessed: uint64 + _, err = dec.Decode(&rFileProcessed) + if err != nil { + return + } + // FileRemaining: uint64 + _, err = dec.Decode(&rFileRemaining) + if err != nil { + return + } + + return +} + +// DomainAbortJob is the go wrapper for REMOTE_PROC_DOMAIN_ABORT_JOB. 
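DomainGetJobInfo above returns a long positional list of counters; a sketch of picking out a few of them while discarding the memory- and file-progress fields (editorial, assuming a connected client and a previously looked-up Domain):

func jobProgress(l *Libvirt, dom Domain) error {
	jobType, elapsed, _, dataTotal, dataProcessed, _, _, _, _, _, _, _, err := l.DomainGetJobInfo(dom)
	if err != nil {
		return err
	}
	// TimeElapsed is reported by libvirt in milliseconds.
	fmt.Printf("job type %d: %d/%d bytes after %d ms\n", jobType, dataProcessed, dataTotal, elapsed)
	return nil
}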
+func (l *Libvirt) DomainAbortJob(Dom Domain) (err error) { + var buf bytes.Buffer + + args := DomainAbortJobArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(164, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolWipe is the go wrapper for REMOTE_PROC_STORAGE_VOL_WIPE. +func (l *Libvirt) StorageVolWipe(Vol StorageVol, Flags uint32) (err error) { + var buf bytes.Buffer + + args := StorageVolWipeArgs { + Vol: Vol, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(165, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainMigrateSetMaxDowntime is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_DOWNTIME. +func (l *Libvirt) DomainMigrateSetMaxDowntime(Dom Domain, Downtime uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainMigrateSetMaxDowntimeArgs { + Dom: Dom, + Downtime: Downtime, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(166, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectDomainEventRegisterAny is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_EVENT_REGISTER_ANY. +func (l *Libvirt) ConnectDomainEventRegisterAny(EventID int32) (err error) { + var buf bytes.Buffer + + args := ConnectDomainEventRegisterAnyArgs { + EventID: EventID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(167, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectDomainEventDeregisterAny is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_EVENT_DEREGISTER_ANY. +func (l *Libvirt) ConnectDomainEventDeregisterAny(EventID int32) (err error) { + var buf bytes.Buffer + + args := ConnectDomainEventDeregisterAnyArgs { + EventID: EventID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(168, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventReboot is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_REBOOT. +func (l *Libvirt) DomainEventReboot() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(169, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventRtcChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_RTC_CHANGE. +func (l *Libvirt) DomainEventRtcChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(170, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventWatchdog is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_WATCHDOG. 
+func (l *Libvirt) DomainEventWatchdog() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(171, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventIOError is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_IO_ERROR. +func (l *Libvirt) DomainEventIOError() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(172, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventGraphics is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_GRAPHICS. +func (l *Libvirt) DomainEventGraphics() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(173, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainUpdateDeviceFlags is the go wrapper for REMOTE_PROC_DOMAIN_UPDATE_DEVICE_FLAGS. +func (l *Libvirt) DomainUpdateDeviceFlags(Dom Domain, XML string, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainUpdateDeviceFlagsArgs { + Dom: Dom, + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(174, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NwfilterLookupByName is the go wrapper for REMOTE_PROC_NWFILTER_LOOKUP_BY_NAME. +func (l *Libvirt) NwfilterLookupByName(Name string) (rOptNwfilter Nwfilter, err error) { + var buf bytes.Buffer + + args := NwfilterLookupByNameArgs { + Name: Name, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(175, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // OptNwfilter: Nwfilter + _, err = dec.Decode(&rOptNwfilter) + if err != nil { + return + } + + return +} + +// NwfilterLookupByUUID is the go wrapper for REMOTE_PROC_NWFILTER_LOOKUP_BY_UUID. +func (l *Libvirt) NwfilterLookupByUUID(UUID UUID) (rOptNwfilter Nwfilter, err error) { + var buf bytes.Buffer + + args := NwfilterLookupByUUIDArgs { + UUID: UUID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(176, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // OptNwfilter: Nwfilter + _, err = dec.Decode(&rOptNwfilter) + if err != nil { + return + } + + return +} + +// NwfilterGetXMLDesc is the go wrapper for REMOTE_PROC_NWFILTER_GET_XML_DESC. 
+func (l *Libvirt) NwfilterGetXMLDesc(OptNwfilter Nwfilter, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := NwfilterGetXMLDescArgs { + OptNwfilter: OptNwfilter, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(177, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// ConnectNumOfNwfilters is the go wrapper for REMOTE_PROC_CONNECT_NUM_OF_NWFILTERS. +func (l *Libvirt) ConnectNumOfNwfilters() (rNum int32, err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(178, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// ConnectListNwfilters is the go wrapper for REMOTE_PROC_CONNECT_LIST_NWFILTERS. +func (l *Libvirt) ConnectListNwfilters(Maxnames int32) (rNames []string, err error) { + var buf bytes.Buffer + + args := ConnectListNwfiltersArgs { + Maxnames: Maxnames, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(179, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// NwfilterDefineXML is the go wrapper for REMOTE_PROC_NWFILTER_DEFINE_XML. +func (l *Libvirt) NwfilterDefineXML(XML string) (rOptNwfilter Nwfilter, err error) { + var buf bytes.Buffer + + args := NwfilterDefineXMLArgs { + XML: XML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(180, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // OptNwfilter: Nwfilter + _, err = dec.Decode(&rOptNwfilter) + if err != nil { + return + } + + return +} + +// NwfilterUndefine is the go wrapper for REMOTE_PROC_NWFILTER_UNDEFINE. +func (l *Libvirt) NwfilterUndefine(OptNwfilter Nwfilter) (err error) { + var buf bytes.Buffer + + args := NwfilterUndefineArgs { + OptNwfilter: OptNwfilter, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(181, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainManagedSave is the go wrapper for REMOTE_PROC_DOMAIN_MANAGED_SAVE. 
+func (l *Libvirt) DomainManagedSave(Dom Domain, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainManagedSaveArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(182, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainHasManagedSaveImage is the go wrapper for REMOTE_PROC_DOMAIN_HAS_MANAGED_SAVE_IMAGE. +func (l *Libvirt) DomainHasManagedSaveImage(Dom Domain, Flags uint32) (rResult int32, err error) { + var buf bytes.Buffer + + args := DomainHasManagedSaveImageArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(183, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Result: int32 + _, err = dec.Decode(&rResult) + if err != nil { + return + } + + return +} + +// DomainManagedSaveRemove is the go wrapper for REMOTE_PROC_DOMAIN_MANAGED_SAVE_REMOVE. +func (l *Libvirt) DomainManagedSaveRemove(Dom Domain, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainManagedSaveRemoveArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(184, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSnapshotCreateXML is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_CREATE_XML. +func (l *Libvirt) DomainSnapshotCreateXML(Dom Domain, XMLDesc string, Flags uint32) (rSnap DomainSnapshot, err error) { + var buf bytes.Buffer + + args := DomainSnapshotCreateXMLArgs { + Dom: Dom, + XMLDesc: XMLDesc, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(185, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Snap: DomainSnapshot + _, err = dec.Decode(&rSnap) + if err != nil { + return + } + + return +} + +// DomainSnapshotGetXMLDesc is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_GET_XML_DESC. +func (l *Libvirt) DomainSnapshotGetXMLDesc(Snap DomainSnapshot, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := DomainSnapshotGetXMLDescArgs { + Snap: Snap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(186, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// DomainSnapshotNum is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_NUM. 
+func (l *Libvirt) DomainSnapshotNum(Dom Domain, Flags uint32) (rNum int32, err error) { + var buf bytes.Buffer + + args := DomainSnapshotNumArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(187, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// DomainSnapshotListNames is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_LIST_NAMES. +func (l *Libvirt) DomainSnapshotListNames(Dom Domain, Maxnames int32, Flags uint32) (rNames []string, err error) { + var buf bytes.Buffer + + args := DomainSnapshotListNamesArgs { + Dom: Dom, + Maxnames: Maxnames, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(188, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// DomainSnapshotLookupByName is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_LOOKUP_BY_NAME. +func (l *Libvirt) DomainSnapshotLookupByName(Dom Domain, Name string, Flags uint32) (rSnap DomainSnapshot, err error) { + var buf bytes.Buffer + + args := DomainSnapshotLookupByNameArgs { + Dom: Dom, + Name: Name, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(189, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Snap: DomainSnapshot + _, err = dec.Decode(&rSnap) + if err != nil { + return + } + + return +} + +// DomainHasCurrentSnapshot is the go wrapper for REMOTE_PROC_DOMAIN_HAS_CURRENT_SNAPSHOT. +func (l *Libvirt) DomainHasCurrentSnapshot(Dom Domain, Flags uint32) (rResult int32, err error) { + var buf bytes.Buffer + + args := DomainHasCurrentSnapshotArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(190, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Result: int32 + _, err = dec.Decode(&rResult) + if err != nil { + return + } + + return +} + +// DomainSnapshotCurrent is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_CURRENT. 
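A sketch tying together the snapshot wrappers above: create a snapshot, then enumerate what exists via the num/list pair (editorial; the snapshot XML description and zero flags are placeholders):

func snapshotAndList(l *Libvirt, dom Domain, snapXML string) ([]string, error) {
	// Create the snapshot described by snapXML.
	if _, err := l.DomainSnapshotCreateXML(dom, snapXML, 0); err != nil {
		return nil, err
	}
	// Count snapshots, then fetch exactly that many names.
	n, err := l.DomainSnapshotNum(dom, 0)
	if err != nil {
		return nil, err
	}
	return l.DomainSnapshotListNames(dom, n, 0)
}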
+func (l *Libvirt) DomainSnapshotCurrent(Dom Domain, Flags uint32) (rSnap DomainSnapshot, err error) { + var buf bytes.Buffer + + args := DomainSnapshotCurrentArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(191, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Snap: DomainSnapshot + _, err = dec.Decode(&rSnap) + if err != nil { + return + } + + return +} + +// DomainRevertToSnapshot is the go wrapper for REMOTE_PROC_DOMAIN_REVERT_TO_SNAPSHOT. +func (l *Libvirt) DomainRevertToSnapshot(Snap DomainSnapshot, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainRevertToSnapshotArgs { + Snap: Snap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(192, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSnapshotDelete is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_DELETE. +func (l *Libvirt) DomainSnapshotDelete(Snap DomainSnapshot, Flags DomainSnapshotDeleteFlags) (err error) { + var buf bytes.Buffer + + args := DomainSnapshotDeleteArgs { + Snap: Snap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(193, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetBlockInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_BLOCK_INFO. +func (l *Libvirt) DomainGetBlockInfo(Dom Domain, Path string, Flags uint32) (rAllocation uint64, rCapacity uint64, rPhysical uint64, err error) { + var buf bytes.Buffer + + args := DomainGetBlockInfoArgs { + Dom: Dom, + Path: Path, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(194, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Allocation: uint64 + _, err = dec.Decode(&rAllocation) + if err != nil { + return + } + // Capacity: uint64 + _, err = dec.Decode(&rCapacity) + if err != nil { + return + } + // Physical: uint64 + _, err = dec.Decode(&rPhysical) + if err != nil { + return + } + + return +} + +// DomainEventIOErrorReason is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_IO_ERROR_REASON. +func (l *Libvirt) DomainEventIOErrorReason() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(195, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainCreateWithFlags is the go wrapper for REMOTE_PROC_DOMAIN_CREATE_WITH_FLAGS. 
+func (l *Libvirt) DomainCreateWithFlags(Dom Domain, Flags uint32) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainCreateWithFlagsArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(196, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainSetMemoryParameters is the go wrapper for REMOTE_PROC_DOMAIN_SET_MEMORY_PARAMETERS. +func (l *Libvirt) DomainSetMemoryParameters(Dom Domain, Params []TypedParam, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetMemoryParametersArgs { + Dom: Dom, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(197, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetMemoryParameters is the go wrapper for REMOTE_PROC_DOMAIN_GET_MEMORY_PARAMETERS. +func (l *Libvirt) DomainGetMemoryParameters(Dom Domain, Nparams int32, Flags uint32) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainGetMemoryParametersArgs { + Dom: Dom, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(198, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainSetVcpusFlags is the go wrapper for REMOTE_PROC_DOMAIN_SET_VCPUS_FLAGS. +func (l *Libvirt) DomainSetVcpusFlags(Dom Domain, Nvcpus uint32, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetVcpusFlagsArgs { + Dom: Dom, + Nvcpus: Nvcpus, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(199, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetVcpusFlags is the go wrapper for REMOTE_PROC_DOMAIN_GET_VCPUS_FLAGS. +func (l *Libvirt) DomainGetVcpusFlags(Dom Domain, Flags uint32) (rNum int32, err error) { + var buf bytes.Buffer + + args := DomainGetVcpusFlagsArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(200, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// DomainOpenConsole is the go wrapper for REMOTE_PROC_DOMAIN_OPEN_CONSOLE. 
+func (l *Libvirt) DomainOpenConsole(Dom Domain, DevName OptString, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainOpenConsoleArgs { + Dom: Dom, + DevName: DevName, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(201, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainIsUpdated is the go wrapper for REMOTE_PROC_DOMAIN_IS_UPDATED. +func (l *Libvirt) DomainIsUpdated(Dom Domain) (rUpdated int32, err error) { + var buf bytes.Buffer + + args := DomainIsUpdatedArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(202, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Updated: int32 + _, err = dec.Decode(&rUpdated) + if err != nil { + return + } + + return +} + +// ConnectGetSysinfo is the go wrapper for REMOTE_PROC_CONNECT_GET_SYSINFO. +func (l *Libvirt) ConnectGetSysinfo(Flags uint32) (rSysinfo string, err error) { + var buf bytes.Buffer + + args := ConnectGetSysinfoArgs { + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(203, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Sysinfo: string + _, err = dec.Decode(&rSysinfo) + if err != nil { + return + } + + return +} + +// DomainSetMemoryFlags is the go wrapper for REMOTE_PROC_DOMAIN_SET_MEMORY_FLAGS. +func (l *Libvirt) DomainSetMemoryFlags(Dom Domain, Memory uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetMemoryFlagsArgs { + Dom: Dom, + Memory: Memory, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(204, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetBlkioParameters is the go wrapper for REMOTE_PROC_DOMAIN_SET_BLKIO_PARAMETERS. +func (l *Libvirt) DomainSetBlkioParameters(Dom Domain, Params []TypedParam, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetBlkioParametersArgs { + Dom: Dom, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(205, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetBlkioParameters is the go wrapper for REMOTE_PROC_DOMAIN_GET_BLKIO_PARAMETERS. 
+func (l *Libvirt) DomainGetBlkioParameters(Dom Domain, Nparams int32, Flags uint32) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainGetBlkioParametersArgs { + Dom: Dom, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(206, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainMigrateSetMaxSpeed is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_SPEED. +func (l *Libvirt) DomainMigrateSetMaxSpeed(Dom Domain, Bandwidth uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainMigrateSetMaxSpeedArgs { + Dom: Dom, + Bandwidth: Bandwidth, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(207, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolUpload is the go wrapper for REMOTE_PROC_STORAGE_VOL_UPLOAD. +func (l *Libvirt) StorageVolUpload(Vol StorageVol, Offset uint64, Length uint64, Flags StorageVolUploadFlags) (err error) { + var buf bytes.Buffer + + args := StorageVolUploadArgs { + Vol: Vol, + Offset: Offset, + Length: Length, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(208, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolDownload is the go wrapper for REMOTE_PROC_STORAGE_VOL_DOWNLOAD. +func (l *Libvirt) StorageVolDownload(Vol StorageVol, Offset uint64, Length uint64, Flags StorageVolDownloadFlags) (err error) { + var buf bytes.Buffer + + args := StorageVolDownloadArgs { + Vol: Vol, + Offset: Offset, + Length: Length, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(209, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainInjectNmi is the go wrapper for REMOTE_PROC_DOMAIN_INJECT_NMI. +func (l *Libvirt) DomainInjectNmi(Dom Domain, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainInjectNmiArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(210, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainScreenshot is the go wrapper for REMOTE_PROC_DOMAIN_SCREENSHOT. 
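DomainGetMemoryParameters and DomainGetBlkioParameters appear to mirror libvirt's usual convention of calling once with Nparams set to 0 to learn the count and a second time to fetch the values; a sketch under that assumption (editorial, not part of the patch):

func blkioParams(l *Libvirt, dom Domain) ([]TypedParam, error) {
	// First call: Nparams=0 only asks how many parameters are available.
	_, n, err := l.DomainGetBlkioParameters(dom, 0, 0)
	if err != nil {
		return nil, err
	}
	// Second call: request exactly that many typed parameters.
	params, _, err := l.DomainGetBlkioParameters(dom, n, 0)
	return params, err
}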
+func (l *Libvirt) DomainScreenshot(Dom Domain, Screen uint32, Flags uint32) (rMime OptString, err error) { + var buf bytes.Buffer + + args := DomainScreenshotArgs { + Dom: Dom, + Screen: Screen, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(211, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Mime: OptString + _, err = dec.Decode(&rMime) + if err != nil { + return + } + + return +} + +// DomainGetState is the go wrapper for REMOTE_PROC_DOMAIN_GET_STATE. +func (l *Libvirt) DomainGetState(Dom Domain, Flags uint32) (rState int32, rReason int32, err error) { + var buf bytes.Buffer + + args := DomainGetStateArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(212, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // State: int32 + _, err = dec.Decode(&rState) + if err != nil { + return + } + // Reason: int32 + _, err = dec.Decode(&rReason) + if err != nil { + return + } + + return +} + +// DomainMigrateBegin3 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_BEGIN3. +func (l *Libvirt) DomainMigrateBegin3(Dom Domain, Xmlin OptString, Flags uint64, Dname OptString, Resource uint64) (rCookieOut []byte, rXML string, err error) { + var buf bytes.Buffer + + args := DomainMigrateBegin3Args { + Dom: Dom, + Xmlin: Xmlin, + Flags: Flags, + Dname: Dname, + Resource: Resource, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(213, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// DomainMigratePrepare3 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE3. +func (l *Libvirt) DomainMigratePrepare3(CookieIn []byte, UriIn OptString, Flags uint64, Dname OptString, Resource uint64, DomXML string) (rCookieOut []byte, rUriOut OptString, err error) { + var buf bytes.Buffer + + args := DomainMigratePrepare3Args { + CookieIn: CookieIn, + UriIn: UriIn, + Flags: Flags, + Dname: Dname, + Resource: Resource, + DomXML: DomXML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(214, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + // UriOut: OptString + _, err = dec.Decode(&rUriOut) + if err != nil { + return + } + + return +} + +// DomainMigratePrepareTunnel3 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL3. 
+func (l *Libvirt) DomainMigratePrepareTunnel3(CookieIn []byte, Flags uint64, Dname OptString, Resource uint64, DomXML string) (rCookieOut []byte, err error) { + var buf bytes.Buffer + + args := DomainMigratePrepareTunnel3Args { + CookieIn: CookieIn, + Flags: Flags, + Dname: Dname, + Resource: Resource, + DomXML: DomXML, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(215, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + + return +} + +// DomainMigratePerform3 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PERFORM3. +func (l *Libvirt) DomainMigratePerform3(Dom Domain, Xmlin OptString, CookieIn []byte, Dconnuri OptString, Uri OptString, Flags uint64, Dname OptString, Resource uint64) (rCookieOut []byte, err error) { + var buf bytes.Buffer + + args := DomainMigratePerform3Args { + Dom: Dom, + Xmlin: Xmlin, + CookieIn: CookieIn, + Dconnuri: Dconnuri, + Uri: Uri, + Flags: Flags, + Dname: Dname, + Resource: Resource, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(216, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + + return +} + +// DomainMigrateFinish3 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_FINISH3. +func (l *Libvirt) DomainMigrateFinish3(Dname string, CookieIn []byte, Dconnuri OptString, Uri OptString, Flags uint64, Cancelled int32) (rDom Domain, rCookieOut []byte, err error) { + var buf bytes.Buffer + + args := DomainMigrateFinish3Args { + Dname: Dname, + CookieIn: CookieIn, + Dconnuri: Dconnuri, + Uri: Uri, + Flags: Flags, + Cancelled: Cancelled, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(217, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + + return +} + +// DomainMigrateConfirm3 is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_CONFIRM3. +func (l *Libvirt) DomainMigrateConfirm3(Dom Domain, CookieIn []byte, Flags uint64, Cancelled int32) (err error) { + var buf bytes.Buffer + + args := DomainMigrateConfirm3Args { + Dom: Dom, + CookieIn: CookieIn, + Flags: Flags, + Cancelled: Cancelled, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(218, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetSchedulerParametersFlags is the go wrapper for REMOTE_PROC_DOMAIN_SET_SCHEDULER_PARAMETERS_FLAGS. 
+func (l *Libvirt) DomainSetSchedulerParametersFlags(Dom Domain, Params []TypedParam, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetSchedulerParametersFlagsArgs { + Dom: Dom, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(219, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// InterfaceChangeBegin is the go wrapper for REMOTE_PROC_INTERFACE_CHANGE_BEGIN. +func (l *Libvirt) InterfaceChangeBegin(Flags uint32) (err error) { + var buf bytes.Buffer + + args := InterfaceChangeBeginArgs { + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(220, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// InterfaceChangeCommit is the go wrapper for REMOTE_PROC_INTERFACE_CHANGE_COMMIT. +func (l *Libvirt) InterfaceChangeCommit(Flags uint32) (err error) { + var buf bytes.Buffer + + args := InterfaceChangeCommitArgs { + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(221, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// InterfaceChangeRollback is the go wrapper for REMOTE_PROC_INTERFACE_CHANGE_ROLLBACK. +func (l *Libvirt) InterfaceChangeRollback(Flags uint32) (err error) { + var buf bytes.Buffer + + args := InterfaceChangeRollbackArgs { + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(222, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetSchedulerParametersFlags is the go wrapper for REMOTE_PROC_DOMAIN_GET_SCHEDULER_PARAMETERS_FLAGS. +func (l *Libvirt) DomainGetSchedulerParametersFlags(Dom Domain, Nparams int32, Flags uint32) (rParams []TypedParam, err error) { + var buf bytes.Buffer + + args := DomainGetSchedulerParametersFlagsArgs { + Dom: Dom, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(223, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + + return +} + +// DomainEventControlError is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CONTROL_ERROR. +func (l *Libvirt) DomainEventControlError() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(224, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainPinVcpuFlags is the go wrapper for REMOTE_PROC_DOMAIN_PIN_VCPU_FLAGS. 
+func (l *Libvirt) DomainPinVcpuFlags(Dom Domain, Vcpu uint32, Cpumap []byte, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainPinVcpuFlagsArgs { + Dom: Dom, + Vcpu: Vcpu, + Cpumap: Cpumap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(225, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSendKey is the go wrapper for REMOTE_PROC_DOMAIN_SEND_KEY. +func (l *Libvirt) DomainSendKey(Dom Domain, Codeset uint32, Holdtime uint32, Keycodes []uint32, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSendKeyArgs { + Dom: Dom, + Codeset: Codeset, + Holdtime: Holdtime, + Keycodes: Keycodes, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(226, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeGetCPUStats is the go wrapper for REMOTE_PROC_NODE_GET_CPU_STATS. +func (l *Libvirt) NodeGetCPUStats(CPUNum int32, Nparams int32, Flags uint32) (rParams []NodeGetCPUStats, rNparams int32, err error) { + var buf bytes.Buffer + + args := NodeGetCPUStatsArgs { + CPUNum: CPUNum, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(227, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []NodeGetCPUStats + _, err = dec.Decode(&rParams) + if err != nil { + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// NodeGetMemoryStats is the go wrapper for REMOTE_PROC_NODE_GET_MEMORY_STATS. +func (l *Libvirt) NodeGetMemoryStats(Nparams int32, CellNum int32, Flags uint32) (rParams []NodeGetMemoryStats, rNparams int32, err error) { + var buf bytes.Buffer + + args := NodeGetMemoryStatsArgs { + Nparams: Nparams, + CellNum: CellNum, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(228, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []NodeGetMemoryStats + _, err = dec.Decode(&rParams) + if err != nil { + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainGetControlInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_CONTROL_INFO. 
+func (l *Libvirt) DomainGetControlInfo(Dom Domain, Flags uint32) (rState uint32, rDetails uint32, rStateTime uint64, err error) { + var buf bytes.Buffer + + args := DomainGetControlInfoArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(229, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // State: uint32 + _, err = dec.Decode(&rState) + if err != nil { + return + } + // Details: uint32 + _, err = dec.Decode(&rDetails) + if err != nil { + return + } + // StateTime: uint64 + _, err = dec.Decode(&rStateTime) + if err != nil { + return + } + + return +} + +// DomainGetVcpuPinInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_VCPU_PIN_INFO. +func (l *Libvirt) DomainGetVcpuPinInfo(Dom Domain, Ncpumaps int32, Maplen int32, Flags uint32) (rCpumaps []byte, rNum int32, err error) { + var buf bytes.Buffer + + args := DomainGetVcpuPinInfoArgs { + Dom: Dom, + Ncpumaps: Ncpumaps, + Maplen: Maplen, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(230, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Cpumaps: []byte + _, err = dec.Decode(&rCpumaps) + if err != nil { + return + } + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// DomainUndefineFlags is the go wrapper for REMOTE_PROC_DOMAIN_UNDEFINE_FLAGS. +func (l *Libvirt) DomainUndefineFlags(Dom Domain, Flags DomainUndefineFlagsValues) (err error) { + var buf bytes.Buffer + + args := DomainUndefineFlagsArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(231, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSaveFlags is the go wrapper for REMOTE_PROC_DOMAIN_SAVE_FLAGS. +func (l *Libvirt) DomainSaveFlags(Dom Domain, To string, Dxml OptString, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSaveFlagsArgs { + Dom: Dom, + To: To, + Dxml: Dxml, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(232, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainRestoreFlags is the go wrapper for REMOTE_PROC_DOMAIN_RESTORE_FLAGS. +func (l *Libvirt) DomainRestoreFlags(From string, Dxml OptString, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainRestoreFlagsArgs { + From: From, + Dxml: Dxml, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(233, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainDestroyFlags is the go wrapper for REMOTE_PROC_DOMAIN_DESTROY_FLAGS. 
+func (l *Libvirt) DomainDestroyFlags(Dom Domain, Flags DomainDestroyFlagsValues) (err error) { + var buf bytes.Buffer + + args := DomainDestroyFlagsArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(234, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSaveImageGetXMLDesc is the go wrapper for REMOTE_PROC_DOMAIN_SAVE_IMAGE_GET_XML_DESC. +func (l *Libvirt) DomainSaveImageGetXMLDesc(File string, Flags uint32) (rXML string, err error) { + var buf bytes.Buffer + + args := DomainSaveImageGetXMLDescArgs { + File: File, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(235, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// DomainSaveImageDefineXML is the go wrapper for REMOTE_PROC_DOMAIN_SAVE_IMAGE_DEFINE_XML. +func (l *Libvirt) DomainSaveImageDefineXML(File string, Dxml string, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSaveImageDefineXMLArgs { + File: File, + Dxml: Dxml, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(236, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainBlockJobAbort is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_JOB_ABORT. +func (l *Libvirt) DomainBlockJobAbort(Dom Domain, Path string, Flags DomainBlockJobAbortFlags) (err error) { + var buf bytes.Buffer + + args := DomainBlockJobAbortArgs { + Dom: Dom, + Path: Path, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(237, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetBlockJobInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_BLOCK_JOB_INFO. 
+func (l *Libvirt) DomainGetBlockJobInfo(Dom Domain, Path string, Flags uint32) (rFound int32, rType int32, rBandwidth uint64, rCur uint64, rEnd uint64, err error) { + var buf bytes.Buffer + + args := DomainGetBlockJobInfoArgs { + Dom: Dom, + Path: Path, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(238, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Found: int32 + _, err = dec.Decode(&rFound) + if err != nil { + return + } + // Type: int32 + _, err = dec.Decode(&rType) + if err != nil { + return + } + // Bandwidth: uint64 + _, err = dec.Decode(&rBandwidth) + if err != nil { + return + } + // Cur: uint64 + _, err = dec.Decode(&rCur) + if err != nil { + return + } + // End: uint64 + _, err = dec.Decode(&rEnd) + if err != nil { + return + } + + return +} + +// DomainBlockJobSetSpeed is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_JOB_SET_SPEED. +func (l *Libvirt) DomainBlockJobSetSpeed(Dom Domain, Path string, Bandwidth uint64, Flags DomainBlockJobSetSpeedFlags) (err error) { + var buf bytes.Buffer + + args := DomainBlockJobSetSpeedArgs { + Dom: Dom, + Path: Path, + Bandwidth: Bandwidth, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(239, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainBlockPull is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_PULL. +func (l *Libvirt) DomainBlockPull(Dom Domain, Path string, Bandwidth uint64, Flags DomainBlockPullFlags) (err error) { + var buf bytes.Buffer + + args := DomainBlockPullArgs { + Dom: Dom, + Path: Path, + Bandwidth: Bandwidth, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(240, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventBlockJob is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_BLOCK_JOB. +func (l *Libvirt) DomainEventBlockJob() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(241, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainMigrateGetMaxSpeed is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_SPEED. +func (l *Libvirt) DomainMigrateGetMaxSpeed(Dom Domain, Flags uint32) (rBandwidth uint64, err error) { + var buf bytes.Buffer + + args := DomainMigrateGetMaxSpeedArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(242, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Bandwidth: uint64 + _, err = dec.Decode(&rBandwidth) + if err != nil { + return + } + + return +} + +// DomainBlockStatsFlags is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_STATS_FLAGS. 
+func (l *Libvirt) DomainBlockStatsFlags(Dom Domain, Path string, Nparams int32, Flags uint32) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainBlockStatsFlagsArgs { + Dom: Dom, + Path: Path, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(243, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainSnapshotGetParent is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_GET_PARENT. +func (l *Libvirt) DomainSnapshotGetParent(Snap DomainSnapshot, Flags uint32) (rSnap DomainSnapshot, err error) { + var buf bytes.Buffer + + args := DomainSnapshotGetParentArgs { + Snap: Snap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(244, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Snap: DomainSnapshot + _, err = dec.Decode(&rSnap) + if err != nil { + return + } + + return +} + +// DomainReset is the go wrapper for REMOTE_PROC_DOMAIN_RESET. +func (l *Libvirt) DomainReset(Dom Domain, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainResetArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(245, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSnapshotNumChildren is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_NUM_CHILDREN. +func (l *Libvirt) DomainSnapshotNumChildren(Snap DomainSnapshot, Flags uint32) (rNum int32, err error) { + var buf bytes.Buffer + + args := DomainSnapshotNumChildrenArgs { + Snap: Snap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(246, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Num: int32 + _, err = dec.Decode(&rNum) + if err != nil { + return + } + + return +} + +// DomainSnapshotListChildrenNames is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_LIST_CHILDREN_NAMES. 
+func (l *Libvirt) DomainSnapshotListChildrenNames(Snap DomainSnapshot, Maxnames int32, Flags uint32) (rNames []string, err error) { + var buf bytes.Buffer + + args := DomainSnapshotListChildrenNamesArgs { + Snap: Snap, + Maxnames: Maxnames, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(247, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Names: []string + _, err = dec.Decode(&rNames) + if err != nil { + return + } + + return +} + +// DomainEventDiskChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_DISK_CHANGE. +func (l *Libvirt) DomainEventDiskChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(248, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainOpenGraphics is the go wrapper for REMOTE_PROC_DOMAIN_OPEN_GRAPHICS. +func (l *Libvirt) DomainOpenGraphics(Dom Domain, Idx uint32, Flags DomainOpenGraphicsFlags) (err error) { + var buf bytes.Buffer + + args := DomainOpenGraphicsArgs { + Dom: Dom, + Idx: Idx, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(249, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeSuspendForDuration is the go wrapper for REMOTE_PROC_NODE_SUSPEND_FOR_DURATION. +func (l *Libvirt) NodeSuspendForDuration(Target uint32, Duration uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := NodeSuspendForDurationArgs { + Target: Target, + Duration: Duration, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(250, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainBlockResize is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_RESIZE. +func (l *Libvirt) DomainBlockResize(Dom Domain, Disk string, Size uint64, Flags DomainBlockResizeFlags) (err error) { + var buf bytes.Buffer + + args := DomainBlockResizeArgs { + Dom: Dom, + Disk: Disk, + Size: Size, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(251, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetBlockIOTune is the go wrapper for REMOTE_PROC_DOMAIN_SET_BLOCK_IO_TUNE. +func (l *Libvirt) DomainSetBlockIOTune(Dom Domain, Disk string, Params []TypedParam, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetBlockIOTuneArgs { + Dom: Dom, + Disk: Disk, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(252, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetBlockIOTune is the go wrapper for REMOTE_PROC_DOMAIN_GET_BLOCK_IO_TUNE. 
+func (l *Libvirt) DomainGetBlockIOTune(Dom Domain, Disk OptString, Nparams int32, Flags uint32) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainGetBlockIOTuneArgs { + Dom: Dom, + Disk: Disk, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(253, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainSetNumaParameters is the go wrapper for REMOTE_PROC_DOMAIN_SET_NUMA_PARAMETERS. +func (l *Libvirt) DomainSetNumaParameters(Dom Domain, Params []TypedParam, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetNumaParametersArgs { + Dom: Dom, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(254, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetNumaParameters is the go wrapper for REMOTE_PROC_DOMAIN_GET_NUMA_PARAMETERS. +func (l *Libvirt) DomainGetNumaParameters(Dom Domain, Nparams int32, Flags uint32) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainGetNumaParametersArgs { + Dom: Dom, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(255, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainSetInterfaceParameters is the go wrapper for REMOTE_PROC_DOMAIN_SET_INTERFACE_PARAMETERS. +func (l *Libvirt) DomainSetInterfaceParameters(Dom Domain, Device string, Params []TypedParam, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetInterfaceParametersArgs { + Dom: Dom, + Device: Device, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(256, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetInterfaceParameters is the go wrapper for REMOTE_PROC_DOMAIN_GET_INTERFACE_PARAMETERS. 
+func (l *Libvirt) DomainGetInterfaceParameters(Dom Domain, Device string, Nparams int32, Flags DomainModificationImpact) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainGetInterfaceParametersArgs { + Dom: Dom, + Device: Device, + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(257, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainShutdownFlags is the go wrapper for REMOTE_PROC_DOMAIN_SHUTDOWN_FLAGS. +func (l *Libvirt) DomainShutdownFlags(Dom Domain, Flags DomainShutdownFlagValues) (err error) { + var buf bytes.Buffer + + args := DomainShutdownFlagsArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(258, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolWipePattern is the go wrapper for REMOTE_PROC_STORAGE_VOL_WIPE_PATTERN. +func (l *Libvirt) StorageVolWipePattern(Vol StorageVol, Algorithm uint32, Flags uint32) (err error) { + var buf bytes.Buffer + + args := StorageVolWipePatternArgs { + Vol: Vol, + Algorithm: Algorithm, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(259, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolResize is the go wrapper for REMOTE_PROC_STORAGE_VOL_RESIZE. +func (l *Libvirt) StorageVolResize(Vol StorageVol, Capacity uint64, Flags StorageVolResizeFlags) (err error) { + var buf bytes.Buffer + + args := StorageVolResizeArgs { + Vol: Vol, + Capacity: Capacity, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(260, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainPmSuspendForDuration is the go wrapper for REMOTE_PROC_DOMAIN_PM_SUSPEND_FOR_DURATION. +func (l *Libvirt) DomainPmSuspendForDuration(Dom Domain, Target uint32, Duration uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainPmSuspendForDurationArgs { + Dom: Dom, + Target: Target, + Duration: Duration, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(261, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetCPUStats is the go wrapper for REMOTE_PROC_DOMAIN_GET_CPU_STATS. 
+func (l *Libvirt) DomainGetCPUStats(Dom Domain, Nparams uint32, StartCPU int32, Ncpus uint32, Flags TypedParameterFlags) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := DomainGetCPUStatsArgs { + Dom: Dom, + Nparams: Nparams, + StartCPU: StartCPU, + Ncpus: Ncpus, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(262, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainGetDiskErrors is the go wrapper for REMOTE_PROC_DOMAIN_GET_DISK_ERRORS. +func (l *Libvirt) DomainGetDiskErrors(Dom Domain, Maxerrors uint32, Flags uint32) (rErrors []DomainDiskError, rNerrors int32, err error) { + var buf bytes.Buffer + + args := DomainGetDiskErrorsArgs { + Dom: Dom, + Maxerrors: Maxerrors, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(263, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Errors: []DomainDiskError + _, err = dec.Decode(&rErrors) + if err != nil { + return + } + // Nerrors: int32 + _, err = dec.Decode(&rNerrors) + if err != nil { + return + } + + return +} + +// DomainSetMetadata is the go wrapper for REMOTE_PROC_DOMAIN_SET_METADATA. +func (l *Libvirt) DomainSetMetadata(Dom Domain, Type int32, Metadata OptString, Key OptString, Uri OptString, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainSetMetadataArgs { + Dom: Dom, + Type: Type, + Metadata: Metadata, + Key: Key, + Uri: Uri, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(264, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetMetadata is the go wrapper for REMOTE_PROC_DOMAIN_GET_METADATA. +func (l *Libvirt) DomainGetMetadata(Dom Domain, Type int32, Uri OptString, Flags DomainModificationImpact) (rMetadata string, err error) { + var buf bytes.Buffer + + args := DomainGetMetadataArgs { + Dom: Dom, + Type: Type, + Uri: Uri, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(265, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Metadata: string + _, err = dec.Decode(&rMetadata) + if err != nil { + return + } + + return +} + +// DomainBlockRebase is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_REBASE. 
+func (l *Libvirt) DomainBlockRebase(Dom Domain, Path string, Base OptString, Bandwidth uint64, Flags DomainBlockRebaseFlags) (err error) { + var buf bytes.Buffer + + args := DomainBlockRebaseArgs { + Dom: Dom, + Path: Path, + Base: Base, + Bandwidth: Bandwidth, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(266, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainPmWakeup is the go wrapper for REMOTE_PROC_DOMAIN_PM_WAKEUP. +func (l *Libvirt) DomainPmWakeup(Dom Domain, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainPmWakeupArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(267, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventTrayChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_TRAY_CHANGE. +func (l *Libvirt) DomainEventTrayChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(268, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventPmwakeup is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_PMWAKEUP. +func (l *Libvirt) DomainEventPmwakeup() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(269, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventPmsuspend is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_PMSUSPEND. +func (l *Libvirt) DomainEventPmsuspend() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(270, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSnapshotIsCurrent is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_IS_CURRENT. +func (l *Libvirt) DomainSnapshotIsCurrent(Snap DomainSnapshot, Flags uint32) (rCurrent int32, err error) { + var buf bytes.Buffer + + args := DomainSnapshotIsCurrentArgs { + Snap: Snap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(271, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Current: int32 + _, err = dec.Decode(&rCurrent) + if err != nil { + return + } + + return +} + +// DomainSnapshotHasMetadata is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_HAS_METADATA. 
+func (l *Libvirt) DomainSnapshotHasMetadata(Snap DomainSnapshot, Flags uint32) (rMetadata int32, err error) { + var buf bytes.Buffer + + args := DomainSnapshotHasMetadataArgs { + Snap: Snap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(272, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Metadata: int32 + _, err = dec.Decode(&rMetadata) + if err != nil { + return + } + + return +} + +// ConnectListAllDomains is the go wrapper for REMOTE_PROC_CONNECT_LIST_ALL_DOMAINS. +func (l *Libvirt) ConnectListAllDomains(NeedResults int32, Flags ConnectListAllDomainsFlags) (rDomains []Domain, rRet uint32, err error) { + var buf bytes.Buffer + + args := ConnectListAllDomainsArgs { + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(273, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Domains: []Domain + _, err = dec.Decode(&rDomains) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainListAllSnapshots is the go wrapper for REMOTE_PROC_DOMAIN_LIST_ALL_SNAPSHOTS. +func (l *Libvirt) DomainListAllSnapshots(Dom Domain, NeedResults int32, Flags uint32) (rSnapshots []DomainSnapshot, rRet int32, err error) { + var buf bytes.Buffer + + args := DomainListAllSnapshotsArgs { + Dom: Dom, + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(274, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Snapshots: []DomainSnapshot + _, err = dec.Decode(&rSnapshots) + if err != nil { + return + } + // Ret: int32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainSnapshotListAllChildren is the go wrapper for REMOTE_PROC_DOMAIN_SNAPSHOT_LIST_ALL_CHILDREN. +func (l *Libvirt) DomainSnapshotListAllChildren(Snapshot DomainSnapshot, NeedResults int32, Flags uint32) (rSnapshots []DomainSnapshot, rRet int32, err error) { + var buf bytes.Buffer + + args := DomainSnapshotListAllChildrenArgs { + Snapshot: Snapshot, + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(275, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Snapshots: []DomainSnapshot + _, err = dec.Decode(&rSnapshots) + if err != nil { + return + } + // Ret: int32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainEventBalloonChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_BALLOON_CHANGE. 
+func (l *Libvirt) DomainEventBalloonChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(276, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetHostname is the go wrapper for REMOTE_PROC_DOMAIN_GET_HOSTNAME. +func (l *Libvirt) DomainGetHostname(Dom Domain, Flags uint32) (rHostname string, err error) { + var buf bytes.Buffer + + args := DomainGetHostnameArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(277, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Hostname: string + _, err = dec.Decode(&rHostname) + if err != nil { + return + } + + return +} + +// DomainGetSecurityLabelList is the go wrapper for REMOTE_PROC_DOMAIN_GET_SECURITY_LABEL_LIST. +func (l *Libvirt) DomainGetSecurityLabelList(Dom Domain) (rLabels []DomainGetSecurityLabelRet, rRet int32, err error) { + var buf bytes.Buffer + + args := DomainGetSecurityLabelListArgs { + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(278, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Labels: []DomainGetSecurityLabelRet + _, err = dec.Decode(&rLabels) + if err != nil { + return + } + // Ret: int32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainPinEmulator is the go wrapper for REMOTE_PROC_DOMAIN_PIN_EMULATOR. +func (l *Libvirt) DomainPinEmulator(Dom Domain, Cpumap []byte, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainPinEmulatorArgs { + Dom: Dom, + Cpumap: Cpumap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(279, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetEmulatorPinInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO. +func (l *Libvirt) DomainGetEmulatorPinInfo(Dom Domain, Maplen int32, Flags DomainModificationImpact) (rCpumaps []byte, rRet int32, err error) { + var buf bytes.Buffer + + args := DomainGetEmulatorPinInfoArgs { + Dom: Dom, + Maplen: Maplen, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(280, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Cpumaps: []byte + _, err = dec.Decode(&rCpumaps) + if err != nil { + return + } + // Ret: int32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectListAllStoragePools is the go wrapper for REMOTE_PROC_CONNECT_LIST_ALL_STORAGE_POOLS. 
+func (l *Libvirt) ConnectListAllStoragePools(NeedResults int32, Flags ConnectListAllStoragePoolsFlags) (rPools []StoragePool, rRet uint32, err error) { + var buf bytes.Buffer + + args := ConnectListAllStoragePoolsArgs { + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(281, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Pools: []StoragePool + _, err = dec.Decode(&rPools) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// StoragePoolListAllVolumes is the go wrapper for REMOTE_PROC_STORAGE_POOL_LIST_ALL_VOLUMES. +func (l *Libvirt) StoragePoolListAllVolumes(Pool StoragePool, NeedResults int32, Flags uint32) (rVols []StorageVol, rRet uint32, err error) { + var buf bytes.Buffer + + args := StoragePoolListAllVolumesArgs { + Pool: Pool, + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(282, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Vols: []StorageVol + _, err = dec.Decode(&rVols) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectListAllNetworks is the go wrapper for REMOTE_PROC_CONNECT_LIST_ALL_NETWORKS. +func (l *Libvirt) ConnectListAllNetworks(NeedResults int32, Flags ConnectListAllNetworksFlags) (rNets []Network, rRet uint32, err error) { + var buf bytes.Buffer + + args := ConnectListAllNetworksArgs { + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(283, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Nets: []Network + _, err = dec.Decode(&rNets) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectListAllInterfaces is the go wrapper for REMOTE_PROC_CONNECT_LIST_ALL_INTERFACES. 
+func (l *Libvirt) ConnectListAllInterfaces(NeedResults int32, Flags ConnectListAllInterfacesFlags) (rIfaces []Interface, rRet uint32, err error) { + var buf bytes.Buffer + + args := ConnectListAllInterfacesArgs { + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(284, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Ifaces: []Interface + _, err = dec.Decode(&rIfaces) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectListAllNodeDevices is the go wrapper for REMOTE_PROC_CONNECT_LIST_ALL_NODE_DEVICES. +func (l *Libvirt) ConnectListAllNodeDevices(NeedResults int32, Flags uint32) (rDevices []NodeDevice, rRet uint32, err error) { + var buf bytes.Buffer + + args := ConnectListAllNodeDevicesArgs { + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(285, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Devices: []NodeDevice + _, err = dec.Decode(&rDevices) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectListAllNwfilters is the go wrapper for REMOTE_PROC_CONNECT_LIST_ALL_NWFILTERS. +func (l *Libvirt) ConnectListAllNwfilters(NeedResults int32, Flags uint32) (rFilters []Nwfilter, rRet uint32, err error) { + var buf bytes.Buffer + + args := ConnectListAllNwfiltersArgs { + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(286, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Filters: []Nwfilter + _, err = dec.Decode(&rFilters) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectListAllSecrets is the go wrapper for REMOTE_PROC_CONNECT_LIST_ALL_SECRETS. +func (l *Libvirt) ConnectListAllSecrets(NeedResults int32, Flags ConnectListAllSecretsFlags) (rSecrets []Secret, rRet uint32, err error) { + var buf bytes.Buffer + + args := ConnectListAllSecretsArgs { + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(287, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Secrets: []Secret + _, err = dec.Decode(&rSecrets) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// NodeSetMemoryParameters is the go wrapper for REMOTE_PROC_NODE_SET_MEMORY_PARAMETERS. 
+func (l *Libvirt) NodeSetMemoryParameters(Params []TypedParam, Flags uint32) (err error) { + var buf bytes.Buffer + + args := NodeSetMemoryParametersArgs { + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(288, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeGetMemoryParameters is the go wrapper for REMOTE_PROC_NODE_GET_MEMORY_PARAMETERS. +func (l *Libvirt) NodeGetMemoryParameters(Nparams int32, Flags uint32) (rParams []TypedParam, rNparams int32, err error) { + var buf bytes.Buffer + + args := NodeGetMemoryParametersArgs { + Nparams: Nparams, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(289, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + // Nparams: int32 + _, err = dec.Decode(&rNparams) + if err != nil { + return + } + + return +} + +// DomainBlockCommit is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_COMMIT. +func (l *Libvirt) DomainBlockCommit(Dom Domain, Disk string, Base OptString, Top OptString, Bandwidth uint64, Flags DomainBlockCommitFlags) (err error) { + var buf bytes.Buffer + + args := DomainBlockCommitArgs { + Dom: Dom, + Disk: Disk, + Base: Base, + Top: Top, + Bandwidth: Bandwidth, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(290, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NetworkUpdate is the go wrapper for REMOTE_PROC_NETWORK_UPDATE. +func (l *Libvirt) NetworkUpdate(Net Network, Command uint32, Section uint32, ParentIndex int32, XML string, Flags NetworkUpdateFlags) (err error) { + var buf bytes.Buffer + + args := NetworkUpdateArgs { + Net: Net, + Command: Command, + Section: Section, + ParentIndex: ParentIndex, + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(291, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventPmsuspendDisk is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_PMSUSPEND_DISK. +func (l *Libvirt) DomainEventPmsuspendDisk() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(292, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeGetCPUMap is the go wrapper for REMOTE_PROC_NODE_GET_CPU_MAP. 
+func (l *Libvirt) NodeGetCPUMap(NeedMap int32, NeedOnline int32, Flags uint32) (rCpumap []byte, rOnline uint32, rRet int32, err error) { + var buf bytes.Buffer + + args := NodeGetCPUMapArgs { + NeedMap: NeedMap, + NeedOnline: NeedOnline, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(293, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Cpumap: []byte + _, err = dec.Decode(&rCpumap) + if err != nil { + return + } + // Online: uint32 + _, err = dec.Decode(&rOnline) + if err != nil { + return + } + // Ret: int32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainFstrim is the go wrapper for REMOTE_PROC_DOMAIN_FSTRIM. +func (l *Libvirt) DomainFstrim(Dom Domain, MountPoint OptString, Minimum uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainFstrimArgs { + Dom: Dom, + MountPoint: MountPoint, + Minimum: Minimum, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(294, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSendProcessSignal is the go wrapper for REMOTE_PROC_DOMAIN_SEND_PROCESS_SIGNAL. +func (l *Libvirt) DomainSendProcessSignal(Dom Domain, PidValue int64, Signum uint32, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSendProcessSignalArgs { + Dom: Dom, + PidValue: PidValue, + Signum: Signum, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(295, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainOpenChannel is the go wrapper for REMOTE_PROC_DOMAIN_OPEN_CHANNEL. +func (l *Libvirt) DomainOpenChannel(Dom Domain, Name OptString, Flags DomainChannelFlags) (err error) { + var buf bytes.Buffer + + args := DomainOpenChannelArgs { + Dom: Dom, + Name: Name, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(296, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeDeviceLookupScsiHostByWwn is the go wrapper for REMOTE_PROC_NODE_DEVICE_LOOKUP_SCSI_HOST_BY_WWN. +func (l *Libvirt) NodeDeviceLookupScsiHostByWwn(Wwnn string, Wwpn string, Flags uint32) (rDev NodeDevice, err error) { + var buf bytes.Buffer + + args := NodeDeviceLookupScsiHostByWwnArgs { + Wwnn: Wwnn, + Wwpn: Wwpn, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(297, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dev: NodeDevice + _, err = dec.Decode(&rDev) + if err != nil { + return + } + + return +} + +// DomainGetJobStats is the go wrapper for REMOTE_PROC_DOMAIN_GET_JOB_STATS. 
+func (l *Libvirt) DomainGetJobStats(Dom Domain, Flags DomainGetJobStatsFlags) (rType int32, rParams []TypedParam, err error) { + var buf bytes.Buffer + + args := DomainGetJobStatsArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(298, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Type: int32 + _, err = dec.Decode(&rType) + if err != nil { + return + } + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + + return +} + +// DomainMigrateGetCompressionCache is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_GET_COMPRESSION_CACHE. +func (l *Libvirt) DomainMigrateGetCompressionCache(Dom Domain, Flags uint32) (rCacheSize uint64, err error) { + var buf bytes.Buffer + + args := DomainMigrateGetCompressionCacheArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(299, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CacheSize: uint64 + _, err = dec.Decode(&rCacheSize) + if err != nil { + return + } + + return +} + +// DomainMigrateSetCompressionCache is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_SET_COMPRESSION_CACHE. +func (l *Libvirt) DomainMigrateSetCompressionCache(Dom Domain, CacheSize uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainMigrateSetCompressionCacheArgs { + Dom: Dom, + CacheSize: CacheSize, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(300, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeDeviceDetachFlags is the go wrapper for REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS. +func (l *Libvirt) NodeDeviceDetachFlags(Name string, DriverName OptString, Flags uint32) (err error) { + var buf bytes.Buffer + + args := NodeDeviceDetachFlagsArgs { + Name: Name, + DriverName: DriverName, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(301, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainMigrateBegin3Params is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_BEGIN3_PARAMS. 
+func (l *Libvirt) DomainMigrateBegin3Params(Dom Domain, Params []TypedParam, Flags uint32) (rCookieOut []byte, rXML string, err error) { + var buf bytes.Buffer + + args := DomainMigrateBegin3ParamsArgs { + Dom: Dom, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(302, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// DomainMigratePrepare3Params is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE3_PARAMS. +func (l *Libvirt) DomainMigratePrepare3Params(Params []TypedParam, CookieIn []byte, Flags uint32) (rCookieOut []byte, rUriOut OptString, err error) { + var buf bytes.Buffer + + args := DomainMigratePrepare3ParamsArgs { + Params: Params, + CookieIn: CookieIn, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(303, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + // UriOut: OptString + _, err = dec.Decode(&rUriOut) + if err != nil { + return + } + + return +} + +// DomainMigratePrepareTunnel3Params is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL3_PARAMS. +func (l *Libvirt) DomainMigratePrepareTunnel3Params(Params []TypedParam, CookieIn []byte, Flags uint32) (rCookieOut []byte, err error) { + var buf bytes.Buffer + + args := DomainMigratePrepareTunnel3ParamsArgs { + Params: Params, + CookieIn: CookieIn, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(304, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + + return +} + +// DomainMigratePerform3Params is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_PERFORM3_PARAMS. 
+func (l *Libvirt) DomainMigratePerform3Params(Dom Domain, Dconnuri OptString, Params []TypedParam, CookieIn []byte, Flags DomainMigrateFlags) (rCookieOut []byte, err error) { + var buf bytes.Buffer + + args := DomainMigratePerform3ParamsArgs { + Dom: Dom, + Dconnuri: Dconnuri, + Params: Params, + CookieIn: CookieIn, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(305, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + + return +} + +// DomainMigrateFinish3Params is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_FINISH3_PARAMS. +func (l *Libvirt) DomainMigrateFinish3Params(Params []TypedParam, CookieIn []byte, Flags uint32, Cancelled int32) (rDom Domain, rCookieOut []byte, err error) { + var buf bytes.Buffer + + args := DomainMigrateFinish3ParamsArgs { + Params: Params, + CookieIn: CookieIn, + Flags: Flags, + Cancelled: Cancelled, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(306, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + // CookieOut: []byte + _, err = dec.Decode(&rCookieOut) + if err != nil { + return + } + + return +} + +// DomainMigrateConfirm3Params is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_CONFIRM3_PARAMS. +func (l *Libvirt) DomainMigrateConfirm3Params(Dom Domain, Params []TypedParam, CookieIn []byte, Flags uint32, Cancelled int32) (err error) { + var buf bytes.Buffer + + args := DomainMigrateConfirm3ParamsArgs { + Dom: Dom, + Params: Params, + CookieIn: CookieIn, + Flags: Flags, + Cancelled: Cancelled, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(307, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetMemoryStatsPeriod is the go wrapper for REMOTE_PROC_DOMAIN_SET_MEMORY_STATS_PERIOD. +func (l *Libvirt) DomainSetMemoryStatsPeriod(Dom Domain, Period int32, Flags DomainMemoryModFlags) (err error) { + var buf bytes.Buffer + + args := DomainSetMemoryStatsPeriodArgs { + Dom: Dom, + Period: Period, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(308, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainCreateXMLWithFiles is the go wrapper for REMOTE_PROC_DOMAIN_CREATE_XML_WITH_FILES. 
+func (l *Libvirt) DomainCreateXMLWithFiles(XMLDesc string, Flags DomainCreateFlags) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainCreateXMLWithFilesArgs { + XMLDesc: XMLDesc, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(309, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainCreateWithFiles is the go wrapper for REMOTE_PROC_DOMAIN_CREATE_WITH_FILES. +func (l *Libvirt) DomainCreateWithFiles(Dom Domain, Flags DomainCreateFlags) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainCreateWithFilesArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(310, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainEventDeviceRemoved is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_DEVICE_REMOVED. +func (l *Libvirt) DomainEventDeviceRemoved() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(311, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectGetCPUModelNames is the go wrapper for REMOTE_PROC_CONNECT_GET_CPU_MODEL_NAMES. +func (l *Libvirt) ConnectGetCPUModelNames(Arch string, NeedResults int32, Flags uint32) (rModels []string, rRet int32, err error) { + var buf bytes.Buffer + + args := ConnectGetCPUModelNamesArgs { + Arch: Arch, + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(312, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Models: []string + _, err = dec.Decode(&rModels) + if err != nil { + return + } + // Ret: int32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectNetworkEventRegisterAny is the go wrapper for REMOTE_PROC_CONNECT_NETWORK_EVENT_REGISTER_ANY. 
+func (l *Libvirt) ConnectNetworkEventRegisterAny(EventID int32, Net OptNetwork) (rCallbackID int32, err error) { + var buf bytes.Buffer + + args := ConnectNetworkEventRegisterAnyArgs { + EventID: EventID, + Net: Net, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(313, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CallbackID: int32 + _, err = dec.Decode(&rCallbackID) + if err != nil { + return + } + + return +} + +// ConnectNetworkEventDeregisterAny is the go wrapper for REMOTE_PROC_CONNECT_NETWORK_EVENT_DEREGISTER_ANY. +func (l *Libvirt) ConnectNetworkEventDeregisterAny(CallbackID int32) (err error) { + var buf bytes.Buffer + + args := ConnectNetworkEventDeregisterAnyArgs { + CallbackID: CallbackID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(314, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NetworkEventLifecycle is the go wrapper for REMOTE_PROC_NETWORK_EVENT_LIFECYCLE. +func (l *Libvirt) NetworkEventLifecycle() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(315, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectDomainEventCallbackRegisterAny is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_EVENT_CALLBACK_REGISTER_ANY. +func (l *Libvirt) ConnectDomainEventCallbackRegisterAny(EventID int32, Dom OptDomain) (rCallbackID int32, err error) { + var buf bytes.Buffer + + args := ConnectDomainEventCallbackRegisterAnyArgs { + EventID: EventID, + Dom: Dom, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(316, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CallbackID: int32 + _, err = dec.Decode(&rCallbackID) + if err != nil { + return + } + + return +} + +// ConnectDomainEventCallbackDeregisterAny is the go wrapper for REMOTE_PROC_CONNECT_DOMAIN_EVENT_CALLBACK_DEREGISTER_ANY. +func (l *Libvirt) ConnectDomainEventCallbackDeregisterAny(CallbackID int32) (err error) { + var buf bytes.Buffer + + args := ConnectDomainEventCallbackDeregisterAnyArgs { + CallbackID: CallbackID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(317, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackLifecycle is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_LIFECYCLE. 
+func (l *Libvirt) DomainEventCallbackLifecycle() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(318, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackReboot is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_REBOOT. +func (l *Libvirt) DomainEventCallbackReboot() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(319, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackRtcChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_RTC_CHANGE. +func (l *Libvirt) DomainEventCallbackRtcChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(320, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackWatchdog is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_WATCHDOG. +func (l *Libvirt) DomainEventCallbackWatchdog() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(321, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackIOError is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_IO_ERROR. +func (l *Libvirt) DomainEventCallbackIOError() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(322, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackGraphics is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_GRAPHICS. +func (l *Libvirt) DomainEventCallbackGraphics() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(323, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackIOErrorReason is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_IO_ERROR_REASON. +func (l *Libvirt) DomainEventCallbackIOErrorReason() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(324, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackControlError is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_CONTROL_ERROR. +func (l *Libvirt) DomainEventCallbackControlError() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(325, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackBlockJob is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_BLOCK_JOB. 
+func (l *Libvirt) DomainEventCallbackBlockJob() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(326, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackDiskChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DISK_CHANGE. +func (l *Libvirt) DomainEventCallbackDiskChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(327, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackTrayChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TRAY_CHANGE. +func (l *Libvirt) DomainEventCallbackTrayChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(328, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackPmwakeup is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_PMWAKEUP. +func (l *Libvirt) DomainEventCallbackPmwakeup() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(329, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackPmsuspend is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_PMSUSPEND. +func (l *Libvirt) DomainEventCallbackPmsuspend() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(330, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackBalloonChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_BALLOON_CHANGE. +func (l *Libvirt) DomainEventCallbackBalloonChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(331, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackPmsuspendDisk is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_PMSUSPEND_DISK. +func (l *Libvirt) DomainEventCallbackPmsuspendDisk() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(332, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackDeviceRemoved is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DEVICE_REMOVED. +func (l *Libvirt) DomainEventCallbackDeviceRemoved() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(333, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainCoreDumpWithFormat is the go wrapper for REMOTE_PROC_DOMAIN_CORE_DUMP_WITH_FORMAT. 
+func (l *Libvirt) DomainCoreDumpWithFormat(Dom Domain, To string, Dumpformat uint32, Flags DomainCoreDumpFlags) (err error) { + var buf bytes.Buffer + + args := DomainCoreDumpWithFormatArgs { + Dom: Dom, + To: To, + Dumpformat: Dumpformat, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(334, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainFsfreeze is the go wrapper for REMOTE_PROC_DOMAIN_FSFREEZE. +func (l *Libvirt) DomainFsfreeze(Dom Domain, Mountpoints []string, Flags uint32) (rFilesystems int32, err error) { + var buf bytes.Buffer + + args := DomainFsfreezeArgs { + Dom: Dom, + Mountpoints: Mountpoints, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(335, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Filesystems: int32 + _, err = dec.Decode(&rFilesystems) + if err != nil { + return + } + + return +} + +// DomainFsthaw is the go wrapper for REMOTE_PROC_DOMAIN_FSTHAW. +func (l *Libvirt) DomainFsthaw(Dom Domain, Mountpoints []string, Flags uint32) (rFilesystems int32, err error) { + var buf bytes.Buffer + + args := DomainFsthawArgs { + Dom: Dom, + Mountpoints: Mountpoints, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(336, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Filesystems: int32 + _, err = dec.Decode(&rFilesystems) + if err != nil { + return + } + + return +} + +// DomainGetTime is the go wrapper for REMOTE_PROC_DOMAIN_GET_TIME. +func (l *Libvirt) DomainGetTime(Dom Domain, Flags uint32) (rSeconds int64, rNseconds uint32, err error) { + var buf bytes.Buffer + + args := DomainGetTimeArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(337, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Seconds: int64 + _, err = dec.Decode(&rSeconds) + if err != nil { + return + } + // Nseconds: uint32 + _, err = dec.Decode(&rNseconds) + if err != nil { + return + } + + return +} + +// DomainSetTime is the go wrapper for REMOTE_PROC_DOMAIN_SET_TIME. 
+func (l *Libvirt) DomainSetTime(Dom Domain, Seconds int64, Nseconds uint32, Flags DomainSetTimeFlags) (err error) { + var buf bytes.Buffer + + args := DomainSetTimeArgs { + Dom: Dom, + Seconds: Seconds, + Nseconds: Nseconds, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(338, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventBlockJob2 is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_BLOCK_JOB_2. +func (l *Libvirt) DomainEventBlockJob2() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(339, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeGetFreePages is the go wrapper for REMOTE_PROC_NODE_GET_FREE_PAGES. +func (l *Libvirt) NodeGetFreePages(Pages []uint32, StartCell int32, CellCount uint32, Flags uint32) (rCounts []uint64, err error) { + var buf bytes.Buffer + + args := NodeGetFreePagesArgs { + Pages: Pages, + StartCell: StartCell, + CellCount: CellCount, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(340, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Counts: []uint64 + _, err = dec.Decode(&rCounts) + if err != nil { + return + } + + return +} + +// NetworkGetDhcpLeases is the go wrapper for REMOTE_PROC_NETWORK_GET_DHCP_LEASES. +func (l *Libvirt) NetworkGetDhcpLeases(Net Network, Mac OptString, NeedResults int32, Flags uint32) (rLeases []NetworkDhcpLease, rRet uint32, err error) { + var buf bytes.Buffer + + args := NetworkGetDhcpLeasesArgs { + Net: Net, + Mac: Mac, + NeedResults: NeedResults, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(341, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Leases: []NetworkDhcpLease + _, err = dec.Decode(&rLeases) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// ConnectGetDomainCapabilities is the go wrapper for REMOTE_PROC_CONNECT_GET_DOMAIN_CAPABILITIES. 
+func (l *Libvirt) ConnectGetDomainCapabilities(Emulatorbin OptString, Arch OptString, Machine OptString, Virttype OptString, Flags uint32) (rCapabilities string, err error) { + var buf bytes.Buffer + + args := ConnectGetDomainCapabilitiesArgs { + Emulatorbin: Emulatorbin, + Arch: Arch, + Machine: Machine, + Virttype: Virttype, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(342, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Capabilities: string + _, err = dec.Decode(&rCapabilities) + if err != nil { + return + } + + return +} + +// DomainOpenGraphicsFd is the go wrapper for REMOTE_PROC_DOMAIN_OPEN_GRAPHICS_FD. +func (l *Libvirt) DomainOpenGraphicsFd(Dom Domain, Idx uint32, Flags DomainOpenGraphicsFlags) (err error) { + var buf bytes.Buffer + + args := DomainOpenGraphicsFdArgs { + Dom: Dom, + Idx: Idx, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(343, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectGetAllDomainStats is the go wrapper for REMOTE_PROC_CONNECT_GET_ALL_DOMAIN_STATS. +func (l *Libvirt) ConnectGetAllDomainStats(Doms []Domain, Stats uint32, Flags ConnectGetAllDomainStatsFlags) (rRetStats []DomainStatsRecord, err error) { + var buf bytes.Buffer + + args := ConnectGetAllDomainStatsArgs { + Doms: Doms, + Stats: Stats, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(344, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // RetStats: []DomainStatsRecord + _, err = dec.Decode(&rRetStats) + if err != nil { + return + } + + return +} + +// DomainBlockCopy is the go wrapper for REMOTE_PROC_DOMAIN_BLOCK_COPY. +func (l *Libvirt) DomainBlockCopy(Dom Domain, Path string, Destxml string, Params []TypedParam, Flags DomainBlockCopyFlags) (err error) { + var buf bytes.Buffer + + args := DomainBlockCopyArgs { + Dom: Dom, + Path: Path, + Destxml: Destxml, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(345, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackTunable is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TUNABLE. +func (l *Libvirt) DomainEventCallbackTunable() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(346, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeAllocPages is the go wrapper for REMOTE_PROC_NODE_ALLOC_PAGES. 
+func (l *Libvirt) NodeAllocPages(PageSizes []uint32, PageCounts []uint64, StartCell int32, CellCount uint32, Flags NodeAllocPagesFlags) (rRet int32, err error) { + var buf bytes.Buffer + + args := NodeAllocPagesArgs { + PageSizes: PageSizes, + PageCounts: PageCounts, + StartCell: StartCell, + CellCount: CellCount, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(347, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Ret: int32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainEventCallbackAgentLifecycle is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_AGENT_LIFECYCLE. +func (l *Libvirt) DomainEventCallbackAgentLifecycle() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(348, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetFsinfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_FSINFO. +func (l *Libvirt) DomainGetFsinfo(Dom Domain, Flags uint32) (rInfo []DomainFsinfo, rRet uint32, err error) { + var buf bytes.Buffer + + args := DomainGetFsinfoArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(349, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Info: []DomainFsinfo + _, err = dec.Decode(&rInfo) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainDefineXMLFlags is the go wrapper for REMOTE_PROC_DOMAIN_DEFINE_XML_FLAGS. +func (l *Libvirt) DomainDefineXMLFlags(XML string, Flags DomainDefineFlags) (rDom Domain, err error) { + var buf bytes.Buffer + + args := DomainDefineXMLFlagsArgs { + XML: XML, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(350, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Dom: Domain + _, err = dec.Decode(&rDom) + if err != nil { + return + } + + return +} + +// DomainGetIothreadInfo is the go wrapper for REMOTE_PROC_DOMAIN_GET_IOTHREAD_INFO. 
+func (l *Libvirt) DomainGetIothreadInfo(Dom Domain, Flags DomainModificationImpact) (rInfo []DomainIothreadInfo, rRet uint32, err error) { + var buf bytes.Buffer + + args := DomainGetIothreadInfoArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(351, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Info: []DomainIothreadInfo + _, err = dec.Decode(&rInfo) + if err != nil { + return + } + // Ret: uint32 + _, err = dec.Decode(&rRet) + if err != nil { + return + } + + return +} + +// DomainPinIothread is the go wrapper for REMOTE_PROC_DOMAIN_PIN_IOTHREAD. +func (l *Libvirt) DomainPinIothread(Dom Domain, IothreadsID uint32, Cpumap []byte, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainPinIothreadArgs { + Dom: Dom, + IothreadsID: IothreadsID, + Cpumap: Cpumap, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(352, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainInterfaceAddresses is the go wrapper for REMOTE_PROC_DOMAIN_INTERFACE_ADDRESSES. +func (l *Libvirt) DomainInterfaceAddresses(Dom Domain, Source uint32, Flags uint32) (rIfaces []DomainInterface, err error) { + var buf bytes.Buffer + + args := DomainInterfaceAddressesArgs { + Dom: Dom, + Source: Source, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(353, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Ifaces: []DomainInterface + _, err = dec.Decode(&rIfaces) + if err != nil { + return + } + + return +} + +// DomainEventCallbackDeviceAdded is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DEVICE_ADDED. +func (l *Libvirt) DomainEventCallbackDeviceAdded() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(354, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainAddIothread is the go wrapper for REMOTE_PROC_DOMAIN_ADD_IOTHREAD. +func (l *Libvirt) DomainAddIothread(Dom Domain, IothreadID uint32, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainAddIothreadArgs { + Dom: Dom, + IothreadID: IothreadID, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(355, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainDelIothread is the go wrapper for REMOTE_PROC_DOMAIN_DEL_IOTHREAD. 
+func (l *Libvirt) DomainDelIothread(Dom Domain, IothreadID uint32, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainDelIothreadArgs { + Dom: Dom, + IothreadID: IothreadID, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(356, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetUserPassword is the go wrapper for REMOTE_PROC_DOMAIN_SET_USER_PASSWORD. +func (l *Libvirt) DomainSetUserPassword(Dom Domain, User OptString, Password OptString, Flags DomainSetUserPasswordFlags) (err error) { + var buf bytes.Buffer + + args := DomainSetUserPasswordArgs { + Dom: Dom, + User: User, + Password: Password, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(357, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainRename is the go wrapper for REMOTE_PROC_DOMAIN_RENAME. +func (l *Libvirt) DomainRename(Dom Domain, NewName OptString, Flags uint32) (rRetcode int32, err error) { + var buf bytes.Buffer + + args := DomainRenameArgs { + Dom: Dom, + NewName: NewName, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(358, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Retcode: int32 + _, err = dec.Decode(&rRetcode) + if err != nil { + return + } + + return +} + +// DomainEventCallbackMigrationIteration is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_MIGRATION_ITERATION. +func (l *Libvirt) DomainEventCallbackMigrationIteration() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(359, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectRegisterCloseCallback is the go wrapper for REMOTE_PROC_CONNECT_REGISTER_CLOSE_CALLBACK. +func (l *Libvirt) ConnectRegisterCloseCallback() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(360, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectUnregisterCloseCallback is the go wrapper for REMOTE_PROC_CONNECT_UNREGISTER_CLOSE_CALLBACK. +func (l *Libvirt) ConnectUnregisterCloseCallback() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(361, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectEventConnectionClosed is the go wrapper for REMOTE_PROC_CONNECT_EVENT_CONNECTION_CLOSED. 
+func (l *Libvirt) ConnectEventConnectionClosed() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(362, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackJobCompleted is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_JOB_COMPLETED. +func (l *Libvirt) DomainEventCallbackJobCompleted() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(363, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainMigrateStartPostCopy is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_START_POST_COPY. +func (l *Libvirt) DomainMigrateStartPostCopy(Dom Domain, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainMigrateStartPostCopyArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(364, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetPerfEvents is the go wrapper for REMOTE_PROC_DOMAIN_GET_PERF_EVENTS. +func (l *Libvirt) DomainGetPerfEvents(Dom Domain, Flags DomainModificationImpact) (rParams []TypedParam, err error) { + var buf bytes.Buffer + + args := DomainGetPerfEventsArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(365, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + + return +} + +// DomainSetPerfEvents is the go wrapper for REMOTE_PROC_DOMAIN_SET_PERF_EVENTS. +func (l *Libvirt) DomainSetPerfEvents(Dom Domain, Params []TypedParam, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainSetPerfEventsArgs { + Dom: Dom, + Params: Params, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(366, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventCallbackDeviceRemovalFailed is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_DEVICE_REMOVAL_FAILED. +func (l *Libvirt) DomainEventCallbackDeviceRemovalFailed() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(367, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectStoragePoolEventRegisterAny is the go wrapper for REMOTE_PROC_CONNECT_STORAGE_POOL_EVENT_REGISTER_ANY. 
+func (l *Libvirt) ConnectStoragePoolEventRegisterAny(EventID int32, Pool OptStoragePool) (rCallbackID int32, err error) { + var buf bytes.Buffer + + args := ConnectStoragePoolEventRegisterAnyArgs { + EventID: EventID, + Pool: Pool, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(368, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CallbackID: int32 + _, err = dec.Decode(&rCallbackID) + if err != nil { + return + } + + return +} + +// ConnectStoragePoolEventDeregisterAny is the go wrapper for REMOTE_PROC_CONNECT_STORAGE_POOL_EVENT_DEREGISTER_ANY. +func (l *Libvirt) ConnectStoragePoolEventDeregisterAny(CallbackID int32) (err error) { + var buf bytes.Buffer + + args := ConnectStoragePoolEventDeregisterAnyArgs { + CallbackID: CallbackID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(369, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolEventLifecycle is the go wrapper for REMOTE_PROC_STORAGE_POOL_EVENT_LIFECYCLE. +func (l *Libvirt) StoragePoolEventLifecycle() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(370, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainGetGuestVcpus is the go wrapper for REMOTE_PROC_DOMAIN_GET_GUEST_VCPUS. +func (l *Libvirt) DomainGetGuestVcpus(Dom Domain, Flags uint32) (rParams []TypedParam, err error) { + var buf bytes.Buffer + + args := DomainGetGuestVcpusArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(371, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Params: []TypedParam + rParams, err = decodeTypedParams(dec) + if err != nil { + fmt.Println("error decoding typedparams") + return + } + + return +} + +// DomainSetGuestVcpus is the go wrapper for REMOTE_PROC_DOMAIN_SET_GUEST_VCPUS. +func (l *Libvirt) DomainSetGuestVcpus(Dom Domain, Cpumap string, State int32, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetGuestVcpusArgs { + Dom: Dom, + Cpumap: Cpumap, + State: State, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(372, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StoragePoolEventRefresh is the go wrapper for REMOTE_PROC_STORAGE_POOL_EVENT_REFRESH. 
+func (l *Libvirt) StoragePoolEventRefresh() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(373, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectNodeDeviceEventRegisterAny is the go wrapper for REMOTE_PROC_CONNECT_NODE_DEVICE_EVENT_REGISTER_ANY. +func (l *Libvirt) ConnectNodeDeviceEventRegisterAny(EventID int32, Dev OptNodeDevice) (rCallbackID int32, err error) { + var buf bytes.Buffer + + args := ConnectNodeDeviceEventRegisterAnyArgs { + EventID: EventID, + Dev: Dev, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(374, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CallbackID: int32 + _, err = dec.Decode(&rCallbackID) + if err != nil { + return + } + + return +} + +// ConnectNodeDeviceEventDeregisterAny is the go wrapper for REMOTE_PROC_CONNECT_NODE_DEVICE_EVENT_DEREGISTER_ANY. +func (l *Libvirt) ConnectNodeDeviceEventDeregisterAny(CallbackID int32) (err error) { + var buf bytes.Buffer + + args := ConnectNodeDeviceEventDeregisterAnyArgs { + CallbackID: CallbackID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(375, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeDeviceEventLifecycle is the go wrapper for REMOTE_PROC_NODE_DEVICE_EVENT_LIFECYCLE. +func (l *Libvirt) NodeDeviceEventLifecycle() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(376, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// NodeDeviceEventUpdate is the go wrapper for REMOTE_PROC_NODE_DEVICE_EVENT_UPDATE. +func (l *Libvirt) NodeDeviceEventUpdate() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(377, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// StorageVolGetInfoFlags is the go wrapper for REMOTE_PROC_STORAGE_VOL_GET_INFO_FLAGS. +func (l *Libvirt) StorageVolGetInfoFlags(Vol StorageVol, Flags uint32) (rType int8, rCapacity uint64, rAllocation uint64, err error) { + var buf bytes.Buffer + + args := StorageVolGetInfoFlagsArgs { + Vol: Vol, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(378, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Type: int8 + _, err = dec.Decode(&rType) + if err != nil { + return + } + // Capacity: uint64 + _, err = dec.Decode(&rCapacity) + if err != nil { + return + } + // Allocation: uint64 + _, err = dec.Decode(&rAllocation) + if err != nil { + return + } + + return +} + +// DomainEventCallbackMetadataChange is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_CALLBACK_METADATA_CHANGE. 
+func (l *Libvirt) DomainEventCallbackMetadataChange() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(379, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// ConnectSecretEventRegisterAny is the go wrapper for REMOTE_PROC_CONNECT_SECRET_EVENT_REGISTER_ANY. +func (l *Libvirt) ConnectSecretEventRegisterAny(EventID int32, OptSecret OptSecret) (rCallbackID int32, err error) { + var buf bytes.Buffer + + args := ConnectSecretEventRegisterAnyArgs { + EventID: EventID, + OptSecret: OptSecret, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(380, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // CallbackID: int32 + _, err = dec.Decode(&rCallbackID) + if err != nil { + return + } + + return +} + +// ConnectSecretEventDeregisterAny is the go wrapper for REMOTE_PROC_CONNECT_SECRET_EVENT_DEREGISTER_ANY. +func (l *Libvirt) ConnectSecretEventDeregisterAny(CallbackID int32) (err error) { + var buf bytes.Buffer + + args := ConnectSecretEventDeregisterAnyArgs { + CallbackID: CallbackID, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(381, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// SecretEventLifecycle is the go wrapper for REMOTE_PROC_SECRET_EVENT_LIFECYCLE. +func (l *Libvirt) SecretEventLifecycle() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(382, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// SecretEventValueChanged is the go wrapper for REMOTE_PROC_SECRET_EVENT_VALUE_CHANGED. +func (l *Libvirt) SecretEventValueChanged() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(383, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetVcpu is the go wrapper for REMOTE_PROC_DOMAIN_SET_VCPU. +func (l *Libvirt) DomainSetVcpu(Dom Domain, Cpumap string, State int32, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainSetVcpuArgs { + Dom: Dom, + Cpumap: Cpumap, + State: State, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(384, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainEventBlockThreshold is the go wrapper for REMOTE_PROC_DOMAIN_EVENT_BLOCK_THRESHOLD. +func (l *Libvirt) DomainEventBlockThreshold() (err error) { + var buf bytes.Buffer + + var resp <-chan response + resp, err = l.request(385, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetBlockThreshold is the go wrapper for REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD. 
+func (l *Libvirt) DomainSetBlockThreshold(Dom Domain, Dev string, Threshold uint64, Flags uint32) (err error) { + var buf bytes.Buffer + + args := DomainSetBlockThresholdArgs { + Dom: Dom, + Dev: Dev, + Threshold: Threshold, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(386, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainMigrateGetMaxDowntime is the go wrapper for REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_DOWNTIME. +func (l *Libvirt) DomainMigrateGetMaxDowntime(Dom Domain, Flags uint32) (rDowntime uint64, err error) { + var buf bytes.Buffer + + args := DomainMigrateGetMaxDowntimeArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(387, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // Downtime: uint64 + _, err = dec.Decode(&rDowntime) + if err != nil { + return + } + + return +} + +// DomainManagedSaveGetXMLDesc is the go wrapper for REMOTE_PROC_DOMAIN_MANAGED_SAVE_GET_XML_DESC. +func (l *Libvirt) DomainManagedSaveGetXMLDesc(Dom Domain, Flags DomainXMLFlags) (rXML string, err error) { + var buf bytes.Buffer + + args := DomainManagedSaveGetXMLDescArgs { + Dom: Dom, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(388, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + // Return value unmarshaling + rdr := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(rdr) + // XML: string + _, err = dec.Decode(&rXML) + if err != nil { + return + } + + return +} + +// DomainManagedSaveDefineXML is the go wrapper for REMOTE_PROC_DOMAIN_MANAGED_SAVE_DEFINE_XML. +func (l *Libvirt) DomainManagedSaveDefineXML(Dom Domain, Dxml OptString, Flags DomainSaveRestoreFlags) (err error) { + var buf bytes.Buffer + + args := DomainManagedSaveDefineXMLArgs { + Dom: Dom, + Dxml: Dxml, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(389, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + +// DomainSetLifecycleAction is the go wrapper for REMOTE_PROC_DOMAIN_SET_LIFECYCLE_ACTION. 
+func (l *Libvirt) DomainSetLifecycleAction(Dom Domain, Type uint32, Action uint32, Flags DomainModificationImpact) (err error) { + var buf bytes.Buffer + + args := DomainSetLifecycleActionArgs { + Dom: Dom, + Type: Type, + Action: Action, + Flags: Flags, + } + + buf, err = encode(&args) + if err != nil { + return + } + + var resp <-chan response + resp, err = l.request(390, constants.Program, &buf) + if err != nil { + return + } + + r := <-resp + if r.Status != StatusOK { + err = decodeError(r.Payload) + return + } + + return +} + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.go new file mode 100644 index 00000000..7f17dfbb --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.go @@ -0,0 +1,505 @@ +// Copyright 2016 The go-libvirt Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package libvirt is a pure Go implementation of the libvirt RPC protocol. +// For more information on the protocol, see https://libvirt.org/internals/l.html +package libvirt + +// We'll use c-for-go to extract the consts and typedefs from the libvirt +// sources so we don't have to duplicate them here. +//go:generate scripts/gen-consts.sh + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "net" + "net/url" + "sync" + + "github.com/davecgh/go-xdr/xdr2" + "github.com/digitalocean/go-libvirt/internal/constants" +) + +// ErrEventsNotSupported is returned by Events() if event streams +// are unsupported by either QEMU or libvirt. +var ErrEventsNotSupported = errors.New("event monitor is not supported") + +// Libvirt implements libvirt's remote procedure call protocol. +type Libvirt struct { + conn net.Conn + r *bufio.Reader + w *bufio.Writer + mu *sync.Mutex + + // method callbacks + cm sync.Mutex + callbacks map[uint32]chan response + + // event listeners + em sync.Mutex + events map[uint32]chan *DomainEvent + + // next request serial number + s uint32 +} + +// DomainEvent represents a libvirt domain event. +type DomainEvent struct { + CallbackID uint32 + Domain Domain + Event string + Seconds uint64 + Microseconds uint32 + Padding uint8 + Details []byte +} + +// qemuError represents a QEMU process error. +type qemuError struct { + Error struct { + Class string `json:"class"` + Description string `json:"desc"` + } `json:"error"` +} + +// Capabilities returns an XML document describing the host's capabilties. +func (l *Libvirt) Capabilities() ([]byte, error) { + caps, err := l.ConnectGetCapabilities() + return []byte(caps), err +} + +// Connect establishes communication with the libvirt server. +// The underlying libvirt socket connection must be previously established. +func (l *Libvirt) Connect() error { + return l.connect() +} + +// Disconnect shuts down communication with the libvirt server +// and closes the underlying net.Conn. 
+func (l *Libvirt) Disconnect() error { + // close event streams + for id := range l.events { + if err := l.removeStream(id); err != nil { + return err + } + } + + // inform libvirt we're done + if err := l.disconnect(); err != nil { + return err + } + + return l.conn.Close() +} + +// Domains returns a list of all domains managed by libvirt. +func (l *Libvirt) Domains() ([]Domain, error) { + // these are the flags as passed by `virsh`, defined in: + // src/remote/remote_protocol.x # remote_connect_list_all_domains_args + domains, _, err := l.ConnectListAllDomains(1, 3) + return domains, err +} + +// DomainState returns state of the domain managed by libvirt. +func (l *Libvirt) DomainState(dom string) (DomainState, error) { + d, err := l.lookup(dom) + if err != nil { + return DomainNostate, err + } + + state, _, err := l.DomainGetState(d, 0) + return DomainState(state), err +} + +// Events streams domain events. +// If a problem is encountered setting up the event monitor connection +// an error will be returned. Errors encountered during streaming will +// cause the returned event channel to be closed. +func (l *Libvirt) Events(dom string) (<-chan DomainEvent, error) { + d, err := l.lookup(dom) + if err != nil { + return nil, err + } + + payload := struct { + Padding [4]byte + Domain Domain + Event [2]byte + Flags [2]byte + }{ + Padding: [4]byte{0x0, 0x0, 0x1, 0x0}, + Domain: d, + Event: [2]byte{0x0, 0x0}, + Flags: [2]byte{0x0, 0x0}, + } + + buf, err := encode(&payload) + if err != nil { + return nil, err + } + + resp, err := l.request(constants.QEMUConnectDomainMonitorEventRegister, constants.ProgramQEMU, &buf) + if err != nil { + return nil, err + } + + res := <-resp + if res.Status != StatusOK { + err = decodeError(res.Payload) + if err == ErrUnsupported { + return nil, ErrEventsNotSupported + } + + return nil, decodeError(res.Payload) + } + + dec := xdr.NewDecoder(bytes.NewReader(res.Payload)) + + cbID, _, err := dec.DecodeUint() + if err != nil { + return nil, err + } + + stream := make(chan *DomainEvent) + l.addStream(cbID, stream) + c := make(chan DomainEvent) + go func() { + // process events + for e := range stream { + c <- *e + } + }() + + return c, nil +} + +// Migrate synchronously migrates the domain specified by dom, e.g., +// 'prod-lb-01', to the destination hypervisor specified by dest, e.g., +// 'qemu+tcp://example.com/system'. The flags argument determines the +// type of migration and how it will be performed. For more information +// on available migration flags and their meaning, see MigrateFlag*. +func (l *Libvirt) Migrate(dom string, dest string, flags DomainMigrateFlags) error { + _, err := url.Parse(dest) + if err != nil { + return err + } + + d, err := l.lookup(dom) + if err != nil { + return err + } + + // Two unknowns remain here , Libvirt specifies RemoteParameters + // and CookieIn. In testing both values are always set to 0 by virsh + // and the source does not provide clear definitions of their purpose. + // For now, using the same zero'd values as done by virsh will be Good Enough. + destURI := []string{dest} + remoteParams := []TypedParam{} + cookieIn := []byte{} + _, err = l.DomainMigratePerform3Params(d, destURI, remoteParams, cookieIn, flags) + return err +} + +// MigrateSetMaxSpeed set the maximum migration bandwidth (in MiB/s) for a +// domain which is being migrated to another host. Specifying a negative value +// results in an essentially unlimited value being provided to the hypervisor. 
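Migrate and MigrateSetMaxSpeed are typically used together; a hypothetical wrapper is sketched below (the function name, the 128 MiB/s cap and the example URI are illustrative, and the flag constants are generated elsewhere, so the value is left to the caller):

// migrateLive caps migration bandwidth and then runs a synchronous migration.
func migrateLive(l *libvirt.Libvirt, dom, dest string, flags libvirt.DomainMigrateFlags) error {
	// Cap bandwidth in MiB/s; a negative value effectively lifts the limit,
	// as noted above. The cap can also be adjusted while a migration runs.
	if err := l.MigrateSetMaxSpeed(dom, 128); err != nil {
		return err
	}
	// dest is a libvirt connection URI, e.g. "qemu+tcp://target.example.com/system".
	return l.Migrate(dom, dest, flags)
}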
+func (l *Libvirt) MigrateSetMaxSpeed(dom string, speed int64) error { + d, err := l.lookup(dom) + if err != nil { + return err + } + + return l.DomainMigrateSetMaxSpeed(d, uint64(speed), 0) +} + +// Run executes the given QAPI command against a domain's QEMU instance. +// For a list of available QAPI commands, see: +// http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD +func (l *Libvirt) Run(dom string, cmd []byte) ([]byte, error) { + d, err := l.lookup(dom) + if err != nil { + return nil, err + } + + payload := struct { + Domain Domain + Command []byte + Flags uint32 + }{ + Domain: d, + Command: cmd, + Flags: 0, + } + + buf, err := encode(&payload) + if err != nil { + return nil, err + } + + resp, err := l.request(constants.QEMUDomainMonitor, constants.ProgramQEMU, &buf) + if err != nil { + return nil, err + } + + res := <-resp + // check for libvirt errors + if res.Status != StatusOK { + return nil, decodeError(res.Payload) + } + + // check for QEMU process errors + if err = getQEMUError(res); err != nil { + return nil, err + } + + r := bytes.NewReader(res.Payload) + dec := xdr.NewDecoder(r) + data, _, err := dec.DecodeFixedOpaque(int32(r.Len())) + if err != nil { + return nil, err + } + + // drop QMP control characters from start of line, and drop + // any trailing NULL characters from the end + return bytes.TrimRight(data[4:], "\x00"), nil +} + +// Secrets returns all secrets managed by the libvirt daemon. +func (l *Libvirt) Secrets() ([]Secret, error) { + secrets, _, err := l.ConnectListAllSecrets(1, 0) + return secrets, err +} + +// StoragePool returns the storage pool associated with the provided name. +// An error is returned if the requested storage pool is not found. +func (l *Libvirt) StoragePool(name string) (StoragePool, error) { + return l.StoragePoolLookupByName(name) +} + +// StoragePools returns a list of defined storage pools. Pools are filtered by +// the provided flags. See StoragePools*. +func (l *Libvirt) StoragePools(flags ConnectListAllStoragePoolsFlags) ([]StoragePool, error) { + pools, _, err := l.ConnectListAllStoragePools(1, flags) + return pools, err +} + +// Undefine undefines the domain specified by dom, e.g., 'prod-lb-01'. +// The flags argument allows additional options to be specified such as +// cleaning up snapshot metadata. For more information on available +// flags, see DomainUndefine*. +func (l *Libvirt) Undefine(dom string, flags DomainUndefineFlagsValues) error { + d, err := l.lookup(dom) + if err != nil { + return err + } + + return l.DomainUndefineFlags(d, flags) +} + +// Destroy destroys the domain specified by dom, e.g., 'prod-lb-01'. +// The flags argument allows additional options to be specified such as +// allowing a graceful shutdown with SIGTERM than SIGKILL. +// For more information on available flags, see DomainDestroy*. +func (l *Libvirt) Destroy(dom string, flags DomainDestroyFlagsValues) error { + d, err := l.lookup(dom) + if err != nil { + return err + } + + return l.DomainDestroyFlags(d, flags) +} + +// XML returns a domain's raw XML definition, akin to `virsh dumpxml `. +// See DomainXMLFlag* for optional flags. +func (l *Libvirt) XML(dom string, flags DomainXMLFlags) ([]byte, error) { + d, err := l.lookup(dom) + if err != nil { + return nil, err + } + + xml, err := l.DomainGetXMLDesc(d, flags) + return []byte(xml), err +} + +// DefineXML defines a domain, but does not start it. 
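Run, defined above, forwards an arbitrary QAPI command to the domain's QEMU monitor and returns the trimmed JSON reply. A hypothetical wrapper around query-version, the same command the canned test reply later in this diff answers:

// qemuVersion asks a domain's QEMU process for its version over QMP.
// The result is QEMU's JSON document, e.g.
// {"return":{"qemu":{"micro":1,"minor":5,"major":2},"package":""},"id":"libvirt-53"}.
func qemuVersion(l *libvirt.Libvirt, dom string) (string, error) {
	resp, err := l.Run(dom, []byte(`{"execute":"query-version"}`))
	if err != nil {
		return "", err
	}
	return string(resp), nil
}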
+func (l *Libvirt) DefineXML(x []byte, flags DomainDefineFlags) error { + _, err := l.DomainDefineXMLFlags(string(x), flags) + return err +} + +// Version returns the version of the libvirt daemon. +func (l *Libvirt) Version() (string, error) { + ver, err := l.ConnectGetLibVersion() + if err != nil { + return "", err + } + + // The version is provided as an int following this formula: + // version * 1,000,000 + minor * 1000 + micro + // See src/libvirt-host.c # virConnectGetLibVersion + major := ver / 1000000 + ver %= 1000000 + minor := ver / 1000 + ver %= 1000 + micro := ver + + versionString := fmt.Sprintf("%d.%d.%d", major, minor, micro) + return versionString, nil +} + +// Shutdown shuts down a domain. Note that the guest OS may ignore the request. +// If flags is set to 0 then the hypervisor will choose the method of shutdown it considers best. +func (l *Libvirt) Shutdown(dom string, flags DomainShutdownFlagValues) error { + d, err := l.lookup(dom) + if err != nil { + return err + } + + return l.DomainShutdownFlags(d, flags) +} + +// Reboot reboots the domain. Note that the guest OS may ignore the request. +// If flags is set to zero, then the hypervisor will choose the method of shutdown it considers best. +func (l *Libvirt) Reboot(dom string, flags DomainRebootFlagValues) error { + d, err := l.lookup(dom) + if err != nil { + return err + } + + return l.DomainReboot(d, flags) +} + +// Reset resets domain immediately without any guest OS shutdown +func (l *Libvirt) Reset(dom string) error { + d, err := l.lookup(dom) + if err != nil { + return err + } + + return l.DomainReset(d, 0) +} + +// BlockLimit contains a name and value pair for a Get/SetBlockIOTune limit. The +// Name field is the name of the limit (to see a list of the limits that can be +// applied, execute the 'blkdeviotune' command on a VM in virsh). Callers can +// use the QEMUBlockIO... constants below for the Name value. The Value field is +// the limit to apply. +type BlockLimit struct { + Name string + Value uint64 +} + +// SetBlockIOTune changes the per-device block I/O tunables within a guest. +// Parameters are the name of the VM, the name of the disk device to which the +// limits should be applied, and 1 or more BlockLimit structs containing the +// actual limits. +// +// The limits which can be applied here are enumerated in the QEMUBlockIO... +// constants above, and you can also see the full list by executing the +// 'blkdeviotune' command on a VM in virsh. +// +// Example usage: +// SetBlockIOTune("vm-name", "vda", BlockLimit{libvirt.QEMUBlockIOWriteBytesSec, 1000000}) +func (l *Libvirt) SetBlockIOTune(dom string, disk string, limits ...BlockLimit) error { + d, err := l.lookup(dom) + if err != nil { + return err + } + + params := make([]TypedParam, len(limits)) + for ix, limit := range limits { + tpval := NewTypedParamValueUllong(limit.Value) + params[ix] = TypedParam{Field: limit.Name, Value: tpval} + } + + return l.DomainSetBlockIOTune(d, disk, params, uint32(DomainAffectLive)) +} + +// GetBlockIOTune returns a slice containing the current block I/O tunables for +// a disk. +func (l *Libvirt) GetBlockIOTune(dom string, disk string) ([]BlockLimit, error) { + d, err := l.lookup(dom) + if err != nil { + return nil, err + } + + lims, _, err := l.DomainGetBlockIOTune(d, []string{disk}, 32, uint32(TypedParamStringOkay)) + if err != nil { + return nil, err + } + + var limits []BlockLimit + + // now decode each of the returned TypedParams. 
To do this we read the field + // name and type, then use the type information to decode the value. + for _, lim := range lims { + var l BlockLimit + name := lim.Field + switch lim.Value.Get().(type) { + case uint64: + l = BlockLimit{Name: name, Value: lim.Value.Get().(uint64)} + } + limits = append(limits, l) + } + + return limits, nil +} + +// lookup returns a domain as seen by libvirt. +func (l *Libvirt) lookup(name string) (Domain, error) { + return l.DomainLookupByName(name) +} + +// getQEMUError checks the provided response for QEMU process errors. +// If an error is found, it is extracted an returned, otherwise nil. +func getQEMUError(r response) error { + pl := bytes.NewReader(r.Payload) + dec := xdr.NewDecoder(pl) + + s, _, err := dec.DecodeString() + if err != nil { + return err + } + + var e qemuError + if err = json.Unmarshal([]byte(s), &e); err != nil { + return err + } + + if e.Error.Description != "" { + return errors.New(e.Error.Description) + } + + return nil +} + +// New configures a new Libvirt RPC connection. +func New(conn net.Conn) *Libvirt { + l := &Libvirt{ + conn: conn, + s: 0, + r: bufio.NewReader(conn), + w: bufio.NewWriter(conn), + mu: &sync.Mutex{}, + callbacks: make(map[uint32]chan response), + events: make(map[uint32]chan *DomainEvent), + } + + go l.listen() + + return l +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.yml b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.yml new file mode 100644 index 00000000..28c2532f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.yml @@ -0,0 +1,59 @@ +# Configuration file for c-for-go, which go-libvirt uses to translate the const +# and type definitions from the C-language sources in the libvirt project into +# Go. This file is used by the c-for-go binary (github.com/xlab/c-for-go), which +# is called when 'go generate' is run. See libvirt.go for the command line used. +--- +GENERATOR: + PackageName: libvirt + PackageLicense: | + Copyright 2017 The go-libvirt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + Includes: [] + +PARSER: + # We can't use environment variables here, but we don't want to process the + # libvirt version installed in the system folders (if any). Instead we'll + # rely on our caller to link the libvirt source directory to lv_source/, and + # run on that code. This isn't ideal, but changes to c-for-go are needed to + # fix it. + IncludePaths: [./lv_source/include] + SourcesPaths: + - libvirt/libvirt.h + +TRANSLATOR: + ConstRules: + defines: eval + Rules: + global: + - {action: accept, from: "^vir"} + post-global: + - {action: replace, from: "^vir"} + - {load: snakecase} + # Follow golint's capitalization conventions. 
+ - {action: replace, from: "Api([A-Z]|$)", to: "API$1"} + - {action: replace, from: "Cpu([A-Z]|$)", to: "CPU$1"} + - {action: replace, from: "Dns([A-Z]|$)", to: "DNS$1"} + - {action: replace, from: "Eof([A-Z]|$)", to: "EOF$1"} + - {action: replace, from: "Id([A-Z]|$)", to: "ID$1"} + - {action: replace, from: "Ip([A-Z]|$)", to: "IP$1"} + - {action: replace, from: "Tls([A-Z]|$)", to: "TLS$1"} + - {action: replace, from: "Uuid([A-Z]|$)", to: "UUID$1"} + - {action: replace, from: "Uri([A-Z]|$)", to: "URI$1"} + - {action: replace, from: "Vcpu([A-Z]|$)", to: "VCPU$1"} + - {action: replace, from: "Xml([A-Z]|$)", to: "XML$1"} + const: + - {action: accept, from: "^VIR_"} + # Special case to prevent a collision with a type: + - {action: replace, from: "^VIR_DOMAIN_JOB_OPERATION", to: "VIR_DOMAIN_JOB_OPERATION_STR"} + - {transform: lower} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirtd.conf b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirtd.conf new file mode 100644 index 00000000..d416d7ba --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirtd.conf @@ -0,0 +1,7 @@ +# libvirtd configuration for travis-ci +listen_tls = 0 +listen_tcp = 1 +tcp_port = "16509" +listen_addr = "127.0.0.1" +auth_unix_rw = "none" +auth_tcp = "none" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirttest/libvirt.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirttest/libvirt.go new file mode 100644 index 00000000..8d650390 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirttest/libvirt.go @@ -0,0 +1,649 @@ +// Copyright 2016 The go-libvirt Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package libvirttest provides a mock libvirt server for RPC testing. 
+package libvirttest + +import ( + "encoding/binary" + "net" + "sync/atomic" + + "fmt" + "os" + + "github.com/digitalocean/go-libvirt/internal/constants" +) + +var testDomainResponse = []byte{ + 0x00, 0x00, 0x00, 0x38, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x17, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // domain name ("test") + 0x00, 0x00, 0x00, 0x04, 0x74, 0x65, 0x73, 0x74, + + // uuid (dc229f87d4de47198cfd2e21c6105b01) + 0xdc, 0x22, 0x9f, 0x87, 0xd4, 0xde, 0x47, 0x19, + 0x8c, 0xfd, 0x2e, 0x21, 0xc6, 0x10, 0x5b, 0x01, + + // domain id (14) + 0x00, 0x00, 0x00, 0x0e, +} + +var testRegisterEvent = []byte{ + 0x00, 0x00, 0x00, 0x20, // length + 0x20, 0x00, 0x80, 0x87, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x04, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + 0x00, 0x00, 0x00, 0x01, // callback id +} + +var testDeregisterEvent = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x87, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x05, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testAuthReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x42, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testConnectReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x01, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testDisconnectReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x02, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testMigrateReply = []byte{ + 0x00, 0x00, 0x00, 0x20, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x01, 0x31, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // cookie out: 0 + 0x00, 0x00, 0x00, 0x00, +} + +var testRunReply = []byte{ + 0x00, 0x00, 0x00, 0x74, // length + 0x20, 0x00, 0x80, 0x87, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x01, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // {"return":{"qemu":{"micro":1,"minor":5,"major":2},"package":""},"id":"libvirt-53"} + 0x00, 0x00, 0x00, 0x52, 0x7b, 0x22, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x22, 0x3a, 0x7b, 0x22, + 0x71, 0x65, 0x6d, 0x75, 0x22, 0x3a, 0x7b, 0x22, + 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x22, 0x3a, 0x31, + 0x2c, 0x22, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, + 0x3a, 0x35, 0x2c, 0x22, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x22, 0x3a, 0x32, 0x7d, 0x2c, 0x22, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x22, 0x3a, + 0x22, 0x22, 0x7d, 0x2c, 0x22, 0x69, 0x64, 0x22, + 0x3a, 0x22, 0x6c, 0x69, 0x62, 0x76, 0x69, 0x72, + 0x74, 0x2d, 0x35, 0x33, 0x22, 0x7d, + + // All trailing NULL characters should be removed + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, +} + +var testRunReplyFail = []byte{ + 0x00, 0x00, 0x00, 0x8c, // length + 0x20, 0x00, 0x80, 0x87, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x01, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x0a, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // {"id":"libvirt-68","error":{"class":"CommandNotFound","desc":"The command drive-foo has not been found"}}` + 0x00, 0x00, 0x00, 0x69, 0x7b, 0x22, 0x69, 0x64, + 0x22, 0x3a, 0x22, 0x6c, 0x69, 0x62, 0x76, 0x69, + 0x72, 0x74, 0x2d, 0x36, 0x38, 0x22, 0x2c, 0x22, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3a, 0x7b, + 0x22, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x22, 0x3a, + 0x22, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x22, 0x2c, 0x22, 0x64, 0x65, 0x73, 0x63, 0x22, + 0x3a, 0x22, 0x54, 0x68, 0x65, 0x20, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x20, 0x64, 0x72, + 0x69, 0x76, 0x65, 0x2d, 0x66, 0x6f, 0x6f, 0x20, + 0x68, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, + 0x62, 0x65, 0x65, 0x6e, 0x20, 0x66, 0x6f, 0x75, + 0x6e, 0x64, 0x22, 0x7d, 0x7d, 0x00, 0x00, 0x00, +} + +var testSetSpeedReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xcf, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testDomainsReply = []byte{ + 0x00, 0x00, 0x00, 0x6c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x01, 0x11, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // struct of domains + 0x00, 0x00, 0x00, 0x02, + + // first domain + // name - aaaaaaa-1 + 0x00, 0x00, 0x00, 0x09, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x2d, 0x31, 0x00, 0x00, 0x00, + // uuid - dc:32:9f:87:d4:de:47:19:8c:fd:2e:21:c6:10:5b:01 + 0xdc, 0x32, 0x9f, 0x87, 0xd4, 0xde, 0x47, 0x19, + 0x8c, 0xfd, 0x2e, 0x21, 0xc6, 0x10, 0x5b, 0x01, + // id + 0x00, 0x00, 0x00, 0x01, + + // second domain + // name - aaaaaaa-2 + 0x00, 0x00, 0x00, 0x09, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x2d, 0x32, 0x00, 0x00, 0x00, + // uuid - dc:22:9f:87:d4:de:47:19:8c:fd:2e:21:c6:10:5b:01 + 0xdc, 0x22, 0x9f, 0x87, 0xd4, 0xde, 0x47, 0x19, 0x8c, + 0xfd, 0x2e, 0x21, 0xc6, 0x10, 0x5b, 0x01, 0x00, 0x00, + // id + 0x00, 0x02, 0x00, + + // count of domains returned + 0x00, 0x00, 0x02, +} + +var testDomainMemoryStatsReply = []byte{ + 0x00, 0x00, 0x00, 0x38, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x9f, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // tag 6 val 1048576 + // tag 7 val 91272 + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x06, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x07, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x64, 0x88, +} + +var testDomainStateReply = []byte{ + 0x00, 0x00, 0x00, 0x24, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xd4, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + 0x00, 0x00, 0x00, 0x01, // state + 0x00, 0x00, 0x00, 0x01, // reason +} + +var testSecretsReply = []byte{ + 0x00, 0x00, 0x00, 0x40, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 
0x00, 0x00, 0x01, 0x1f, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // list of secrets + 0x00, 0x00, 0x00, 0x01, + + // first secret + // UUID: 19fdc2f2fa64-46f3bacf42a8aafca6dd + 0x19, 0xfd, 0xc2, 0xf2, 0xfa, 0x64, 0x46, 0xf3, + 0xba, 0xcf, 0x42, 0xa8, 0xaa, 0xfc, 0xa6, 0xdd, + + // usage type: (1, volume) + 0x00, 0x00, 0x00, 0x01, + + // usage id: "/tmp" + 0x00, 0x00, 0x00, 0x04, 0x2f, 0x74, 0x6d, 0x70, + + // end of secrets + 0x00, 0x00, 0x00, 0x01, +} + +var testStoragePoolLookup = []byte{ + 0x00, 0x00, 0x00, 0x38, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x54, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + // pool: name = default + 0x00, 0x00, 0x00, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x00, + + // uuid = bb30a11c084648278bba3e6b5cf1b65f + 0xbb, 0x30, 0xa1, 0x1c, 0x08, 0x46, 0x48, 0x27, + 0x8b, 0xba, 0x3e, 0x6b, 0x5c, 0xf1, 0xb6, 0x5f, +} + +var testStoragePoolRefresh = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x53, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testListPoolsReply = []byte{ + 0x00, 0x00, 0x00, 0x40, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x01, 0x19, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + 0x00, 0x00, 0x00, 0x01, // pools + + // first pool, name: "default" + 0x00, 0x00, 0x00, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x00, + + // uuid: bb30a11c084648278bba3e6b5cf1b65f + 0xbb, 0x30, 0xa1, 0x1c, 0x08, 0x46, 0x48, 0x27, + 0x8b, 0xba, 0x3e, 0x6b, 0x5c, 0xf1, 0xb6, 0x5f, + + 0x00, 0x00, 0x00, 0x01, // count +} + +var testUndefineReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xe7, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testDestroyReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xea, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testVersionReply = []byte{ + 0x00, 0x00, 0x00, 0x24, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0x9d, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x4d, 0xfc, // version (1003004) +} + +var testDefineXML = []byte{ + 0x00, 0x00, 0x00, 0x38, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x01, 0x5e, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + 0x00, 0x00, 0x00, 0x04, // dom + 0x74, 0x65, 0x73, 0x74, // name + // uuid + 0xaf, 0xc2, 0xef, 0x71, 0x66, 0xe0, 0x45, 0xa7, + 0xa5, 0xec, 0xd8, 0xba, 0x1e, 0xa8, 0x17, 0x7d, + 0xff, 0xff, 0xff, 0xff, // id +} + +var testCreateWithFlags = []byte{ + 0x00, 0x00, 0x00, 0x38, // length + 0x20, 0x00, 0x80, 0x86, 
// program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x01, 0x5e, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + 0x00, 0x00, 0x00, 0x04, // dom + 0x74, 0x65, 0x73, 0x74, // name + // uuid + 0xaf, 0xc2, 0xef, 0x71, 0x66, 0xe0, 0x45, 0xa7, + 0xa5, 0xec, 0xd8, 0xba, 0x1e, 0xa8, 0x17, 0x7d, + 0xff, 0xff, 0xff, 0xff, // id +} + +var testShutdownReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xea, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testRebootReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xea, // procedure + 0x00, 0x00, 0x00, 0x01, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +var testSetBlockIoTuneReply = []byte{ + 0x00, 0x00, 0x00, 0x1c, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xfc, // procedure + 0x00, 0x00, 0x00, 0x00, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status +} + +// This result block was obtained by calling `fmt.Printf("%#v", r.Payload)` on +// the result returned by an actual call to GetBlockIoTune, and then adding the +// standard header to the beginning. The length parameter has to be correct! +var testGetBlockIoTuneReply = []byte{ + 0x00, 0x00, 0x03, 0x00, // length + 0x20, 0x00, 0x80, 0x86, // program + 0x00, 0x00, 0x00, 0x01, // version + 0x00, 0x00, 0x00, 0xfd, // procedure + 0x00, 0x00, 0x00, 0x00, // type + 0x00, 0x00, 0x00, 0x00, // serial + 0x00, 0x00, 0x00, 0x00, // status + + 0x0, 0x0, 0x0, 0x14, // 14 TypedParams follow + + 0x0, 0x0, 0x0, 0xf, // field name is 15 bytes, padded to a multiple of 4 + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x0, + 0x0, 0x0, 0x0, 0x4, // type + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // value + + 0x0, 0x0, 0x0, 0xe, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0xf, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xa1, 0x20, + + 0x0, 0x0, 0x0, 0xe, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0xd, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0xe, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x13, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x12, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x13, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x79, 0x74, 
0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc3, 0x50, + + 0x0, 0x0, 0x0, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x11, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x12, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0xd, + 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x1a, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x19, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x1a, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, + + 0x0, 0x0, 0x0, 0x19, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x18, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0x19, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x69, 0x6f, 0x70, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + + 0x0, 0x0, 0x0, 0xa, // This is field "group_name", a string (type 7), whose value is "somename" + 0x67, 0x72, 0x6F, 0x75, 0x70, 0x5F, 0x6E, 0x61, 0x6D, 0x65, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x7, + 0x0, 0x0, 0x0, 0x8, + 0x73, 0x6F, 0x6D, 0x65, 0x6E, 0x61, 0x6D, 0x65, + + 0x0, 0x0, 0x0, 0x0, // End of TypedParams +} + +// MockLibvirt provides a mock libvirt server for testing. +type MockLibvirt struct { + net.Conn + Test net.Conn + Fail bool + serial uint32 +} + +// New creates a new mock Libvirt server. 
+func New() *MockLibvirt { + serv, conn := net.Pipe() + + m := &MockLibvirt{ + Conn: conn, + Test: serv, + } + + go m.handle(serv) + + return m +} + +func (m *MockLibvirt) handle(conn net.Conn) { + for { + // packetLengthSize + headerSize + buf := make([]byte, 28) + conn.Read(buf) + + // extract program + prog := binary.BigEndian.Uint32(buf[4:8]) + + // extract procedure + proc := binary.BigEndian.Uint32(buf[12:16]) + + switch prog { + case constants.Program: + m.handleRemote(proc, conn) + case constants.ProgramQEMU: + m.handleQEMU(proc, conn) + } + } +} + +func (m *MockLibvirt) handleRemote(procedure uint32, conn net.Conn) { + switch procedure { + case constants.ProcAuthList: + conn.Write(m.reply(testAuthReply)) + case constants.ProcStoragePoolRefresh: + conn.Write(m.reply(testStoragePoolRefresh)) + case constants.ProcStoragePoolLookupByName: + conn.Write(m.reply(testStoragePoolLookup)) + case constants.ProcConnectOpen: + conn.Write(m.reply(testConnectReply)) + case constants.ProcConnectClose: + conn.Write(m.reply(testDisconnectReply)) + case constants.ProcConnectGetLibVersion: + conn.Write(m.reply(testVersionReply)) + case constants.ProcDomainLookupByName: + conn.Write(m.reply(testDomainResponse)) + case constants.ProcConnectListAllDomains: + conn.Write(m.reply(testDomainsReply)) + case constants.ProcConnectListAllStoragePools: + conn.Write(m.reply(testListPoolsReply)) + case constants.ProcConnectListAllSecrets: + conn.Write(m.reply(testSecretsReply)) + case constants.ProcDomainGetState: + conn.Write(m.reply(testDomainStateReply)) + case constants.ProcDomainMemoryStats: + conn.Write(m.reply(testDomainMemoryStatsReply)) + case constants.ProcDomainMigrateSetMaxSpeed: + conn.Write(m.reply(testSetSpeedReply)) + case constants.ProcDomainMigratePerform3Params: + conn.Write(m.reply(testMigrateReply)) + case constants.ProcDomainUndefineFlags: + conn.Write(m.reply(testUndefineReply)) + case constants.ProcDomainDestroyFlags: + conn.Write(m.reply(testDestroyReply)) + case constants.ProcDomainDefineXMLFlags: + conn.Write(m.reply(testDefineXML)) + case constants.ProcDomainReboot: + conn.Write(m.reply(testRebootReply)) + case constants.ProcDomainReset: + conn.Write(m.reply(testRebootReply)) + case constants.ProcDomainCreateWithFlags: + conn.Write(m.reply(testCreateWithFlags)) + case constants.ProcDomainShutdownFlags: + conn.Write(m.reply(testShutdownReply)) + case constants.ProcDomainSetBlockIOTune: + conn.Write(m.reply(testSetBlockIoTuneReply)) + case constants.ProcDomainGetBlockIOTune: + conn.Write(m.reply(testGetBlockIoTuneReply)) + default: + fmt.Fprintln(os.Stderr, "unknown procedure", procedure) + } +} + +func (m *MockLibvirt) handleQEMU(procedure uint32, conn net.Conn) { + switch procedure { + case constants.QEMUConnectDomainMonitorEventRegister: + conn.Write(m.reply(testRegisterEvent)) + case constants.QEMUConnectDomainMonitorEventDeregister: + conn.Write(m.reply(testDeregisterEvent)) + case constants.QEMUDomainMonitor: + if m.Fail { + conn.Write(m.reply(testRunReplyFail)) + } else { + conn.Write(m.reply(testRunReply)) + } + } +} + +// reply automatically injects the correct serial +// number into the provided response buffer. 
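Because MockLibvirt embeds one end of a net.Pipe, it satisfies net.Conn and can be handed straight to libvirt.New. A hypothetical test sketch against the canned version reply above, which encodes 1003004 and should therefore decode to "1.3.4" by the formula in libvirt.go:

// Assumes: import ("testing"; "github.com/digitalocean/go-libvirt";
// "github.com/digitalocean/go-libvirt/libvirttest")
func TestVersionAgainstMock(t *testing.T) {
	conn := libvirttest.New() // in-memory mock server
	l := libvirt.New(conn)

	if err := l.Connect(); err != nil {
		t.Fatalf("connect: %v", err)
	}

	got, err := l.Version()
	if err != nil {
		t.Fatalf("version: %v", err)
	}
	if want := "1.3.4"; got != want {
		t.Errorf("version = %q, want %q", got, want)
	}
}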
+func (m *MockLibvirt) reply(buf []byte) []byte { + atomic.AddUint32(&m.serial, 1) + binary.BigEndian.PutUint32(buf[20:24], m.serial) + + return buf +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/rpc.go b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/rpc.go new file mode 100644 index 00000000..0e2bbf07 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/rpc.go @@ -0,0 +1,445 @@ +// Copyright 2016 The go-libvirt Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libvirt + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "strings" + "sync/atomic" + + "github.com/davecgh/go-xdr/xdr2" + "github.com/digitalocean/go-libvirt/internal/constants" +) + +// ErrUnsupported is returned if a procedure is not supported by libvirt +var ErrUnsupported = errors.New("unsupported procedure requested") + +// request and response types +const ( + // Call is used when making calls to the remote server. + Call = iota + + // Reply indicates a server reply. + Reply + + // Message is an asynchronous notification. + Message + + // Stream represents a stream data packet. + Stream + + // CallWithFDs is used by a client to indicate the request has + // arguments with file descriptors. + CallWithFDs + + // ReplyWithFDs is used by a server to indicate the request has + // arguments with file descriptors. + ReplyWithFDs +) + +// request and response statuses +const ( + // StatusOK is always set for method calls or events. + // For replies it indicates successful completion of the method. + // For streams it indicates confirmation of the end of file on the stream. + StatusOK = iota + + // StatusError for replies indicates that the method call failed + // and error information is being returned. For streams this indicates + // that not all data was sent and the stream has aborted. + StatusError + + // StatusContinue is only used for streams. + // This indicates that further data packets will be following. + StatusContinue +) + +// header is a libvirt rpc packet header +type header struct { + // Program identifier + Program uint32 + + // Program version + Version uint32 + + // Remote procedure identifier + Procedure uint32 + + // Call type, e.g., Reply + Type uint32 + + // Call serial number + Serial uint32 + + // Request status, e.g., StatusOK + Status uint32 +} + +// packet represents a RPC request or response. +type packet struct { + // Size of packet, in bytes, including length. 
+ // Len + Header + Payload + Len uint32 + Header header +} + +// internal rpc response +type response struct { + Payload []byte + Status uint32 +} + +// libvirt error response +type libvirtError struct { + Code uint32 + DomainID uint32 + Padding uint8 + Message string + Level uint32 +} + +func (l *Libvirt) connect() error { + payload := struct { + Padding [3]byte + Name string + Flags uint32 + }{ + Padding: [3]byte{0x1, 0x0, 0x0}, + Name: "qemu:///system", + Flags: 0, + } + + buf, err := encode(&payload) + if err != nil { + return err + } + + // libvirt requires that we call auth-list prior to connecting, + // event when no authentication is used. + resp, err := l.request(constants.ProcAuthList, constants.Program, &buf) + if err != nil { + return err + } + + r := <-resp + if r.Status != StatusOK { + return decodeError(r.Payload) + } + + resp, err = l.request(constants.ProcConnectOpen, constants.Program, &buf) + if err != nil { + return err + } + + r = <-resp + if r.Status != StatusOK { + return decodeError(r.Payload) + } + + return nil +} + +func (l *Libvirt) disconnect() error { + resp, err := l.request(constants.ProcConnectClose, constants.Program, nil) + if err != nil { + return err + } + + r := <-resp + if r.Status != StatusOK { + return decodeError(r.Payload) + } + + return nil +} + +// listen processes incoming data and routes +// responses to their respective callback handler. +func (l *Libvirt) listen() { + for { + // response packet length + length, err := pktlen(l.r) + if err != nil { + // When the underlying connection EOFs or is closed, stop + // this goroutine + if err == io.EOF || strings.Contains(err.Error(), "use of closed network connection") { + return + } + + // invalid packet + continue + } + + // response header + h, err := extractHeader(l.r) + if err != nil { + // invalid packet + continue + } + + // payload: packet length minus what was previously read + size := int(length) - (constants.PacketLengthSize + constants.HeaderSize) + buf := make([]byte, size) + _, err = io.ReadFull(l.r, buf) + if err != nil { + // invalid packet + continue + } + + // route response to caller + l.route(h, buf) + } +} + +// callback sends rpc responses to their respective caller. +func (l *Libvirt) callback(id uint32, res response) { + l.cm.Lock() + c, ok := l.callbacks[id] + l.cm.Unlock() + if ok { + c <- res + } + + l.deregister(id) +} + +// route sends incoming packets to their listeners. +func (l *Libvirt) route(h *header, buf []byte) { + // route events to their respective listener + if h.Program == constants.ProgramQEMU && h.Procedure == constants.QEMUDomainMonitorEvent { + l.stream(buf) + return + } + + // send responses to caller + res := response{ + Payload: buf, + Status: h.Status, + } + l.callback(h.Serial, res) +} + +// serial provides atomic access to the next sequential request serial number. +func (l *Libvirt) serial() uint32 { + return atomic.AddUint32(&l.s, 1) +} + +// stream decodes domain events and sends them +// to the respective event listener. +func (l *Libvirt) stream(buf []byte) { + e, err := decodeEvent(buf) + if err != nil { + // event was malformed, drop. + return + } + + // send to event listener + l.em.Lock() + c, ok := l.events[e.CallbackID] + l.em.Unlock() + if ok { + c <- e + } +} + +// addStream configures the routing for an event stream. 
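The callback and stream plumbing above exists to serve one generic call pattern, the one every generated procedure (such as DomainSetLifecycleAction earlier in this diff) repeats: XDR-encode the arguments, enqueue the request under a fresh serial, block on the per-serial channel, and turn a non-OK status into an error. A condensed, hypothetical in-package helper showing that shape (encode, request and decodeError are defined later in this file):

// callNoReturn sketches the common flow for procedures with no return payload.
func (l *Libvirt) callNoReturn(proc uint32, args interface{}) error {
	buf, err := encode(args)
	if err != nil {
		return err
	}

	resp, err := l.request(proc, constants.Program, &buf)
	if err != nil {
		return err
	}

	r := <-resp
	if r.Status != StatusOK {
		return decodeError(r.Payload)
	}
	return nil
}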
+func (l *Libvirt) addStream(id uint32, stream chan *DomainEvent) { + l.em.Lock() + l.events[id] = stream + l.em.Unlock() +} + +// removeStream notifies the libvirt server to stop sending events +// for the provided callback id. Upon successful de-registration the +// callback handler is destroyed. +func (l *Libvirt) removeStream(id uint32) error { + close(l.events[id]) + + payload := struct { + CallbackID uint32 + }{ + CallbackID: id, + } + + buf, err := encode(&payload) + if err != nil { + return err + } + + resp, err := l.request(constants.QEMUConnectDomainMonitorEventDeregister, constants.ProgramQEMU, &buf) + if err != nil { + return err + } + + res := <-resp + if res.Status != StatusOK { + return decodeError(res.Payload) + } + + l.em.Lock() + delete(l.events, id) + l.em.Unlock() + + return nil +} + +// register configures a method response callback +func (l *Libvirt) register(id uint32, c chan response) { + l.cm.Lock() + l.callbacks[id] = c + l.cm.Unlock() +} + +// deregister destroys a method response callback +func (l *Libvirt) deregister(id uint32) { + l.cm.Lock() + close(l.callbacks[id]) + delete(l.callbacks, id) + l.cm.Unlock() +} + +// request performs a libvirt RPC request. +// The returned channel is used by the caller to receive the asynchronous +// call response. The channel is closed once a response has been sent. +func (l *Libvirt) request(proc uint32, program uint32, payload *bytes.Buffer) (<-chan response, error) { + serial := l.serial() + c := make(chan response) + + l.register(serial, c) + + size := constants.PacketLengthSize + constants.HeaderSize + if payload != nil { + size += payload.Len() + } + + p := packet{ + Len: uint32(size), + Header: header{ + Program: program, + Version: constants.ProtocolVersion, + Procedure: proc, + Type: Call, + Serial: serial, + Status: StatusOK, + }, + } + + // write header + l.mu.Lock() + defer l.mu.Unlock() + err := binary.Write(l.w, binary.BigEndian, p) + if err != nil { + return nil, err + } + + // write payload + if payload != nil { + err = binary.Write(l.w, binary.BigEndian, payload.Bytes()) + if err != nil { + return nil, err + } + } + + if err := l.w.Flush(); err != nil { + return nil, err + } + + return c, nil +} + +// encode XDR encodes the provided data. +func encode(data interface{}) (bytes.Buffer, error) { + var buf bytes.Buffer + _, err := xdr.Marshal(&buf, data) + + return buf, err +} + +// decodeError extracts an error message from the provider buffer. +func decodeError(buf []byte) error { + var e libvirtError + + dec := xdr.NewDecoder(bytes.NewReader(buf)) + _, err := dec.Decode(&e) + if err != nil { + return err + } + + if strings.Contains(e.Message, "unknown procedure") { + return ErrUnsupported + } + + return errors.New(e.Message) +} + +// decodeEvent extracts an event from the given byte slice. +// Errors encountered will be returned along with a nil event. +func decodeEvent(buf []byte) (*DomainEvent, error) { + var e DomainEvent + + dec := xdr.NewDecoder(bytes.NewReader(buf)) + _, err := dec.Decode(&e) + if err != nil { + return nil, err + } + + return &e, nil +} + +// pktlen determines the length of an incoming rpc response. +// If an error is encountered reading the provided Reader, the +// error is returned and response length will be 0. 
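pktlen and extractHeader below retry short reads by hand, but each retry reads back into the start of buf, so a short first read could in principle be overwritten before the loop finishes. A hypothetical alternative built on io.ReadFull reads the same four length bytes and either fills the buffer completely or fails:

// readUint32 reads the 4-byte big-endian length prefix using io.ReadFull.
// Assumes: import ("encoding/binary"; "io").
func readUint32(r io.Reader) (uint32, error) {
	buf := make([]byte, 4) // the 4-byte length prefix (constants.PacketLengthSize)
	if _, err := io.ReadFull(r, buf); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint32(buf), nil
}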
+func pktlen(r io.Reader) (uint32, error) { + buf := make([]byte, constants.PacketLengthSize) + + for n := 0; n < cap(buf); { + nn, err := r.Read(buf) + if err != nil { + return 0, err + } + + n += nn + } + + return binary.BigEndian.Uint32(buf), nil +} + +// extractHeader returns the decoded header from an incoming response. +func extractHeader(r io.Reader) (*header, error) { + buf := make([]byte, constants.HeaderSize) + + for n := 0; n < cap(buf); { + nn, err := r.Read(buf) + if err != nil { + return nil, err + } + + n += nn + } + + h := &header{ + Program: binary.BigEndian.Uint32(buf[0:4]), + Version: binary.BigEndian.Uint32(buf[4:8]), + Procedure: binary.BigEndian.Uint32(buf[8:12]), + Type: binary.BigEndian.Uint32(buf[12:16]), + Serial: binary.BigEndian.Uint32(buf[16:20]), + Status: binary.BigEndian.Uint32(buf[20:24]), + } + + return h, nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/vendor.json b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/vendor.json new file mode 100644 index 00000000..a0195e6d --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/vendor.json @@ -0,0 +1,31 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "checksumSHA1": "2u6FcCnyfRnJvUCWpEZZv6Yy+WE=", + "path": "github.com/davecgh/go-xdr/xdr2", + "revision": "e6a2ba005892b6a5b27cb5352f64c2e96942dd28", + "revisionTime": "2016-11-22T19:31:51Z" + }, + { + "checksumSHA1": "r7EVnpz8g88YB0wTes+hs8VdBBo=", + "path": "github.com/digitalocean/go-libvirt", + "revision": "59d541f19311883ad82708651353009fb207d8a9", + "revisionTime": "2018-01-03T20:19:28Z" + }, + { + "checksumSHA1": "NY8di5qB457jytcEX4DVcNw4u8k=", + "path": "github.com/digitalocean/go-libvirt/internal/constants", + "revision": "59d541f19311883ad82708651353009fb207d8a9", + "revisionTime": "2018-01-03T20:19:28Z" + }, + { + "checksumSHA1": "wTAyp0Z2KqjokzJC3hHxMJTilIQ=", + "path": "github.com/digitalocean/go-libvirt/libvirttest", + "revision": "6075ea3c39a182efd22179110e92d4e8a8892d00", + "revisionTime": "2018-03-01T20:00:12Z" + } + ], + "rootPath": "github.com/elastic/beats/metricbeat/module/kvm" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile index 2610e50f..f6a9d30a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/logstash/logstash:6.0.0 +FROM docker.elastic.co/logstash/logstash:6.2.4 COPY healthcheck.sh / ENV XPACK_MONITORING_ENABLED=FALSE diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/config.reference.yml new file mode 100644 index 00000000..70de278f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: logstash + metricsets: ["node", "node_stats"] + enabled: true + period: 10s + hosts: ["localhost:9600"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/config.yml index 7d1e8fc9..0739ea47 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/config.yml @@ -1,6 +1,4 @@ - module: logstash - 
metricsets: ["node", "node_stats"] - enabled: false - period: 10s hosts: ["localhost:9600"] - + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/docs.asciidoc index 833d3b91..bddd411d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/docs.asciidoc @@ -1,2 +1,3 @@ This is the Logstash module. +The default metricsets are `node` and `node_stats`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/fields.yml index fd124eb0..a4bc5088 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/fields.yml @@ -2,7 +2,7 @@ title: "Logstash" description: > Logstash module - release: experimental + release: beta settings: ["ssl"] fields: - name: logstash diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/data.json index d8404cab..5dcd8bca 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/data.json @@ -6,12 +6,12 @@ }, "logstash": { "node": { - "host": "4318d3c307b3", + "host": "5256c6d0f05a", "jvm": { "pid": 1, - "version": "1.8.0_151" + "version": "1.8.0_161" }, - "version": "6.0.0-rc2" + "version": "6.2.3" } }, "metricset": { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/fields.yml index 5ca497fc..5fe3293f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/_meta/fields.yml @@ -2,6 +2,7 @@ type: group description: > node + release: beta fields: - name: host type: keyword diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go index d242622a..bc6456ba 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go @@ -11,9 +11,10 @@ import ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("logstash", "node", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("logstash", "node", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } var ( @@ -32,10 +33,14 @@ type MetricSet struct { // New create a new instance of the MetricSet func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The logstash node metricset is experimental") + cfgwarn.Beta("The logstash node metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/data.json index f36173b3..0d2378cd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/data.json @@ -5,11 +5,13 @@ "name": "host.example.com" }, "logstash": { - "node_stats": { - "events": { - "filtered": 0, - "in": 0, - "out": 0 + "node": { + "stats": { + "events": { + "filtered": 0, + "in": 0, + "out": 0 + } } } }, @@ -17,6 +19,7 @@ "host": "127.0.0.1:9600", "module": "logstash", "name": "node_stats", + "namespace": "logstash.node.stats", "rtt": 115 } } \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/fields.yml index d718ec55..144a2618 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/_meta/fields.yml @@ -1,8 +1,8 @@ -- name: node_stats +- name: node.stats type: group description: > node_stats metrics. - release: experimental + release: beta fields: - name: events type: group diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go index 3ffb1261..54d119a1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go @@ -8,12 +8,20 @@ import ( "github.com/elastic/beats/metricbeat/mb/parse" ) +const ( + moduleName = "logstash" + metricsetName = "node_stats" + namespace = "logstash.node.stats" +) + // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("logstash", "node_stats", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet(moduleName, metricsetName, New, + mb.WithHostParser(hostParser), + mb.WithNamespace(namespace), + mb.DefaultMetricSet(), + ) } var ( @@ -32,11 +40,15 @@ type MetricSet struct { // New create a new instance of the MetricSet func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The logstash node_stats metricset is experimental") + cfgwarn.Beta("The logstash node_stats metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/config.reference.yml new file mode 100644 index 00000000..d3c847d2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: memcached + metricsets: ["stats"] + period: 10s + hosts: ["localhost:11211"] + enabled: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/config.yml index 1b230087..f1f6533d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/config.yml @@ -1,4 +1,2 @@ - module: memcached - metricsets: ["stats"] - period: 10s hosts: ["localhost:11211"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/docs.asciidoc index 3d2c61a9..848df9e5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/docs.asciidoc @@ -1,2 +1,3 @@ This is the Memcached module. These metricsets were tested with Memcached version 1.4.35. +The default metricset is `stats`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/memcached/stats/stats.go b/vendor/github.com/elastic/beats/metricbeat/module/memcached/stats/stats.go index faf23ab5..b951d9fd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/memcached/stats/stats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/memcached/stats/stats.go @@ -11,9 +11,9 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("memcached", "stats", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("memcached", "stats", New, + mb.DefaultMetricSet(), + ) } type MetricSet struct { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/config.reference.yml new file mode 100644 index 00000000..8f2386a2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/config.reference.yml @@ -0,0 +1,17 @@ +- module: mongodb + metricsets: ["dbstats", "status"] + period: 10s + enabled: true + + # The hosts must be passed as MongoDB URLs in the format: + # [mongodb://][user:pass@]host[:port]. + # The username and password can also be set using the respective configuration + # options. 
The credentials in the URL take precedence over the username and + # password configuration options. + hosts: ["localhost:27017"] + + # Username to use when connecting to MongoDB. Empty by default. + #username: user + + # Password to use when connecting to MongoDB. Empty by default. + #password: pass diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/config.yml index fb6b19e7..f22fc378 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/config.yml @@ -1,16 +1,2 @@ - module: mongodb - metricsets: ["dbstats", "status"] - period: 10s - - # The hosts must be passed as MongoDB URLs in the format: - # [mongodb://][user:pass@]host[:port]. - # The username and password can also be set using the respective configuration - # options. The credentials in the URL take precedence over the username and - # password configuration options. hosts: ["localhost:27017"] - - # Username to use when connecting to MongoDB. Empty by default. - #username: user - - # Password to use when connecting to MongoDB. Empty by default. - #password: pass diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/docs.asciidoc index 258ea661..84091262 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/docs.asciidoc @@ -40,6 +40,8 @@ over the username and password configuration options. password: test ---- +The default metricsets are `collstats`, `dbstats` and `status`. + [float] === Compatibility diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/fields.yml index 6a8000ec..8ed17fb6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/fields.yml @@ -3,7 +3,7 @@ description: > Metrics collected from MongoDB servers. short_config: false - release: beta + release: ga fields: - name: mongodb type: group diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/_meta/fields.yml index 84e0b3be..5b255dba 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/_meta/fields.yml @@ -2,6 +2,7 @@ type: group description: > MongoDB collection statistics metrics. 
+ release: ga fields: - name: db type: keyword diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/collstats.go b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/collstats.go index 7fe951fd..53df2803 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/collstats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/collstats/collstats.go @@ -4,7 +4,6 @@ import ( "errors" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/mongodb" @@ -15,9 +14,10 @@ import ( var debugf = logp.MakeDebug("mongodb.collstats") func init() { - if err := mb.Registry.AddMetricSet("mongodb", "collstats", New, mongodb.ParseURL); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("mongodb", "collstats", New, + mb.WithHostParser(mongodb.ParseURL), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -33,8 +33,6 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The %v %v metricset is experimental", base.Module().Name(), base.Name()) - dialInfo, err := mgo.ParseURL(base.HostData().URI) if err != nil { return nil, err diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/_meta/fields.yml index ffb5c6fb..b5c0980f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/_meta/fields.yml @@ -3,7 +3,7 @@ description: > dbstats provides an overview of a particular mongo database. This document is most concerned with data volumes of a database. - release: beta + release: ga fields: - name: avg_obj_size.bytes type: long diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/dbstats.go b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/dbstats.go index 19beedb7..63d74b56 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/dbstats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/dbstats/dbstats.go @@ -6,7 +6,6 @@ import ( "gopkg.in/mgo.v2" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/mongodb" @@ -17,9 +16,10 @@ var debugf = logp.MakeDebug("mongodb.dbstats") // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("mongodb", "dbstats", New, mongodb.ParseURL); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("mongodb", "dbstats", New, + mb.WithHostParser(mongodb.ParseURL), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -35,8 +35,6 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Beta("The %v %v metricset is Beta", base.Module().Name(), base.Name()) - dialInfo, err := mgo.ParseURL(base.HostData().URI) if err != nil { return nil, err diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/_meta/fields.yml index 0d1f296a..c692f345 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/_meta/fields.yml @@ -2,7 +2,7 @@ type: group description: > MongoDB server status metrics. - release: beta + release: ga fields: - name: version type: keyword diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/status.go b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/status.go index 9d3711d5..9dc0510b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/status.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/status/status.go @@ -2,7 +2,6 @@ package status import ( "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/mongodb" @@ -20,9 +19,10 @@ TODOs: var debugf = logp.MakeDebug("mongodb.status") func init() { - if err := mb.Registry.AddMetricSet("mongodb", "status", New, mongodb.ParseURL); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("mongodb", "status", New, + mb.WithHostParser(mongodb.ParseURL), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -38,9 +38,6 @@ type MetricSet struct { // Part of new is also setting up the configuration by processing additional // configuration entries if needed. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - - cfgwarn.Beta("The %v %v metricset is Beta", base.Module().Name(), base.Name()) - dialInfo, err := mgo.ParseURL(base.HostData().URI) if err != nil { return nil, err diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/Dockerfile new file mode 100644 index 00000000..bae8d611 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/Dockerfile @@ -0,0 +1,13 @@ +FROM ubuntu:16.04 + +RUN apt-get update && \ + apt-get install -y munin-node netcat && \ + apt-get clean && rm rm -rf /var/lib/apt/lists/* + +EXPOSE 4949 + +COPY munin-node.conf /etc/munin/munin-node.conf + +HEALTHCHECK --interval=1s --retries=90 CMD nc -z 127.0.0.1 4949 + +CMD munin-node diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/config.reference.yml new file mode 100644 index 00000000..cdef11f3 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/config.reference.yml @@ -0,0 +1,6 @@ +- module: munin + metricsets: ["node"] + enabled: true + period: 10s + hosts: ["localhost:4949"] + node.namespace: node diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/config.yml new file mode 100644 index 00000000..1ea9bf8c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/config.yml @@ -0,0 +1,3 @@ +- module: munin + hosts: ["localhost:4949"] + node.namespace: node diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/docs.asciidoc new file mode 100644 index 00000000..d7e009b4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/docs.asciidoc @@ -0,0 +1,5 @@ +== munin module + +This is the munin module. + +The default metricset is `node`. 
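The registration change repeated throughout this diff replaces the old `mb.Registry.AddMetricSet(...)` + `panic(err)` idiom with `MustAddMetricSet` plus functional options (`mb.WithHostParser`, `mb.DefaultMetricSet`). A minimal sketch of that pattern follows, assuming a hypothetical `example`/`status` metricset; the module, metricset and field names are illustrative and not taken from the diff.

[source,go]
----
package status

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/metricbeat/mb/parse"
)

// hostParser turns configured `hosts` entries into URLs for this hypothetical metricset.
var hostParser = parse.URLHostParserBuilder{DefaultScheme: "http"}.Build()

// init registers the metricset. MustAddMetricSet panics internally on error,
// replacing the explicit `if err := Registry.AddMetricSet(...); err != nil { panic(err) }` form.
func init() {
	mb.Registry.MustAddMetricSet("example", "status", New,
		mb.WithHostParser(hostParser), // parse host strings before New is called
		mb.DefaultMetricSet(),         // run this metricset when none are configured
	)
}

// MetricSet is a minimal metricset that only embeds the base fields.
type MetricSet struct {
	mb.BaseMetricSet
}

// New creates a new instance of the MetricSet.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{base}, nil
}

// Fetch returns a single static event; a real metricset would query m.Host() here.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	return common.MapStr{"up": true}, nil
}
----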
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/env b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/env new file mode 100644 index 00000000..b81c5ee8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/env @@ -0,0 +1,2 @@ +MUNIN_HOST=munin +MUNIN_PORT=4949 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/fields.yml new file mode 100644 index 00000000..35912fe1 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/fields.yml @@ -0,0 +1,13 @@ +- key: munin + title: "Munin" + description: > + experimental[] + + Munin node metrics exporter + release: experimental + fields: + - name: munin + type: group + description: > + munin contains metrics exposed by a munin node agent + fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/munin-node.conf b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/munin-node.conf new file mode 100644 index 00000000..17c6c319 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/_meta/munin-node.conf @@ -0,0 +1,15 @@ +setsid 0 + +ignore_file [\#~]$ +ignore_file DEADJOE$ +ignore_file \.bak$ +ignore_file %$ +ignore_file \.dpkg-(tmp|new|old|dist)$ +ignore_file \.rpm(save|new)$ +ignore_file \.pod$ + +allow .* + +host 0.0.0.0 + +port 4949 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/doc.go b/vendor/github.com/elastic/beats/metricbeat/module/munin/doc.go new file mode 100644 index 00000000..4b993186 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/doc.go @@ -0,0 +1,2 @@ +// Package munin is a Metricbeat module that contains MetricSets. 
+package munin diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/munin.go b/vendor/github.com/elastic/beats/metricbeat/module/munin/munin.go new file mode 100644 index 00000000..ca93abfc --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/munin.go @@ -0,0 +1,115 @@ +package munin + +import ( + "bufio" + "fmt" + "io" + "net" + "strconv" + "strings" + "time" + + "github.com/joeshaw/multierror" + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/common" +) + +const ( + unknownValue = "U" +) + +// Node connection +type Node struct { + conn net.Conn + + writer io.Writer + reader *bufio.Reader +} + +// Connect with a munin node +func Connect(address string, timeout time.Duration) (*Node, error) { + conn, err := net.DialTimeout("tcp", address, timeout) + if err != nil { + return nil, err + } + n := &Node{conn: conn, + writer: conn, + reader: bufio.NewReader(conn), + } + // Cosume and ignore first line returned by munin, it is a comment + // about the node + scanner := bufio.NewScanner(n.reader) + scanner.Scan() + return n, scanner.Err() +} + +// Close node connection relasing its resources +func (n *Node) Close() error { + return n.conn.Close() +} + +// List of items exposed by the node +func (n *Node) List() ([]string, error) { + _, err := io.WriteString(n.writer, "list\n") + if err != nil { + return nil, err + } + + scanner := bufio.NewScanner(n.reader) + scanner.Scan() + return strings.Fields(scanner.Text()), scanner.Err() +} + +// Fetch metrics from munin node +func (n *Node) Fetch(items ...string) (common.MapStr, error) { + var errs multierror.Errors + event := common.MapStr{} + + for _, item := range items { + _, err := io.WriteString(n.writer, "fetch "+item+"\n") + if err != nil { + errs = append(errs, err) + continue + } + + scanner := bufio.NewScanner(n.reader) + scanner.Split(bufio.ScanWords) + for scanner.Scan() { + name := strings.TrimSpace(scanner.Text()) + + // Munin delimites metrics with a dot + if name == "." 
{ + break + } + + name = strings.TrimSuffix(name, ".value") + + if !scanner.Scan() { + if scanner.Err() == nil { + errs = append(errs, errors.New("unexpected EOF when expecting value")) + } + break + } + value := scanner.Text() + + key := fmt.Sprintf("%s.%s", item, name) + + if value == unknownValue { + errs = append(errs, errors.Errorf("unknown value for %s", key)) + continue + } + if f, err := strconv.ParseFloat(value, 64); err == nil { + event.Put(key, f) + continue + } + event.Put(key, value) + } + + if scanner.Err() != nil { + errs = append(errs, scanner.Err()) + } + } + + return event, errs.Err() +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/munin_test.go b/vendor/github.com/elastic/beats/metricbeat/module/munin/munin_test.go new file mode 100644 index 00000000..adf9c8d6 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/munin_test.go @@ -0,0 +1,82 @@ +package munin + +import ( + "bufio" + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" +) + +func dummyNode(response string) *Node { + return &Node{ + writer: &bytes.Buffer{}, + reader: bufio.NewReader(bytes.NewBuffer([]byte(response))), + } +} + +func TestList(t *testing.T) { + n := dummyNode("cpu df uptime\n") + + list, err := n.List() + + assert.Nil(t, err) + + expected := []string{"cpu", "df", "uptime"} + assert.ElementsMatch(t, expected, list) +} + +func TestFetch(t *testing.T) { + response := `user.value 4679836 +nice.value 59278 +system.value 1979168 +idle.value 59957502 +iowait.value 705373 +irq.value 76 +softirq.value 36404 +steal.value 0 +guest.value 0 +. +` + n := dummyNode(response) + + event, err := n.Fetch("cpu", "swap") + + assert.Nil(t, err) + + expected := common.MapStr{ + "cpu": common.MapStr{ + "user": float64(4679836), + "nice": float64(59278), + "system": float64(1979168), + "idle": float64(59957502), + "iowait": float64(705373), + "irq": float64(76), + "softirq": float64(36404), + "steal": float64(0), + "guest": float64(0), + }, + } + assert.Equal(t, expected, event) +} + +func TestFetchUnknown(t *testing.T) { + response := `some.value U +other.value 42 +. +` + n := dummyNode(response) + + event, err := n.Fetch("test") + + assert.NotNil(t, err) + + expected := common.MapStr{ + "test": common.MapStr{ + "other": float64(42), + }, + } + assert.Equal(t, expected, event) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/node/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/munin/node/_meta/docs.asciidoc new file mode 100644 index 00000000..a20a538c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/node/_meta/docs.asciidoc @@ -0,0 +1,58 @@ +=== munin node MetricSet + +This is the node metricset of the module munin. + +[float] +=== Features and configuration + +The node metricset of the munin module collects metrics from a munin node agent +and sends them as events to Elastic. 
+ +[source,yaml] +--- +- module: munin + metricsets: ["node"] + hosts: ["localhost:4949"] + node.namespace: node +--- + +All metrics exposed by a single munin node will be sent in a single event, +grouped by munin items, e.g: + +[source,json] +--- +"munin": { + "node": { + "swap": { + "swap_in": 198609, + "swap_out": 612629 + }, + "cpu": { + "softirq": 680, + "guest": 0, + "user": 158212, + "iowait": 71095, + "irq": 1, + "system": 35906, + "idle": 1185709, + "steal": 0, + "nice": 1633 + } + } +} +--- + +In principle this module can be used to collect metrics from any agent that +implements the munin node protocol (http://guide.munin-monitoring.org/en/latest/master/network-protocol.html). + +[float] +=== Limitations +Currently this module only collects metrics using the basic protocol. It doesn't +support capabilities or automatic dashboards generation based on munin +configuration. + +[float] +=== Exposed fields, dashboards, indexes, etc. +Munin supports a great variety of plugins each of them can be used to obtain different +sets of metrics. Metricbeat cannot know the metrics exposed beforehand, so no field +description or dashboard is generated automatically. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/node/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/munin/node/_meta/fields.yml new file mode 100644 index 00000000..dd5e036b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/node/_meta/fields.yml @@ -0,0 +1 @@ +- release: experimental diff --git a/vendor/github.com/elastic/beats/metricbeat/module/munin/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/munin/node/node.go new file mode 100644 index 00000000..959fd9bc --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/munin/node/node.go @@ -0,0 +1,77 @@ +package node + +import ( + "time" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/munin" +) + +// init registers the MetricSet with the central registry as soon as the program +// starts. The New function will be called later to instantiate an instance of +// the MetricSet for each host defined in the module's configuration. After the +// MetricSet has been created then Fetch will begin to be called periodically. +func init() { + mb.Registry.MustAddMetricSet("munin", "node", New, + mb.DefaultMetricSet(), + ) +} + +// MetricSet holds any configuration or state information. It must implement +// the mb.MetricSet interface. And this is best achieved by embedding +// mb.BaseMetricSet because it implements all of the required mb.MetricSet +// interface methods except for Fetch. +type MetricSet struct { + mb.BaseMetricSet + namespace string + timeout time.Duration +} + +// New creates a new instance of the MetricSet. New is responsible for unpacking +// any MetricSet specific configuration options if there are any. 
+func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + cfgwarn.Experimental("The munin node metricset is experimental.") + + config := struct { + Namespace string `config:"node.namespace" validate:"required"` + }{} + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + namespace: config.Namespace, + timeout: base.Module().Config().Timeout, + }, nil +} + +// Fetch method implements the data gathering +func (m *MetricSet) Fetch() (common.MapStr, error) { + node, err := munin.Connect(m.Host(), m.timeout) + if err != nil { + return nil, err + } + defer node.Close() + + items, err := node.List() + if err != nil { + return nil, err + } + + event, err := node.Fetch(items...) + if err != nil { + return nil, err + } + + // Set dynamic namespace. + _, err = event.Put(mb.NamespaceKey, m.namespace) + if err != nil { + return nil, err + } + + return event, nil + +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/config.reference.yml new file mode 100644 index 00000000..afff5ff7 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/config.reference.yml @@ -0,0 +1,17 @@ +- module: mysql + metricsets: ["status"] + period: 10s + + # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/" + # The username and password can either be set in the DSN or using the username + # and password config options. Those specified in the DSN take precedence. + hosts: ["root:secret@tcp(127.0.0.1:3306)/"] + + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. + #password: secret + + # By setting raw to true, all raw fields from the status metricset will be added to the event. + #raw: false diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/config.yml index afff5ff7..8855531c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/config.yml @@ -1,17 +1,2 @@ - module: mysql - metricsets: ["status"] - period: 10s - - # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/" - # The username and password can either be set in the DSN or using the username - # and password config options. Those specified in the DSN take precedence. - hosts: ["root:secret@tcp(127.0.0.1:3306)/"] - - # Username of hosts. Empty by default. - #username: root - - # Password of hosts. Empty by default. - #password: secret - - # By setting raw to true, all raw fields from the status metricset will be added to the event. - #raw: false + hosts: ["tcp(127.0.0.1:3306)/"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/docs.asciidoc index 22dcdd5f..95dc69a1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/docs.asciidoc @@ -1,6 +1,8 @@ This module periodically fetches metrics from https://www.mysql.com/[MySQL] servers. +The default metricset is `status`. 
+ [float] === Module-specific configuration notes diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/kibana/6/dashboard/Metricbeat-mysql-overview.json b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/kibana/6/dashboard/Metricbeat-mysql-overview.json index d7dd993d..8bba4106 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/kibana/6/dashboard/Metricbeat-mysql-overview.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/kibana/6/dashboard/Metricbeat-mysql-overview.json @@ -120,7 +120,7 @@ "searchSourceJSON": "{\"filter\":[],\"query\":{\"language\":\"lucene\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"default_field\":\"*\",\"query\":\"*\"}}},\"highlightAll\":true,\"version\":true}" }, "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}", - "panelsJSON": "[{\"panelIndex\":\"10\",\"gridData\":{\"x\":0,\"y\":3,\"w\":6,\"h\":3,\"i\":\"10\"},\"id\":\"d7e6bee0-f1f3-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"11\",\"gridData\":{\"x\":0,\"y\":0,\"w\":12,\"h\":3,\"i\":\"11\"},\"id\":\"695a4f90-f1f4-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"13\",\"gridData\":{\"x\":6,\"y\":3,\"w\":6,\"h\":3,\"i\":\"13\"},\"id\":\"124dce60-f1f5-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"14\",\"gridData\":{\"x\":0,\"y\":6,\"w\":6,\"h\":3,\"i\":\"14\"},\"id\":\"aaa326b0-f1f5-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"15\",\"gridData\":{\"x\":6,\"y\":6,\"w\":6,\"h\":3,\"i\":\"15\"},\"id\":\"fb1f3f20-f1f5-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"16\",\"gridData\":{\"x\":6,\"y\":9,\"w\":6,\"h\":3,\"i\":\"16\"},\"id\":\"f5b35930-f1f6-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"17\",\"gridData\":{\"x\":0,\"y\":9,\"w\":6,\"h\":3,\"i\":\"17\"},\"version\":\"7.0.0-alpha1\",\"type\":\"visualization\",\"id\":\"7404feb0-f1f7-11e7-85ab-594b1652e0d1\"}]", + "panelsJSON": "[{\"panelIndex\":\"10\",\"gridData\":{\"x\":0,\"y\":3,\"w\":6,\"h\":3,\"i\":\"10\"},\"id\":\"d7e6bee0-f1f3-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"11\",\"gridData\":{\"x\":0,\"y\":0,\"w\":12,\"h\":3,\"i\":\"11\"},\"id\":\"695a4f90-f1f4-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"13\",\"gridData\":{\"x\":6,\"y\":3,\"w\":6,\"h\":3,\"i\":\"13\"},\"id\":\"124dce60-f1f5-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"14\",\"gridData\":{\"x\":0,\"y\":6,\"w\":6,\"h\":3,\"i\":\"14\"},\"id\":\"aaa326b0-f1f5-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"15\",\"gridData\":{\"x\":6,\"y\":6,\"w\":6,\"h\":3,\"i\":\"15\"},\"id\":\"fb1f3f20-f1f5-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"16\",\"gridData\":{\"x\":6,\"y\":9,\"w\":6,\"h\":3,\"i\":\"16\"},\"id\":\"f5b35930-f1f6-11e7-85ab-594b1652e0d1\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"17\",\"gridData\":{\"x\":0,\"y\":9,\"w\":6,\"h\":3,\"i\":\"17\"},\"version\":\"6.2.4\",\"type\":\"visualization\",\"id\":\"7404feb0-f1f7-11e7-85ab-594b1652e0d1\"}]", "timeRestore": false, "title": "[Metricbeat MySQL] Overview", "version": 1 
@@ -131,5 +131,5 @@ "version": 3 } ], - "version": "7.0.0-alpha1" -} \ No newline at end of file + "version": "6.2.4" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/status.go b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/status.go index e34c8e5e..7e6c98b3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/status.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/status/status.go @@ -22,9 +22,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("mysql", "status", New, mysql.ParseDSN); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("mysql", "status", New, + mb.WithHostParser(mysql.ParseDSN), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching MySQL server status. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.reference.yml index d394da30..e718ff27 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.reference.yml @@ -1,10 +1,10 @@ -#- module: nginx - #metricsets: ["stubstatus"] - #enabled: true - #period: 10s +- module: nginx + metricsets: ["stubstatus"] + enabled: true + period: 10s # Nginx hosts - #hosts: ["http://127.0.0.1"] + hosts: ["http://127.0.0.1"] # Path to server status. Default server-status - #server_status_path: "server-status" + server_status_path: "server-status" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.yml index e3737ed9..5f130609 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/config.yml @@ -1,9 +1,4 @@ - module: nginx - metricsets: ["stubstatus"] - period: 10s - - # Nginx hosts hosts: ["http://127.0.0.1"] - - # Path to server status. Default server-status - #server_status_path: "server-status" + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/docs.asciidoc index a509c6df..ab9ca5ab 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/docs.asciidoc @@ -1,5 +1,7 @@ This module periodically fetches metrics from https://nginx.org/[Nginx] servers. +The default metricset is `stubstatus`. 
+ [float] === Compatibility diff --git a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/kibana/6/dashboard/metricbeat-nginx-overview.json b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/kibana/6/dashboard/metricbeat-nginx-overview.json index 2bd4d184..4450040c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/kibana/6/dashboard/metricbeat-nginx-overview.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/nginx/_meta/kibana/6/dashboard/metricbeat-nginx-overview.json @@ -88,7 +88,7 @@ "searchSourceJSON": "{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" }, "optionsJSON": "{\"darkTheme\":false,\"hidePanelTitles\":false,\"useMargins\":true}", - "panelsJSON": "[{\"panelIndex\":\"1\",\"gridData\":{\"x\":6,\"y\":0,\"w\":6,\"h\":3,\"i\":\"1\"},\"id\":\"555df8a0-f1a1-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"2\",\"gridData\":{\"x\":6,\"y\":3,\"w\":6,\"h\":3,\"i\":\"2\"},\"id\":\"a1d92240-f1a1-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"3\",\"gridData\":{\"x\":0,\"y\":3,\"w\":6,\"h\":3,\"i\":\"3\"},\"id\":\"d763a570-f1a1-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"4\",\"gridData\":{\"x\":0,\"y\":0,\"w\":6,\"h\":3,\"i\":\"4\"},\"id\":\"47a8e0f0-f1a4-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1\"},{\"panelIndex\":\"5\",\"gridData\":{\"x\":0,\"y\":6,\"w\":12,\"h\":3,\"i\":\"5\"},\"version\":\"7.0.0-alpha1\",\"type\":\"visualization\",\"id\":\"dcbffe30-f1a4-11e7-a9ef-93c69af7b129\"}]", + "panelsJSON": "[{\"panelIndex\":\"1\",\"gridData\":{\"x\":6,\"y\":0,\"w\":6,\"h\":3,\"i\":\"1\"},\"id\":\"555df8a0-f1a1-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"2\",\"gridData\":{\"x\":6,\"y\":3,\"w\":6,\"h\":3,\"i\":\"2\"},\"id\":\"a1d92240-f1a1-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"3\",\"gridData\":{\"x\":0,\"y\":3,\"w\":6,\"h\":3,\"i\":\"3\"},\"id\":\"d763a570-f1a1-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"4\",\"gridData\":{\"x\":0,\"y\":0,\"w\":6,\"h\":3,\"i\":\"4\"},\"id\":\"47a8e0f0-f1a4-11e7-a9ef-93c69af7b129\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"5\",\"gridData\":{\"x\":0,\"y\":6,\"w\":12,\"h\":3,\"i\":\"5\"},\"version\":\"6.2.4\",\"type\":\"visualization\",\"id\":\"dcbffe30-f1a4-11e7-a9ef-93c69af7b129\"}]", "timeRestore": false, "title": "[Metricbeat Nginx] Overview", "version": 1 @@ -99,5 +99,5 @@ "version": 1 } ], - "version": "7.0.0-alpha1" -} \ No newline at end of file + "version": "6.2.4" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/nginx/stubstatus/stubstatus.go b/vendor/github.com/elastic/beats/metricbeat/module/nginx/stubstatus/stubstatus.go index 3bf30b4e..ea785528 100755 --- a/vendor/github.com/elastic/beats/metricbeat/module/nginx/stubstatus/stubstatus.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/nginx/stubstatus/stubstatus.go @@ -26,9 +26,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("nginx", "stubstatus", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("nginx", "stubstatus", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching Nginx stub status. 
@@ -40,9 +41,13 @@ type MetricSet struct { // New creates new instance of MetricSet func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/config.reference.yml new file mode 100644 index 00000000..05fe3ca0 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/config.reference.yml @@ -0,0 +1,6 @@ +- module: php_fpm + metricsets: ["pool"] + enabled: true + period: 10s + status_path: "/status" + hosts: ["localhost:8080"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/config.yml index be576451..53d78469 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/config.yml @@ -1,5 +1,4 @@ - module: php_fpm - metricsets: ["pool"] - period: 10s - status_path: "/status" hosts: ["localhost:8080"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/docs.asciidoc index 41b8c142..f84acaa2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/docs.asciidoc @@ -1,6 +1,8 @@ This module periodically fetches metrics from https://php-fpm.org[PHP-FPM] servers. +The default metricset is `pool`. + [float] === Module-specific configuration notes diff --git a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/pool/pool.go b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/pool/pool.go index c59f05c8..0f753beb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/pool/pool.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/pool/pool.go @@ -13,9 +13,10 @@ import ( // init registers the MetricSet with the central registry. func init() { - if err := mb.Registry.AddMetricSet("php_fpm", "pool", New, HostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("php_fpm", "pool", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } const ( @@ -23,8 +24,8 @@ const ( defaultPath = "/status" ) -// HostParser is used for parsing the configured php-fpm hosts. -var HostParser = parse.URLHostParserBuilder{ +// hostParser is used for parsing the configured php-fpm hosts. 
+var hostParser = parse.URLHostParserBuilder{ DefaultScheme: defaultScheme, DefaultPath: defaultPath, QueryParams: "json", @@ -41,9 +42,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The php_fpm pool metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ base, - helper.NewHTTP(base), + http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/config.reference.yml new file mode 100644 index 00000000..f27874ee --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/config.reference.yml @@ -0,0 +1,25 @@ +- module: postgresql + enabled: true + metricsets: + # Stats about every PostgreSQL database + - database + + # Stats about the background writer process's activity + - bgwriter + + # Stats about every PostgreSQL process + - activity + + period: 10s + + # The host must be passed as PostgreSQL URL. Example: + # postgres://localhost:5432?sslmode=disable + # The available parameters are documented here: + # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters + hosts: ["postgres://localhost:5432"] + + # Username to use when connecting to PostgreSQL. Empty by default. + #username: user + + # Password to use when connecting to PostgreSQL. Empty by default. + #password: pass diff --git a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/config.yml index 383c66d9..8adc4408 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/config.yml @@ -1,24 +1,2 @@ - module: postgresql - metricsets: - # Stats about every PostgreSQL database - - database - - # Stats about the background writer process's activity - - bgwriter - - # Stats about every PostgreSQL process - - activity - - period: 10s - - # The host must be passed as PostgreSQL URL. Example: - # postgres://localhost:5432?sslmode=disable - # The available parameters are documented here: - # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters hosts: ["postgres://localhost:5432"] - - # Username to use when connecting to PostgreSQL. Empty by default. - #username: user - - # Password to use when connecting to PostgreSQL. Empty by default. - #password: pass diff --git a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/docs.asciidoc index 8c9ffd9c..053b3312 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/docs.asciidoc @@ -1,6 +1,8 @@ This module periodically fetches metrics from https://www.postgresql.org/[PostgreSQL] servers. +Default metricsets are `activity`, `bgwriter` and `database`. 
+ [float] === Module-specific configuration notes diff --git a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/activity/activity.go b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/activity/activity.go index b8ecfd94..85ab57d4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/activity/activity.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/activity/activity.go @@ -16,9 +16,10 @@ import ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("postgresql", "activity", New, postgresql.ParseURL); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("postgresql", "activity", New, + mb.WithHostParser(postgresql.ParseURL), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the Postgresql MetricSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/bgwriter/bgwriter.go b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/bgwriter/bgwriter.go index 7faa962e..f69305cb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/bgwriter/bgwriter.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/bgwriter/bgwriter.go @@ -17,9 +17,10 @@ import ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("postgresql", "bgwriter", New, postgresql.ParseURL); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("postgresql", "bgwriter", New, + mb.WithHostParser(postgresql.ParseURL), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/database/database.go b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/database/database.go index 13b94e08..270425f9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/database/database.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/database/database.go @@ -16,9 +16,10 @@ import ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("postgresql", "database", New, postgresql.ParseURL); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("postgresql", "database", New, + mb.WithHostParser(postgresql.ParseURL), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.reference.yml new file mode 100644 index 00000000..59680f2a --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.reference.yml @@ -0,0 +1,20 @@ +- module: prometheus + metricsets: ["stats"] + enabled: true + period: 10s + hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + +- module: prometheus + metricsets: ["collector"] + enabled: true + period: 10s + hosts: ["localhost:9090"] + #metrics_path: /metrics + #namespace: example + + # This can be used for service account based authorization: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.yml index 76bee349..1d5f9437 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/config.yml @@ -1,6 +1,11 @@ - module: prometheus - metricsets: ["stats"] period: 10s hosts: ["localhost:9090"] - metrics_path: /metrics #namespace: example + #username: "user" + #password: "secret" + + # This can be used for service account based authorization: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/docs.asciidoc index 446f1951..f15195df 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/docs.asciidoc @@ -1,2 +1,4 @@ This module periodically fetches metrics from https://prometheus.io/docs/[Prometheus]. + +The default metricset is `collector`. 
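The other change applied across the HTTP-based metricsets in this diff (logstash, nginx, php_fpm, prometheus, rabbitmq) is that `helper.NewHTTP` (and, in the collector change that follows, `helper.NewPrometheusClient`) now returns an error, which the constructors propagate instead of discarding. A minimal sketch of a `New` constructor and `Fetch` under that assumption, for a hypothetical metricset; names are illustrative.

[source,go]
----
package example

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/helper"
	"github.com/elastic/beats/metricbeat/mb"
)

// MetricSet embeds the HTTP helper used to query the monitored endpoint.
type MetricSet struct {
	mb.BaseMetricSet
	http *helper.HTTP
}

// New propagates the error from helper.NewHTTP instead of ignoring it,
// mirroring the pattern this diff applies to the HTTP-based metricsets.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	http, err := helper.NewHTTP(base)
	if err != nil {
		return nil, err
	}
	http.SetHeader("Accept", "application/json")

	return &MetricSet{
		BaseMetricSet: base,
		http:          http,
	}, nil
}

// Fetch retrieves the endpoint body; decoding it into fields is left out of this sketch.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	content, err := m.http.FetchContent()
	if err != nil {
		return nil, err
	}
	return common.MapStr{"bytes": len(content)}, nil
}
----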
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector.go index 80165560..5cb3b026 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/collector/collector.go @@ -24,9 +24,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("prometheus", "collector", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("prometheus", "collector", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -46,9 +47,14 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + prometheus, err := helper.NewPrometheusClient(base) + if err != nil { + return nil, err + } + return &MetricSet{ BaseMetricSet: base, - prometheus: helper.NewPrometheusClient(base), + prometheus: prometheus, namespace: config.Namespace, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats.go b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats.go index e17c31a4..5044e970 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/stats/stats.go @@ -23,9 +23,9 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("prometheus", "stats", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("prometheus", "stats", New, + mb.WithHostParser(hostParser), + ) } type MetricSet struct { @@ -36,9 +36,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The prometheus stats metricset is beta") + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } return &MetricSet{ BaseMetricSet: base, - http: helper.NewHTTP(base), + http: http, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/config.reference.yml new file mode 100644 index 00000000..62bf0879 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/config.reference.yml @@ -0,0 +1,8 @@ +- module: rabbitmq + metricsets: ["node", "queue", "connection"] + enabled: true + period: 10s + hosts: ["localhost:15672"] + + #username: guest + #password: guest diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/config.yml index c0343876..b5912718 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/config.yml @@ -1,7 +1,2 @@ - module: rabbitmq - metricsets: ["node", "queue"] - period: 10s hosts: ["localhost:15672"] - - username: guest - password: guest diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/docs.asciidoc index 2dbfe35c..f6fac935 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/docs.asciidoc @@ -1,2 +1,3 @@ The RabbitMQ module uses http://www.rabbitmq.com/management.html[HTTP API] created by the management plugin to collect metrics. 
+The default metricsets are `connection`, `node` and `queue`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/testdata/connection_sample_response.json b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/testdata/connection_sample_response.json new file mode 100644 index 00000000..9f0bd338 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/testdata/connection_sample_response.json @@ -0,0 +1,116 @@ +[ + { + "reductions_details": { + "rate": 94.8 + }, + "reductions": 356914, + "recv_oct_details": { + "rate": 0 + }, + "recv_oct": 3764, + "send_oct_details": { + "rate": 0 + }, + "send_oct": 3840, + "connected_at": 1519397210964, + "client_properties": { + "product": "https://github.com/streadway/amqp", + "version": "β", + "capabilities": { + "consumer_cancel_notify": true, + "connection.blocked": true + } + }, + "channel_max": 65535, + "frame_max": 131072, + "timeout": 10, + "vhost": "/", + "user": "guest", + "protocol": "AMQP 0-9-1", + "ssl_hash": null, + "ssl_cipher": null, + "ssl_key_exchange": null, + "ssl_protocol": null, + "auth_mechanism": "PLAIN", + "peer_cert_validity": null, + "peer_cert_issuer": null, + "peer_cert_subject": null, + "ssl": false, + "peer_host": "::1", + "host": "::1", + "peer_port": 60938, + "port": 5672, + "name": "[::1]:60938 -> [::1]:5672", + "node": "nodename", + "type": "network", + "garbage_collection": { + "minor_gcs": 228, + "fullsweep_after": 65535, + "min_heap_size": 233, + "min_bin_vheap_size": 46422, + "max_heap_size": 0 + }, + "channels": 8, + "state": "running", + "send_pend": 0, + "send_cnt": 376, + "recv_cnt": 376 + }, + { + "reductions_details": { + "rate": 94.8 + }, + "reductions": 354441, + "recv_oct_details": { + "rate": 0 + }, + "recv_oct": 3057, + "send_oct_details": { + "rate": 0 + }, + "send_oct": 3344, + "connected_at": 1519397211051, + "client_properties": { + "product": "https://github.com/streadway/amqp", + "version": "β", + "capabilities": { + "connection.blocked": true, + "consumer_cancel_notify": true + } + }, + "channel_max": 65535, + "frame_max": 131072, + "timeout": 10, + "vhost": "/", + "user": "guest", + "protocol": "AMQP 0-9-1", + "ssl_hash": null, + "ssl_cipher": null, + "ssl_key_exchange": null, + "ssl_protocol": null, + "auth_mechanism": "PLAIN", + "peer_cert_validity": null, + "peer_cert_issuer": null, + "peer_cert_subject": null, + "ssl": false, + "peer_host": "::1", + "host": "::1", + "peer_port": 60940, + "port": 5672, + "name": "[::1]:60940 -> [::1]:5672", + "node": "nodename", + "type": "network", + "garbage_collection": { + "minor_gcs": 197, + "fullsweep_after": 65535, + "min_heap_size": 233, + "min_bin_vheap_size": 46422, + "max_heap_size": 0 + }, + "channels": 2, + "state": "running", + "send_pend": 0, + "send_cnt": 352, + "recv_cnt": 352 + } +] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/data.json new file mode 100644 index 00000000..e6ceea2b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/data.json @@ -0,0 +1,40 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "metricset": { + "host": "localhost:15672", + "module": "rabbitmq", + "name": "connection", + "rtt": 115 + }, + "rabbitmq": { + "connection": { + "channel_max": 0, + "channels": 1, + "frame_max": 4096, + "host": "127.0.0.1", 
+ "name": "127.0.0.1:42256 -\u003e 127.0.0.1:5672", + "node": "rabbit@lake", + "octet_count": { + "received": 1025, + "sent": 1056 + }, + "packet_count": { + "pending": 0, + "received": 50, + "sent": 49 + }, + "peer": { + "host": "127.0.0.1", + "port": 42256 + }, + "port": 5672, + "type": "network", + "user": "guest", + "vhost": "/" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/docs.asciidoc new file mode 100644 index 00000000..5a61046f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/docs.asciidoc @@ -0,0 +1 @@ +This is the `connection` metricset of the RabbitMQ module. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/fields.yml new file mode 100644 index 00000000..083cf9c4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/_meta/fields.yml @@ -0,0 +1,75 @@ +- name: connection + type: group + description: > + connection + release: beta + fields: + - name: name + type: keyword + description: > + The name of the connection with non-ASCII characters escaped as in C. + - name: vhost + type: keyword + description: > + Virtual host name with non-ASCII characters escaped as in C. + - name: user + type: keyword + description: > + User name. + - name: node + type: keyword + description: > + Node name. + - name: channels + type: long + description: > + The number of channels on the connection. + - name: channel_max + type: long + description: > + The maximum number of channels allowed on the connection. + - name: frame_max + type: long + description: > + Maximum permissible size of a frame (in bytes) to negotiate with clients. + format: bytes + - name: type + type: keyword + description: > + Type of the connection. + - name: host + type: keyword + description: > + Server hostname obtained via reverse DNS, or its IP address if reverse DNS failed or was disabled. + - name: peer.host + type: keyword + description: > + Peer hostname obtained via reverse DNS, or its IP address if reverse DNS failed or was not enabled. + - name: port + type: long + description: > + Server port. + - name: peer.port + type: long + description: > + Peer port. + - name: packet_count.sent + type: long + description: > + Number of packets sent on the connection. + - name: packet_count.received + type: long + description: > + Number of packets received on the connection. + - name: packet_count.pending + type: long + description: > + Number of packets pending on the connection. + - name: octet_count.sent + type: long + description: > + Number of octets sent on the connection. + - name: octet_count.received + type: long + description: > + Number of octets received on the connection. 
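The `connection.go` and `data.go` files below map the RabbitMQ management API response into these event fields with the declarative `schema`/`mapstriface` helpers. A small self-contained sketch of that technique follows; it uses only a subset of the fields documented above and an invented input document, not the module's full schema.

[source,go]
----
package main

import (
	"encoding/json"
	"fmt"

	s "github.com/elastic/beats/libbeat/common/schema"
	c "github.com/elastic/beats/libbeat/common/schema/mapstriface"
)

// connSchema declares, for each output field, how to convert the matching key
// of the raw JSON map (c.Str and c.Int handle the type conversion).
var connSchema = s.Schema{
	"name":  c.Str("name"),
	"vhost": c.Str("vhost"),
	"octet_count": s.Object{
		"sent":     c.Int("send_oct"),
		"received": c.Int("recv_oct"),
	},
}

func main() {
	// Invented sample document shaped like one entry of /api/connections.
	raw := []byte(`{"name":"[::1]:60938 -> [::1]:5672","vhost":"/","send_oct":3840,"recv_oct":3764}`)

	var conn map[string]interface{}
	if err := json.Unmarshal(raw, &conn); err != nil {
		panic(err)
	}

	// Apply returns the mapped event plus per-field conversion errors;
	// the errors are ignored here, while data.go below collects them.
	event, _ := connSchema.Apply(conn)
	fmt.Println(event) // nested MapStr with name, vhost and octet_count.{sent,received}
}
----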
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection.go b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection.go new file mode 100644 index 00000000..6050ea5d --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection.go @@ -0,0 +1,62 @@ +package connection + +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/metricbeat/helper" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/api/connections" +) + +var ( + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + }.Build() +) + +func init() { + mb.Registry.MustAddMetricSet("rabbitmq", "connection", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) +} + +// MetricSet for fetching RabbitMQ connections. +type MetricSet struct { + mb.BaseMetricSet + *helper.HTTP +} + +// New creates new instance of MetricSet +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + cfgwarn.Beta("The rabbitmq connection metricset is beta") + + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } + http.SetHeader("Accept", "application/json") + + return &MetricSet{ + base, + http, + }, nil +} + +// Fetch makes an HTTP request to fetch connections metrics from the connections endpoint. +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + content, err := m.HTTP.FetchContent() + + if err != nil { + return nil, err + } + + events, _ := eventsMapping(content) + return events, nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection_integration_test.go new file mode 100644 index 00000000..8d31ba26 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection_integration_test.go @@ -0,0 +1,65 @@ +// +build integration + +package connection + +import ( + "fmt" + "os" + "testing" + + "github.com/elastic/beats/libbeat/tests/compose" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" +) + +func TestData(t *testing.T) { + compose.EnsureUp(t, "rabbitmq") + + f := mbtest.NewEventsFetcher(t, getConfig()) + err := mbtest.WriteEvents(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "rabbitmq", + "metricsets": []string{"connection"}, + "hosts": getTestRabbitMQHost(), + "username": getTestRabbitMQUsername(), + "password": getTestRabbitMQPassword(), + } +} + +const ( + rabbitmqDefaultHost = "localhost" + rabbitmqDefaultPort = "15672" + rabbitmqDefaultUsername = "guest" + rabbitmqDefaultPassword = "guest" +) + +func getTestRabbitMQHost() string { + return fmt.Sprintf("%v:%v", + getenv("RABBITMQ_HOST", rabbitmqDefaultHost), + getenv("RABBITMQ_PORT", rabbitmqDefaultPort), + ) +} + +func getTestRabbitMQUsername() string { + return getenv("RABBITMQ_USERNAME", rabbitmqDefaultUsername) +} + +func getTestRabbitMQPassword() string { + return getenv("RABBITMQ_PASSWORD", rabbitmqDefaultPassword) +} + +func getenv(name, defaultValue string) string { + return strDefault(os.Getenv(name), defaultValue) +} + +func strDefault(a, defaults string) string { + if len(a) == 0 { + return defaults + } + return a +} diff --git 
a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection_test.go b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection_test.go new file mode 100644 index 00000000..b625496f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/connection_test.go @@ -0,0 +1,66 @@ +package connection + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + + "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/stretchr/testify/assert" +) + +func TestFetchEventContents(t *testing.T) { + absPath, err := filepath.Abs("../_meta/testdata/") + + response, err := ioutil.ReadFile(absPath + "/connection_sample_response.json") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Header().Set("Content-Type", "application/json;") + w.Write([]byte(response)) + })) + defer server.Close() + + config := map[string]interface{}{ + "module": "rabbitmq", + "metricsets": []string{"connection"}, + "hosts": []string{server.URL}, + } + + f := mbtest.NewEventsFetcher(t, config) + events, err := f.Fetch() + event := events[0] + if !assert.NoError(t, err) { + t.FailNow() + } + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event.StringToPrint()) + + assert.EqualValues(t, "[::1]:60938 -> [::1]:5672", event["name"]) + assert.EqualValues(t, "/", event["vhost"]) + assert.EqualValues(t, "guest", event["user"]) + assert.EqualValues(t, "nodename", event["node"]) + assert.EqualValues(t, 8, event["channels"]) + assert.EqualValues(t, 65535, event["channel_max"]) + assert.EqualValues(t, 131072, event["frame_max"]) + assert.EqualValues(t, "network", event["type"]) + + packetCount := event["packet_count"].(common.MapStr) + assert.EqualValues(t, 376, packetCount["sent"]) + assert.EqualValues(t, 376, packetCount["received"]) + assert.EqualValues(t, 0, packetCount["pending"]) + + octetCount := event["octet_count"].(common.MapStr) + assert.EqualValues(t, 3840, octetCount["sent"]) + assert.EqualValues(t, 3764, octetCount["received"]) + + assert.EqualValues(t, "::1", event["host"]) + assert.EqualValues(t, 5672, event["port"]) + + peer := event["peer"].(common.MapStr) + assert.EqualValues(t, "::1", peer["host"]) + assert.EqualValues(t, 60938, peer["port"]) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/data.go b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/data.go new file mode 100644 index 00000000..e8faee22 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/connection/data.go @@ -0,0 +1,62 @@ +package connection + +import ( + "encoding/json" + + "github.com/elastic/beats/libbeat/common" + s "github.com/elastic/beats/libbeat/common/schema" + c "github.com/elastic/beats/libbeat/common/schema/mapstriface" + "github.com/elastic/beats/libbeat/logp" +) + +var ( + schema = s.Schema{ + "name": c.Str("name"), + "vhost": c.Str("vhost"), + "user": c.Str("user"), + "node": c.Str("node"), + "channels": c.Int("channels"), + "channel_max": c.Int("channel_max"), + "frame_max": c.Int("frame_max"), + "type": c.Str("type"), + "packet_count": s.Object{ + "sent": c.Int("send_cnt"), + "received": c.Int("recv_cnt"), + "pending": c.Int("send_pend"), + }, + "octet_count": s.Object{ + "sent": c.Int("send_oct"), + "received": c.Int("recv_oct"), + }, + "host": c.Str("host"), + "port": c.Int("port"), + "peer": s.Object{ + 
"host": c.Str("peer_host"), + "port": c.Int("peer_port"), + }, + } +) + +func eventsMapping(content []byte) ([]common.MapStr, error) { + var connections []map[string]interface{} + err := json.Unmarshal(content, &connections) + if err != nil { + logp.Err("Error: ", err) + } + + events := []common.MapStr{} + errors := s.NewErrors() + + for _, node := range connections { + event, errs := eventMapping(node) + events = append(events, event) + errors.AddErrors(errs) + + } + + return events, errors +} + +func eventMapping(connection map[string]interface{}) (common.MapStr, *s.Errors) { + return schema.Apply(connection) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/node/node.go index f10895ee..f24dc13f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/node/node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/node/node.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("rabbitmq", "node", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("rabbitmq", "node", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -32,9 +33,12 @@ type MetricSet struct { } func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The rabbitmq node metricset is experimental") + cfgwarn.Beta("The rabbitmq node metricset is beta") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/data.go b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/data.go index 6c7a04b7..0fdb107d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/data.go @@ -46,10 +46,10 @@ var ( }, "disk": s.Object{ "reads": s.Object{ - "count": c.Int("disk_reads"), + "count": c.Int("disk_reads", s.Optional), }, "writes": s.Object{ - "count": c.Int("disk_writes"), + "count": c.Int("disk_writes", s.Optional), }, }, } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/queue.go b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/queue.go index 688361fe..37582943 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/queue.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/queue/queue.go @@ -21,9 +21,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("rabbitmq", "queue", New, hostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("rabbitmq", "queue", New, + mb.WithHostParser(hostParser), + mb.DefaultMetricSet(), + ) } type MetricSet struct { @@ -32,9 +33,12 @@ type MetricSet struct { } func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The rabbitmq queue metricset is experimental") + cfgwarn.Beta("The rabbitmq queue metricset is beta") - http := helper.NewHTTP(base) + http, err := helper.NewHTTP(base) + if err != nil { + return nil, err + } http.SetHeader("Accept", "application/json") return &MetricSet{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.reference.yml new file mode 100644 index 00000000..33b439ef --- /dev/null +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.reference.yml @@ -0,0 +1,30 @@ +- module: redis + metricsets: ["info", "keyspace"] + enabled: true + period: 10s + + # Redis hosts + hosts: ["127.0.0.1:6379"] + + # Timeout after which time a metricset should return an error + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer than period, as otherwise calls can pile up. + #timeout: 1s + + # Optional fields to be added to each event + #fields: + # datacenter: west + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Filters can be used to reduce the number of fields sent. + #processors: + # - include_fields: + # fields: ["beat", "metricset", "redis.info.stats"] + + # Redis AUTH password. Empty by default. + #password: foobared diff --git a/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.yml index 3f5ea513..e4ad9bb9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/config.yml @@ -1,29 +1,2 @@ - module: redis - metricsets: ["info", "keyspace"] - period: 10s - - # Redis hosts hosts: ["127.0.0.1:6379"] - - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. - #timeout: 1s - - # Optional fields to be added to each event - #fields: - # datacenter: west - - # Network type to be used for redis connection. Default: tcp - #network: tcp - - # Max number of concurrent connections. Default: 10 - #maxconn: 10 - - # Filters can be used to reduce the number of fields sent. - #processors: - # - include_fields: - # fields: ["beat", "metricset", "redis.info.stats"] - - # Redis AUTH password. Empty by default. - #password: foobared diff --git a/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/docs.asciidoc index 167290f8..a0989d2f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/redis/_meta/docs.asciidoc @@ -1,5 +1,7 @@ This module periodically fetches metrics from http://redis.io/[Redis] servers. +The default metricsets are `info` and `keyspace`. + [float] === Module-specific configuration notes diff --git a/vendor/github.com/elastic/beats/metricbeat/module/redis/info/info.go b/vendor/github.com/elastic/beats/metricbeat/module/redis/info/info.go index 225d9263..553a5673 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/redis/info/info.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/redis/info/info.go @@ -17,9 +17,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("redis", "info", New, parse.PassThruHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("redis", "info", New, + mb.WithHostParser(parse.PassThruHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching Redis server information and statistics.
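The `info.go` hunk above is one instance of a change this diff applies across modules: `mb.Registry.AddMetricSet` wrapped in an error check and `panic` becomes `mb.Registry.MustAddMetricSet` with functional options. A condensed sketch of the new shape, for a hypothetical `example` module with a `status` metricset (only the option names come from this diff):

[source,go]
----
package status

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/metricbeat/mb/parse"
)

func init() {
	// MustAddMetricSet panics on registration errors itself, so the explicit
	// `if err := ...; err != nil { panic(err) }` wrapper goes away.
	mb.Registry.MustAddMetricSet("example", "status", New,
		// The host parser is passed as an option rather than a positional argument.
		mb.WithHostParser(parse.PassThruHostParser),
		// Runs this metricset when the module config does not list any metricsets.
		mb.DefaultMetricSet(),
	)
}

// MetricSet is a minimal metricset used only to illustrate registration.
type MetricSet struct {
	mb.BaseMetricSet
}

// New matches the factory signature expected by the registry.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{BaseMetricSet: base}, nil
}

// Fetch returns a stub event so the type satisfies the EventFetcher interface.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	return common.MapStr{"status": "ok"}, nil
}
----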
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/redis/keyspace/keyspace.go b/vendor/github.com/elastic/beats/metricbeat/module/redis/keyspace/keyspace.go index 739846c9..257f7440 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/redis/keyspace/keyspace.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/redis/keyspace/keyspace.go @@ -17,9 +17,10 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("redis", "keyspace", New, parse.PassThruHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("redis", "keyspace", New, + mb.WithHostParser(parse.PassThruHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching Redis server information and statistics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.reference.yml index 17dedccb..6e799cd7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.reference.yml @@ -11,6 +11,7 @@ - uptime # System Uptime #- core # Per CPU core usage #- diskio # Disk IO + #- raid # Raid #- socket # Sockets and connection info (linux only) enabled: true period: 10s @@ -23,6 +24,9 @@ # A list of filesystem types to ignore. The filesystem metricset will not # collect data from filesystems matching any of the specified types, and # fsstats will not include data from these filesystems in its summary stats. + # If not set, types associated to virtual filesystems are automatically + # added when this information is available in the system (e.g. the list of + # `nodev` types in `/proc/filesystem`). #filesystem.ignore_types: [] # These options allow you to filter out all processes that are not @@ -30,7 +34,7 @@ # If both the `by_cpu` and `by_memory` options are used, the union of the two sets # is included. #process.include_top_n: - # + # Set to false to disable this feature and include all processes #enabled: true @@ -56,7 +60,13 @@ # to false. #process.include_cpu_ticks: false + # Raid mount point to monitor + #raid.mount_point: '/' + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. 
#socket.reverse_lookup.enabled: false #socket.reverse_lookup.success_ttl: 60s #socket.reverse_lookup.failure_ttl: 60s + + # Diskio configurations + #diskio.include_devices: [] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml index 409ec1b6..82a6338c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/config.yml @@ -1,20 +1,14 @@ - module: system - period: 10s - metricsets: - - cpu - - load - - memory - - network - - process - - process_summary - #- core - #- diskio - #- socket - processes: ['.*'] process.include_top_n: by_cpu: 5 # include top 5 processes by CPU by_memory: 5 # include top 5 processes by memory +#- module: system +# metricsets: +# - core +# - diskio +# - socket + - module: system period: 1m metricsets: @@ -28,3 +22,9 @@ period: 15m metricsets: - uptime + +#- module: system +# period: 5m +# metricsets: +# - raid +# raid.mount_point: '/' diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc index 65ca79ed..c9fa7301 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/_meta/docs.asciidoc @@ -1,6 +1,9 @@ The System module allows you to monitor your servers. Because the System module always applies to the local server, the `hosts` config option is not needed. +The default metricsets are `cpu`, `load`, `memory`, `network`, `process` and +`process_summary`. + [float] === Dashboard diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/core/core.go b/vendor/github.com/elastic/beats/metricbeat/module/system/core/core.go index 8612f26c..99547f36 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/core/core.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/core/core.go @@ -14,9 +14,9 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("system", "core", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "core", New, + mb.WithHostParser(parse.EmptyHostParser), + ) } // MetricSet for fetching system core metrics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go index 22f514c6..504b1090 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/cpu/cpu.go @@ -14,9 +14,10 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("system", "cpu", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "cpu", New, + mb.WithHostParser(parse.EmptyHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching system CPU metrics. 
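The reference config above adds the commented-out `diskio.include_devices` option (documented in the next hunk; only exact device names match). As a hedged illustration of how such settings travel through metricbeat, the tests in this diff pass module configuration as a plain Go map, so an equivalent sketch looks like this:

[source,go]
----
package main

import "fmt"

// diskioConfig mirrors the map-shaped module config used by metricbeat tests.
// Device names are matched exactly; leaving the list empty keeps all devices.
func diskioConfig(devices ...string) map[string]interface{} {
	cfg := map[string]interface{}{
		"module":     "system",
		"metricsets": []string{"diskio"},
	}
	if len(devices) > 0 {
		cfg["diskio.include_devices"] = devices
	}
	return cfg
}

func main() {
	fmt.Printf("%#v\n", diskioConfig("sda", "sda1"))
}
----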
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/_meta/docs.asciidoc index fe8da1ce..cce8b162 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/_meta/docs.asciidoc @@ -11,4 +11,17 @@ This metricset is available on: [float] === Configuration -There are no configuration options for this metricset. +*`diskio.include_devices`*:: When the `diskio` metricset is enabled, you can use the +`diskio.include_devices` option to define a list of device names to pre-filter the +devices that are reported. Filters only exact matches. +If not set or given `[]` empty array, all disk devices are returned ++ +The following example config returns metrics for devices matching include_devices: ++ +[source,yaml] +---- +metricbeat.modules: +- module: system + metricsets: ["diskio"] + diskio.include_devices: ["sda", "sda1"] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/_meta/testdata/proc/diskstats b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/_meta/testdata/proc/diskstats new file mode 100644 index 00000000..8423e5c2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/_meta/testdata/proc/diskstats @@ -0,0 +1,10 @@ + 8 0 sda 592 0 4730 4924 0 0 0 0 0 1496 4924 + 8 1 sda1 85 0 680 204 0 0 0 0 0 204 204 + 8 2 sda2 81 0 648 1056 0 0 0 0 0 1056 1056 + 8 3 sda3 81 0 648 1044 0 0 0 0 0 1044 1044 + 8 4 sda4 1 0 2 0 0 0 0 0 0 0 0 + 8 5 sda5 80 0 640 1164 0 0 0 0 0 1164 1164 + 8 6 sda6 82 0 656 1296 0 0 0 0 0 1296 1296 + 8 16 sdb 7681508 3044909 164279706 2902996 9494419 12814534 568199104 61998668 0 3842396 64959416 + 8 17 sdb1 3146154 2988374 49078184 833872 861040 7368290 67190704 11224168 0 601740 12075776 + 8 18 sdb2 4535191 56535 115200218 2069120 7703033 5446244 501008400 48646568 0 2440248 50884632 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskio.go b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskio.go index a0a8444d..7e536f34 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskio.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskio.go @@ -12,29 +12,38 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("system", "diskio", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "diskio", New, + mb.WithHostParser(parse.EmptyHostParser), + ) } // MetricSet for fetching system disk IO metrics. type MetricSet struct { mb.BaseMetricSet - statistics *DiskIOStat + statistics *DiskIOStat + includeDevices []string } // New is a mb.MetricSetFactory that returns a new MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - ms := &MetricSet{ - BaseMetricSet: base, - statistics: NewDiskIOStat(), + config := struct { + IncludeDevices []string `config:"diskio.include_devices"` + }{IncludeDevices: []string{}} + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err } - return ms, nil + + return &MetricSet{ + BaseMetricSet: base, + statistics: NewDiskIOStat(), + includeDevices: config.IncludeDevices, + }, nil } // Fetch fetches disk IO metrics from the OS. func (m *MetricSet) Fetch() ([]common.MapStr, error) { - stats, err := disk.IOCounters() + stats, err := disk.IOCounters(m.includeDevices...) 
if err != nil { return nil, errors.Wrap(err, "disk io counters") } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_linux_test.go b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_linux_test.go index ff806e5c..342597dc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_linux_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_linux_test.go @@ -6,9 +6,61 @@ import ( "testing" "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + "github.com/elastic/beats/metricbeat/module/system" ) func Test_Get_CLK_TCK(t *testing.T) { //usually the tick is 100 assert.Equal(t, uint32(100), Get_CLK_TCK()) } + +func TestDataNameFilter(t *testing.T) { + oldFS := system.HostFS + newFS := "_meta/testdata" + system.HostFS = &newFS + defer func() { + system.HostFS = oldFS + }() + + conf := map[string]interface{}{ + "module": "system", + "metricsets": []string{"diskio"}, + "diskio.include_devices": []string{"sda", "sda1", "sda2"}, + } + + f := mbtest.NewEventsFetcher(t, conf) + + if err := mbtest.WriteEvents(f, t); err != nil { + t.Fatal("write", err) + } + + data, err := f.Fetch() + assert.NoError(t, err) + assert.Equal(t, 3, len(data)) +} + +func TestDataEmptyFilter(t *testing.T) { + oldFS := system.HostFS + newFS := "_meta/testdata" + system.HostFS = &newFS + defer func() { + system.HostFS = oldFS + }() + + conf := map[string]interface{}{ + "module": "system", + "metricsets": []string{"diskio"}, + } + + f := mbtest.NewEventsFetcher(t, conf) + + if err := mbtest.WriteEvents(f, t); err != nil { + t.Fatal("write", err) + } + + data, err := f.Fetch() + assert.NoError(t, err) + assert.Equal(t, 10, len(data)) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/_meta/docs.asciidoc index 997b5eb1..dd10e665 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/_meta/docs.asciidoc @@ -14,7 +14,9 @@ This metricset is available on: *`filesystem.ignore_types`* - A list of filesystem types to ignore. Metrics will not be collected from filesystems matching these types. This setting also -affects the `fsstats` metricset. +affects the `fsstats` metricset. If this option is not set, metricbeat ignores +all types for virtual devices in systems where this information is available (e.g. +all types marked as `nodev` in `/proc/filesystems` in Linux systems). 
[float] === Filtering diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/filesystem.go b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/filesystem.go index 7bf133be..591f8ad8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/filesystem.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/filesystem.go @@ -3,6 +3,8 @@ package filesystem import ( + "strings" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" @@ -14,9 +16,9 @@ import ( var debugf = logp.MakeDebug("system.filesystem") func init() { - if err := mb.Registry.AddMetricSet("system", "filesystem", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "filesystem", New, + mb.WithHostParser(parse.EmptyHostParser), + ) } // MetricSet for fetching filesystem metrics. @@ -32,6 +34,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + if config.IgnoreTypes == nil { + config.IgnoreTypes = DefaultIgnoredTypes() + } + if len(config.IgnoreTypes) > 0 { + logp.Info("Ignoring filesystem types: %s", strings.Join(config.IgnoreTypes, ", ")) + } + return &MetricSet{ BaseMetricSet: base, config: config, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper.go index 83467a19..29a26b73 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper.go @@ -3,12 +3,17 @@ package filesystem import ( + "bufio" + "os" + "path" "path/filepath" + "strings" "time" "runtime" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/module/system" sigar "github.com/elastic/gosigar" ) @@ -31,19 +36,57 @@ func GetFileSystemList() ([]sigar.FileSystem, error) { return nil, err } - // Ignore relative mount points, which are present for example - // in /proc/mounts on Linux with network namespaces. - filtered := fss.List[:0] - for _, fs := range fss.List { - if filepath.IsAbs(fs.DirName) { + if runtime.GOOS == "windows" { + // No filtering on Windows + return fss.List, nil + } + + return filterFileSystemList(fss.List), nil +} + +// filterFileSystemList filters mountpoints to avoid virtual filesystems +// and duplications +func filterFileSystemList(fsList []sigar.FileSystem) []sigar.FileSystem { + var filtered []sigar.FileSystem + devices := make(map[string]sigar.FileSystem) + for _, fs := range fsList { + // Ignore relative mount points, which are present for example + // in /proc/mounts on Linux with network namespaces. + if !filepath.IsAbs(fs.DirName) { + debugf("Filtering filesystem with relative mountpoint %+v", fs) + continue + } + + // Don't do further checks in special devices + if !filepath.IsAbs(fs.DevName) { filtered = append(filtered, fs) continue } - debugf("Filtering filesystem with relative mountpoint %+v", fs) + + // If the device name is a directory, this is a bind mount or nullfs, + // don't count it as it'd be counting again its parent filesystem. + devFileInfo, _ := os.Stat(fs.DevName) + if devFileInfo != nil && devFileInfo.IsDir() { + continue + } + + // If a block device is mounted multiple times (e.g. with some bind mounts), + // store it only once, and use the shorter mount point path. 
+ if seen, found := devices[fs.DevName]; found { + if len(fs.DirName) < len(seen.DirName) { + devices[fs.DevName] = fs + } + continue + } + + devices[fs.DevName] = fs + } + + for _, fs := range devices { + filtered = append(filtered, fs) } - fss.List = filtered - return fss.List, nil + return filtered } func GetFileSystemStat(fs sigar.FileSystem) (*FileSystemStat, error) { @@ -74,7 +117,7 @@ func AddFileSystemUsedPercentage(f *FileSystemStat) { return } - perc := float64(f.Used) / float64(f.Total) + perc := float64(f.Used) / float64(f.Used+f.Avail) f.UsedPercent = common.Round(perc, common.DefaultDecimalPlacesCount) } @@ -126,3 +169,21 @@ func BuildTypeFilter(ignoreType ...string) Predicate { return true } } + +// DefaultIgnoredTypes tries to guess a sane list of filesystem types that +// could be ignored in the running system +func DefaultIgnoredTypes() (types []string) { + // If /proc/filesystems exist, default ignored types are all marked + // as nodev + fsListFile := path.Join(*system.HostFS, "/proc/filesystems") + if f, err := os.Open(fsListFile); err == nil { + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + if len(line) == 2 && line[0] == "nodev" { + types = append(types, line[1]) + } + } + } + return +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper_test.go b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper_test.go index 6cc8805c..9fa62115 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper_test.go @@ -4,6 +4,7 @@ package filesystem import ( + "io/ioutil" "os" "runtime" "testing" @@ -47,6 +48,98 @@ func TestFileSystemList(t *testing.T) { } } +func TestFileSystemListFiltering(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("These cases don't need to work on Windows") + } + + fakeDevDir, err := ioutil.TempDir(os.TempDir(), "dir") + assert.Empty(t, err) + defer os.RemoveAll(fakeDevDir) + + cases := []struct { + description string + fss, expected []sigar.FileSystem + }{ + { + fss: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + {DirName: "/", DevName: "/dev/sda1"}, + }, + expected: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + }, + }, + { + description: "Don't repeat devices, shortest of dir names should be used", + fss: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + {DirName: "/bind", DevName: "/dev/sda1"}, + }, + expected: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + }, + }, + { + description: "Don't repeat devices, shortest of dir names should be used", + fss: []sigar.FileSystem{ + {DirName: "/bind", DevName: "/dev/sda1"}, + {DirName: "/", DevName: "/dev/sda1"}, + }, + expected: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + }, + }, + { + description: "Keep tmpfs", + fss: []sigar.FileSystem{ + {DirName: "/run", DevName: "tmpfs"}, + {DirName: "/tmp", DevName: "tmpfs"}, + }, + expected: []sigar.FileSystem{ + {DirName: "/run", DevName: "tmpfs"}, + {DirName: "/tmp", DevName: "tmpfs"}, + }, + }, + { + description: "Don't repeat devices, shortest of dir names should be used, keep tmpfs", + fss: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + {DirName: "/bind", DevName: "/dev/sda1"}, + {DirName: "/run", DevName: "tmpfs"}, + }, + expected: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + {DirName: "/run", DevName: "tmpfs"}, + }, + }, + { + 
description: "Don't keep the fs if the device is a directory (it'd be a bind mount)", + fss: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + {DirName: "/bind", DevName: fakeDevDir}, + }, + expected: []sigar.FileSystem{ + {DirName: "/", DevName: "/dev/sda1"}, + }, + }, + { + description: "Don't filter out NFS", + fss: []sigar.FileSystem{ + {DirName: "/srv/data", DevName: "192.168.42.42:/exports/nfs1"}, + }, + expected: []sigar.FileSystem{ + {DirName: "/srv/data", DevName: "192.168.42.42:/exports/nfs1"}, + }, + }, + } + + for _, c := range cases { + filtered := filterFileSystemList(c.fss) + assert.ElementsMatch(t, c.expected, filtered, c.description) + } +} + func TestFilter(t *testing.T) { in := []sigar.FileSystem{ {SysTypeName: "nfs"}, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/docs.asciidoc index 581ed92d..8db580e1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/docs.asciidoc @@ -13,4 +13,6 @@ This metricset is available on: *`filesystem.ignore_types`* - A list of filesystem types to ignore. Metrics will not be collected from filesystems matching these types. This setting also -affects the `filesystem` metricset. +affects the `filesystem` metricset. If this option is not set, metricbeat ignores +all types for virtual devices in systems where this information is available (e.g. +all types marked as `nodev` in `/proc/filesystems` in Linux systems). diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/fields.yml index 72e687c7..d9d2b0e7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/_meta/fields.yml @@ -2,7 +2,7 @@ type: group description: > `system.fsstat` contains filesystem metrics aggregated from all mounted - filesystems, similar with what `df -a` prints out. + filesystems. release: ga fields: - name: count diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/fsstat.go b/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/fsstat.go index aec4069f..f8cbadbf 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/fsstat.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/fsstat/fsstat.go @@ -3,6 +3,8 @@ package fsstat import ( + "strings" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" @@ -15,9 +17,9 @@ import ( var debugf = logp.MakeDebug("system-fsstat") func init() { - if err := mb.Registry.AddMetricSet("system", "fsstat", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "fsstat", New, + mb.WithHostParser(parse.EmptyHostParser), + ) } // MetricSet for fetching a summary of filesystem stats. 
@@ -33,6 +35,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + if config.IgnoreTypes == nil { + config.IgnoreTypes = filesystem.DefaultIgnoredTypes() + } + if len(config.IgnoreTypes) > 0 { + logp.Info("Ignoring filesystem types: %s", strings.Join(config.IgnoreTypes, ", ")) + } + return &MetricSet{ BaseMetricSet: base, config: config, @@ -53,7 +62,6 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { // These values are optional and could also be calculated by Kibana var totalFiles, totalSize, totalSizeFree, totalSizeUsed uint64 - dict := map[string]bool{} for _, fs := range fss { stat, err := filesystem.GetFileSystemStat(fs) @@ -63,18 +71,10 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { } logp.Debug("fsstat", "filesystem: %s total=%d, used=%d, free=%d", stat.Mount, stat.Total, stat.Used, stat.Free) - if _, ok := dict[stat.Mount]; ok { - // ignore filesystem with the same mounting point - continue - } - totalFiles += stat.Files totalSize += stat.Total totalSizeFree += stat.Free totalSizeUsed += stat.Used - - dict[stat.Mount] = true - } return common.MapStr{ @@ -83,7 +83,7 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { "used": totalSizeUsed, "total": totalSize, }, - "count": len(dict), + "count": len(fss), "total_files": totalFiles, }, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/load/load.go b/vendor/github.com/elastic/beats/metricbeat/module/system/load/load.go index ae066112..e75cc6dc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/load/load.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/load/load.go @@ -12,9 +12,10 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("system", "load", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "load", New, + mb.WithHostParser(parse.EmptyHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching system CPU load metrics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml index bca698c9..9544dfee 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml @@ -84,3 +84,50 @@ format: percent description: > The percentage of used swap memory. + + - name: hugepages + type: group + prefix: "[float]" + description: This group contains statistics related to huge pages usage on the system. + fields: + - name: total + type: long + format: number + description: > + Number of huge pages in the pool. + + - name: used.bytes + type: long + format: bytes + description: > + Memory used in allocated huge pages. + + - name: used.pct + type: long + format: percent + description: > + Percentage of huge pages used. + + - name: free + type: long + format: number + description: > + Number of available huge pages in the pool. + + - name: reserved + type: long + format: number + description: > + Number of reserved but not allocated huge pages in the pool. + + - name: surplus + type: long + format: number + description: > + Number of overcommited huge pages. + + - name: default_size + type: long + format: bytes + description: > + Default size for huge pages. 
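The `hugepages` fields above come from the kernel's HugeTLB accounting; the `memory.go` hunk that follows reads them through a `GetHugeTLBPages` helper. As a rough, hypothetical sketch of where those numbers live on Linux (not the code path the metricset uses), they can be pulled straight from `/proc/meminfo`:

[source,go]
----
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Collect numeric values from lines such as "HugePages_Total: 16"
	// or "Hugepagesize: 2048 kB".
	vals := map[string]uint64{}
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) < 2 {
			continue
		}
		if n, err := strconv.ParseUint(fields[1], 10, 64); err == nil {
			vals[strings.TrimSuffix(fields[0], ":")] = n
		}
	}

	total := vals["HugePages_Total"]
	free := vals["HugePages_Free"]
	pageBytes := vals["Hugepagesize"] * 1024 // Hugepagesize is reported in kB

	fmt.Printf("total=%d free=%d reserved=%d surplus=%d default_size=%d bytes\n",
		total, free, vals["HugePages_Rsvd"], vals["HugePages_Surp"], pageBytes)
	if total > 0 {
		used := total - free
		// Approximation of used.bytes and used.pct from the fields above.
		fmt.Printf("used.bytes=%d used.pct=%.4f\n", used*pageBytes, float64(used)/float64(total))
	}
}
----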
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go index 92763673..863f8252 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go @@ -12,9 +12,10 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("system", "memory", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "memory", New, + mb.WithHostParser(parse.EmptyHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching system memory metrics. @@ -65,7 +66,26 @@ func (m *MetricSet) Fetch() (event common.MapStr, err error) { }, "free": swapStat.Free, } - memory["swap"] = swap + + hugePagesStat, err := mem.GetHugeTLBPages() + if err != nil { + return nil, errors.Wrap(err, "hugepages") + } + if hugePagesStat != nil { + mem.AddHugeTLBPagesPercentage(hugePagesStat) + memory["hugepages"] = common.MapStr{ + "total": hugePagesStat.Total, + "used": common.MapStr{ + "bytes": hugePagesStat.TotalAllocatedSize, + "pct": hugePagesStat.UsedPercent, + }, + "free": hugePagesStat.Free, + "reserved": hugePagesStat.Reserved, + "surplus": hugePagesStat.Surplus, + "default_size": hugePagesStat.DefaultSize, + } + } + return memory, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/network/network.go b/vendor/github.com/elastic/beats/metricbeat/module/system/network/network.go index 06db4f67..63c4d59b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/network/network.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/network/network.go @@ -17,9 +17,10 @@ import ( var debugf = logp.MakeDebug("system-network") func init() { - if err := mb.Registry.AddMetricSet("system", "network", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "network", New, + mb.WithHostParser(parse.EmptyHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching system network IO metrics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/process/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/process/_meta/fields.yml index bbefe026..6900257a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/process/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/process/_meta/fields.yml @@ -52,7 +52,7 @@ prefix: "[float]" description: CPU-specific statistics per process. fields: - - name: user + - name: user.ticks type: long description: > The amount of CPU time the process spent in user space. @@ -73,7 +73,7 @@ The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%. - - name: system + - name: system.ticks type: long description: > The amount of CPU time the process spent in kernel space. 
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go b/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go index dabc9be8..56973691 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/process/process.go @@ -20,9 +20,10 @@ import ( var debugf = logp.MakeDebug("system.process") func init() { - if err := mb.Registry.AddMetricSet("system", "process", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "process", New, + mb.WithHostParser(parse.EmptyHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet that fetches process metrics. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/process_summary/process_summary.go b/vendor/github.com/elastic/beats/metricbeat/module/system/process_summary/process_summary.go index a22f837c..05c409e0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/process_summary/process_summary.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/process_summary/process_summary.go @@ -16,9 +16,10 @@ import ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("system", "process_summary", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "process_summary", New, + mb.WithHostParser(parse.EmptyHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet @@ -71,6 +72,8 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { summary.running++ case 'D': summary.idle++ + case 'I': + summary.idle++ case 'T': summary.stopped++ case 'Z': diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/fields.yml index 405c04c0..3a06b220 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/fields.yml @@ -2,7 +2,7 @@ type: group description: > raid - release: experimental + release: beta fields: - name: name type: keyword diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/testdata/proc/mdstat b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/testdata/proc/mdstat index 162aa1b9..459c37ee 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/testdata/proc/mdstat +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/testdata/proc/mdstat @@ -23,4 +23,8 @@ md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk +md1 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + unused devices: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid.go b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid.go index 23eae508..0167c30b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid.go @@ -3,20 +3,20 @@ package raid import ( "path/filepath" + 
"github.com/pkg/errors" + "github.com/prometheus/procfs" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" "github.com/elastic/beats/metricbeat/module/system" - "github.com/elastic/procfs" - - "github.com/pkg/errors" ) func init() { - if err := mb.Registry.AddMetricSet("system", "raid", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "raid", New, + mb.WithHostParser(parse.EmptyHostParser), + ) } // MetricSet contains proc fs data. @@ -27,7 +27,7 @@ type MetricSet struct { // New creates a new instance of the raid metricset. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The system raid metricset is experimental") + cfgwarn.Beta("The system raid metricset is beta") systemModule, ok := base.Module().(*system.Module) if !ok { @@ -53,12 +53,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } - m := &MetricSet{ + return &MetricSet{ BaseMetricSet: base, fs: fs, - } - - return m, nil + }, nil } // Fetch fetches one event for each device diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid_test.go b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid_test.go index bb5fb150..9c2e2a58 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/raid_test.go @@ -20,7 +20,7 @@ func TestFetch(t *testing.T) { f := mbtest.NewEventsFetcher(t, getConfig()) data, err := f.Fetch() assert.NoError(t, err) - assert.Equal(t, 7, len(data)) + assert.Equal(t, 8, len(data)) } func getConfig() map[string]interface{} { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/ptable.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/ptable.go index 2142d73c..5be3ea2a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/ptable.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/ptable.go @@ -6,8 +6,7 @@ import ( "strings" "github.com/joeshaw/multierror" - - "github.com/elastic/procfs" + "github.com/prometheus/procfs" ) // process tools diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket.go b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket.go index 80fd480b..01c76b34 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/socket/socket.go @@ -27,9 +27,9 @@ var ( ) func init() { - if err := mb.Registry.AddMetricSet("system", "socket", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "socket", New, + mb.WithHostParser(parse.EmptyHostParser), + ) } type MetricSet struct { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/uptime/metricset.go b/vendor/github.com/elastic/beats/metricbeat/module/system/uptime/metricset.go index 67133fb4..6f07aeb6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/uptime/metricset.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/uptime/metricset.go @@ -12,9 +12,10 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("system", "uptime", New, parse.EmptyHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("system", "uptime", New, + 
mb.WithHostParser(parse.EmptyHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching an OS uptime metric. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/config.reference.yml new file mode 100644 index 00000000..180ae4ab --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: uwsgi + metricsets: ["status"] + enabled: true + period: 10s + hosts: ["tcp://127.0.0.1:9191"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/config.yml index 46dbc35d..e16ad9be 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/config.yml @@ -1,4 +1,2 @@ - module: uwsgi - metricsets: ["status"] - period: 10s hosts: ["tcp://127.0.0.1:9191"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/docs.asciidoc index f4b66585..af530322 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/docs.asciidoc @@ -1,19 +1,20 @@ == uwsgi module -This is the uwsgi module. Uses http://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html[StatsServer]. +This is the uwsgi module. By default it collects the `status` metricset, using +http://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html[StatsServer]. [float] === Module-specific configuration notes The uWSGI module has these additional config options: -*`hosts`*:: host URLs to get data from. by default `tcp://127.0.0.1:9191`. +*`hosts`*:: host URLs to get data from (e.g. `tcp://127.0.0.1:9191`). Can obtain data from 3 types of schemes: tcp (tcp://ip:port), unix socket (unix:///tmp/uwsgi.sock) and http/https server (http://ip:port) [float] === Dashboard -The nginx module comes with a predefined dashboard. For example: +The uwsgi module comes with a predefined dashboard.
For example: image::./images/uwsgi_dashboard.png[] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/fields.yml index 41ababd0..899043d2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/_meta/fields.yml @@ -2,7 +2,7 @@ title: "uwsgi" description: > uwsgi module - + release: beta fields: - name: uwsgi type: group diff --git a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/_meta/fields.yml index ba034259..1fdd65eb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/_meta/fields.yml @@ -1,5 +1,6 @@ - name: status type: group + release: beta description: > uwsgi.status metricset fields fields: diff --git a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/status.go b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/status.go index 9c5ed3dc..6d227be1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/status.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/uwsgi/status/status.go @@ -18,7 +18,10 @@ import ( ) func init() { - mb.Registry.AddMetricSet("uwsgi", "status", New, uwsgi.HostParser) + mb.Registry.MustAddMetricSet("uwsgi", "status", New, + mb.WithHostParser(uwsgi.HostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching uwsgi metrics from StatServer. @@ -28,7 +31,7 @@ type MetricSet struct { // New creates a new instance of the MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Experimental("The uWSGI status metricset is experimental") + cfgwarn.Beta("The uWSGI status metricset is beta") return &MetricSet{BaseMetricSet: base}, nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/config.reference.yml new file mode 100644 index 00000000..d4803d0c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/config.reference.yml @@ -0,0 +1,12 @@ +- module: vsphere + enabled: true + metricsets: ["datastore", "host", "virtualmachine"] + period: 10s + hosts: ["https://localhost/sdk"] + + username: "user" + password: "password" + # If insecure is true, don't verify the server's certificate chain + insecure: false + # Get custom fields when using virtualmachine metric set. Default false. 
+ # get_custom_fields: false diff --git a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/config.yml index c5356379..d2badbc4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/config.yml @@ -1,5 +1,4 @@ - module: vsphere - metricsets: ["datastore", "host", "virtualmachine"] period: 10s hosts: ["https://localhost/sdk"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/docs.asciidoc index f1164080..7a281ae4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/_meta/docs.asciidoc @@ -1,2 +1,3 @@ The vSphere module uses the https://github.com/vmware/govmomi[Govmomi] library to collect metrics from any Vmware SDK URL (ESXi/VCenter). This library is built for and tested against ESXi and vCenter 5.5, 6.0 and 6.5. +By default it enables the metricsets `datastore`, `host` and `virtualmachine`. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/datastore/datastore.go b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/datastore/datastore.go index 153ee028..2069d366 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/datastore/datastore.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/datastore/datastore.go @@ -14,9 +14,9 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("vsphere", "datastore", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("vsphere", "datastore", New, + mb.DefaultMetricSet(), + ) } type MetricSet struct { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/host/host.go b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/host/host.go index 8a129d53..d76b80e0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/host/host.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/host/host.go @@ -22,9 +22,9 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("vsphere", "host", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("vsphere", "host", New, + mb.DefaultMetricSet(), + ) } type MetricSet struct { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/virtualmachine/virtualmachine.go b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/virtualmachine/virtualmachine.go index a6f8792c..3ca221c6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/vsphere/virtualmachine/virtualmachine.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/vsphere/virtualmachine/virtualmachine.go @@ -23,9 +23,9 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("vsphere", "virtualmachine", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("vsphere", "virtualmachine", New, + mb.DefaultMetricSet(), + ) } type MetricSet struct { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/config.reference.yml new file mode 100644 index 00000000..d891fe62 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/config.reference.yml @@ -0,0 +1,15 @@ +- module: windows + metricsets: ["perfmon"] + enabled: true + period: 10s + 
perfmon.ignore_non_existent_counters: true + perfmon.counters: + # - instance_label: processor.name + # instance_name: total + # measurement_label: processor.time.total.pct + # query: '\Processor Information(_Total)\% Processor Time' + +- module: windows + metricsets: ["service"] + enabled: true + period: 60s diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/config.yml index 2c56fe8e..866e0001 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/config.yml @@ -1,8 +1,2 @@ - module: windows - metricsets: ["perfmon"] - period: 10s - perfmon.counters: - -- module: windows - metricsets: ["service"] period: 60s diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/docs.asciidoc index 020e508b..3a14b2eb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/_meta/docs.asciidoc @@ -1 +1,2 @@ -This is the Windows module. +This is the Windows module. It collects metrics from Windows systems, +by default metricset `service` is enabled. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/data.json index 8247ffa2..e6b39dd8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/data.json @@ -1,5 +1,5 @@ { - "@timestamp": "2016-05-23T08:05:34.853Z", + "@timestamp": "2017-10-12T08:05:34.853Z", "beat": { "hostname": "host.example.com", "name": "host.example.com" @@ -9,25 +9,13 @@ "name": "perfmon", "rtt": 115 }, - "type": "metricsets", "windows": { "perfmon": { - "disk": { - "bytes": { - "read": { - "total": 0 - } - } - }, "processor": { + "name": "_Total", "time": { - "idle": { - "average": { - "ns": 670661.5894039735 - } - }, "total": { - "pct": 3.135058464112306 + "pct": 1.4663385364361736 } } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/docs.asciidoc index e1a8e980..28c05d37 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/_meta/docs.asciidoc @@ -1,31 +1,76 @@ -The `perfmon` metricset of the Windows module reads Windows -performance counters. +The `perfmon` metricset of the Windows module reads Windows performance +counters. [float] === Configuration You must configure queries for the Windows performance counters that you wish -to collect. The example below collects processor time and disk writes. -With `format` you can set the output format for a specific counter. Possible values are -`float` and `long`. If nothing is selected the default value is `float`. -With `instance_name`, you can specify the name of the instance. Use this setting when: -- You want to use an instance name that is different from the computed name. For example, `Total` instead of `_Total`. -- You specify a counter that has no instance. For example, `\TCPIP Performance Diagnostics\IPv4 NBLs/sec indicated without prevalidation`. 
-For wildcard queries this setting has no effect. - +to collect. The example below collects processor time and disk write metrics +every 10 seconds. If any of the configured counters does not exist, the error is ignored. [source,yaml] ---- - module: windows - metricsets: ["perfmon"] + metricsets: [perfmon] period: 10s + perfmon.ignore_non_existent_counters: true perfmon.counters: - - instance_label: "processor.name" - instance_name: "Total" - measurement_label: "processor.time.total.pct" + - instance_label: processor.name + instance_name: total + measurement_label: processor.time.total.pct query: '\Processor Information(_Total)\% Processor Time' - - instance_label: "diskio.name" - measurement_label: "diskio.write.bytes" + + - instance_label: physical_disk.name + measurement_label: physical_disk.write.per_sec query: '\PhysicalDisk(*)\Disk Writes/sec' - format: "long" + + - instance_label: physical_disk.name + measurement_label: physical_disk.write.time.pct + query: '\PhysicalDisk(*)\% Disk Write Time' ---- + +*`ignore_non_existent_counters`*:: A boolean option that, when set to true, +causes the metricset to ignore errors from counters that do not exist. +Instead of an error, a message is logged at the info level stating +that the counter does not exist. + +*`counters`*:: Specifies a list of queries to perform. Each individual +counter requires three config options: `instance_label`, `measurement_label`, +and `query`. + +[float] +==== Counter Configuration + +Each item in the `counters` list specifies a perfmon query to perform. In the +events generated by the metricset these configuration options map to the field +values as shown below. + +---- +"%[instance_label]": "%[instance_name] or <counter instance name>", +"%[measurement_label]": <counter value>, +---- + +*`instance_label`*:: The label used to identify the counter instance. This +field is required. + +*`instance_name`*:: The instance name to use in the event when the counter's +path (`query`) does not include an instance or when you want to override the +instance name. For example, with `\Processor Information(_Total)` the +instance name would be `_Total`, and by setting `instance_name: total` you can +override that value. ++ +The setting has no effect with wildcard queries (e.g. +`\PhysicalDisk(*)\Disk Writes/sec`). + +*`measurement_label`*:: The label used for the value returned by the query. +This field is required. + +*`query`*:: The perfmon query. This is the counter path specified in +Performance Data Helper (PDH) syntax. This field is required. For example +`\Processor Information(_Total)\% Processor Time`. An asterisk can be used in +place of an instance name to perform a wildcard query that generates an event +for each counter instance (e.g. `\PhysicalDisk(*)\Disk Writes/sec`). + +*`format`*:: Format of the measurement value. The value can be either `float` or +`long`. The default is `float`. 
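The options documented above map one-to-one onto the `Config` and `CounterConfig` structs this patch introduces in `perfmon.go`. Below is a minimal, Windows-only sketch of driving the new reader directly, mirroring what the integration tests later in the patch do; the standalone `main` wrapper, importing the module package from outside the Beat, and the 100 ms pause are illustrative assumptions, not part of the patch.

[source,go]
----
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/elastic/beats/metricbeat/module/windows/perfmon"
)

func main() {
	// Programmatic equivalent of the YAML example above.
	config := perfmon.Config{
		IgnoreNECounters: true,
		CounterConfig: []perfmon.CounterConfig{{
			InstanceLabel:    "processor.name",
			InstanceName:     "total",
			MeasurementLabel: "processor.time.total.pct",
			Query:            `\Processor Information(_Total)\% Processor Time`,
			Format:           "float",
		}},
	}

	reader, err := perfmon.NewPerfmonReader(config)
	if err != nil {
		log.Fatal(err) // e.g. a missing counter without ignore_non_existent_counters
	}

	// The first Read only primes rate-based counters; values that are not
	// ready yet are skipped, so take a second sample after a short pause.
	if _, err := reader.Read(); err != nil {
		log.Fatal(err)
	}
	time.Sleep(100 * time.Millisecond)

	events, err := reader.Read()
	if err != nil {
		log.Fatal(err)
	}
	for _, event := range events {
		// MetricSetFields carries "processor.name" and "processor.time.total.pct".
		fmt.Println(event.MetricSetFields)
	}
}
----

In Metricbeat itself none of this is wired by hand: the new `Fetch(mb.ReporterV2)` method added to `perfmon.go` calls the same `Read` on every period and forwards the resulting events.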
+ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go index 89ba6315..f6f101f9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go @@ -7,10 +7,11 @@ import ( "time" "unsafe" - mbtest "github.com/elastic/beats/metricbeat/mb/testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/metricbeat/mb" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) const processorTimeCounter = `\Processor Information(_Total)\% Processor Time` @@ -38,16 +39,20 @@ func TestData(t *testing.T) { }, } - f := mbtest.NewEventsFetcher(t, config) - - f.Fetch() - + ms := mbtest.NewReportingMetricSetV2(t, config) + mbtest.ReportingFetchV2(ms) time.Sleep(60 * time.Millisecond) - err := mbtest.WriteEvents(f, t) - if err != nil { - t.Fatal("write", err) + events, errs := mbtest.ReportingFetchV2(ms) + if len(errs) > 0 { + t.Fatal(errs) + } + if len(events) == 0 { + t.Fatal("no events received") } + + beatEvent := mbtest.StandardizeEvent(ms, events[0], mb.AddMetricSetInfo) + mbtest.WriteEventToDataJSON(t, beatEvent) } func TestQuery(t *testing.T) { @@ -85,11 +90,13 @@ func TestQuery(t *testing.T) { } func TestExistingCounter(t *testing.T) { - config := make([]CounterConfig, 1) - config[0].InstanceLabel = "processor.name" - config[0].MeasurementLabel = "processor.time.total.pct" - config[0].Query = processorTimeCounter - config[0].Format = "float" + config := Config{ + CounterConfig: make([]CounterConfig, 1), + } + config.CounterConfig[0].InstanceLabel = "processor.name" + config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" + config.CounterConfig[0].Query = processorTimeCounter + config.CounterConfig[0].Format = "float" handle, err := NewPerfmonReader(config) if err != nil { t.Fatal(err) @@ -105,11 +112,13 @@ func TestExistingCounter(t *testing.T) { } func TestNonExistingCounter(t *testing.T) { - config := make([]CounterConfig, 1) - config[0].InstanceLabel = "processor.name" - config[0].MeasurementLabel = "processor.time.total.pct" - config[0].Query = "\\Processor Information(_Total)\\not existing counter" - config[0].Format = "float" + config := Config{ + CounterConfig: make([]CounterConfig, 1), + } + config.CounterConfig[0].InstanceLabel = "processor.name" + config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" + config.CounterConfig[0].Query = "\\Processor Information(_Total)\\not existing counter" + config.CounterConfig[0].Format = "float" handle, err := NewPerfmonReader(config) if assert.Error(t, err) { assert.EqualValues(t, PDH_CSTATUS_NO_COUNTER, errors.Cause(err)) @@ -121,12 +130,39 @@ func TestNonExistingCounter(t *testing.T) { } } +func TestIgnoreNonExistentCounter(t *testing.T) { + config := Config{ + CounterConfig: make([]CounterConfig, 1), + IgnoreNECounters: true, + } + config.CounterConfig[0].InstanceLabel = "processor.name" + config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" + config.CounterConfig[0].Query = "\\Processor Information(_Total)\\not existing counter" + config.CounterConfig[0].Format = "float" + handle, err := NewPerfmonReader(config) + + values, err := handle.Read() + + if assert.Error(t, err) { + assert.EqualValues(t, PDH_NO_DATA, errors.Cause(err)) + } + + if handle != nil { 
+ err = handle.query.Close() + assert.NoError(t, err) + } + + t.Log(values) +} + func TestNonExistingObject(t *testing.T) { - config := make([]CounterConfig, 1) - config[0].InstanceLabel = "processor.name" - config[0].MeasurementLabel = "processor.time.total.pct" - config[0].Query = "\\non existing object\\% Processor Performance" - config[0].Format = "float" + config := Config{ + CounterConfig: make([]CounterConfig, 1), + } + config.CounterConfig[0].InstanceLabel = "processor.name" + config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" + config.CounterConfig[0].Query = "\\non existing object\\% Processor Performance" + config.CounterConfig[0].Format = "float" handle, err := NewPerfmonReader(config) if assert.Error(t, err) { assert.EqualValues(t, PDH_CSTATUS_NO_OBJECT, errors.Cause(err)) @@ -253,11 +289,13 @@ func TestRawValues(t *testing.T) { } func TestWildcardQuery(t *testing.T) { - config := make([]CounterConfig, 1) - config[0].InstanceLabel = "processor.name" - config[0].MeasurementLabel = "processor.time.pct" - config[0].Query = `\Processor Information(*)\% Processor Time` - config[0].Format = "float" + config := Config{ + CounterConfig: make([]CounterConfig, 1), + } + config.CounterConfig[0].InstanceLabel = "processor.name" + config.CounterConfig[0].MeasurementLabel = "processor.time.pct" + config.CounterConfig[0].Query = `\Processor Information(*)\% Processor Time` + config.CounterConfig[0].Format = "float" handle, err := NewPerfmonReader(config) if err != nil { t.Fatal(err) @@ -273,7 +311,7 @@ func TestWildcardQuery(t *testing.T) { t.Fatal(err) } - pctKey, err := values[0].HasKey("processor.time.pct") + pctKey, err := values[0].MetricSetFields.HasKey("processor.time.pct") if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go index 34016d29..a5d59892 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go @@ -10,11 +10,12 @@ import ( "unicode/utf16" "unsafe" - "github.com/joeshaw/multierror" "github.com/pkg/errors" "golang.org/x/sys/windows" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/winlogbeat/sys" ) @@ -222,7 +223,7 @@ func (q *Query) AddCounter(counterPath string, format Format, instanceName strin h, err := PdhAddCounter(q.handle, counterPath, 0) if err != nil { - return errors.Wrapf(err, `failed to add counter (path="%v")`, counterPath) + return err } wildcard := wildcardRegexp.MatchString(counterPath) @@ -309,12 +310,14 @@ func (q *Query) Close() error { type PerfmonReader struct { query *Query // PDH Query - instanceLabel map[string]string // Mapping of counter path to key used in output. - measurement map[string]string - executed bool // Indicates if the query has been executed. + instanceLabel map[string]string // Mapping of counter path to key used for the label (e.g. processor.name) + measurement map[string]string // Mapping of counter path to key used for the value (e.g. processor.cpu_time). + executed bool // Indicates if the query has been executed. + log *logp.Logger } -func NewPerfmonReader(config []CounterConfig) (*PerfmonReader, error) { +// NewPerfmonReader creates a new instance of PerfmonReader. 
+func NewPerfmonReader(config Config) (*PerfmonReader, error) { query, err := NewQuery("") if err != nil { return nil, err @@ -324,9 +327,10 @@ func NewPerfmonReader(config []CounterConfig) (*PerfmonReader, error) { query: query, instanceLabel: map[string]string{}, measurement: map[string]string{}, + log: logp.NewLogger("perfmon"), } - for _, counter := range config { + for _, counter := range config.CounterConfig { var format Format switch counter.Format { case "float": @@ -335,8 +339,17 @@ func NewPerfmonReader(config []CounterConfig) (*PerfmonReader, error) { format = LongFormat } if err := query.AddCounter(counter.Query, format, counter.InstanceName); err != nil { + if config.IgnoreNECounters { + switch err { + case PDH_CSTATUS_NO_COUNTER, PDH_CSTATUS_NO_COUNTERNAME, + PDH_CSTATUS_NO_INSTANCE, PDH_CSTATUS_NO_OBJECT: + r.log.Infow("Ignoring non existent counter", "error", err, + logp.Namespace("perfmon"), "query", counter.Query) + continue + } + } query.Close() - return nil, err + return nil, errors.Wrapf(err, `failed to add counter (query="%v")`, counter.Query) } r.instanceLabel[counter.Query] = counter.InstanceLabel @@ -347,9 +360,9 @@ func NewPerfmonReader(config []CounterConfig) (*PerfmonReader, error) { return r, nil } -func (r *PerfmonReader) Read() ([]common.MapStr, error) { +func (r *PerfmonReader) Read() ([]mb.Event, error) { if err := r.query.Execute(); err != nil { - return nil, err + return nil, errors.Wrap(err, "failed querying counter values") } // Get the values. @@ -359,38 +372,37 @@ func (r *PerfmonReader) Read() ([]common.MapStr, error) { } // Write the values into the map. - result := make([]common.MapStr, 0, len(values)) - var errs multierror.Errors - - for counterPath, counter := range values { - for _, val := range counter { - ev := common.MapStr{} - instanceKey := r.instanceLabel[counterPath] - ev.Put(instanceKey, val.Instance) - measurementKey := r.measurement[counterPath] - ev.Put(measurementKey, val.Measurement) - - if val.Err != nil { - switch val.Err { - case PDH_CALC_NEGATIVE_DENOMINATOR: - case PDH_INVALID_DATA: - if r.executed { - errs = append(errs, errors.Wrapf(val.Err, "key=%v", measurementKey)) - } - default: - errs = append(errs, errors.Wrapf(val.Err, "key=%v", measurementKey)) - } + events := make([]mb.Event, 0, len(values)) + + for counterPath, values := range values { + for _, val := range values { + if val.Err != nil && !r.executed { + r.log.Debugw("Ignoring the first measurement because the data isn't ready", + "error", val.Err, logp.Namespace("perfmon"), "query", counterPath) + continue } - result = append(result, ev) - } - } + event := mb.Event{ + MetricSetFields: common.MapStr{}, + Error: errors.Wrapf(val.Err, "failed on query=%v", counterPath), + } + + if val.Instance != "" { + event.MetricSetFields.Put(r.instanceLabel[counterPath], val.Instance) + } - if !r.executed { - r.executed = true + if val.Measurement != nil { + event.MetricSetFields.Put(r.measurement[counterPath], val.Measurement) + } else { + event.MetricSetFields.Put(r.measurement[counterPath], 0) + } + + events = append(events, event) + } } - return result, errs.Err() + r.executed = true + return events, nil } func (e PdhErrno) Error() string { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go index 29bc1004..0c352eb0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go @@ -3,43 +3,45 @@ package perfmon import ( - "fmt" "strings" "github.com/pkg/errors" - "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" ) +// CounterConfig for perfmon counters. type CounterConfig struct { - InstanceLabel string `config:"instance_label" validate:"required"` + InstanceLabel string `config:"instance_label" validate:"required"` InstanceName string `config:"instance_name"` MeasurementLabel string `config:"measurement_label" validate:"required"` - Query string `config:"query" validate:"required"` + Query string `config:"query" validate:"required"` Format string `config:"format"` } +// Config for the windows perfmon metricset. +type Config struct { + IgnoreNECounters bool `config:"perfmon.ignore_non_existent_counters"` + CounterConfig []CounterConfig `config:"perfmon.counters" validate:"required"` +} + func init() { - if err := mb.Registry.AddMetricSet("windows", "perfmon", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("windows", "perfmon", New) } type MetricSet struct { mb.BaseMetricSet reader *PerfmonReader + log *logp.Logger } // New create a new instance of the MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { cfgwarn.Beta("The perfmon metricset is beta") - config := struct { - CounterConfig []CounterConfig `config:"perfmon.counters" validate:"required"` - }{} - + var config Config if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } @@ -51,28 +53,34 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { value.Format = "float" case "float", "long": default: - err := fmt.Errorf("format '%s' for counter '%s' are not valid", value.Format, value.InstanceLabel) - return nil, errors.Wrap(err, "initialization failed") + return nil, errors.Errorf("initialization failed: format '%s' "+ + "for counter '%s' is invalid (must be float or long)", + value.Format, value.InstanceLabel) } } - reader, err := NewPerfmonReader(config.CounterConfig) + reader, err := NewPerfmonReader(config) if err != nil { - return nil, errors.Wrap(err, "initialization failed") + return nil, errors.Wrap(err, "initialization of reader failed") } return &MetricSet{ BaseMetricSet: base, reader: reader, + log: logp.NewLogger("perfmon"), }, nil } -func (m *MetricSet) Fetch() ([]common.MapStr, error) { - data, err := m.reader.Read() +func (m *MetricSet) Fetch(report mb.ReporterV2) { + events, err := m.reader.Read() if err != nil { - return nil, errors.Wrap(err, "failed reading counters") + m.log.Debugw("Failed reading counters", "error", err) + err = errors.Wrap(err, "failed reading counters") + report.Error(err) } - return data, nil + for _, event := range events { + report.Event(event) + } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/service/service.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/service/service.go index c9b9f15e..1596e199 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/service/service.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/service/service.go @@ -11,9 +11,9 @@ import ( // init registers the MetricSet with the central registry. 
// The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("windows", "service", New); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("windows", "service", New, + mb.DefaultMetricSet(), + ) } // MetricSet type defines all fields of the MetricSet diff --git a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/config.reference.yml new file mode 100644 index 00000000..04742813 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/config.reference.yml @@ -0,0 +1,5 @@ +- module: zookeeper + enabled: true + metricsets: ["mntr"] + period: 10s + hosts: ["localhost:2181"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/config.yml index 63543cef..fb77d08f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/config.yml @@ -1,4 +1,3 @@ - module: zookeeper - metricsets: ["mntr"] period: 10s hosts: ["localhost:2181"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/docs.asciidoc index 8675c576..34b3f950 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/docs.asciidoc @@ -1,4 +1,5 @@ -The ZooKeeper module fetches statistics from the ZooKeeper service. +The ZooKeeper module fetches statistics from the ZooKeeper service. The default +metricset is `mntr`. [float] === Compatibility diff --git a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/mntr/mntr.go b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/mntr/mntr.go index 2cc27c71..396bc0f0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/mntr/mntr.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/mntr/mntr.go @@ -36,9 +36,10 @@ import ( ) func init() { - if err := mb.Registry.AddMetricSet("zookeeper", "mntr", New, parse.PassThruHostParser); err != nil { - panic(err) - } + mb.Registry.MustAddMetricSet("zookeeper", "mntr", New, + mb.WithHostParser(parse.PassThruHostParser), + mb.DefaultMetricSet(), + ) } // MetricSet for fetching ZooKeeper health metrics. 
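The registration changes above, together with the matching `mb.DefaultMetricSet()` calls in the vsphere, windows, and zookeeper modules, are what allow the `metricsets:` lines to be dropped from the modules.d files that follow: a metricset registered as a default runs whenever its module is enabled without an explicit metricset list. A minimal sketch of the new registration style for a hypothetical `example`/`status` metricset follows; the module name, the reported field, and the use of `PassThruHostParser` are illustrative assumptions.

[source,go]
----
package status

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/metricbeat/mb/parse"
)

func init() {
	// MustAddMetricSet panics on registration errors, replacing the older
	// AddMetricSet + explicit panic(err) pattern removed throughout this patch.
	mb.Registry.MustAddMetricSet("example", "status", New,
		mb.WithHostParser(parse.PassThruHostParser), // pass configured hosts through unchanged
		mb.DefaultMetricSet(),                       // enabled when no metricsets are listed
	)
}

// MetricSet is a hypothetical metricset shown only to illustrate the pattern.
type MetricSet struct {
	mb.BaseMetricSet
}

// New creates a new instance of the MetricSet.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{BaseMetricSet: base}, nil
}

// Fetch reports a single event through the ReporterV2 interface, the same
// style the perfmon metricset switches to in this patch.
func (m *MetricSet) Fetch(report mb.ReporterV2) {
	report.Event(mb.Event{
		MetricSetFields: common.MapStr{"status": "ok"},
	})
}
----

With such a registration in place, a modules.d entry as short as `- module: example` plus `period` and `hosts` is enough, which is exactly the shape the trimmed config files below take.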
diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled index 787e314d..0a4a883f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled @@ -1,5 +1,2 @@ - module: aerospike - metricsets: ["namespace"] - enabled: false - period: 10s hosts: ["localhost:3000"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled index 08d1efaf..04e689e0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled @@ -1,6 +1,4 @@ - module: apache - metricsets: ["status"] - period: 10s - - # Apache hosts hosts: ["http://127.0.0.1"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled index a069c8e6..d77bcf36 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled @@ -1,4 +1,11 @@ - module: ceph - metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] + metricsets: ["cluster_health", "cluster_status", "monitor_health"] period: 10s hosts: ["localhost:5000"] + #username: "user" + #password: "secret" + +- module: ceph + metricsets: ["cluster_disk", "osd_tree", "pool_disk"] + period: 1m + hosts: ["localhost:5000"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled index 2691d77a..760115c7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled @@ -1,4 +1,4 @@ - module: couchbase - metricsets: ["bucket", "cluster", "node"] - period: 10s hosts: ["localhost:8091"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled index 87049258..5b25e17f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled @@ -1,10 +1,5 @@ - module: docker - metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] hosts: ["unix:///var/run/docker.sock"] - period: 10s - # To connect to Docker over TLS you must specify a client and CA certificate. - #ssl: - #certificate_authority: "/etc/pki/root/ca.pem" - #certificate: "/etc/pki/client/cert.pem" - #key: "/etc/pki/client/cert.key" + # Replace dots in labels with `_`. 
Set to false to keep dots + labels.dedot: true diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled index 6dfa3755..bc989151 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled @@ -1,6 +1,6 @@ - module: dropwizard - metricsets: ["collector"] - period: 10s hosts: ["localhost:8080"] metrics_path: /metrics/metrics namespace: example + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled index a792d2be..2531498f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled @@ -1,4 +1,4 @@ - module: elasticsearch - metricsets: ["node", "node_stats"] - period: 10s hosts: ["localhost:9200"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled index 6a5d97c6..00ac532e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled @@ -1,5 +1,4 @@ - module: etcd - metricsets: ["leader", "self", "store"] - period: 10s hosts: ["localhost:2379"] - + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled index b5318ecd..2c6ac0e7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled @@ -1,10 +1 @@ - module: graphite - metricsets: ["server"] - enabled: true -# protocol: "udp" -# templates: -# - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats -# namespace: "test" -# template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash -# delimiter: "_" - diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled index febce8d2..203e1cc7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled @@ -1,4 +1,2 @@ - module: haproxy - metricsets: ["info", "stat"] - period: 10s hosts: ["tcp://127.0.0.1:14567"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled index 02c71778..a7e4e4eb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled @@ -6,8 +6,11 @@ path: "/" #body: "" #method: "GET" + #username: "user" + #password: "secret" #request.enabled: false #response.enabled: false + #json.is_array: false #dedot.enabled: false - module: http diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled index 
de331606..9075615f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled @@ -3,7 +3,23 @@ period: 10s hosts: ["localhost"] namespace: "metrics" - path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" - jmx.mapping: - jmx.application: - jmx.instance: + #username: "user" + #password: "secret" + jmx.mappings: + - mbean: 'java.lang:type=Runtime' + attributes: + - attr: Uptime + field: uptime + - mbean: 'java.lang:type=Memory' + attributes: + - attr: HeapMemoryUsage + field: memory.heap_usage + - attr: NonHeapMemoryUsage + field: memory.non_heap_usage + # GC Metrics - this depends on what is available on your JVM + # - mbean: 'java.lang:type=GarbageCollector,name=ConcurrentMarkSweep' + # attributes: + # - attr: CollectionTime + # field: gc.cms_collection_time + # - attr: CollectionCount + # field: gc.cms_collection_count diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled index 91ce183a..f9db3711 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled @@ -1,25 +1,2 @@ - module: kafka - metricsets: ["partition"] - period: 10s hosts: ["localhost:9092"] - - #client_id: metricbeat - #retries: 3 - #backoff: 250ms - - # List of Topics to query metadata for. If empty, all topics will be queried. - #topics: [] - - # Optional SSL. By default is off. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - - # SASL authentication - #username: "" - #password: "" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled index 09a30295..e7629293 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled @@ -1,4 +1,4 @@ - module: kibana - metricsets: ["status"] - period: 10s hosts: ["localhost:5601"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled index cf00d10c..48a0bfdc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled @@ -1,28 +1,13 @@ -# Node metrics, from kubelet: - module: kubernetes metricsets: + - container - node - - system - pod - - container + - system - volume - period: 10s hosts: ["localhost:10255"] - -# State metrics from kube-state-metrics service: -- module: kubernetes - enabled: false - metricsets: - - state_node - - state_deployment - - state_replicaset - - state_pod - - state_container - period: 10s - hosts: ["kube-state-metrics:8080"] - -# Kubernetes events -- module: kubernetes - enabled: false - metricsets: - - event + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + ssl.certificate_authorities: + - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + #username: "user" + #password: "secret" diff --git 
a/vendor/github.com/elastic/beats/metricbeat/modules.d/kvm.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kvm.yml.disabled new file mode 100644 index 00000000..1e459638 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kvm.yml.disabled @@ -0,0 +1,8 @@ +- module: kvm + metricsets: ["dommemstat"] + enabled: false + period: 10s + hosts: ["localhost"] + + # Timeout to connect to Libvirt server + #timeout: 1s diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled index 7d1e8fc9..0739ea47 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled @@ -1,6 +1,4 @@ - module: logstash - metricsets: ["node", "node_stats"] - enabled: false - period: 10s hosts: ["localhost:9600"] - + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled index 1b230087..f1f6533d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled @@ -1,4 +1,2 @@ - module: memcached - metricsets: ["stats"] - period: 10s hosts: ["localhost:11211"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled index fb6b19e7..f22fc378 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled @@ -1,16 +1,2 @@ - module: mongodb - metricsets: ["dbstats", "status"] - period: 10s - - # The hosts must be passed as MongoDB URLs in the format: - # [mongodb://][user:pass@]host[:port]. - # The username and password can also be set using the respective configuration - # options. The credentials in the URL take precedence over the username and - # password configuration options. hosts: ["localhost:27017"] - - # Username to use when connecting to MongoDB. Empty by default. - #username: user - - # Password to use when connecting to MongoDB. Empty by default. - #password: pass diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/munin.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/munin.yml.disabled new file mode 100644 index 00000000..1ea9bf8c --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/munin.yml.disabled @@ -0,0 +1,3 @@ +- module: munin + hosts: ["localhost:4949"] + node.namespace: node diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled index afff5ff7..8855531c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled @@ -1,17 +1,2 @@ - module: mysql - metricsets: ["status"] - period: 10s - - # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/" - # The username and password can either be set in the DSN or using the username - # and password config options. Those specified in the DSN take precedence. - hosts: ["root:secret@tcp(127.0.0.1:3306)/"] - - # Username of hosts. Empty by default. - #username: root - - # Password of hosts. 
Empty by default. - #password: secret - - # By setting raw to true, all raw fields from the status metricset will be added to the event. - #raw: false + hosts: ["tcp(127.0.0.1:3306)/"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled index e3737ed9..5f130609 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled @@ -1,9 +1,4 @@ - module: nginx - metricsets: ["stubstatus"] - period: 10s - - # Nginx hosts hosts: ["http://127.0.0.1"] - - # Path to server status. Default server-status - #server_status_path: "server-status" + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled index be576451..53d78469 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled @@ -1,5 +1,4 @@ - module: php_fpm - metricsets: ["pool"] - period: 10s - status_path: "/status" hosts: ["localhost:8080"] + #username: "user" + #password: "secret" diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled index 383c66d9..8adc4408 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled @@ -1,24 +1,2 @@ - module: postgresql - metricsets: - # Stats about every PostgreSQL database - - database - - # Stats about the background writer process's activity - - bgwriter - - # Stats about every PostgreSQL process - - activity - - period: 10s - - # The host must be passed as PostgreSQL URL. Example: - # postgres://localhost:5432?sslmode=disable - # The available parameters are documented here: - # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters hosts: ["postgres://localhost:5432"] - - # Username to use when connecting to PostgreSQL. Empty by default. - #username: user - - # Password to use when connecting to PostgreSQL. Empty by default. 
- #password: pass diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled index 76bee349..1d5f9437 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled @@ -1,6 +1,11 @@ - module: prometheus - metricsets: ["stats"] period: 10s hosts: ["localhost:9090"] - metrics_path: /metrics #namespace: example + #username: "user" + #password: "secret" + + # This can be used for service account based authorization: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + #ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled index c0343876..b5912718 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled @@ -1,7 +1,2 @@ - module: rabbitmq - metricsets: ["node", "queue"] - period: 10s hosts: ["localhost:15672"] - - username: guest - password: guest diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled index 3f5ea513..e4ad9bb9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled @@ -1,29 +1,2 @@ - module: redis - metricsets: ["info", "keyspace"] - period: 10s - - # Redis hosts hosts: ["127.0.0.1:6379"] - - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. - #timeout: 1s - - # Optional fields to be added to each event - #fields: - # datacenter: west - - # Network type to be used for redis connection. Default: tcp - #network: tcp - - # Max number of concurrent connections. Default: 10 - #maxconn: 10 - - # Filters can be used to reduce the number of fields sent. - #processors: - # - include_fields: - # fields: ["beat", "metricset", "redis.info.stats"] - - # Redis AUTH password. Empty by default. 
- #password: foobared diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml b/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml index 409ec1b6..82a6338c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml @@ -1,20 +1,14 @@ - module: system - period: 10s - metricsets: - - cpu - - load - - memory - - network - - process - - process_summary - #- core - #- diskio - #- socket - processes: ['.*'] process.include_top_n: by_cpu: 5 # include top 5 processes by CPU by_memory: 5 # include top 5 processes by memory +#- module: system +# metricsets: +# - core +# - diskio +# - socket + - module: system period: 1m metricsets: @@ -28,3 +22,9 @@ period: 15m metricsets: - uptime + +#- module: system +# period: 5m +# metricsets: +# - raid +# raid.mount_point: '/' diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled index 46dbc35d..e16ad9be 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled @@ -1,4 +1,2 @@ - module: uwsgi - metricsets: ["status"] - period: 10s hosts: ["tcp://127.0.0.1:9191"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled index c5356379..d2badbc4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled @@ -1,5 +1,4 @@ - module: vsphere - metricsets: ["datastore", "host", "virtualmachine"] period: 10s hosts: ["https://localhost/sdk"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled index 2c56fe8e..866e0001 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled @@ -1,8 +1,2 @@ - module: windows - metricsets: ["perfmon"] - period: 10s - perfmon.counters: - -- module: windows - metricsets: ["service"] period: 60s diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled index 63543cef..fb77d08f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled @@ -1,4 +1,3 @@ - module: zookeeper - metricsets: ["mntr"] period: 10s hosts: ["localhost:2181"] diff --git a/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py b/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py index a4d9a318..07ca0b46 100644 --- a/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py +++ b/vendor/github.com/elastic/beats/metricbeat/scripts/docs_collector.py @@ -32,11 +32,11 @@ def collect(beat_name): os.mkdir(os.path.abspath("docs") + "/modules/" + module) module_file = generated_note - beat_path = path + "/" + module + "/_meta" + module_meta_path = path + "/" + module + "/_meta" # Load module fields.yml module_fields = "" - with open(beat_path + "/fields.yml") as f: + with open(module_meta_path + "/fields.yml") as f: module_fields = yaml.load(f.read()) module_fields = 
module_fields[0] @@ -56,9 +56,13 @@ def collect(beat_name): modules_list[module] = {} modules_list[module]["title"] = title modules_list[module]["release"] = release + modules_list[module]["dashboards"] = os.path.exists(module_meta_path + "/kibana") modules_list[module]["metricsets"] = {} - config_file = beat_path + "/config.yml" + config_file = module_meta_path + "/config.reference.yml" + + if os.path.isfile(config_file) == False: + config_file = module_meta_path + "/config.yml" # Add example config file if os.path.isfile(config_file) == True: @@ -85,7 +89,9 @@ def collect(beat_name): # HTTP helper if 'ssl' in get_settings(module_fields): - module_file += "This module supports TLS connection when using `ssl` config field, as described in <>.\n\n" + module_file += "This module supports TLS connection when using `ssl`" + \ + " config field, as described in <>." + \ + " It also supports the options described in <>.\n\n" # Add metricsets title as below each metricset adds its link module_file += "[float]\n" @@ -177,8 +183,8 @@ def collect(beat_name): module_list_output = generated_note module_list_output += '[options="header"]\n' - module_list_output += '|========================\n' - module_list_output += '|Modules |Metricsets \n' + module_list_output += '|===================================\n' + module_list_output += '|Modules |Dashboards |Metricsets \n' for key, m in sorted(six.iteritems(modules_list)): @@ -186,11 +192,15 @@ def collect(beat_name): if m["release"] != "ga": release_label = m["release"] + "[]" - module_list_output += '|{} {} |{} \n'.format("<> ", release_label, "") + dashboard_no = "image:./images/icon-no.png[No prebuilt dashboards] " + dashboard_yes = "image:./images/icon-yes.png[Prebuilt dashboards are available] " + dashboards = dashboard_yes if m["dashboards"] else dashboard_no + + module_list_output += '|{} {} |{} |{} \n'.format("<> ", + release_label, dashboards, "") # Make sure empty entry row spans over all metricset rows for this module - module_list_output += '.{}+| '.format(len(m["metricsets"])) + module_list_output += '.{}+| .{}+| '.format(len(m["metricsets"]), len(m["metricsets"])) for key, ms in sorted(six.iteritems(m["metricsets"])): diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/config/metricbeat.yml.j2 b/vendor/github.com/elastic/beats/metricbeat/tests/system/config/metricbeat.yml.j2 index 3ed3879f..b237292d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/config/metricbeat.yml.j2 +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/config/metricbeat.yml.j2 @@ -101,43 +101,6 @@ metricbeat.config.modules: # Disable random start delay for metricsets. metricbeat.max_start_delay: 0 -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. -name: {{shipper_name}} - -# The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. 
-tags: [ - {%- if agent_tags -%} - {%- for tag in agent_tags -%} - "{{ tag }}" - {%- if not loop.last %}, {% endif -%} - {%- endfor -%} - {%- endif -%} -] - - -#================================ Processors ===================================== - -{%- if processors %} -processors: -{%- for processor in processors %} -{%- for name, settings in processor.items() %} -- {{name}}: - {%- if settings %} - {%- for k, v in settings.items() %} - {{k}}: - {{v | default([])}} - {%- endfor %} - {%- endif %} -{%- endfor %} -{%- endfor %} - -{%- endif %} #============================== Autodiscover ================================== @@ -148,51 +111,10 @@ metricbeat.autodiscover: - type: {{provider}} {%- if settings %} {%- for k, v in settings.items() %} - {{k}}: - {{v | default([])}} + {{k}}: {{v | default([])}} {%- endfor %} {%- endif %} {%- endfor %} {% endif %} -#================================ Queue ===================================== - -queue.mem: - events: 4096 - flush.min_events: {{ flush_min_events|default(8) }} - flush.timeout: 0.1s - -{% if kibana -%} -setup.kibana.host: "{{ kibana.host }}" -{%- endif %} - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -output: - {% if elasticsearch -%} - elasticsearch: - hosts: ["{{ elasticsearch.host }}"] - {%- endif %} - - # File as output - # Options - # path: where to save the files - # filename: name of the files - # rotate_every_kb: maximum size of the files in path - # number of files: maximum number of files in path - {% if not (console or elasticsearch) -%} - file: - path: {{ output_file_path|default(beat.working_dir + "/output") }} - filename: "{{ output_file_filename|default("metricbeat") }}" - rotate_every_kb: 1000 - #number_of_files: 7 - {%- endif %} - -{% if path_data %} -#================================ Paths ===================================== -path: - data: {{path_data}} -{%endif%} +{% include './tests/system/config/libbeat.yml.j2' %} diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/metricbeat.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/metricbeat.py index 490659ad..6cbaa317 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/metricbeat.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/metricbeat.py @@ -7,7 +7,7 @@ from beat.beat import TestCase COMMON_FIELDS = ["@timestamp", "beat", "metricset.name", "metricset.host", - "metricset.module", "metricset.rtt"] + "metricset.module", "metricset.rtt", "host.name"] INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) @@ -78,3 +78,25 @@ def assert_no_logged_warnings(self, replace=None): def build_log_regex(self, message): return re.compile(r"^.*\t(?:ERROR|WARN)\t.*" + message + r".*$", re.MULTILINE) + + def check_metricset(self, module, metricset, hosts): + """ + Method to test a metricset for its fields + """ + self.render_config_template(modules=[{ + "name": module, + "metricsets": [metricset], + "hosts": hosts, + "period": "1s", + }]) + proc = self.start_beat() + self.wait_until(lambda: self.output_lines() > 0, max_timeout=20) + proc.check_kill_and_wait() + self.assert_no_logged_warnings() + + output = self.read_output_json() + self.assertTrue(len(output) >= 1) + evt = output[0] + print(evt) + + self.assert_fields_are_documented(evt) diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/requirements.txt 
b/vendor/github.com/elastic/beats/metricbeat/tests/system/requirements.txt index d328a914..cc3c0ad3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/requirements.txt +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/requirements.txt @@ -1 +1,2 @@ -kafka-python +kafka-python==1.4.2 +parameterized==0.6.1 diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py index 21f32c15..41f73c03 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py @@ -7,13 +7,24 @@ APACHE_FIELDS = metricbeat.COMMON_FIELDS + ["apache"] -APACHE_STATUS_FIELDS = ["hostname", "total_accesses", "total_kbytes", - "requests_per_sec", "bytes_per_sec", "bytes_per_request", - "workers.busy", "workers.idle", "uptime", "cpu", - "connections", "load", "scoreboard"] +APACHE_STATUS_FIELDS = [ + "hostname", "total_accesses", "total_kbytes", + "requests_per_sec", "bytes_per_sec", "bytes_per_request", + "workers.busy", "workers.idle", "uptime", "cpu", + "connections", "load", "scoreboard" +] -CPU_FIELDS = ["load", "user", "system", "children_user", - "children_system"] +APACHE_OLD_STATUS_FIELDS = [ + "hostname", "total_accesses", "total_kbytes", + "requests_per_sec", "bytes_per_sec", + "workers.busy", "workers.idle", "uptime", + "connections", "scoreboard" +] + + +CPU_FIELDS = [ + "load", "user", "system", "children_user", "children_system" +] class ApacheStatusTest(metricbeat.BaseTest): @@ -37,7 +48,7 @@ def test_output(self): found = False # Waits until CPULoad is part of the status - while found == False: + while not found: res = urllib2.urlopen(hosts[0] + "/server-status?auto").read() if "CPULoad" in res: found = True @@ -52,16 +63,35 @@ def test_output(self): self.assertEqual(len(output), 1) evt = output[0] - # Verify the required fields are present. - self.assertItemsEqual(self.de_dot(APACHE_FIELDS), evt.keys()) - apache_status = evt["apache"]["status"] - self.assertItemsEqual(self.de_dot(APACHE_STATUS_FIELDS), apache_status.keys()) - self.assertItemsEqual(self.de_dot(CPU_FIELDS), apache_status["cpu"].keys()) - # There are more fields that could be checked. + self.verify_fields(evt) # Verify all fields present are documented. self.assert_fields_are_documented(evt) + def verify_fields(self, evt): + self.assertItemsEqual(self.de_dot(APACHE_FIELDS), evt.keys()) + apache_status = evt["apache"]["status"] + self.assertItemsEqual( + self.de_dot(APACHE_STATUS_FIELDS), apache_status.keys()) + self.assertItemsEqual( + self.de_dot(CPU_FIELDS), apache_status["cpu"].keys()) + # There are more fields that could be checked. 
+ def get_hosts(self): return ['http://' + os.getenv('APACHE_HOST', 'localhost') + ':' + os.getenv('APACHE_PORT', '80')] + + +class ApacheOldStatusTest(ApacheStatusTest): + + COMPOSE_SERVICES = ['apache_2_4_12'] + + def verify_fields(self, evt): + self.assertItemsEqual(self.de_dot(APACHE_FIELDS), evt.keys()) + apache_status = evt["apache"]["status"] + self.assertItemsEqual( + self.de_dot(APACHE_OLD_STATUS_FIELDS), apache_status.keys()) + + def get_hosts(self): + return ['http://' + os.getenv('APACHE_OLD_HOST', 'localhost') + ':' + + os.getenv('APACHE_PORT', '80')] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_autodiscover.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_autodiscover.py index df8ce668..3773d7ae 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_autodiscover.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_autodiscover.py @@ -15,7 +15,7 @@ class TestAutodiscover(metricbeat.BaseTest): "integration test not available on 2.x") def test_docker(self): """ - Test docker autodiscover starts modules + Test docker autodiscover starts modules from templates """ import docker docker_client = docker.from_env() @@ -41,9 +41,10 @@ def test_docker(self): container = docker_client.containers.run('memcached:1.5.3', detach=True) self.wait_until(lambda: self.log_contains('Autodiscover starting runner: memcached')) - sleep(2) + self.wait_until(lambda: self.output_count(lambda x: x >= 1)) container.stop() + self.wait_until(lambda: self.log_contains('Autodiscover stopping runner: memcached')) output = self.read_output_json() @@ -53,3 +54,44 @@ def test_docker(self): assert output[0]['docker']['container']['image'] == 'memcached:1.5.3' assert output[0]['docker']['container']['labels'] == {} assert 'name' in output[0]['docker']['container'] + + @unittest.skipIf(not INTEGRATION_TESTS or + os.getenv("TESTING_ENVIRONMENT") == "2x", + "integration test not available on 2.x") + def test_docker_labels(self): + """ + Test docker autodiscover starts modules from labels + """ + import docker + docker_client = docker.from_env() + + self.render_config_template( + autodiscover={ + 'docker': { + 'hints.enabled': 'true', + }, + }, + ) + + proc = self.start_beat() + docker_client.images.pull('memcached:1.5.3') + labels = { + 'co.elastic.metrics/module': 'memcached', + 'co.elastic.metrics/period': '1s', + 'co.elastic.metrics/hosts': "'${data.host}:11211'", + } + container = docker_client.containers.run('memcached:1.5.3', labels=labels, detach=True) + + self.wait_until(lambda: self.log_contains('Autodiscover starting runner: memcached')) + + self.wait_until(lambda: self.output_count(lambda x: x >= 1)) + container.stop() + + self.wait_until(lambda: self.log_contains('Autodiscover stopping runner: memcached')) + + output = self.read_output_json() + proc.check_kill_and_wait() + + # Check metadata is added + assert output[0]['docker']['container']['image'] == 'memcached:1.5.3' + assert 'name' in output[0]['docker']['container'] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_elasticsearch.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_elasticsearch.py deleted file mode 100644 index 75784c0c..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_elasticsearch.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import metricbeat -import unittest - - -class Test(metricbeat.BaseTest): - - COMPOSE_SERVICES = ['elasticsearch'] - - 
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") - def test_node(self): - """ - elasticsearch node metricset test - """ - self.render_config_template(modules=[{ - "name": "elasticsearch", - "metricsets": ["node"], - "hosts": self.get_hosts(), - "period": "1s", - }]) - proc = self.start_beat() - self.wait_until(lambda: self.output_lines() > 0, max_timeout=20) - proc.check_kill_and_wait() - self.assert_no_logged_warnings() - - output = self.read_output_json() - self.assertTrue(len(output) >= 1) - evt = output[0] - print(evt) - - self.assert_fields_are_documented(evt) - - @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") - def test_node_stats(self): - """ - elasticsearch node_stats metricset test - """ - self.render_config_template(modules=[{ - "name": "elasticsearch", - "metricsets": ["node_stats"], - "hosts": self.get_hosts(), - "period": "1s", - }]) - proc = self.start_beat() - self.wait_until(lambda: self.output_lines() > 0, max_timeout=20) - proc.check_kill_and_wait() - self.assert_no_logged_warnings() - - output = self.read_output_json() - self.assertTrue(len(output) >= 1) - evt = output[0] - print(evt) - - self.assert_fields_are_documented(evt) - - def get_hosts(self): - return [os.getenv('ES_HOST', 'localhost') + ':' + - os.getenv('ES_PORT', '9200')] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py index 37788fe5..8b7dd6e9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py @@ -6,7 +6,7 @@ HAPROXY_FIELDS = metricbeat.COMMON_FIELDS + ["haproxy"] -class Test(metricbeat.BaseTest): +class HaproxyTest(metricbeat.BaseTest): COMPOSE_SERVICES = ['haproxy'] @@ -32,7 +32,7 @@ def test_info_socket(self): self.render_config_template(modules=[{ "name": "haproxy", "metricsets": ["info"], - "hosts": ["tcp://%s:%d" % (os.getenv('HAPROXY_HOST', 'localhost'), 14567)], + "hosts": ["tcp://%s:%d" % (self.compose_hosts()[0], 14567)], "period": "5s" }]) self._test_info() @@ -59,7 +59,7 @@ def test_stat_socket(self): self.render_config_template(modules=[{ "name": "haproxy", "metricsets": ["stat"], - "hosts": ["tcp://%s:%d" % (os.getenv('HAPROXY_HOST', 'localhost'), 14567)], + "hosts": ["tcp://%s:%d" % (self.compose_hosts()[0], 14567)], "period": "5s" }]) self._test_stat() @@ -72,7 +72,7 @@ def test_stat_http(self): self.render_config_template(modules=[{ "name": "haproxy", "metricsets": ["stat"], - "hosts": ["http://%s:%d/stats" % (os.getenv('HAPROXY_HOST', 'localhost'), 14568)], + "hosts": ["http://%s:%d/stats" % (self.compose_hosts()[0], 14568)], "period": "5s" }]) self._test_stat() @@ -87,7 +87,15 @@ def test_stat_http_auth(self): "metricsets": ["stat"], "username": "admin", "password": "admin", - "hosts": ["http://%s:%d/stats" % (os.getenv('HAPROXY_HOST', 'localhost'), 14569)], + "hosts": ["http://%s:%d/stats" % (self.compose_hosts()[0], 14569)], "period": "5s" }]) self._test_stat() + + +class Haproxy_1_6_Test(HaproxyTest): + COMPOSE_SERVICES = ['haproxy_1_6'] + + +class Haproxy_1_7_Test(HaproxyTest): + COMPOSE_SERVICES = ['haproxy_1_7'] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_jolokia.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_jolokia.py index 189dcc91..921ee5ae 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_jolokia.py +++ 
b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_jolokia.py @@ -2,25 +2,32 @@ import metricbeat import unittest from nose.plugins.attrib import attr +from parameterized import parameterized class Test(metricbeat.BaseTest): COMPOSE_SERVICES = ['jolokia'] + @parameterized.expand([ + 'java.lang:name=PS MarkSweep,type=GarbageCollector', + 'java.lang:type=GarbageCollector,name=PS MarkSweep', + 'java.lang:name=*,type=GarbageCollector', + 'java.lang:type=GarbageCollector,name=*', + ]) @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") - def test_jmx(self): + def test_jmx(self, mbean): """ jolokia jmx metricset test """ additional_content = """ jmx.mappings: - - mbean: 'java.lang:type=Runtime' + - mbean: '%s' attributes: - - attr: Uptime - field: uptime -""" + - attr: CollectionCount + field: gc.collection_count +""" % (mbean) self.render_config_template(modules=[{ "name": "jolokia", @@ -40,7 +47,7 @@ def test_jmx(self): evt = output[0] print(evt) - assert evt["jolokia"]["test"]["uptime"] > 0 + assert evt["jolokia"]["test"]["gc"]["collection_count"] >= 0 def get_hosts(self): return [os.getenv('JOLOKIA_HOST', 'localhost') + ':' + diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_munin.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_munin.py new file mode 100644 index 00000000..e1618202 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_munin.py @@ -0,0 +1,38 @@ +import os +import metricbeat +import unittest +from nose.plugins.attrib import attr + + +class Test(metricbeat.BaseTest): + + COMPOSE_SERVICES = ['munin'] + + @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + def test_munin_node(self): + namespace = "node_test" + + self.render_config_template(modules=[{ + "name": "munin", + "metricsets": ["node"], + "hosts": self.get_hosts(), + "period": "1s", + "extras": { + "node.namespace": namespace, + }, + }]) + proc = self.start_beat() + self.wait_until(lambda: self.output_lines() > 0, max_timeout=20) + proc.check_kill_and_wait() + self.assert_no_logged_warnings() + + output = self.read_output_json() + self.assertTrue(len(output) >= 1) + evt = output[0] + print(evt) + + assert evt["munin"][namespace]["cpu"]["user"] > 0 + + def get_hosts(self): + return [os.getenv('MUNIN_HOST', 'localhost') + ':' + + os.getenv('MUNIN_PORT', '4949')] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py index 1c83e228..e9192d09 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_processors.py @@ -5,7 +5,7 @@ @unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd", sys.platform), "os") -class TestProcessors(metricbeat.BaseTest): +class Test(metricbeat.BaseTest): def test_drop_fields(self): @@ -35,7 +35,7 @@ def test_drop_fields(self): print(evt.keys()) self.assertItemsEqual(self.de_dot([ 'beat', '@timestamp', 'system', 'metricset.module', - 'metricset.rtt', 'metricset.name' + 'metricset.rtt', 'metricset.name', 'host' ]), evt.keys()) cpu = evt["system"]["cpu"] print(cpu.keys()) @@ -259,3 +259,31 @@ def test_contradictory_multiple_actions(self): "system.process.memory.rss.pct" ]: assert key not in output + + def test_rename_field(self): + + self.render_config_template( + modules=[{ + "name": "system", + "metricsets": ["cpu"], + "period": "1s" + }], + processors=[{ + "rename": { + 
"fields": [{"from": "metricset.name", "to": "hello.world"}], + }, + }] + ) + proc = self.start_beat() + self.wait_until(lambda: self.output_lines() > 0) + proc.check_kill_and_wait() + + output = self.read_output_json() + self.assertEqual(len(output), 1) + evt = output[0] + + print(evt) + print(evt.keys()) + + assert "name" not in output[0]["metricset"] + assert "cpu" in output[0]["hello"]["world"] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py index 7d183b55..6754657a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_system.py @@ -39,7 +39,7 @@ SYSTEM_FSSTAT_FIELDS = ["count", "total_files", "total_size"] SYSTEM_MEMORY_FIELDS = ["swap", "actual.free", "free", "total", "used.bytes", "used.pct", "actual.used.bytes", - "actual.used.pct"] + "actual.used.pct", "hugepages"] SYSTEM_NETWORK_FIELDS = ["name", "out.bytes", "in.bytes", "out.packets", "in.packets", "in.error", "out.error", "in.dropped", "out.dropped"] @@ -195,8 +195,9 @@ def test_diskio(self): for evt in output: self.assert_fields_are_documented(evt) - diskio = evt["system"]["diskio"] - self.assertItemsEqual(self.de_dot(SYSTEM_DISKIO_FIELDS), diskio.keys()) + if 'error' not in evt: + diskio = evt["system"]["diskio"] + self.assertItemsEqual(self.de_dot(SYSTEM_DISKIO_FIELDS), diskio.keys()) @unittest.skipUnless(re.match("(?i)linux", sys.platform), "os") def test_diskio_linux(self): @@ -288,6 +289,9 @@ def test_memory(self): self.assert_fields_are_documented(evt) memory = evt["system"]["memory"] + if not re.match("(?i)linux", sys.platform) and not "hugepages" in memory: + # Ensure presence of hugepages only in Linux + memory["hugepages"] = None self.assertItemsEqual(self.de_dot(SYSTEM_MEMORY_FIELDS), memory.keys()) # Check that percentages are calculated. @@ -373,7 +377,8 @@ def test_process(self): "metricsets": ["process"], "period": "5s", "extras": { - "process.env.whitelist": ["PATH"] + "process.env.whitelist": ["PATH"], + "process.include_cpu_ticks": True, } }]) proc = self.start_beat() diff --git a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 00000000..bd0e3bb4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,151 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" +) + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + discardLegacy(m) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, ok := extendable(m); ok { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/encode.go index 2b30f846..8b84d1b2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) } func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer diff --git a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/lib.go index ac4ddbc0..1c225504 100644 --- a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/lib.go @@ -73,7 +73,6 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. 
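The DiscardUnknown helper introduced in discard.go above is normally called right after unmarshaling, so that unrecognized wire fields preserved by Unmarshal are not carried over into a later Marshal. A minimal usage sketch in Go; the generated package path and the pb.Event message type are hypothetical stand-ins, not part of this change:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/gen/eventpb" // hypothetical generated package
)

// reencode decodes a payload, drops any unknown fields it carried, and
// re-encodes it so only fields known to this binary remain.
func reencode(data []byte) ([]byte, error) {
	msg := &pb.Event{} // hypothetical generated message type
	if err := proto.Unmarshal(data, msg); err != nil {
		return nil, err
	}
	// Unmarshal keeps unrecognized fields (XXX_unrecognized and unread
	// extensions); DiscardUnknown walks the message recursively and clears them.
	proto.DiscardUnknown(msg)
	return proto.Marshal(msg)
}

func main() {
	if _, err := reencode(nil); err != nil {
		log.Println(err)
	}
}

EncodeZigzag64 in the encode.go hunk above implements protobuf's zigzag mapping for sint64 values. As a reference only (an illustration, not the library's code), the same mapping written on a signed input, with its inverse:

package main

import "fmt"

// zigzag64 maps signed values to unsigned ones so that small magnitudes,
// positive or negative, encode as small varints: 0->0, -1->1, 1->2, -2->3, ...
func zigzag64(x int64) uint64 { return uint64(x<<1) ^ uint64(x>>63) }

// unzigzag64 inverts the mapping.
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150, -151} {
		e := zigzag64(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, e, unzigzag64(e))
	}
}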
diff --git a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/text_parser.go index 61f83c1e..5e14513f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) + fv.SetUint(x) return nil } case reflect.Uint64: diff --git a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/expfmt/text_parse.go index ef9a1507..54bcfde2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn { if p.readTokenAsLabelValue(); p.err != nil { return nil } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } p.currentLabelPair.Value = proto.String(p.currentToken.String()) // Special treatment of summaries: // - Quantile labels are special, will result in dto.Quantile later. diff --git a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/model/time.go index 548968ae..74ed5a9f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/elastic/beats/metricbeat/vendor/github.com/prometheus/common/model/time.go @@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") -// StringToDuration parses a string into a time.Duration, assuming that a year +// ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. 
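The Set and Type methods added to model.Duration in the time.go hunk above make it satisfy the standard flag.Value interface (and pflag's Value), so a Prometheus-style duration such as 15d or 2w can be bound directly to a command-line flag. A small sketch of that use, assuming the updated github.com/prometheus/common/model package is on the import path; the flag name is illustrative:

package main

import (
	"flag"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// *model.Duration now implements flag.Value: String() already existed,
	// and the new Set(string) parses values like "90s", "15d" or "2w".
	var retention model.Duration
	flag.Var(&retention, "retention", "how long to keep data, e.g. 15d")
	flag.Parse()
	fmt.Println("retention:", retention.String())
}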
func ParseDuration(durationStr string) (Duration, error) { matches := durationRE.FindStringSubmatch(durationStr) @@ -202,6 +214,9 @@ func (d Duration) String() string { ms = int64(time.Duration(d) / time.Millisecond) unit = "ms" ) + if ms == 0 { + return "0s" + } factors := map[string]int64{ "y": 1000 * 60 * 60 * 24 * 365, "w": 1000 * 60 * 60 * 24 * 7, diff --git a/vendor/github.com/elastic/beats/metricbeat/vendor/vendor.json b/vendor/github.com/elastic/beats/metricbeat/vendor/vendor.json index e115f579..a71efb78 100644 --- a/vendor/github.com/elastic/beats/metricbeat/vendor/vendor.json +++ b/vendor/github.com/elastic/beats/metricbeat/vendor/vendor.json @@ -3,10 +3,10 @@ "ignore": "test github.com/elastic/beats", "package": [ { - "checksumSHA1": "kBeNcaKk56FguvPSUCEaH6AxpRc=", + "checksumSHA1": "WX1+2gktHcBmE9MGwFSGs7oqexU=", "path": "github.com/golang/protobuf/proto", - "revision": "18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8", - "revisionTime": "2017-04-27T21:32:20Z" + "revision": "bbd03ef6da3a115852eaf24c8a1c46aeb39aa175", + "revisionTime": "2018-02-02T18:43:18Z" }, { "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=", @@ -17,26 +17,26 @@ { "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=", "path": "github.com/prometheus/client_model/go", - "revision": "6f3806018612930941127f2a7c6c453ba2c527d2", - "revisionTime": "2017-02-16T18:52:47Z" + "revision": "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c", + "revisionTime": "2017-11-17T10:05:41Z" }, { - "checksumSHA1": "Wtpzndm/+bdwwNU5PCTfb4oUhc8=", + "checksumSHA1": "xfnn0THnqNwjwimeTClsxahYrIo=", "path": "github.com/prometheus/common/expfmt", - "revision": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207", - "revisionTime": "2017-04-27T09:54:55Z" + "revision": "89604d197083d4781071d3c65855d24ecfb0a563", + "revisionTime": "2018-01-10T21:49:58Z" }, { "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=", "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "revision": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207", - "revisionTime": "2017-04-27T09:54:55Z" + "revision": "89604d197083d4781071d3c65855d24ecfb0a563", + "revisionTime": "2018-01-10T21:49:58Z" }, { - "checksumSHA1": "0LL9u9tfv1KPBjNEiMDP6q7lpog=", + "checksumSHA1": "YU+/K48IMawQnToO4ETE6a+hhj4=", "path": "github.com/prometheus/common/model", - "revision": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207", - "revisionTime": "2017-04-27T09:54:55Z" + "revision": "89604d197083d4781071d3c65855d24ecfb0a563", + "revisionTime": "2018-01-10T21:49:58Z" } ], "rootPath": "github.com/elastic/beats/metricbeat" diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml b/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml index 6180cc8f..51e6ba4b 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml @@ -179,9 +179,18 @@ packetbeat.protocols: #send_all_headers: false # The list of content types for which Packetbeat includes the full HTTP - # payload in the response field. + # payload. If the request's or response's Content-Type matches any on this + # list, the full body will be included under the request or response field. #include_body_for: [] + # The list of content types for which Packetbeat includes the full HTTP + # request payload. + #include_request_body_for: [] + + # The list of content types for which Packetbeat includes the full HTTP + # response payload. 
+ #include_response_body_for: [] + # If the Cookie or Set-Cookie headers are sent, this option controls whether # they are split into individual values. #split_cookie: false diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/fields.yml b/vendor/github.com/elastic/beats/packetbeat/_meta/fields.yml index b6df8534..2ec0d21d 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/fields.yml @@ -1200,6 +1200,7 @@ same header name are present in the message, they will be separated by commas. - name: body + type: text description: The body of the HTTP response. - key: icmp diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/default/dashboard/Packetbeat-tls.json b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/6/dashboard/Packetbeat-tls.json similarity index 91% rename from vendor/github.com/elastic/beats/packetbeat/_meta/kibana/default/dashboard/Packetbeat-tls.json rename to vendor/github.com/elastic/beats/packetbeat/_meta/kibana/6/dashboard/Packetbeat-tls.json index 5f27a5ac..aaa89592 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/default/dashboard/Packetbeat-tls.json +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/kibana/6/dashboard/Packetbeat-tls.json @@ -358,7 +358,7 @@ "searchSourceJSON": "{\"filter\":[],\"query\":{\"language\":\"lucene\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"default_field\":\"*\",\"query\":\"*\"}}},\"highlightAll\":true,\"version\":true}" }, "optionsJSON": "{\"darkTheme\":false,\"useMargins\":false}", - "panelsJSON": "[{\"panelIndex\":\"4\",\"gridData\":{\"x\":0,\"y\":0,\"w\":3,\"h\":4,\"i\":\"4\"},\"id\":\"Navigation\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"8\",\"gridData\":{\"x\":3,\"y\":0,\"w\":9,\"h\":4,\"i\":\"8\"},\"id\":\"059fe5e0-d2dd-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"9\",\"gridData\":{\"x\":3,\"y\":7,\"w\":3,\"h\":3,\"i\":\"9\"},\"id\":\"c14377a0-d353-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"10\",\"gridData\":{\"x\":0,\"y\":4,\"w\":3,\"h\":3,\"i\":\"10\"},\"id\":\"061de380-d361-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"11\",\"gridData\":{\"x\":0,\"y\":10,\"w\":12,\"h\":3,\"i\":\"11\"},\"id\":\"a28d09d0-d361-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"12\",\"gridData\":{\"x\":6,\"y\":7,\"w\":3,\"h\":3,\"i\":\"12\"},\"id\":\"0af0b790-d37d-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"13\",\"gridData\":{\"x\":9,\"y\":7,\"w\":3,\"h\":3,\"i\":\"13\"},\"id\":\"ae6e33c0-d37d-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"14\",\"gridData\":{\"x\":0,\"y\":7,\"w\":3,\"h\":3,\"i\":\"14\"},\"id\":\"2c467370-d392-11e7-8fa0-232aa9259081\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"15\",\"gridData\":{\"x\":0,\"y\":13,\"w\":6,\"h\":3,\"i\":\"15\"},\"id\":\"0958a910-d396-11e7-8fa0-232aa9259081\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"16\",\"gridData\":{\"x\":0,\"y\":16,\"w\":6,\"h\":3,\"i\":\"16\"},\"id\":\"86743f90-d396-11e7-8fa0-232aa9259081\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\
"},{\"panelIndex\":\"17\",\"gridData\":{\"x\":6,\"y\":13,\"w\":6,\"h\":3,\"i\":\"17\"},\"id\":\"463d2bf0-d3a8-11e7-9081-ab2af08e9961\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"18\",\"gridData\":{\"x\":6,\"y\":16,\"w\":6,\"h\":3,\"i\":\"18\"},\"id\":\"ad2a8b50-d49d-11e7-996f-bd7c1ca4591b\",\"type\":\"visualization\",\"version\":\"7.0.0-alpha1-SNAPSHOT\"},{\"panelIndex\":\"19\",\"gridData\":{\"x\":3,\"y\":4,\"w\":9,\"h\":3,\"i\":\"19\"},\"version\":\"7.0.0-alpha1-SNAPSHOT\",\"type\":\"visualization\",\"id\":\"d2e15950-d560-11e7-9fff-7b1ebf397ba9\"}]", + "panelsJSON": "[{\"panelIndex\":\"4\",\"gridData\":{\"x\":0,\"y\":0,\"w\":3,\"h\":4,\"i\":\"4\"},\"id\":\"Navigation\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"8\",\"gridData\":{\"x\":3,\"y\":0,\"w\":9,\"h\":4,\"i\":\"8\"},\"id\":\"059fe5e0-d2dd-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"9\",\"gridData\":{\"x\":3,\"y\":7,\"w\":3,\"h\":3,\"i\":\"9\"},\"id\":\"c14377a0-d353-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"10\",\"gridData\":{\"x\":0,\"y\":4,\"w\":3,\"h\":3,\"i\":\"10\"},\"id\":\"061de380-d361-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"11\",\"gridData\":{\"x\":0,\"y\":10,\"w\":12,\"h\":3,\"i\":\"11\"},\"id\":\"a28d09d0-d361-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"12\",\"gridData\":{\"x\":6,\"y\":7,\"w\":3,\"h\":3,\"i\":\"12\"},\"id\":\"0af0b790-d37d-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"13\",\"gridData\":{\"x\":9,\"y\":7,\"w\":3,\"h\":3,\"i\":\"13\"},\"id\":\"ae6e33c0-d37d-11e7-9914-4982455b3063\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"14\",\"gridData\":{\"x\":0,\"y\":7,\"w\":3,\"h\":3,\"i\":\"14\"},\"id\":\"2c467370-d392-11e7-8fa0-232aa9259081\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"15\",\"gridData\":{\"x\":0,\"y\":13,\"w\":6,\"h\":3,\"i\":\"15\"},\"id\":\"0958a910-d396-11e7-8fa0-232aa9259081\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"16\",\"gridData\":{\"x\":0,\"y\":16,\"w\":6,\"h\":3,\"i\":\"16\"},\"id\":\"86743f90-d396-11e7-8fa0-232aa9259081\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"17\",\"gridData\":{\"x\":6,\"y\":13,\"w\":6,\"h\":3,\"i\":\"17\"},\"id\":\"463d2bf0-d3a8-11e7-9081-ab2af08e9961\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"18\",\"gridData\":{\"x\":6,\"y\":16,\"w\":6,\"h\":3,\"i\":\"18\"},\"id\":\"ad2a8b50-d49d-11e7-996f-bd7c1ca4591b\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"19\",\"gridData\":{\"x\":3,\"y\":4,\"w\":9,\"h\":3,\"i\":\"19\"},\"version\":\"6.2.4\",\"type\":\"visualization\",\"id\":\"d2e15950-d560-11e7-9fff-7b1ebf397ba9\"}]", "timeRestore": false, "title": "[Packetbeat] TLS Sessions", "uiStateJSON": "{\"P-15\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-16\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-17\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-18\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-5\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-7\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}", @@ -370,5 +370,5 @@ "version": 2 
} ], - "version": "7.0.0-alpha1-SNAPSHOT" -} \ No newline at end of file + "version": "6.2.4" +} diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc index 8c66cff5..8906ff66 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc @@ -74,4 +74,3 @@ include::../../libbeat/docs/shared-env-vars.asciidoc[] include::../../libbeat/docs/yaml.asciidoc[] include::../../libbeat/docs/reference-yml.asciidoc[] - diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/fields.asciidoc index 20edde5a..6b141695 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/fields.asciidoc @@ -20,6 +20,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -43,9 +44,9 @@ AMQP specific event fields. -[float] -=== `amqp.reply-code` - +*`amqp.reply-code`*:: ++ +-- type: long example: 404 @@ -53,41 +54,51 @@ example: 404 AMQP reply code to an error, similar to http reply-code -[float] -=== `amqp.reply-text` +-- +*`amqp.reply-text`*:: ++ +-- type: keyword Text explaining the error. -[float] -=== `amqp.class-id` +-- +*`amqp.class-id`*:: ++ +-- type: long Failing method class. -[float] -=== `amqp.method-id` +-- +*`amqp.method-id`*:: ++ +-- type: long Failing method ID. -[float] -=== `amqp.exchange` +-- +*`amqp.exchange`*:: ++ +-- type: keyword Name of the exchange. -[float] -=== `amqp.exchange-type` +-- +*`amqp.exchange-type`*:: ++ +-- type: keyword example: fanout @@ -95,167 +106,209 @@ example: fanout Exchange type. -[float] -=== `amqp.passive` +-- +*`amqp.passive`*:: ++ +-- type: boolean If set, do not create exchange/queue. -[float] -=== `amqp.durable` +-- +*`amqp.durable`*:: ++ +-- type: boolean If set, request a durable exchange/queue. -[float] -=== `amqp.exclusive` +-- +*`amqp.exclusive`*:: ++ +-- type: boolean If set, request an exclusive queue. -[float] -=== `amqp.auto-delete` +-- +*`amqp.auto-delete`*:: ++ +-- type: boolean If set, auto-delete queue when unused. -[float] -=== `amqp.no-wait` +-- +*`amqp.no-wait`*:: ++ +-- type: boolean If set, the server will not respond to the method. -[float] -=== `amqp.consumer-tag` +-- +*`amqp.consumer-tag`*:: ++ +-- Identifier for the consumer, valid within the current channel. -[float] -=== `amqp.delivery-tag` +-- +*`amqp.delivery-tag`*:: ++ +-- type: long The server-assigned and channel-specific delivery tag. -[float] -=== `amqp.message-count` +-- +*`amqp.message-count`*:: ++ +-- type: long The number of messages in the queue, which will be zero for newly-declared queues. -[float] -=== `amqp.consumer-count` +-- +*`amqp.consumer-count`*:: ++ +-- type: long The number of consumers of a queue. -[float] -=== `amqp.routing-key` +-- +*`amqp.routing-key`*:: ++ +-- type: keyword Message routing key. -[float] -=== `amqp.no-ack` +-- +*`amqp.no-ack`*:: ++ +-- type: boolean If set, the server does not expect acknowledgements for messages. -[float] -=== `amqp.no-local` +-- +*`amqp.no-local`*:: ++ +-- type: boolean If set, the server will not send messages to the connection that published them. -[float] -=== `amqp.if-unused` +-- +*`amqp.if-unused`*:: ++ +-- type: boolean Delete only if unused. -[float] -=== `amqp.if-empty` +-- +*`amqp.if-empty`*:: ++ +-- type: boolean Delete only if empty. 
-[float] -=== `amqp.queue` +-- +*`amqp.queue`*:: ++ +-- type: keyword The queue name identifies the queue within the vhost. -[float] -=== `amqp.redelivered` +-- +*`amqp.redelivered`*:: ++ +-- type: boolean Indicates that the message has been previously delivered to this or another client. -[float] -=== `amqp.multiple` +-- +*`amqp.multiple`*:: ++ +-- type: boolean Acknowledge multiple messages. -[float] -=== `amqp.arguments` +-- +*`amqp.arguments`*:: ++ +-- type: object Optional additional arguments passed to some methods. Can be of various types. -[float] -=== `amqp.mandatory` +-- +*`amqp.mandatory`*:: ++ +-- type: boolean Indicates mandatory routing. -[float] -=== `amqp.immediate` +-- +*`amqp.immediate`*:: ++ +-- type: boolean Request immediate delivery. -[float] -=== `amqp.content-type` +-- +*`amqp.content-type`*:: ++ +-- type: keyword example: text/plain @@ -263,102 +316,128 @@ example: text/plain MIME content type. -[float] -=== `amqp.content-encoding` +-- +*`amqp.content-encoding`*:: ++ +-- type: keyword MIME content encoding. -[float] -=== `amqp.headers` +-- +*`amqp.headers`*:: ++ +-- type: object Message header field table. -[float] -=== `amqp.delivery-mode` +-- +*`amqp.delivery-mode`*:: ++ +-- type: keyword Non-persistent (1) or persistent (2). -[float] -=== `amqp.priority` +-- +*`amqp.priority`*:: ++ +-- type: long Message priority, 0 to 9. -[float] -=== `amqp.correlation-id` +-- +*`amqp.correlation-id`*:: ++ +-- type: keyword Application correlation identifier. -[float] -=== `amqp.reply-to` +-- +*`amqp.reply-to`*:: ++ +-- type: keyword Address to reply to. -[float] -=== `amqp.expiration` +-- +*`amqp.expiration`*:: ++ +-- type: keyword Message expiration specification. -[float] -=== `amqp.message-id` +-- +*`amqp.message-id`*:: ++ +-- type: keyword Application message identifier. -[float] -=== `amqp.timestamp` +-- +*`amqp.timestamp`*:: ++ +-- type: keyword Message timestamp. -[float] -=== `amqp.type` +-- +*`amqp.type`*:: ++ +-- type: keyword Message type name. -[float] -=== `amqp.user-id` +-- +*`amqp.user-id`*:: ++ +-- type: keyword Creating user id. -[float] -=== `amqp.app-id` +-- +*`amqp.app-id`*:: ++ +-- type: keyword Creating application id. +-- + [[exported-fields-beat]] == Beat fields @@ -366,33 +445,41 @@ Contains common beat fields available in all event types. -[float] -=== `beat.name` - +*`beat.name`*:: ++ +-- The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. -[float] -=== `beat.hostname` +-- +*`beat.hostname`*:: ++ +-- The hostname as returned by the operating system on which the Beat is running. -[float] -=== `beat.timezone` +-- +*`beat.timezone`*:: ++ +-- The timezone as returned by the operating system on which the Beat is running. -[float] -=== `beat.version` +-- +*`beat.version`*:: ++ +-- The version of the beat that generated this event. -[float] -=== `@timestamp` +-- +*`@timestamp`*:: ++ +-- type: date example: August 26th 2016, 12:35:53.332 @@ -404,20 +491,26 @@ required: True The timestamp when the event log record was generated. -[float] -=== `tags` +-- +*`tags`*:: ++ +-- Arbitrary tags that can be set per Beat and per transaction type. -[float] -=== `fields` +-- +*`fields`*:: ++ +-- type: object Contains user configurable fields. +-- + [float] == error fields @@ -425,30 +518,36 @@ Error fields containing additional info in case of errors. 
-[float] -=== `error.message` - +*`error.message`*:: ++ +-- type: text Error message. -[float] -=== `error.code` +-- +*`error.code`*:: ++ +-- type: long Error code. -[float] -=== `error.type` +-- +*`error.type`*:: ++ +-- type: keyword Error type. +-- + [[exported-fields-cassandra]] == Cassandra fields @@ -473,48 +572,60 @@ Cassandra request. Cassandra request headers. -[float] -=== `cassandra.request.headers.version` - +*`cassandra.request.headers.version`*:: ++ +-- type: long The version of the protocol. -[float] -=== `cassandra.request.headers.flags` +-- +*`cassandra.request.headers.flags`*:: ++ +-- type: keyword Flags applying to this frame. -[float] -=== `cassandra.request.headers.stream` +-- +*`cassandra.request.headers.stream`*:: ++ +-- type: keyword A frame has a stream id. If a client sends a request message with the stream id X, it is guaranteed that the stream id of the response to that message will be X. -[float] -=== `cassandra.request.headers.op` +-- +*`cassandra.request.headers.op`*:: ++ +-- type: keyword An operation type that distinguishes the actual message. -[float] -=== `cassandra.request.headers.length` +-- +*`cassandra.request.headers.length`*:: ++ +-- type: long A integer representing the length of the body of the frame (a frame is limited to 256MB in length). -[float] -=== `cassandra.request.query` +-- +*`cassandra.request.query`*:: ++ +-- type: keyword The CQL query which client send to cassandra. +-- + [float] == response fields @@ -527,526 +638,654 @@ Cassandra response. Cassandra response headers, the structure is as same as request's header. -[float] -=== `cassandra.response.headers.version` - +*`cassandra.response.headers.version`*:: ++ +-- type: long The version of the protocol. -[float] -=== `cassandra.response.headers.flags` +-- +*`cassandra.response.headers.flags`*:: ++ +-- type: keyword Flags applying to this frame. -[float] -=== `cassandra.response.headers.stream` +-- +*`cassandra.response.headers.stream`*:: ++ +-- type: keyword A frame has a stream id. If a client sends a request message with the stream id X, it is guaranteed that the stream id of the response to that message will be X. -[float] -=== `cassandra.response.headers.op` +-- +*`cassandra.response.headers.op`*:: ++ +-- type: keyword An operation type that distinguishes the actual message. -[float] -=== `cassandra.response.headers.length` +-- +*`cassandra.response.headers.length`*:: ++ +-- type: long A integer representing the length of the body of the frame (a frame is limited to 256MB in length). +-- + [float] == result fields Details about the returned result. -[float] -=== `cassandra.response.result.type` - +*`cassandra.response.result.type`*:: ++ +-- type: keyword Cassandra result type. +-- + [float] == rows fields Details about the rows. -[float] -=== `cassandra.response.result.rows.num_rows` - +*`cassandra.response.result.rows.num_rows`*:: ++ +-- type: long Representing the number of rows present in this result. +-- + [float] == meta fields Composed of result metadata. -[float] -=== `cassandra.response.result.rows.meta.keyspace` - +*`cassandra.response.result.rows.meta.keyspace`*:: ++ +-- type: keyword Only present after set Global_tables_spec, the keyspace name. -[float] -=== `cassandra.response.result.rows.meta.table` +-- +*`cassandra.response.result.rows.meta.table`*:: ++ +-- type: keyword Only present after set Global_tables_spec, the table name. 
-[float] -=== `cassandra.response.result.rows.meta.flags` +-- +*`cassandra.response.result.rows.meta.flags`*:: ++ +-- type: keyword Provides information on the formatting of the remaining information. -[float] -=== `cassandra.response.result.rows.meta.col_count` +-- +*`cassandra.response.result.rows.meta.col_count`*:: ++ +-- type: long Representing the number of columns selected by the query that produced this result. -[float] -=== `cassandra.response.result.rows.meta.pkey_columns` +-- +*`cassandra.response.result.rows.meta.pkey_columns`*:: ++ +-- type: long Representing the PK columns index and counts. -[float] -=== `cassandra.response.result.rows.meta.paging_state` +-- +*`cassandra.response.result.rows.meta.paging_state`*:: ++ +-- type: keyword The paging_state is a bytes value that should be used in QUERY/EXECUTE to continue paging and retrieve the remainder of the result for this query. -[float] -=== `cassandra.response.result.keyspace` +-- +*`cassandra.response.result.keyspace`*:: ++ +-- type: keyword Indicating the name of the keyspace that has been set. +-- + [float] == schema_change fields The result to a schema_change message. -[float] -=== `cassandra.response.result.schema_change.change` - +*`cassandra.response.result.schema_change.change`*:: ++ +-- type: keyword Representing the type of changed involved. -[float] -=== `cassandra.response.result.schema_change.keyspace` +-- +*`cassandra.response.result.schema_change.keyspace`*:: ++ +-- type: keyword This describes which keyspace has changed. -[float] -=== `cassandra.response.result.schema_change.table` +-- +*`cassandra.response.result.schema_change.table`*:: ++ +-- type: keyword This describes which table has changed. -[float] -=== `cassandra.response.result.schema_change.object` +-- +*`cassandra.response.result.schema_change.object`*:: ++ +-- type: keyword This describes the name of said affected object (either the table, user type, function, or aggregate name). -[float] -=== `cassandra.response.result.schema_change.target` +-- +*`cassandra.response.result.schema_change.target`*:: ++ +-- type: keyword Target could be "FUNCTION" or "AGGREGATE", multiple arguments. -[float] -=== `cassandra.response.result.schema_change.name` +-- +*`cassandra.response.result.schema_change.name`*:: ++ +-- type: keyword The function/aggregate name. -[float] -=== `cassandra.response.result.schema_change.args` +-- +*`cassandra.response.result.schema_change.args`*:: ++ +-- type: keyword One string for each argument type (as CQL type). +-- + [float] == prepared fields The result to a PREPARE message. -[float] -=== `cassandra.response.result.prepared.prepared_id` - +*`cassandra.response.result.prepared.prepared_id`*:: ++ +-- type: keyword Representing the prepared query ID. +-- + [float] == req_meta fields This describes the request metadata. -[float] -=== `cassandra.response.result.prepared.req_meta.keyspace` - +*`cassandra.response.result.prepared.req_meta.keyspace`*:: ++ +-- type: keyword Only present after set Global_tables_spec, the keyspace name. -[float] -=== `cassandra.response.result.prepared.req_meta.table` +-- +*`cassandra.response.result.prepared.req_meta.table`*:: ++ +-- type: keyword Only present after set Global_tables_spec, the table name. -[float] -=== `cassandra.response.result.prepared.req_meta.flags` +-- +*`cassandra.response.result.prepared.req_meta.flags`*:: ++ +-- type: keyword Provides information on the formatting of the remaining information. 
-[float] -=== `cassandra.response.result.prepared.req_meta.col_count` +-- +*`cassandra.response.result.prepared.req_meta.col_count`*:: ++ +-- type: long Representing the number of columns selected by the query that produced this result. -[float] -=== `cassandra.response.result.prepared.req_meta.pkey_columns` +-- +*`cassandra.response.result.prepared.req_meta.pkey_columns`*:: ++ +-- type: long Representing the PK columns index and counts. -[float] -=== `cassandra.response.result.prepared.req_meta.paging_state` +-- +*`cassandra.response.result.prepared.req_meta.paging_state`*:: ++ +-- type: keyword The paging_state is a bytes value that should be used in QUERY/EXECUTE to continue paging and retrieve the remainder of the result for this query. +-- + [float] == resp_meta fields This describes the metadata for the result set. -[float] -=== `cassandra.response.result.prepared.resp_meta.keyspace` - +*`cassandra.response.result.prepared.resp_meta.keyspace`*:: ++ +-- type: keyword Only present after set Global_tables_spec, the keyspace name. -[float] -=== `cassandra.response.result.prepared.resp_meta.table` +-- +*`cassandra.response.result.prepared.resp_meta.table`*:: ++ +-- type: keyword Only present after set Global_tables_spec, the table name. -[float] -=== `cassandra.response.result.prepared.resp_meta.flags` +-- +*`cassandra.response.result.prepared.resp_meta.flags`*:: ++ +-- type: keyword Provides information on the formatting of the remaining information. -[float] -=== `cassandra.response.result.prepared.resp_meta.col_count` +-- +*`cassandra.response.result.prepared.resp_meta.col_count`*:: ++ +-- type: long Representing the number of columns selected by the query that produced this result. -[float] -=== `cassandra.response.result.prepared.resp_meta.pkey_columns` +-- +*`cassandra.response.result.prepared.resp_meta.pkey_columns`*:: ++ +-- type: long Representing the PK columns index and counts. -[float] -=== `cassandra.response.result.prepared.resp_meta.paging_state` +-- +*`cassandra.response.result.prepared.resp_meta.paging_state`*:: ++ +-- type: keyword The paging_state is a bytes value that should be used in QUERY/EXECUTE to continue paging and retrieve the remainder of the result for this query. -[float] -=== `cassandra.response.supported` +-- +*`cassandra.response.supported`*:: ++ +-- type: object Indicates which startup options are supported by the server. This message comes as a response to an OPTIONS message. +-- + [float] == authentication fields Indicates that the server requires authentication, and which authentication mechanism to use. -[float] -=== `cassandra.response.authentication.class` - +*`cassandra.response.authentication.class`*:: ++ +-- type: keyword Indicates the full class name of the IAuthenticator in use -[float] -=== `cassandra.response.warnings` +-- +*`cassandra.response.warnings`*:: ++ +-- type: keyword The text of the warnings, only occur when Warning flag was set. +-- + [float] == event fields Event pushed by the server. A client will only receive events for the types it has REGISTERed to. -[float] -=== `cassandra.response.event.type` - +*`cassandra.response.event.type`*:: ++ +-- type: keyword Representing the event type. -[float] -=== `cassandra.response.event.change` +-- +*`cassandra.response.event.change`*:: ++ +-- type: keyword The message corresponding respectively to the type of change followed by the address of the new/removed node. 
-[float] -=== `cassandra.response.event.host` +-- +*`cassandra.response.event.host`*:: ++ +-- type: keyword Representing the node ip. -[float] -=== `cassandra.response.event.port` +-- +*`cassandra.response.event.port`*:: ++ +-- type: long Representing the node port. +-- + [float] == schema_change fields The events details related to schema change. -[float] -=== `cassandra.response.event.schema_change.change` - +*`cassandra.response.event.schema_change.change`*:: ++ +-- type: keyword Representing the type of changed involved. -[float] -=== `cassandra.response.event.schema_change.keyspace` +-- +*`cassandra.response.event.schema_change.keyspace`*:: ++ +-- type: keyword This describes which keyspace has changed. -[float] -=== `cassandra.response.event.schema_change.table` +-- +*`cassandra.response.event.schema_change.table`*:: ++ +-- type: keyword This describes which table has changed. -[float] -=== `cassandra.response.event.schema_change.object` +-- +*`cassandra.response.event.schema_change.object`*:: ++ +-- type: keyword This describes the name of said affected object (either the table, user type, function, or aggregate name). -[float] -=== `cassandra.response.event.schema_change.target` +-- +*`cassandra.response.event.schema_change.target`*:: ++ +-- type: keyword Target could be "FUNCTION" or "AGGREGATE", multiple arguments. -[float] -=== `cassandra.response.event.schema_change.name` +-- +*`cassandra.response.event.schema_change.name`*:: ++ +-- type: keyword The function/aggregate name. -[float] -=== `cassandra.response.event.schema_change.args` +-- +*`cassandra.response.event.schema_change.args`*:: ++ +-- type: keyword One string for each argument type (as CQL type). +-- + [float] == error fields Indicates an error processing a request. The body of the message will be an error code followed by a error message. Then, depending on the exception, more content may follow. -[float] -=== `cassandra.response.error.code` - +*`cassandra.response.error.code`*:: ++ +-- type: long The error code of the Cassandra response. -[float] -=== `cassandra.response.error.msg` +-- +*`cassandra.response.error.msg`*:: ++ +-- type: keyword The error message of the Cassandra response. -[float] -=== `cassandra.response.error.type` +-- +*`cassandra.response.error.type`*:: ++ +-- type: keyword The error type of the Cassandra response. +-- + [float] == details fields The details of the error. -[float] -=== `cassandra.response.error.details.read_consistency` - +*`cassandra.response.error.details.read_consistency`*:: ++ +-- type: keyword Representing the consistency level of the query that triggered the exception. -[float] -=== `cassandra.response.error.details.required` +-- +*`cassandra.response.error.details.required`*:: ++ +-- type: long Representing the number of nodes that should be alive to respect consistency level. -[float] -=== `cassandra.response.error.details.alive` +-- +*`cassandra.response.error.details.alive`*:: ++ +-- type: long Representing the number of replicas that were known to be alive when the request had been processed (since an unavailable exception has been triggered). -[float] -=== `cassandra.response.error.details.received` +-- +*`cassandra.response.error.details.received`*:: ++ +-- type: long Representing the number of nodes having acknowledged the request. -[float] -=== `cassandra.response.error.details.blockfor` +-- +*`cassandra.response.error.details.blockfor`*:: ++ +-- type: long Representing the number of replicas whose acknowledgement is required to achieve consistency level. 
-[float] -=== `cassandra.response.error.details.write_type` +-- +*`cassandra.response.error.details.write_type`*:: ++ +-- type: keyword Describe the type of the write that timed out. -[float] -=== `cassandra.response.error.details.data_present` +-- +*`cassandra.response.error.details.data_present`*:: ++ +-- type: boolean It means the replica that was asked for data had responded. -[float] -=== `cassandra.response.error.details.keyspace` +-- +*`cassandra.response.error.details.keyspace`*:: ++ +-- type: keyword The keyspace of the failed function. -[float] -=== `cassandra.response.error.details.table` +-- +*`cassandra.response.error.details.table`*:: ++ +-- type: keyword The keyspace of the failed function. -[float] -=== `cassandra.response.error.details.stmt_id` +-- +*`cassandra.response.error.details.stmt_id`*:: ++ +-- type: keyword Representing the unknown ID. -[float] -=== `cassandra.response.error.details.num_failures` +-- +*`cassandra.response.error.details.num_failures`*:: ++ +-- type: keyword Representing the number of nodes that experience a failure while executing the request. -[float] -=== `cassandra.response.error.details.function` +-- +*`cassandra.response.error.details.function`*:: ++ +-- type: keyword The name of the failed function. -[float] -=== `cassandra.response.error.details.arg_types` +-- +*`cassandra.response.error.details.arg_types`*:: ++ +-- type: keyword One string for each argument type (as CQL type) of the failed function. +-- + [[exported-fields-cloud]] == Cloud provider metadata fields @@ -1054,56 +1293,70 @@ Metadata from cloud providers added by the add_cloud_metadata processor. -[float] -=== `meta.cloud.provider` - +*`meta.cloud.provider`*:: ++ +-- example: ec2 Name of the cloud provider. Possible values are ec2, gce, or digitalocean. -[float] -=== `meta.cloud.instance_id` +-- +*`meta.cloud.instance_id`*:: ++ +-- Instance ID of the host machine. -[float] -=== `meta.cloud.instance_name` +-- +*`meta.cloud.instance_name`*:: ++ +-- Instance name of the host machine. -[float] -=== `meta.cloud.machine_type` +-- +*`meta.cloud.machine_type`*:: ++ +-- example: t2.medium Machine type of the host machine. -[float] -=== `meta.cloud.availability_zone` +-- +*`meta.cloud.availability_zone`*:: ++ +-- example: us-east-1c Availability zone in which this host is running. -[float] -=== `meta.cloud.project_id` +-- +*`meta.cloud.project_id`*:: ++ +-- example: project-x Name of the project in Google Cloud. -[float] -=== `meta.cloud.region` +-- +*`meta.cloud.region`*:: ++ +-- Region in which this host is running. +-- + [[exported-fields-common]] == Common fields @@ -1111,64 +1364,78 @@ These fields contain data about the environment in which the transaction or flow -[float] -=== `server` - +*`server`*:: ++ +-- The name of the server that served the transaction. -[float] -=== `client_server` +-- +*`client_server`*:: ++ +-- The name of the server that initiated the transaction. -[float] -=== `service` +-- +*`service`*:: ++ +-- The name of the logical service that served the transaction. -[float] -=== `client_service` +-- +*`client_service`*:: ++ +-- The name of the logical service that initiated the transaction. -[float] -=== `ip` +-- +*`ip`*:: ++ +-- format: dotted notation. The IP address of the server that served the transaction. -[float] -=== `client_ip` +-- +*`client_ip`*:: ++ +-- format: dotted notation. The IP address of the server that initiated the transaction. -[float] -=== `real_ip` +-- +*`real_ip`*:: ++ +-- format: Dotted notation. 
If the server initiating the transaction is a proxy, this field contains the original client IP address. For HTTP, for example, the IP address extracted from a configurable HTTP header, by default `X-Forwarded-For`. Unless this field is disabled, it always has a value, and it matches the `client_ip` for non proxy clients. +-- + [float] == client_geoip fields The GeoIP information of the client. -[float] -=== `client_geoip.location` - +*`client_geoip.location`*:: ++ +-- type: geo_point example: {'lat': 51, 'lon': 9} @@ -1176,56 +1443,72 @@ example: {'lat': 51, 'lon': 9} The GeoIP location of the `client_ip` address. This field is available only if you define a https://www.elastic.co/guide/en/elasticsearch/plugins/master/using-ingest-geoip.html[GeoIP Processor] as a pipeline in the https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest-geoip.html[Ingest GeoIP processor plugin] or using Logstash. -[float] -=== `client_port` +-- +*`client_port`*:: ++ +-- format: dotted notation. The layer 4 port of the process that initiated the transaction. -[float] -=== `transport` +-- +*`transport`*:: ++ +-- example: udp The transport protocol used for the transaction. If not specified, then tcp is assumed. -[float] -=== `type` +-- +*`type`*:: ++ +-- required: True The type of the transaction (for example, HTTP, MySQL, Redis, or RUM) or "flow" in case of flows. -[float] -=== `port` +-- +*`port`*:: ++ +-- format: dotted notation. The layer 4 port of the process that served the transaction. -[float] -=== `proc` +-- +*`proc`*:: ++ +-- The name of the process that served the transaction. -[float] -=== `client_proc` +-- +*`client_proc`*:: ++ +-- The name of the process that initiated the transaction. -[float] -=== `release` +-- +*`release`*:: ++ +-- The software release of the service serving the transaction. This can be the commit id or a semantic version. +-- + [[exported-fields-dns]] == DNS fields @@ -1233,273 +1516,345 @@ DNS-specific event fields. -[float] -=== `dns.id` - +*`dns.id`*:: ++ +-- type: long The DNS packet identifier assigned by the program that generated the query. The identifier is copied to the response. -[float] -=== `dns.op_code` +-- +*`dns.op_code`*:: ++ +-- example: QUERY The DNS operation code that specifies the kind of query in the message. This value is set by the originator of a query and copied into the response. -[float] -=== `dns.flags.authoritative` +-- +*`dns.flags.authoritative`*:: ++ +-- type: boolean A DNS flag specifying that the responding server is an authority for the domain name used in the question. -[float] -=== `dns.flags.recursion_available` +-- +*`dns.flags.recursion_available`*:: ++ +-- type: boolean A DNS flag specifying whether recursive query support is available in the name server. -[float] -=== `dns.flags.recursion_desired` +-- +*`dns.flags.recursion_desired`*:: ++ +-- type: boolean A DNS flag specifying that the client directs the server to pursue a query recursively. Recursive query support is optional. -[float] -=== `dns.flags.authentic_data` +-- +*`dns.flags.authentic_data`*:: ++ +-- type: boolean A DNS flag specifying that the recursive server considers the response authentic. -[float] -=== `dns.flags.checking_disabled` +-- +*`dns.flags.checking_disabled`*:: ++ +-- type: boolean A DNS flag specifying that the client disables the server signature validation of the query. 
-[float] -=== `dns.flags.truncated_response` +-- +*`dns.flags.truncated_response`*:: ++ +-- type: boolean A DNS flag specifying that only the first 512 bytes of the reply were returned. -[float] -=== `dns.response_code` +-- +*`dns.response_code`*:: ++ +-- example: NOERROR The DNS status code. -[float] -=== `dns.question.name` +-- +*`dns.question.name`*:: ++ +-- example: www.google.com. The domain name being queried. If the name field contains non-printable characters (below 32 or above 126), then those characters are represented as escaped base 10 integers (\DDD). Back slashes and quotes are escaped. Tabs, carriage returns, and line feeds are converted to \t, \r, and \n respectively. -[float] -=== `dns.question.type` +-- +*`dns.question.type`*:: ++ +-- example: AAAA The type of records being queried. -[float] -=== `dns.question.class` +-- +*`dns.question.class`*:: ++ +-- example: IN The class of of records being queried. -[float] -=== `dns.question.etld_plus_one` +-- +*`dns.question.etld_plus_one`*:: ++ +-- example: amazon.co.uk. The effective top-level domain (eTLD) plus one more label. For example, the eTLD+1 for "foo.bar.golang.org." is "golang.org.". The data for determining the eTLD comes from an embedded copy of the data from http://publicsuffix.org. -[float] -=== `dns.answers` +-- +*`dns.answers`*:: ++ +-- type: object An array containing a dictionary about each answer section returned by the server. -[float] -=== `dns.answers_count` +-- +*`dns.answers_count`*:: ++ +-- type: long The number of resource records contained in the `dns.answers` field. -[float] -=== `dns.answers.name` +-- +*`dns.answers.name`*:: ++ +-- example: example.com. The domain name to which this resource record pertains. -[float] -=== `dns.answers.type` +-- +*`dns.answers.type`*:: ++ +-- example: MX The type of data contained in this resource record. -[float] -=== `dns.answers.class` +-- +*`dns.answers.class`*:: ++ +-- example: IN The class of DNS data contained in this resource record. -[float] -=== `dns.answers.ttl` +-- +*`dns.answers.ttl`*:: ++ +-- type: long The time interval in seconds that this resource record may be cached before it should be discarded. Zero values mean that the data should not be cached. -[float] -=== `dns.answers.data` +-- +*`dns.answers.data`*:: ++ +-- The data describing the resource. The meaning of this data depends on the type and class of the resource record. -[float] -=== `dns.authorities` +-- +*`dns.authorities`*:: ++ +-- type: object An array containing a dictionary for each authority section from the answer. -[float] -=== `dns.authorities_count` +-- +*`dns.authorities_count`*:: ++ +-- type: long The number of resource records contained in the `dns.authorities` field. The `dns.authorities` field may or may not be included depending on the configuration of Packetbeat. -[float] -=== `dns.authorities.name` +-- +*`dns.authorities.name`*:: ++ +-- example: example.com. The domain name to which this resource record pertains. -[float] -=== `dns.authorities.type` +-- +*`dns.authorities.type`*:: ++ +-- example: NS The type of data contained in this resource record. -[float] -=== `dns.authorities.class` +-- +*`dns.authorities.class`*:: ++ +-- example: IN The class of DNS data contained in this resource record. -[float] -=== `dns.additionals` +-- +*`dns.additionals`*:: ++ +-- type: object An array containing a dictionary for each additional section from the answer. 
-[float] -=== `dns.additionals_count` +-- +*`dns.additionals_count`*:: ++ +-- type: long The number of resource records contained in the `dns.additionals` field. The `dns.additionals` field may or may not be included depending on the configuration of Packetbeat. -[float] -=== `dns.additionals.name` +-- +*`dns.additionals.name`*:: ++ +-- example: example.com. The domain name to which this resource record pertains. -[float] -=== `dns.additionals.type` +-- +*`dns.additionals.type`*:: ++ +-- example: NS The type of data contained in this resource record. -[float] -=== `dns.additionals.class` +-- +*`dns.additionals.class`*:: ++ +-- example: IN The class of DNS data contained in this resource record. -[float] -=== `dns.additionals.ttl` +-- +*`dns.additionals.ttl`*:: ++ +-- type: long The time interval in seconds that this resource record may be cached before it should be discarded. Zero values mean that the data should not be cached. -[float] -=== `dns.additionals.data` +-- +*`dns.additionals.data`*:: ++ +-- The data describing the resource. The meaning of this data depends on the type and class of the resource record. -[float] -=== `dns.opt.version` +-- +*`dns.opt.version`*:: ++ +-- example: 0 The EDNS version. -[float] -=== `dns.opt.do` +-- +*`dns.opt.do`*:: ++ +-- type: boolean If set, the transaction uses DNSSEC. -[float] -=== `dns.opt.ext_rcode` +-- +*`dns.opt.ext_rcode`*:: ++ +-- example: BADVERS Extended response code field. -[float] -=== `dns.opt.udp_size` +-- +*`dns.opt.udp_size`*:: ++ +-- type: long Requestor's UDP payload size (in bytes). +-- + [[exported-fields-docker-processor]] == Docker fields @@ -1508,38 +1863,46 @@ Docker stats collected from Docker. -[float] -=== `docker.container.id` - +*`docker.container.id`*:: ++ +-- type: keyword Unique container id. -[float] -=== `docker.container.image` +-- +*`docker.container.image`*:: ++ +-- type: keyword Name of the image the container was built on. -[float] -=== `docker.container.name` +-- +*`docker.container.name`*:: ++ +-- type: keyword Container name. -[float] -=== `docker.container.labels` +-- +*`docker.container.labels`*:: ++ +-- type: object Image labels. +-- + [[exported-fields-flows_event]] == Flow Event fields @@ -1547,9 +1910,9 @@ These fields contain data about the flow itself. -[float] -=== `start_time` - +*`start_time`*:: ++ +-- type: date example: 2015-01-24 14:06:05.071000 @@ -1561,9 +1924,11 @@ required: True The time, the first packet for the flow has been seen. -[float] -=== `last_time` +-- +*`last_time`*:: ++ +-- type: date example: 2015-01-24 14:06:05.071000 @@ -1575,30 +1940,40 @@ required: True The time, the most recent processed packet for the flow has been seen. -[float] -=== `final` +-- +*`final`*:: ++ +-- Indicates if event is last event in flow. If final is false, the event reports an intermediate flow state only. -[float] -=== `flow_id` +-- +*`flow_id`*:: ++ +-- Internal flow id based on connection meta data and address. -[float] -=== `vlan` +-- +*`vlan`*:: ++ +-- Innermost VLAN address used in network packets. -[float] -=== `outer_vlan` +-- +*`outer_vlan`*:: ++ +-- Second innermost VLAN address used in network packets. +-- + [float] == source fields @@ -1606,21 +1981,25 @@ Properties of the source host -[float] -=== `source.mac` - +*`source.mac`*:: ++ +-- Source MAC address as indicated by first packet seen for the current flow. -[float] -=== `source.ip` +-- +*`source.ip`*:: ++ +-- Innermost IPv4 source address as indicated by first packet seen for the current flow. 
-[float] -=== `source.ip_location` +-- +*`source.ip_location`*:: ++ +-- type: geo_point example: 40.715, -74.011 @@ -1628,15 +2007,19 @@ example: 40.715, -74.011 The GeoIP location of the `ip_source` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `source.outer_ip` +-- +*`source.outer_ip`*:: ++ +-- Second innermost IPv4 source address as indicated by first packet seen for the current flow. -[float] -=== `source.outer_ip_location` +-- +*`source.outer_ip_location`*:: ++ +-- type: geo_point example: 40.715, -74.011 @@ -1644,15 +2027,19 @@ example: 40.715, -74.011 The GeoIP location of the `outer_ip_source` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `source.ipv6` +-- +*`source.ipv6`*:: ++ +-- Innermost IPv6 source address as indicated by first packet seen for the current flow. -[float] -=== `source.ipv6_location` +-- +*`source.ipv6_location`*:: ++ +-- type: geo_point example: 60.715, -76.011 @@ -1660,15 +2047,19 @@ example: 60.715, -76.011 The GeoIP location of the `ipv6_source` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `source.outer_ipv6` +-- +*`source.outer_ipv6`*:: ++ +-- Second innermost IPv6 source address as indicated by first packet seen for the current flow. -[float] -=== `source.outer_ipv6_location` +-- +*`source.outer_ipv6_location`*:: ++ +-- type: geo_point example: 60.715, -76.011 @@ -1676,12 +2067,16 @@ example: 60.715, -76.011 The GeoIP location of the `outer_ipv6_source` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `source.port` +-- +*`source.port`*:: ++ +-- Source port number as indicated by first packet seen for the current flow. +-- + [float] == stats fields @@ -1689,22 +2084,26 @@ Object with source to destination flow measurements. -[float] -=== `source.stats.net_packets_total` - +*`source.stats.net_packets_total`*:: ++ +-- type: long Total number of packets -[float] -=== `source.stats.net_bytes_total` +-- +*`source.stats.net_bytes_total`*:: ++ +-- type: long Total number of bytes +-- + [float] == dest fields @@ -1712,21 +2111,25 @@ Properties of the destination host -[float] -=== `dest.mac` - +*`dest.mac`*:: ++ +-- Destination MAC address as indicated by first packet seen for the current flow. -[float] -=== `dest.ip` +-- +*`dest.ip`*:: ++ +-- Innermost IPv4 destination address as indicated by first packet seen for the current flow. -[float] -=== `dest.ip_location` +-- +*`dest.ip_location`*:: ++ +-- type: geo_point example: 40.715, -74.011 @@ -1734,15 +2137,19 @@ example: 40.715, -74.011 The GeoIP location of the `ip_dest` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `dest.outer_ip` +-- +*`dest.outer_ip`*:: ++ +-- Second innermost IPv4 destination address as indicated by first packet seen for the current flow. -[float] -=== `dest.outer_ip_location` +-- +*`dest.outer_ip_location`*:: ++ +-- type: geo_point example: 40.715, -74.011 @@ -1750,15 +2157,19 @@ example: 40.715, -74.011 The GeoIP location of the `outer_ip_dest` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `dest.ipv6` +-- +*`dest.ipv6`*:: ++ +-- Innermost IPv6 destination address as indicated by first packet seen for the current flow. 
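As a rough orientation, the `source` and `dest` field groups described in this section combine in flow events along the following lines. The sketch is purely illustrative (addresses, ports, and counters are made up), and it also uses the `dest.port` and `dest.stats` counterparts documented just below.

[source,json]
------------------------------------------------------------------------------
{
  "type": "flow",
  "final": true,
  "source": {
    "ip": "10.0.0.5",
    "port": 53210,
    "stats": {
      "net_packets_total": 12,
      "net_bytes_total": 1843
    }
  },
  "dest": {
    "ip": "10.0.0.9",
    "port": 80,
    "stats": {
      "net_packets_total": 10,
      "net_bytes_total": 4021
    }
  }
}
------------------------------------------------------------------------------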
-[float] -=== `dest.ipv6_location` +-- +*`dest.ipv6_location`*:: ++ +-- type: geo_point example: 60.715, -76.011 @@ -1766,15 +2177,19 @@ example: 60.715, -76.011 The GeoIP location of the `ipv6_dest` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `dest.outer_ipv6` +-- +*`dest.outer_ipv6`*:: ++ +-- Second innermost IPv6 destination address as indicated by first packet seen for the current flow. -[float] -=== `dest.outer_ipv6_location` +-- +*`dest.outer_ipv6_location`*:: ++ +-- type: geo_point example: 60.715, -76.011 @@ -1782,12 +2197,16 @@ example: 60.715, -76.011 The GeoIP location of the `outer_ipv6_dest` IP address. The field is a string containing the latitude and longitude separated by a comma. -[float] -=== `dest.port` +-- +*`dest.port`*:: ++ +-- Destination port number as indicated by first packet seen for the current flow. +-- + [float] == stats fields @@ -1795,34 +2214,110 @@ Object with destination to source flow measurements. -[float] -=== `dest.stats.net_packets_total` - +*`dest.stats.net_packets_total`*:: ++ +-- type: long Total number of packets -[float] -=== `dest.stats.net_bytes_total` +-- +*`dest.stats.net_bytes_total`*:: ++ +-- type: long Total number of bytes -[float] -=== `icmp_id` +-- +*`icmp_id`*:: ++ +-- ICMP id used in ICMP based flow. -[float] -=== `connection_id` +-- +*`connection_id`*:: ++ +-- optional TCP connection id +-- + +[[exported-fields-host-processor]] +== Host fields + +Info collected for the host machine. + + + + +*`host.name`*:: ++ +-- +type: keyword + +Hostname. + + +-- + +*`host.id`*:: ++ +-- +type: keyword + +Unique host id. + + +-- + +*`host.architecture`*:: ++ +-- +type: keyword + +Host architecture (e.g. x86_64, arm, ppc, mips). + + +-- + +*`host.os.platform`*:: ++ +-- +type: keyword + +OS platform (e.g. centos, ubuntu, windows). + + +-- + +*`host.os.version`*:: ++ +-- +type: keyword + +OS version. + + +-- + +*`host.os.family`*:: ++ +-- +type: keyword + +OS family (e.g. redhat, debian, freebsd, windows). + + +-- + [[exported-fields-http]] == HTTP fields @@ -1841,60 +2336,76 @@ Information about the HTTP request and response. HTTP request -[float] -=== `http.request.params` - +*`http.request.params`*:: ++ +-- The query parameters or form values. The query parameters are available in the Request-URI and the form values are set in the HTTP body when the content-type is set to `x-www-form-urlencoded`. -[float] -=== `http.request.headers` +-- +*`http.request.headers`*:: ++ +-- type: object A map containing the captured header fields from the request. Which headers to capture is configurable. If headers with the same header name are present in the message, they will be separated by commas. -[float] -=== `http.request.body` +-- +*`http.request.body`*:: ++ +-- type: text The body of the HTTP request. +-- + [float] == response fields HTTP response -[float] -=== `http.response.code` - +*`http.response.code`*:: ++ +-- example: 404 The HTTP status code. -[float] -=== `http.response.phrase` +-- +*`http.response.phrase`*:: ++ +-- example: Not found. The HTTP status phrase. -[float] -=== `http.response.headers` +-- +*`http.response.headers`*:: ++ +-- type: object A map containing the captured header fields from the response. Which headers to capture is configurable. If headers with the same header name are present in the message, they will be separated by commas. -[float] -=== `http.response.body` +-- + +*`http.response.body`*:: ++ +-- +type: text The body of the HTTP response. 
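For orientation, a trimmed-down HTTP transaction might carry the request and response fields above roughly as follows. The header names and values are invented for this example; which headers actually appear depends on the capture configuration mentioned above.

[source,json]
------------------------------------------------------------------------------
{
  "type": "http",
  "http": {
    "request": {
      "params": "name=test",
      "headers": {
        "host": "example.net"
      }
    },
    "response": {
      "code": 404,
      "phrase": "Not found.",
      "headers": {
        "content-type": "text/html"
      }
    }
  }
}
------------------------------------------------------------------------------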
+-- + [[exported-fields-icmp]] == ICMP fields @@ -1903,53 +2414,67 @@ ICMP specific event fields. -[float] -=== `icmp.version` - +*`icmp.version`*:: ++ +-- The version of the ICMP protocol. -[float] -=== `icmp.request.message` +-- +*`icmp.request.message`*:: ++ +-- type: keyword A human readable form of the request. -[float] -=== `icmp.request.type` +-- +*`icmp.request.type`*:: ++ +-- type: long The request type. -[float] -=== `icmp.request.code` +-- +*`icmp.request.code`*:: ++ +-- type: long The request code. -[float] -=== `icmp.response.message` +-- +*`icmp.response.message`*:: ++ +-- type: keyword A human readable form of the response. -[float] -=== `icmp.response.type` +-- +*`icmp.response.type`*:: ++ +-- type: long The response type. -[float] -=== `icmp.response.code` +-- +*`icmp.response.code`*:: ++ +-- type: long The response code. +-- + [[exported-fields-kubernetes-processor]] == Kubernetes fields @@ -1958,62 +2483,76 @@ Kubernetes metadata added by the kubernetes processor -[float] -=== `kubernetes.pod.name` - +*`kubernetes.pod.name`*:: ++ +-- type: keyword Kubernetes pod name -[float] -=== `kubernetes.namespace` +-- +*`kubernetes.namespace`*:: ++ +-- type: keyword Kubernetes namespace -[float] -=== `kubernetes.node.name` +-- +*`kubernetes.node.name`*:: ++ +-- type: keyword Kubernetes node name -[float] -=== `kubernetes.labels` +-- +*`kubernetes.labels`*:: ++ +-- type: object Kubernetes labels map -[float] -=== `kubernetes.annotations` +-- +*`kubernetes.annotations`*:: ++ +-- type: object Kubernetes annotations map -[float] -=== `kubernetes.container.name` +-- +*`kubernetes.container.name`*:: ++ +-- type: keyword Kubernetes container name -[float] -=== `kubernetes.container.image` +-- +*`kubernetes.container.image`*:: ++ +-- type: keyword Kubernetes container image +-- + [[exported-fields-memcache]] == Memcache fields @@ -2021,185 +2560,229 @@ Memcached-specific event fields -[float] -=== `memcache.protocol_type` - +*`memcache.protocol_type`*:: ++ +-- type: keyword The memcache protocol implementation. The value can be "binary" for binary-based, "text" for text-based, or "unknown" for an unknown memcache protocol type. -[float] -=== `memcache.request.line` +-- +*`memcache.request.line`*:: ++ +-- type: keyword The raw command line for unknown commands ONLY. -[float] -=== `memcache.request.command` +-- +*`memcache.request.command`*:: ++ +-- type: keyword The memcache command being requested in the memcache text protocol. For example "set" or "get". The binary protocol opcodes are translated into memcache text protocol commands. -[float] -=== `memcache.response.command` +-- +*`memcache.response.command`*:: ++ +-- type: keyword Either the text based protocol response message type or the name of the originating request if binary protocol is used. -[float] -=== `memcache.request.type` +-- +*`memcache.request.type`*:: ++ +-- type: keyword The memcache command classification. This value can be "UNKNOWN", "Load", "Store", "Delete", "Counter", "Info", "SlabCtrl", "LRUCrawler", "Stats", "Success", "Fail", or "Auth". -[float] -=== `memcache.response.type` +-- +*`memcache.response.type`*:: ++ +-- type: keyword The memcache command classification. This value can be "UNKNOWN", "Load", "Store", "Delete", "Counter", "Info", "SlabCtrl", "LRUCrawler", "Stats", "Success", "Fail", or "Auth". The text based protocol will employ any of these, whereas the binary based protocol will mirror the request commands only (see `memcache.response.status` for binary protocol). 
-[float] -=== `memcache.response.error_msg` +-- +*`memcache.response.error_msg`*:: ++ +-- type: keyword The optional error message in the memcache response (text based protocol only). -[float] -=== `memcache.request.opcode` +-- +*`memcache.request.opcode`*:: ++ +-- type: keyword The binary protocol message opcode name. -[float] -=== `memcache.response.opcode` +-- +*`memcache.response.opcode`*:: ++ +-- type: keyword The binary protocol message opcode name. -[float] -=== `memcache.request.opcode_value` +-- +*`memcache.request.opcode_value`*:: ++ +-- type: long The binary protocol message opcode value. -[float] -=== `memcache.response.opcode_value` +-- +*`memcache.response.opcode_value`*:: ++ +-- type: long The binary protocol message opcode value. -[float] -=== `memcache.request.opaque` +-- +*`memcache.request.opaque`*:: ++ +-- type: long The binary protocol opaque header value used for correlating request with response messages. -[float] -=== `memcache.response.opaque` +-- +*`memcache.response.opaque`*:: ++ +-- type: long The binary protocol opaque header value used for correlating request with response messages. -[float] -=== `memcache.request.vbucket` +-- +*`memcache.request.vbucket`*:: ++ +-- type: long The vbucket index sent in the binary message. -[float] -=== `memcache.response.status` +-- +*`memcache.response.status`*:: ++ +-- type: keyword The textual representation of the response error code (binary protocol only). -[float] -=== `memcache.response.status_code` +-- +*`memcache.response.status_code`*:: ++ +-- type: long The status code value returned in the response (binary protocol only). -[float] -=== `memcache.request.keys` +-- +*`memcache.request.keys`*:: ++ +-- type: array The list of keys sent in the store or load commands. -[float] -=== `memcache.response.keys` +-- +*`memcache.response.keys`*:: ++ +-- type: array The list of keys returned for the load command (if present). -[float] -=== `memcache.request.count_values` +-- +*`memcache.request.count_values`*:: ++ +-- type: long The number of values found in the memcache request message. If the command does not send any data, this field is missing. -[float] -=== `memcache.response.count_values` +-- +*`memcache.response.count_values`*:: ++ +-- type: long The number of values found in the memcache response message. If the command does not send any data, this field is missing. -[float] -=== `memcache.request.values` +-- +*`memcache.request.values`*:: ++ +-- type: array The list of base64 encoded values sent with the request (if present). -[float] -=== `memcache.response.values` +-- +*`memcache.response.values`*:: ++ +-- type: array The list of base64 encoded values sent with the response (if present). -[float] -=== `memcache.request.bytes` +-- +*`memcache.request.bytes`*:: ++ +-- type: long format: bytes @@ -2207,9 +2790,11 @@ format: bytes The byte count of the values being transferred. -[float] -=== `memcache.response.bytes` +-- +*`memcache.response.bytes`*:: ++ +-- type: long format: bytes @@ -2217,150 +2802,188 @@ format: bytes The byte count of the values being transferred. -[float] -=== `memcache.request.delta` +-- +*`memcache.request.delta`*:: ++ +-- type: long The counter increment/decrement delta value. -[float] -=== `memcache.request.initial` +-- +*`memcache.request.initial`*:: ++ +-- type: long The counter increment/decrement initial value parameter (binary protocol only). -[float] -=== `memcache.request.verbosity` +-- +*`memcache.request.verbosity`*:: ++ +-- type: long The value of the memcache "verbosity" command. 
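As a point of reference, a text-protocol store operation might surface the memcache fields above along these lines. The key, size, and response values are illustrative assumptions, not output from a real memcached exchange.

[source,json]
------------------------------------------------------------------------------
{
  "type": "memcache",
  "memcache": {
    "protocol_type": "text",
    "request": {
      "command": "set",
      "type": "Store",
      "keys": ["session:42"],
      "bytes": 128
    },
    "response": {
      "command": "STORED",
      "type": "Success"
    }
  }
}
------------------------------------------------------------------------------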
-[float] -=== `memcache.request.raw_args` +-- +*`memcache.request.raw_args`*:: ++ +-- type: keyword The text protocol raw arguments for the "stats ..." and "lru crawl ..." commands. -[float] -=== `memcache.request.source_class` +-- +*`memcache.request.source_class`*:: ++ +-- type: long The source class id in 'slab reassign' command. -[float] -=== `memcache.request.dest_class` +-- +*`memcache.request.dest_class`*:: ++ +-- type: long The destination class id in 'slab reassign' command. -[float] -=== `memcache.request.automove` +-- +*`memcache.request.automove`*:: ++ +-- type: keyword The automove mode in the 'slab automove' command expressed as a string. This value can be "standby"(=0), "slow"(=1), "aggressive"(=2), or the raw value if the value is unknown. -[float] -=== `memcache.request.flags` +-- +*`memcache.request.flags`*:: ++ +-- type: long The memcache command flags sent in the request (if present). -[float] -=== `memcache.response.flags` +-- +*`memcache.response.flags`*:: ++ +-- type: long The memcache message flags sent in the response (if present). -[float] -=== `memcache.request.exptime` +-- +*`memcache.request.exptime`*:: ++ +-- type: long The data expiry time in seconds sent with the memcache command (if present). If the value is <30 days, the expiry time is relative to "now", or else it is an absolute Unix time in seconds (32-bit). -[float] -=== `memcache.request.sleep_us` +-- +*`memcache.request.sleep_us`*:: ++ +-- type: long The sleep setting in microseconds for the 'lru_crawler sleep' command. -[float] -=== `memcache.response.value` +-- +*`memcache.response.value`*:: ++ +-- type: long The counter value returned by a counter operation. -[float] -=== `memcache.request.noreply` +-- +*`memcache.request.noreply`*:: ++ +-- type: boolean Set to true if noreply was set in the request. The `memcache.response` field will be missing. -[float] -=== `memcache.request.quiet` +-- +*`memcache.request.quiet`*:: ++ +-- type: boolean Set to true if the binary protocol message is to be treated as a quiet message. -[float] -=== `memcache.request.cas_unique` +-- +*`memcache.request.cas_unique`*:: ++ +-- type: long The CAS (compare-and-swap) identifier if present. -[float] -=== `memcache.response.cas_unique` +-- +*`memcache.response.cas_unique`*:: ++ +-- type: long The CAS (compare-and-swap) identifier to be used with CAS-based updates (if present). -[float] -=== `memcache.response.stats` +-- +*`memcache.response.stats`*:: ++ +-- type: array The list of statistic values returned. Each entry is a dictionary with the fields "name" and "value". -[float] -=== `memcache.response.version` +-- +*`memcache.response.version`*:: ++ +-- type: keyword The returned memcache version string. +-- + [[exported-fields-mongodb]] == MongoDb fields @@ -2369,156 +2992,202 @@ MongoDB-specific event fields. These fields mirror closely the fields for the Mo -[float] -=== `mongodb.error` - +*`mongodb.error`*:: ++ +-- If the MongoDB request has resulted in an error, this field contains the error message returned by the server. -[float] -=== `mongodb.fullCollectionName` +-- +*`mongodb.fullCollectionName`*:: ++ +-- The full collection name. The full collection name is the concatenation of the database name with the collection name, using a dot (.) for the concatenation. For example, for the database foo and the collection bar, the full collection name is foo.bar. 
-[float] -=== `mongodb.numberToSkip` +-- +*`mongodb.numberToSkip`*:: ++ +-- type: long Sets the number of documents to omit - starting from the first document in the resulting dataset - when returning the result of the query. -[float] -=== `mongodb.numberToReturn` +-- +*`mongodb.numberToReturn`*:: ++ +-- type: long The requested maximum number of documents to be returned. -[float] -=== `mongodb.numberReturned` +-- +*`mongodb.numberReturned`*:: ++ +-- type: long The number of documents in the reply. -[float] -=== `mongodb.startingFrom` +-- +*`mongodb.startingFrom`*:: ++ +-- Where in the cursor this reply is starting. -[float] -=== `mongodb.query` +-- +*`mongodb.query`*:: ++ +-- A JSON document that represents the query. The query will contain one or more elements, all of which must match for a document to be included in the result set. Possible elements include $query, $orderby, $hint, $explain, and $snapshot. -[float] -=== `mongodb.returnFieldsSelector` +-- +*`mongodb.returnFieldsSelector`*:: ++ +-- A JSON document that limits the fields in the returned documents. The returnFieldsSelector contains one or more elements, each of which is the name of a field that should be returned, and the integer value 1. -[float] -=== `mongodb.selector` +-- +*`mongodb.selector`*:: ++ +-- A BSON document that specifies the query for selecting the document to update or delete. -[float] -=== `mongodb.update` +-- +*`mongodb.update`*:: ++ +-- A BSON document that specifies the update to be performed. For information on specifying updates, see the Update Operations documentation from the MongoDB Manual. -[float] -=== `mongodb.cursorId` +-- +*`mongodb.cursorId`*:: ++ +-- The cursor identifier returned in the OP_REPLY. This must be the value that was returned from the database. +-- + [float] == rpc fields OncRPC specific event fields. -[float] -=== `rpc.xid` - +*`rpc.xid`*:: ++ +-- RPC message transaction identifier. -[float] -=== `rpc.call_size` +-- +*`rpc.call_size`*:: ++ +-- type: long RPC call size with argument. -[float] -=== `rpc.reply_size` +-- +*`rpc.reply_size`*:: ++ +-- type: long RPC reply size with argument. -[float] -=== `rpc.status` +-- +*`rpc.status`*:: ++ +-- RPC message reply status. -[float] -=== `rpc.time` +-- +*`rpc.time`*:: ++ +-- type: long RPC message processing time. -[float] -=== `rpc.time_str` +-- +*`rpc.time_str`*:: ++ +-- RPC message processing time in human readable form. -[float] -=== `rpc.auth_flavor` +-- +*`rpc.auth_flavor`*:: ++ +-- RPC authentication flavor. -[float] -=== `rpc.cred.uid` +-- +*`rpc.cred.uid`*:: ++ +-- type: long RPC caller's user id, in case of auth-unix. -[float] -=== `rpc.cred.gid` +-- +*`rpc.cred.gid`*:: ++ +-- type: long RPC caller's group id, in case of auth-unix. -[float] -=== `rpc.cred.gids` +-- +*`rpc.cred.gids`*:: ++ +-- RPC caller's secondary group ids, in case of auth-unix. -[float] -=== `rpc.cred.stamp` +-- +*`rpc.cred.stamp`*:: ++ +-- type: long Arbitrary ID which the caller machine may generate. -[float] -=== `rpc.cred.machinename` +-- +*`rpc.cred.machinename`*:: ++ +-- The name of the caller's machine. +-- + [[exported-fields-mysql]] == MySQL fields @@ -2527,60 +3196,76 @@ MySQL-specific event fields. -[float] -=== `mysql.iserror` - +*`mysql.iserror`*:: ++ +-- type: boolean If the MySQL query returns an error, this field is set to true. -[float] -=== `mysql.affected_rows` +-- +*`mysql.affected_rows`*:: ++ +-- type: long If the MySQL command is successful, this field contains the affected number of rows of the last statement. 
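To see how a few of these fields relate, consider a hypothetical successful INSERT. The values below are invented, and the generic `method` transaction field is included only to show the typical pairing:

[source,json]
------------------------------------------------------------------------------
{
  "type": "mysql",
  "method": "INSERT",
  "mysql": {
    "iserror": false,
    "affected_rows": 1
  }
}
------------------------------------------------------------------------------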
-[float] -=== `mysql.insert_id` +-- +*`mysql.insert_id`*:: ++ +-- If the INSERT query is successful, this field contains the id of the newly inserted row. -[float] -=== `mysql.num_fields` +-- +*`mysql.num_fields`*:: ++ +-- If the SELECT query is successful, this field is set to the number of fields returned. -[float] -=== `mysql.num_rows` +-- +*`mysql.num_rows`*:: ++ +-- If the SELECT query is successful, this field is set to the number of rows returned. -[float] -=== `mysql.query` +-- +*`mysql.query`*:: ++ +-- The row mysql query as read from the transaction's request. -[float] -=== `mysql.error_code` +-- +*`mysql.error_code`*:: ++ +-- type: long The error code returned by MySQL. -[float] -=== `mysql.error_message` +-- +*`mysql.error_message`*:: ++ +-- The error info message returned by MySQL. +-- + [[exported-fields-nfs]] == NFS fields @@ -2588,36 +3273,46 @@ NFS v4/3 specific event fields. -[float] -=== `nfs.version` - +*`nfs.version`*:: ++ +-- type: long NFS protocol version number. -[float] -=== `nfs.minor_version` +-- +*`nfs.minor_version`*:: ++ +-- type: long NFS protocol minor version number. -[float] -=== `nfs.tag` +-- +*`nfs.tag`*:: ++ +-- NFS v4 COMPOUND operation tag. -[float] -=== `nfs.opcode` +-- +*`nfs.opcode`*:: ++ +-- NFS operation name, or main operation name, in case of COMPOUND calls. -[float] -=== `nfs.status` +-- +*`nfs.status`*:: ++ +-- NFS operation reply status. +-- + [[exported-fields-pgsql]] == PostgreSQL fields @@ -2626,71 +3321,89 @@ PostgreSQL-specific event fields. -[float] -=== `pgsql.query` - +*`pgsql.query`*:: ++ +-- The row pgsql query as read from the transaction's request. -[float] -=== `pgsql.iserror` +-- +*`pgsql.iserror`*:: ++ +-- type: boolean If the PgSQL query returns an error, this field is set to true. -[float] -=== `pgsql.error_code` +-- +*`pgsql.error_code`*:: ++ +-- type: long The PostgreSQL error code. -[float] -=== `pgsql.error_message` +-- +*`pgsql.error_message`*:: ++ +-- The PostgreSQL error message. -[float] -=== `pgsql.error_severity` +-- +*`pgsql.error_severity`*:: ++ +-- The PostgreSQL error severity. -[float] -=== `pgsql.num_fields` +-- +*`pgsql.num_fields`*:: ++ +-- If the SELECT query if successful, this field is set to the number of fields returned. -[float] -=== `pgsql.num_rows` +-- +*`pgsql.num_rows`*:: ++ +-- If the SELECT query if successful, this field is set to the number of rows returned. +-- + [[exported-fields-raw]] == Raw fields These fields contain the raw transaction data. -[float] -=== `request` - +*`request`*:: ++ +-- type: text For text protocols, this is the request as seen on the wire (application layer only). For binary protocols this is our representation of the request. -[float] -=== `response` +-- +*`response`*:: ++ +-- type: text For text protocols, this is the response as seen on the wire (application layer only). For binary protocols this is our representation of the request. +-- + [[exported-fields-redis]] == Redis fields @@ -2699,18 +3412,22 @@ Redis-specific event fields. -[float] -=== `redis.return_value` - +*`redis.return_value`*:: ++ +-- The return value of the Redis command in a human readable format. -[float] -=== `redis.error` +-- +*`redis.error`*:: ++ +-- If the Redis command has resulted in an error, this field contains the error message returned by the Redis server. +-- + [[exported-fields-thrift]] == Thrift-RPC fields @@ -2719,30 +3436,38 @@ Thrift-RPC specific event fields. -[float] -=== `thrift.params` - +*`thrift.params`*:: ++ +-- The RPC method call parameters in a human readable format. 
If the IDL files are available, the parameters use names whenever possible. Otherwise, the IDs from the message are used. -[float] -=== `thrift.service` +-- +*`thrift.service`*:: ++ +-- The name of the Thrift-RPC service as defined in the IDL files. -[float] -=== `thrift.return_value` +-- +*`thrift.return_value`*:: ++ +-- The value returned by the Thrift-RPC call. This is encoded in a human readable format. -[float] -=== `thrift.exceptions` +-- +*`thrift.exceptions`*:: ++ +-- If the call resulted in exceptions, this field contains the exceptions in a human readable format. +-- + [[exported-fields-tls]] == TLS fields @@ -2751,466 +3476,578 @@ TLS-specific event fields. -[float] -=== `tls.handshake_completed` - +*`tls.handshake_completed`*:: ++ +-- type: boolean Whether the TLS negotiation has been successful and the session has transitioned to encrypted mode. -[float] -=== `tls.resumed` +-- +*`tls.resumed`*:: ++ +-- type: boolean If the TLS session has been resumed from a previous session. -[float] -=== `tls.resumption_method` +-- +*`tls.resumption_method`*:: ++ +-- type: keyword If the session has been resumed, the underlying method used. One of "id" for TLS session ID or "ticket" for TLS ticket extension. -[float] -=== `tls.client_certificate_requested` +-- +*`tls.client_certificate_requested`*:: ++ +-- type: boolean Whether the server has requested the client to authenticate itself using a client certificate. +-- -[float] -=== `tls.client_hello.version` +*`tls.client_hello.version`*:: ++ +-- type: keyword The version of the TLS protocol by which the client wishes to communicate during this session. -[float] -=== `tls.client_hello.supported_ciphers` +-- +*`tls.client_hello.supported_ciphers`*:: ++ +-- type: array List of ciphers the client is willing to use for this session. See https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4 -[float] -=== `tls.client_hello.supported_compression_methods` +-- +*`tls.client_hello.supported_compression_methods`*:: ++ +-- type: array The list of compression methods the client supports. See https://www.iana.org/assignments/comp-meth-ids/comp-meth-ids.xhtml +-- + [float] == extensions fields The hello extensions provided by the client. -[float] -=== `tls.client_hello.extensions.server_name_indication` - +*`tls.client_hello.extensions.server_name_indication`*:: ++ +-- type: keyword List of hostnames -[float] -=== `tls.client_hello.extensions.application_layer_protocol_negotiation` +-- +*`tls.client_hello.extensions.application_layer_protocol_negotiation`*:: ++ +-- type: keyword List of application-layer protocols the client is willing to use. -[float] -=== `tls.client_hello.extensions.session_ticket` +-- +*`tls.client_hello.extensions.session_ticket`*:: ++ +-- type: keyword Length of the session ticket, if provided, or an empty string to advertise support for tickets. +-- -[float] -=== `tls.server_hello.version` +*`tls.server_hello.version`*:: ++ +-- type: keyword The version of the TLS protocol that is used for this session. It is the highest version supported by the server not exceeding the version requested in the client hello. -[float] -=== `tls.server_hello.selected_cipher` +-- +*`tls.server_hello.selected_cipher`*:: ++ +-- type: keyword The cipher suite selected by the server from the list provided by in the client hello. 
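To illustrate how the hello fields relate, the fragment below sketches a hypothetical, non-resumed handshake in which the server picks one of the ciphers offered by the client. Cipher names follow the IANA registry linked above; all values are invented for this example.

[source,json]
------------------------------------------------------------------------------
{
  "tls": {
    "handshake_completed": true,
    "resumed": false,
    "client_hello": {
      "supported_ciphers": [
        "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
        "TLS_RSA_WITH_AES_128_CBC_SHA"
      ],
      "extensions": {
        "server_name_indication": ["example.org"]
      }
    },
    "server_hello": {
      "selected_cipher": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
    }
  }
}
------------------------------------------------------------------------------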
-[float] -=== `tls.server_hello.selected_compression_method` +-- +*`tls.server_hello.selected_compression_method`*:: ++ +-- type: keyword The compression method selected by the server from the list provided in the client hello. +-- + [float] == extensions fields The hello extensions provided by the server. -[float] -=== `tls.server_hello.extensions.application_layer_protocol_negotiation` - +*`tls.server_hello.extensions.application_layer_protocol_negotiation`*:: ++ +-- type: array Negotiated application layer protocol -[float] -=== `tls.server_hello.extensions.session_ticket` +-- +*`tls.server_hello.extensions.session_ticket`*:: ++ +-- type: keyword Used to announce that a session ticket will be provided by the server. Always an empty string. +-- + [float] == client_certificate fields Certificate provided by the client for authentication. -[float] -=== `tls.client_certificate.version` - +*`tls.client_certificate.version`*:: ++ +-- type: long X509 format version. -[float] -=== `tls.client_certificate.serial_number` +-- +*`tls.client_certificate.serial_number`*:: ++ +-- type: keyword The certificate's serial number. -[float] -=== `tls.client_certificate.not_before` +-- +*`tls.client_certificate.not_before`*:: ++ +-- type: date Date before which the certificate is not valid. -[float] -=== `tls.client_certificate.not_after` +-- +*`tls.client_certificate.not_after`*:: ++ +-- type: date Date after which the certificate expires. -[float] -=== `tls.client_certificate.public_key_algorithm` +-- +*`tls.client_certificate.public_key_algorithm`*:: ++ +-- type: keyword The algorithm used for this certificate's public key. One of RSA, DSA or ECDSA. -[float] -=== `tls.client_certificate.public_key_size` +-- +*`tls.client_certificate.public_key_size`*:: ++ +-- type: long Size of the public key. -[float] -=== `tls.client_certificate.signature_algorithm` +-- +*`tls.client_certificate.signature_algorithm`*:: ++ +-- type: keyword The algorithm used for the certificate's signature. -[float] -=== `tls.client_certificate.alternative_names` +-- +*`tls.client_certificate.alternative_names`*:: ++ +-- type: array Subject Alternative Names for this certificate. -[float] -=== `tls.client_certificate.raw` +-- +*`tls.client_certificate.raw`*:: ++ +-- type: keyword The raw certificate in PEM format. +-- + [float] == subject fields Subject represented by this certificate. -[float] -=== `tls.client_certificate.subject.country` - +*`tls.client_certificate.subject.country`*:: ++ +-- type: keyword Country code. -[float] -=== `tls.client_certificate.subject.organization` +-- +*`tls.client_certificate.subject.organization`*:: ++ +-- type: keyword Organization name. -[float] -=== `tls.client_certificate.subject.organizational_unit` +-- +*`tls.client_certificate.subject.organizational_unit`*:: ++ +-- type: keyword Unit within organization. -[float] -=== `tls.client_certificate.subject.province` +-- +*`tls.client_certificate.subject.province`*:: ++ +-- type: keyword Province or region within country. -[float] -=== `tls.client_certificate.subject.common_name` +-- +*`tls.client_certificate.subject.common_name`*:: ++ +-- type: keyword Name or host name identified by the certificate. +-- + [float] == issuer fields Entity that issued and signed this certificate. -[float] -=== `tls.client_certificate.issuer.country` - +*`tls.client_certificate.issuer.country`*:: ++ +-- type: keyword Country code. 
-[float] -=== `tls.client_certificate.issuer.organization` +-- +*`tls.client_certificate.issuer.organization`*:: ++ +-- type: keyword Organization name. -[float] -=== `tls.client_certificate.issuer.organizational_unit` +-- +*`tls.client_certificate.issuer.organizational_unit`*:: ++ +-- type: keyword Unit within organization. -[float] -=== `tls.client_certificate.issuer.province` +-- +*`tls.client_certificate.issuer.province`*:: ++ +-- type: keyword Province or region within country. -[float] -=== `tls.client_certificate.issuer.common_name` +-- +*`tls.client_certificate.issuer.common_name`*:: ++ +-- type: keyword Name or host name identified by the certificate. +-- + [float] == server_certificate fields Certificate provided by the server for authentication. -[float] -=== `tls.server_certificate.version` - +*`tls.server_certificate.version`*:: ++ +-- type: long X509 format version. -[float] -=== `tls.server_certificate.serial_number` +-- +*`tls.server_certificate.serial_number`*:: ++ +-- type: keyword The certificate's serial number. -[float] -=== `tls.server_certificate.not_before` +-- +*`tls.server_certificate.not_before`*:: ++ +-- type: date Date before which the certificate is not valid. -[float] -=== `tls.server_certificate.not_after` +-- +*`tls.server_certificate.not_after`*:: ++ +-- type: date Date after which the certificate expires. -[float] -=== `tls.server_certificate.public_key_algorithm` +-- +*`tls.server_certificate.public_key_algorithm`*:: ++ +-- type: keyword The algorithm used for this certificate's public key. One of RSA, DSA or ECDSA. -[float] -=== `tls.server_certificate.public_key_size` +-- +*`tls.server_certificate.public_key_size`*:: ++ +-- type: long Size of the public key. -[float] -=== `tls.server_certificate.signature_algorithm` +-- +*`tls.server_certificate.signature_algorithm`*:: ++ +-- type: keyword The algorithm used for the certificate's signature. -[float] -=== `tls.server_certificate.alternative_names` +-- +*`tls.server_certificate.alternative_names`*:: ++ +-- type: array Subject Alternative Names for this certificate. -[float] -=== `tls.server_certificate.raw` +-- +*`tls.server_certificate.raw`*:: ++ +-- type: keyword The raw certificate in PEM format. +-- + [float] == subject fields Subject represented by this certificate. -[float] -=== `tls.server_certificate.subject.country` - +*`tls.server_certificate.subject.country`*:: ++ +-- type: keyword Country code. -[float] -=== `tls.server_certificate.subject.organization` +-- +*`tls.server_certificate.subject.organization`*:: ++ +-- type: keyword Organization name. -[float] -=== `tls.server_certificate.subject.organizational_unit` +-- +*`tls.server_certificate.subject.organizational_unit`*:: ++ +-- type: keyword Unit within organization. -[float] -=== `tls.server_certificate.subject.province` +-- +*`tls.server_certificate.subject.province`*:: ++ +-- type: keyword Province or region within country. -[float] -=== `tls.server_certificate.subject.common_name` +-- +*`tls.server_certificate.subject.common_name`*:: ++ +-- type: keyword Name or host name identified by the certificate. +-- + [float] == issuer fields Entity that issued and signed this certificate. -[float] -=== `tls.server_certificate.issuer.country` - +*`tls.server_certificate.issuer.country`*:: ++ +-- type: keyword Country code. -[float] -=== `tls.server_certificate.issuer.organization` +-- +*`tls.server_certificate.issuer.organization`*:: ++ +-- type: keyword Organization name. 
-[float] -=== `tls.server_certificate.issuer.organizational_unit` +-- +*`tls.server_certificate.issuer.organizational_unit`*:: ++ +-- type: keyword Unit within organization. -[float] -=== `tls.server_certificate.issuer.province` +-- +*`tls.server_certificate.issuer.province`*:: ++ +-- type: keyword Province or region within country. -[float] -=== `tls.server_certificate.issuer.common_name` +-- +*`tls.server_certificate.issuer.common_name`*:: ++ +-- type: keyword Name or host name identified by the certificate. -[float] -=== `tls.server_certificate_chain` +-- +*`tls.server_certificate_chain`*:: ++ +-- type: array Chain of trust for the server certificate. -[float] -=== `tls.client_certificate_chain` +-- +*`tls.client_certificate_chain`*:: ++ +-- type: array Chain of trust for the client certificate. -[float] -=== `tls.alert_types` +-- +*`tls.alert_types`*:: ++ +-- type: keyword An array containing the TLS alert type for every alert received. +-- + [float] == fingerprints fields @@ -3223,22 +4060,26 @@ Fingerprints for this TLS session. JA3 TLS client fingerprint -[float] -=== `tls.fingerprints.ja3.hash` - +*`tls.fingerprints.ja3.hash`*:: ++ +-- type: keyword The JA3 fingerprint hash for the client side. -[float] -=== `tls.fingerprints.ja3.str` +-- +*`tls.fingerprints.ja3.str`*:: ++ +-- type: keyword The JA3 string used to calculate the hash. +-- + [[exported-fields-trans_event]] == Transaction Event fields @@ -3246,64 +4087,80 @@ These fields contain data about the transaction itself. -[float] -=== `direction` - +*`direction`*:: ++ +-- required: True Indicates whether the transaction is inbound (emitted by server) or outbound (emitted by the client). Values can be in or out. No defaults. -[float] -=== `status` +-- +*`status`*:: ++ +-- required: True The high level status of the transaction. The way to compute this value depends on the protocol, but the result has a meaning independent of the protocol. -[float] -=== `method` +-- +*`method`*:: ++ +-- The command/verb/method of the transaction. For HTTP, this is the method name (GET, POST, PUT, and so on), for SQL this is the verb (SELECT, UPDATE, DELETE, and so on). -[float] -=== `resource` +-- +*`resource`*:: ++ +-- The logical resource that this transaction refers to. For HTTP, this is the URL path up to the last slash (/). For example, if the URL is `/users/1`, the resource is `/users`. For databases, the resource is typically the table name. The field is not filled for all transaction types. -[float] -=== `path` +-- +*`path`*:: ++ +-- required: True The path the transaction refers to. For HTTP, this is the URL. For SQL databases, this is the table name. For key-value stores, this is the key. -[float] -=== `query` +-- +*`query`*:: ++ +-- type: keyword The query in a human readable format. For HTTP, it will typically be something like `GET /users/_search?name=test`. For MySQL, it is something like `SELECT id from users where name=test`. -[float] -=== `params` +-- +*`params`*:: ++ +-- type: text The request parameters. For HTTP, these are the POST or GET parameters. For Thrift-RPC, these are the parameters from the request. -[float] -=== `notes` +-- +*`notes`*:: ++ +-- Messages from Packetbeat itself. This field usually contains error messages for interpreting the raw data. This information can be helpful for troubleshooting. +-- + [[exported-fields-trans_measurements]] == Measurements (Transactions) fields @@ -3311,24 +4168,28 @@ These fields contain measurements related to the transaction. 
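Before the individual measurement fields that follow, here is an invented sketch of how the transaction-event fields from the previous section and a couple of the measurements below can end up side by side in one document (values are illustrative only):

[source,json]
------------------------------------------------------------------------------
{
  "type": "http",
  "direction": "in",
  "status": "OK",
  "method": "GET",
  "path": "/users/1",
  "resource": "/users",
  "responsetime": 12,
  "bytes_in": 310,
  "bytes_out": 1024
}
------------------------------------------------------------------------------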
-[float] -=== `responsetime` - +*`responsetime`*:: ++ +-- type: long The wall clock time it took to complete the transaction. The precision is in milliseconds. -[float] -=== `cpu_time` +-- +*`cpu_time`*:: ++ +-- type: long The CPU time it took to complete the transaction. -[float] -=== `bytes_in` +-- +*`bytes_in`*:: ++ +-- type: long format: bytes @@ -3336,9 +4197,11 @@ format: bytes The number of bytes of the request. Note that this size is the application layer message length, without the length of the IP or TCP headers. -[float] -=== `bytes_out` +-- +*`bytes_out`*:: ++ +-- type: long format: bytes @@ -3346,35 +4209,45 @@ format: bytes The number of bytes of the response. Note that this size is the application layer message length, without the length of the IP or TCP headers. -[float] -=== `dnstime` +-- +*`dnstime`*:: ++ +-- type: long The time it takes to query the name server for a given request. This is typically used for RUM (real-user-monitoring) but can also have values for server-to-server communication when DNS is used for service discovery. The precision is in microseconds. -[float] -=== `connecttime` +-- +*`connecttime`*:: ++ +-- type: long The time it takes for the TCP connection to be established for the given transaction. The precision is in microseconds. -[float] -=== `loadtime` +-- +*`loadtime`*:: ++ +-- type: long The time it takes for the content to be loaded. This is typically used for RUM (real-user-monitoring) but it can make sense in other cases as well. The precision is in microseconds. -[float] -=== `domloadtime` +-- +*`domloadtime`*:: ++ +-- type: long In RUM (real-user-monitoring), the total time it takes for the DOM to be loaded. In terms of the W3 Navigation Timing API, this is the difference between `domContentLoadedEnd` and `domContentLoadedStart`. +-- + diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/filtering.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/filtering.asciidoc index c7b68c90..8eea8844 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/filtering.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/filtering.asciidoc @@ -1,19 +1,22 @@ [[kibana-queries-filters]] == Kibana queries and filters -In Kibana, you can filter transactions either by entering a search query or by clicking on elements within a visualization. +This topic provides a short introduction to some useful queries for searching +Packetbeat data. For a full description of the query syntax, see +{kibana-ref}/search.html[Searching Your Data] in the _Kibana User Guide_. + +In Kibana, you can filter transactions either by entering a search query or by +clicking on elements within a visualization. [float] === Create queries -The search field on the *Discover* page provides a way to query -a specific subset of transactions from the selected time frame. -The query syntax is based on the -http://lucene.apache.org/core/3_5_0/queryparsersyntax.html[Lucene query syntax]. -It allows boolean operators, wildcards, and field filtering. For example, if -you want to find the HTTP redirects, you can search for -`type: http AND http.response.code: 302`. +The search field on the *Discover* page provides a way to query a specific +subset of transactions from the selected time frame. It allows boolean +operators, wildcards, and field filtering. For example, if you want to find the +HTTP redirects, you can search for `type: http AND http.code: 302`. 
+[role="screenshot"] image:./images/kibana-query-filtering.png[Kibana query] [float] @@ -25,10 +28,11 @@ group of words surrounded by double quotation marks, such as `"test search"`. To search for all HTTP requests initiated by Mozilla Web browser version 5.0: [source,yaml] ---------------- +-------------- "Mozilla/5.0" -------------- + To search for all the transactions that contain the following message: [source,yaml] @@ -36,8 +40,11 @@ To search for all the transactions that contain the following message: "Cannot change the info of a user" ------------------------------------ -NOTE: To search for an exact string, you need to wrap the string in double quotation -marks. Without quotation marks, the search in the example would match any documents containing one of the following words: "Cannot" OR "change" OR "the" OR "info" OR "a" OR "user". + +NOTE: To search for an exact string, you need to wrap the string in double +quotation marks. Without quotation marks, the search in the example would match +any documents containing one of the following words: "Cannot" OR "change" OR +"the" OR "info" OR "a" OR "user". To search for all transactions with the "chunked" encoding: @@ -46,6 +53,7 @@ To search for all transactions with the "chunked" encoding: "Transfer-Encoding: chunked" ----------------------------- + [float] ==== Field-based queries @@ -54,10 +62,11 @@ Kibana allows you to search specific fields. To view HTTP transactions only: [source,yaml] --------------------- +------------------- type: http ------------------- + To view failed transactions only: [source,yaml] @@ -66,13 +75,14 @@ status: Error ------------------- -To view MySQL INSERT queries only: +To view INSERT queries only: [source,yaml] --------------------- -mysql.method: INSERT +method: INSERT --------------------- + [float] ==== Regexp queries @@ -81,12 +91,13 @@ to search for all HTTP responses with JSON as the returned value type: [source,yaml] ------------------------- -http.response.headers["content-type"]: *json +http.response_headers.content_type: *json ------------------------- + See -http://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html[Elasticsearch regexp query] -for more details about the syntax. +{ref}/query-dsl-regexp-query.html[Elasticsearch regexp query] for more details +about the syntax. [float] ==== Range queries @@ -95,13 +106,15 @@ Range queries allow a field to have values between the lower and upper bounds. The interval can include or exclude the bounds depending on the type of brackets that you use. -To search for slow transactions with a response time greater than or equal to 10ms: +To search for slow transactions with a response time greater than or equal to +10ms: [source,yaml] ------------------------ responsetime: [10 TO *] ------------------------ + To search for slow transactions with a response time greater than 10ms: [source,yaml] @@ -109,49 +122,58 @@ To search for slow transactions with a response time greater than 10ms: responsetime: {10 TO *} ------------------------- + [float] ==== Boolean queries -Boolean operators (AND, OR, NOT) allow combining multiple sub-queries through logic operators. +Boolean operators (AND, OR, NOT) allow combining multiple sub-queries through +logic operators. -NOTE: Operators such as AND, OR, and NOT must be capitalized. See http://lucene.apache.org/core/3_5_0/queryparsersyntax.html[Lucene query syntax] for more details about the boolean operators. +NOTE: Operators such as AND, OR, and NOT must be capitalized. 
To search for all transactions except MySQL transactions: [source,yaml] ----------------- +--------------- NOT type: mysql --------------- -To search for all MySQL SELECT queries with large attachments: +To search for all MySQL INSERT queries with errors: [source,yaml] ------------------------------------------------- -mysql.method: SELECT AND mysql.size: [10000 TO *] +method: INSERT AND mysql.iserror: true ------------------------------------------------- Lucene also supports parentheses to group sub-queries. -To search for either INSERT or UPDATE MySQL queries with a response time greater than or equal to 30ms: +To search for either INSERT or UPDATE queries with a response time greater +than or equal to 30ms: [source,yaml] --------------------------------------------------------------------------- -(mysql.method: INSERT OR mysql.method: UPDATE) AND responsetime: [30 TO *] +(method: INSERT OR method: UPDATE) AND responsetime: [30 TO *] --------------------------------------------------------------------------- + [float] === Create filters -In Kibana, you can also filter transactions by clicking on -elements within a visualization. For example, to filter for all the HTTP redirects that are coming from a specific -IP and port, click the "Filter for value" icons (highlighted in green below) for the `client_ip` and `client_port` fields in the transaction detail widget. To -exclude the HTTP redirects coming from the IP and port, click the icons highlighted in red. +In Kibana, you can also filter transactions by clicking on elements within a +visualization. For example, to filter for all the HTTP redirects that are coming +from a specific IP and port, click the *Filter for value* +image:./images/filterforval_icon.png[] icon next to the `client_ip` +and `client_port` fields in the transaction detail table. To exclude the HTTP +redirects coming from the IP and port, click the *Filter out value* +image:./images/filteroutval_icon.png[] icon instead. +[role="screenshot"] image:./images/filter_from_context.png[Filter from context] The selected filters appear under the search box. +[role="screenshot"] image:./images/kibana-filters.png[Kibana filters] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc index ec4d1595..d7ef280e 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc @@ -4,15 +4,7 @@ The best way to understand the value of a network packet analytics system like Packetbeat is to try it on your own traffic. -To get started with your own Packetbeat setup, install and configure these related products: - - * Elasticsearch for storage and indexing the data. - * Kibana for the UI. - * Logstash (optional) for inserting data into Elasticsearch. - -See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic Stack] for more information. - -After installing the Elastic Stack, read the following topics to learn how to install, configure, and run Packetbeat: +include::../../libbeat/docs/shared-getting-started-intro.asciidoc[] * <> * <> @@ -126,7 +118,7 @@ https://www.elastic.co/downloads/beats/packetbeat[downloads page]. . Rename the `packetbeat--windows` directory to `Packetbeat`. -. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon and select *Run As Administrator*). 
If you are running Windows XP, you may need to download and install PowerShell. +. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon and select *Run As Administrator*). . From the PowerShell prompt, run the following commands to install Packetbeat as a Windows service: + @@ -170,7 +162,7 @@ capture. + [source,shell] ---------------------------------------------------------------------- -PS C:\Program Files\Packetbeat> .\packetbeat.exe -devices +PS C:\Program Files\Packetbeat> .\packetbeat.exe devices 0: \Device\NPF_{113535AD-934A-452E-8D5F-3004797DE286} (Intel(R) PRO/1000 MT Desktop Adapter) ---------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-flows.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-flows.png index 0fcf6190..62924505 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-flows.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-flows.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-transactions.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-transactions.png index 5b33caae..0accb305 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-transactions.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/discovery-packetbeat-transactions.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/filter_from_context.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/filter_from_context.png index 550a6af0..320c3487 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/filter_from_context.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/filter_from_context.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/filterforval_icon.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/filterforval_icon.png new file mode 100644 index 00000000..2209bd8b Binary files /dev/null and b/vendor/github.com/elastic/beats/packetbeat/docs/images/filterforval_icon.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/filteroutval_icon.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/filteroutval_icon.png new file mode 100644 index 00000000..b2e7b392 Binary files /dev/null and b/vendor/github.com/elastic/beats/packetbeat/docs/images/filteroutval_icon.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-filters.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-filters.png index 72b54496..eb5ae590 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-filters.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-filters.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-query-filtering.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-query-filtering.png index e0244062..cb3096e1 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-query-filtering.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-query-filtering.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-update-map.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-update-map.png 
index 7a7785f1..1a71050e 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-update-map.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/kibana-update-map.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/images/saved-packetbeat-searches.png b/vendor/github.com/elastic/beats/packetbeat/docs/images/saved-packetbeat-searches.png index 15a25e28..b825ec59 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/docs/images/saved-packetbeat-searches.png and b/vendor/github.com/elastic/beats/packetbeat/docs/images/saved-packetbeat-searches.png differ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc index ed21282f..804bb9e8 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc @@ -2,19 +2,21 @@ include::../../libbeat/docs/version.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes62.asciidoc[] +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :version: {stack-version} :beatname_lc: packetbeat :beatname_uc: Packetbeat :beatname_pkg: {beatname_lc} +:github_repo_name: beats +:discuss_forum: beats/{beatname_lc} +:beat_default_index_prefix: {beatname_lc} +:has_ml_jobs: yes include::../../libbeat/docs/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] -include::../../libbeat/docs/contributing-to-beats.asciidoc[] - include::./gettingstarted.asciidoc[] include::../../libbeat/docs/repositories.asciidoc[] @@ -40,3 +42,5 @@ include::./filtering.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] + +include::../../libbeat/docs/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-geoip.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-geoip.asciidoc index 5c108ef8..2a83c8db 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-geoip.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-geoip.asciidoc @@ -2,22 +2,18 @@ == Export GeoIP Information You can use Packetbeat along with the -{plugindoc}/ingest-geoip.html[ingest geoIP processor plugin] in Elasticsearch +{plugins}/ingest-geoip.html[ingest geoIP processor plugin] in Elasticsearch to export geographic location information about source IPs for incoming HTTP requests. Then you can use this info to visualize the location of your clients on a map in Kibana. -Prior to version 5.0, Packetbeat provided a `geoip` configuration option for -exporting geoIP information about the source IPs. Starting with 5.0, the -`geoip` configuration option in Beats is deprecated in favor of using the -ingest geoIP processor plugin. This plugin adds information about the -geographical location of IP addresses, based on data from the Maxmind GeoLite2 -City Database. Because the plugin uses a geoIP database that's installed on -Elasticsearch, you no longer need to install a geoIP database on the -machines running Beats. +The geoIP processor plugin adds information about the geographical location of +IP addresses, based on data from the Maxmind GeoLite2 City Database. Because the +plugin uses a geoIP database that's installed on Elasticsearch, you don't need +to install a geoIP database on the machines running Beats. 
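For a sense of the end result, an event enriched by the pipeline described below might gain a field shaped roughly like this (the IP address and coordinates are invented for illustration):

[source,json]
------------------------------------------------------------------------------
{
  "client_ip": "203.0.113.10",
  "client_geoip": {
    "location": {
      "lat": 51.0,
      "lon": 9.0
    }
  }
}
------------------------------------------------------------------------------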
NOTE: If your use case involves using Logstash, you can use the -{logstashdoc}/plugins-filters-geoip.html[GeoIP filter] available in Logstash +{logstash-ref}/plugins-filters-geoip.html[GeoIP filter] available in Logstash instead of using the ingest plugin. However, using the ingest plugin is the simplest approach when you don't require the additional processing power of Logstash. @@ -28,13 +24,14 @@ Logstash. To configure Packetbeat and the ingest geoIP processor plugin: -1. {plugindoc}/ingest-geoip.html[Install the ingest geoIP processor plugin]. +1. {plugins}/ingest-geoip.html[Install the ingest geoIP processor plugin]. After installing the plugin, remember to restart the node. 2. Define an ingest node pipeline that uses a `geoip` processor to add location info to the event. For example, you can use the Console in Kibana to create the following pipeline: + +-- [source,json] ------------------------------------------------------------------------------- PUT _ingest/pipeline/geoip-info @@ -52,6 +49,8 @@ PUT _ingest/pipeline/geoip-info ] } ------------------------------------------------------------------------------- +//CONSOLE +-- + This pipeline adds a `client_geoip.location` field of type `geo_point` to the event. The ID of the pipeline is `geoip-info`. `client_ip` is the output field @@ -60,7 +59,7 @@ in Packetbeat that contains the IP address of the client. You set when it encounters an event that doesn't have a `client_ip` field. + See -{plugindoc}/using-ingest-geoip.html[Using the Geoip Processor in a Pipeline] +{plugins}/using-ingest-geoip.html[Using the Geoip Processor in a Pipeline] for more options. 3. In the Packetbeat config file, configure the Elasticsearch output to use the @@ -78,7 +77,7 @@ output.elasticsearch: + [source,shell] ------------------------------------------------------------------------------- -./packetbeat -e -c packetbeat.yml +sudo ./packetbeat -e -c packetbeat.yml ------------------------------------------------------------------------------- + The event that's sent to Elasticsearch should now include a @@ -90,9 +89,10 @@ The event that's sent to Elasticsearch should now include a To visualize the location of your Packetbeat clients, you can either <> (if -you haven't already), or create a new {kibana-ref}/tilemap.html[Tile map] in -Kibana and use the `client_geoip.location` field as the Geohash. +you haven't already), or create a new {kibana-ref}/tilemap.html[coordinate map] +in Kibana and use the `client_geoip.location` field as the Geohash. +[role="screenshot"] image:./images/kibana-update-map.png[Update Packetbeat client location map in Kibana] TIP: If the map in the dashboard reports "no results found", and you don't see diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc index 90a6a9e5..5c2f5b46 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc @@ -204,7 +204,7 @@ You can use the `bpf_filter` setting to overwrite the generated BPF filter. 
For [source,yaml] ------------------------------------------------------------------------------ packetbeat.interfaces.device: eth0 -packetbeat.interfaces.bpf_filter: "net 192.168.238.0/0 and port 80 and port 3306" +packetbeat.interfaces.bpf_filter: "net 192.168.238.0/0 and port 80 or port 3306" ------------------------------------------------------------------------------ NOTE: This setting disables automatic generation of the BPF filter. If @@ -570,7 +570,7 @@ If enabled Packetbeat will generate the following BPF filter: `"icmp or icmp6"`. DNS ++++ -The `dns` section of the +{beatname_lc}.yml+ config file specifies configuration options for the DNS protocol. The DNS protocol supports processing DNS messages on UDP. Here is a sample configuration section for DNS: +The `dns` section of the +{beatname_lc}.yml+ config file specifies configuration options for the DNS protocol. The DNS protocol supports processing DNS messages on TCP and UDP. Here is a sample configuration section for DNS: [source,yaml] ------------------------------------------------------------------------------ diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc index dadc6980..37467c29 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/securing-packetbeat.asciidoc @@ -8,6 +8,7 @@ The following topics describe how to secure communication between Packetbeat and * <> * <> +* <> //sets block macro for https.asciidoc included in next section diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc index 4e179b40..8f215865 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc @@ -4,7 +4,7 @@ // that is unique to each beat. ///// -[[seting-up-and-running]] +[[setting-up-and-running]] == Setting up and running {beatname_uc} Before reading this section, see the @@ -29,3 +29,5 @@ include::../../libbeat/docs/keystore.asciidoc[] include::../../libbeat/docs/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] + +include::../../libbeat/docs/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/visualizing-data-packetbeat.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/visualizing-data-packetbeat.asciidoc index eca103a2..55f2fafc 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/visualizing-data-packetbeat.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/visualizing-data-packetbeat.asciidoc @@ -4,33 +4,44 @@ [partintro] -- -Before trying to visualize Packetbeat data in Kibana, we recommend that you <>. Then read the topics in this -section to learn how to work with Packetbeat data in Kibana: +Before trying to visualize Packetbeat data in Kibana, we recommend that you +<>. Then read the +topics in this section to learn how to work with Packetbeat data in Kibana: * <> * <> +Also see the {kibana-ref}/index.html[Kibana User Guide]. -- [[customizing-discover]] == Customize the Discover page -To make it easier for you to search and discover Packetbeat data in Kibana, the sample dashboards contain -predefined searches. 
+To make it easier for you to search and discover Packetbeat data in Kibana, the +sample dashboards contain predefined searches. These searches are not default +views on the *Discover* page. To use these searches, make sure you've +<>. Then go to the +*Discover* page and click *Open*. -Use the *Packetbeat Search* to customize the columns in the Discover table: +Type `Packetbeat` in the Search field to filter the list of searches. +[role="screenshot"] +image:./images/saved-packetbeat-searches.png[Saved Packetbeat Searches] + +You can use the predefined searches to customize the columns in the Discover +table. For example, select the *Packetbeat Search* to customize the columns in +the Discover table: + +[role="screenshot"] image:./images/discovery-packetbeat-transactions.png[Packetbeat Search] -Use the *Packetbeat Flows Search* to display the most important information for Packetbeat flows: +Select the *Packetbeat Flows Search* to display the most important information +for Packetbeat flows: +[role="screenshot"] image:./images/discovery-packetbeat-flows.png[Packetbeat Flows Search] -These searches are not default views on the *Discover* page. To use these searches, make sure you've -<>. Then go to the *Discover* page and click -*Open*. -You can type `Packetbeat` in the Search field to filter the list of searches. -image:./images/saved-packetbeat-searches.png[Saved Packetbeat Searches] + diff --git a/vendor/github.com/elastic/beats/packetbeat/flows/worker.go b/vendor/github.com/elastic/beats/packetbeat/flows/worker.go index 41b37f41..aa37d3c4 100644 --- a/vendor/github.com/elastic/beats/packetbeat/flows/worker.go +++ b/vendor/github.com/elastic/beats/packetbeat/flows/worker.go @@ -19,7 +19,7 @@ type flowsProcessor struct { } var ( - ErrInvalidTimeout = errors.New("timeout must not >= 1s") + ErrInvalidTimeout = errors.New("timeout must not <= 1s") ErrInvalidPeriod = errors.New("report period must be -1 or >= 1s") ) diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml b/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml index a2078c05..b7faea72 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml @@ -179,9 +179,18 @@ packetbeat.protocols: #send_all_headers: false # The list of content types for which Packetbeat includes the full HTTP - # payload in the response field. + # payload. If the request's or response's Content-Type matches any on this + # list, the full body will be included under the request or response field. #include_body_for: [] + # The list of content types for which Packetbeat includes the full HTTP + # request payload. + #include_request_body_for: [] + + # The list of content types for which Packetbeat includes the full HTTP + # response payload. + #include_response_body_for: [] + # If the Cookie or Set-Cookie headers are sent, this option controls whether # they are split into individual values. #split_cookie: false @@ -502,7 +511,8 @@ packetbeat.protocols: # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. - # A value of 0 (the default) ensures events are immediately available + # The default value is set to 2048. + # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 @@ -510,6 +520,66 @@ packetbeat.protocols: # if the number of events stored in the queue is < min_flush_events. 
#flush.timeout: 1s + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -544,6 +614,14 @@ packetbeat.protocols: # equals: # http.code: 200 # +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. @@ -568,6 +646,7 @@ packetbeat.protocols: # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 +# match_short_id: false # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: @@ -581,6 +660,7 @@ packetbeat.protocols: # #processors: #- add_docker_metadata: ~ +#- add_host_metadata: ~ #============================= Elastic Cloud ================================== @@ -653,7 +733,18 @@ output.elasticsearch: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. 
+ # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -717,7 +808,7 @@ output.elasticsearch: # Optional load balance the events between the Logstash hosts. Default is false. #loadbalance: false - # Number of batches to be sent asynchronously to logstash while processing + # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 @@ -726,6 +817,17 @@ output.elasticsearch: # if no error is encountered. #slow_start: false + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + # Optional index name. The default index name is set to packetbeat # in all lowercase. #index: 'packetbeat' @@ -1070,6 +1172,10 @@ output.elasticsearch: # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + #============================== Dashboards ===================================== # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the @@ -1104,6 +1210,17 @@ output.elasticsearch: # how to install the dashboards by first querying Elasticsearch. #setup.dashboards.always_kibana: false +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. 
+#setup.dashboards.retry.maximum: 0 + + #============================== Template ===================================== # A template is used to set the mapping in Elasticsearch diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_parser.go b/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_parser.go index 11ff24e8..7141f495 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_parser.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_parser.go @@ -72,7 +72,10 @@ func isProtocolHeader(data []byte) (isHeader bool, version string) { //func to read a frame header and check if it is valid and complete func readFrameHeader(data []byte) (ret *amqpFrame, err bool) { var frame amqpFrame - + if len(data) < 8 { + logp.Warn("Partial frame header, waiting for more data") + return nil, false + } frame.size = binary.BigEndian.Uint32(data[3:7]) if len(data) < int(frame.size)+8 { logp.Warn("Frame shorter than declared size, waiting for more data") diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_test.go b/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_test.go index 0299ee22..e435958d 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_test.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/amqp/amqp_test.go @@ -89,6 +89,28 @@ func TestAmqp_FrameSize(t *testing.T) { } } +// Test that the parser doesn't panic on a partial message that includes +// a client header +func TestAmqp_PartialFrameSize(t *testing.T) { + logp.TestingSetup(logp.WithSelectors("amqp", "amqpdetailed")) + + _, amqp := amqpModForTests() + + //incomplete frame + data, err := hex.DecodeString("414d515000060606010000000000") + assert.Nil(t, err) + + stream := &amqpStream{data: data, message: new(amqpMessage)} + ok, complete := amqp.amqpMessageParser(stream) + + if !ok { + t.Errorf("Parsing should not raise an error") + } + if complete { + t.Errorf("message should not be complete") + } +} + func TestAmqp_WrongShortStringSize(t *testing.T) { logp.TestingSetup(logp.WithSelectors("amqp", "amqpdetailed")) diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/_meta/fields.yml b/vendor/github.com/elastic/beats/packetbeat/protos/http/_meta/fields.yml index e40f0157..66891f74 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/_meta/fields.yml @@ -47,5 +47,6 @@ same header name are present in the message, they will be separated by commas. - name: body + type: text description: The body of the HTTP response. 
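The reference configuration above documents the new `include_request_body_for` and `include_response_body_for` options next to the existing `include_body_for`, and the Go changes that follow (config.go, http.go, http_parser.go) merge the shared list into both per-direction lists and then match on the Content-Type header. A small, self-contained sketch of that matching is shown below, assuming the substring comparison the parser uses; the function and variable names here are illustrative rather than the exact Beats API.

[source,go]
----------------------------------------------------------------------
package main

import (
	"bytes"
	"fmt"
)

// shouldInclude reports whether a body with the given Content-Type should be
// captured: each configured entry is checked as a substring of the header.
func shouldInclude(contentType []byte, configured []string) bool {
	for _, want := range configured {
		if bytes.Contains(contentType, []byte(want)) {
			return true
		}
	}
	return false
}

func main() {
	// include_body_for applies to both directions; the direction-specific
	// lists are appended to it, mirroring setFromConfig in the diff below.
	includeBodyFor := []string{"application/json"}
	requestTypes := append(append([]string{}, includeBodyFor...), "application/x-www-form-urlencoded")
	responseTypes := append(append([]string{}, includeBodyFor...), "text/plain")

	fmt.Println(shouldInclude([]byte("application/json; charset=utf-8"), requestTypes)) // true
	fmt.Println(shouldInclude([]byte("text/html"), responseTypes))                      // false
}
----------------------------------------------------------------------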
diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/config.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/config.go index 00acf70e..fbb2f56b 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/config.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/config.go @@ -7,15 +7,17 @@ import ( ) type httpConfig struct { - config.ProtocolCommon `config:",inline"` - SendAllHeaders bool `config:"send_all_headers"` - SendHeaders []string `config:"send_headers"` - SplitCookie bool `config:"split_cookie"` - RealIPHeader string `config:"real_ip_header"` - IncludeBodyFor []string `config:"include_body_for"` - HideKeywords []string `config:"hide_keywords"` - RedactAuthorization bool `config:"redact_authorization"` - MaxMessageSize int `config:"max_message_size"` + config.ProtocolCommon `config:",inline"` + SendAllHeaders bool `config:"send_all_headers"` + SendHeaders []string `config:"send_headers"` + SplitCookie bool `config:"split_cookie"` + RealIPHeader string `config:"real_ip_header"` + IncludeBodyFor []string `config:"include_body_for"` + IncludeRequestBodyFor []string `config:"include_request_body_for"` + IncludeResponseBodyFor []string `config:"include_response_body_for"` + HideKeywords []string `config:"hide_keywords"` + RedactAuthorization bool `config:"redact_authorization"` + MaxMessageSize int `config:"max_message_size"` } var ( diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go index 0d4756a8..a5c911b7 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go @@ -14,7 +14,6 @@ import ( "github.com/elastic/beats/packetbeat/procs" "github.com/elastic/beats/packetbeat/protos" - "github.com/elastic/beats/packetbeat/protos/tcp" ) var debugf = logp.MakeDebug("http") @@ -24,7 +23,6 @@ type parserState uint8 const ( stateStart parserState = iota - stateFLine stateHeaders stateBody stateBodyChunkedStart @@ -34,6 +32,7 @@ const ( var ( unmatchedResponses = monitoring.NewInt(nil, "http.unmatched_responses") + unmatchedRequests = monitoring.NewInt(nil, "http.unmatched_requests") ) type stream struct { @@ -67,7 +66,6 @@ type httpPlugin struct { splitCookie bool hideKeywords []string redactAuthorization bool - includeBodyFor []string maxMessageSize int parserConfig parserConfig @@ -124,7 +122,12 @@ func (http *httpPlugin) setFromConfig(config *httpConfig) { http.splitCookie = config.SplitCookie http.parserConfig.realIPHeader = strings.ToLower(config.RealIPHeader) http.transactionTimeout = config.TransactionTimeout - http.includeBodyFor = config.IncludeBodyFor + for _, list := range [][]string{config.IncludeBodyFor, config.IncludeRequestBodyFor} { + http.parserConfig.includeRequestBodyFor = append(http.parserConfig.includeRequestBodyFor, list...) + } + for _, list := range [][]string{config.IncludeBodyFor, config.IncludeResponseBodyFor} { + http.parserConfig.includeResponseBodyFor = append(http.parserConfig.includeResponseBodyFor, list...) 
+ } http.maxMessageSize = config.MaxMessageSize if config.SendAllHeaders { @@ -168,13 +171,11 @@ func (http *httpPlugin) messageGap(s *stream, nbytes int) (ok bool, complete boo } if !m.hasContentLength && (bytes.Equal(m.connection, constClose) || (isVersion(m.version, 1, 0) && !bytes.Equal(m.connection, constKeepAlive))) { - s.bodyReceived += nbytes m.contentLength += nbytes return true, false - } else if len(s.data[s.parseOffset:])+nbytes >= m.contentLength-s.bodyReceived { + } else if len(s.data)+nbytes >= m.contentLength-s.bodyReceived { // we're done, but the last portion of the data is gone - m.end = s.parseOffset return true, true } else { s.bodyReceived += nbytes @@ -186,7 +187,6 @@ func (http *httpPlugin) messageGap(s *stream, nbytes int) (ok bool, complete boo } func (st *stream) PrepareForNewMessage() { - st.data = st.data[st.message.end:] st.parseState = stateStart st.parseOffset = 0 st.bodyReceived = 0 @@ -201,8 +201,6 @@ func (http *httpPlugin) messageComplete( dir uint8, st *stream, ) { - st.message.raw = st.data[st.message.start:st.message.end] - http.handleHTTP(conn, st.message, tcptuple, dir) } @@ -274,7 +272,12 @@ func (http *httpPlugin) doParse( conn.streams[dir] = st } else { // concatenate bytes - if len(st.data)+len(pkt.Payload) > http.maxMessageSize { + totalLength := len(st.data) + len(pkt.Payload) + msg := st.message + if msg != nil { + totalLength += len(msg.body) + } + if totalLength > http.maxMessageSize { if isDebug { debugf("Stream data too large, ignoring message") } @@ -284,13 +287,14 @@ func (http *httpPlugin) doParse( } } - for len(st.data) > 0 { + for len(st.data) > 0 || extraMsgSize > 0 { if st.message == nil { st.message = &message{ts: pkt.Ts} } parser := newParser(&http.parserConfig) ok, complete := parser.parse(st, extraMsgSize) + extraMsgSize = 0 if !ok { // drop this tcp stream. Will retry parsing with the next // segment in it @@ -338,8 +342,7 @@ func (http *httpPlugin) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, // send whatever data we got so far as complete. This // is needed for the HTTP/1.0 without Content-Length situation. - if stream.message != nil && len(stream.data[stream.message.start:]) > 0 { - stream.message.raw = stream.data[stream.message.start:] + if stream.message != nil { http.handleHTTP(conn, stream.message, tcptuple, dir) // and reset message. Probably not needed, just to be sure. @@ -412,14 +415,31 @@ func (http *httpPlugin) handleHTTP( } } +func (http *httpPlugin) flushResponses(conn *httpConnectionData) { + for !conn.responses.empty() { + unmatchedResponses.Add(1) + resp := conn.responses.pop() + debugf("Response from unknown transaction: %s. Reporting error.", resp.tcpTuple) + event := http.newTransaction(nil, resp) + http.publishTransaction(event) + } +} + +func (http *httpPlugin) flushRequests(conn *httpConnectionData) { + for !conn.requests.empty() { + unmatchedRequests.Add(1) + requ := conn.requests.pop() + debugf("Request from unknown transaction %s. Reporting error.", requ.tcpTuple) + event := http.newTransaction(requ, nil) + http.publishTransaction(event) + } +} + func (http *httpPlugin) correlate(conn *httpConnectionData) { + // drop responses with missing requests if conn.requests.empty() { - for !conn.responses.empty() { - debugf("Response from unknown transaction. 
Ignoring.") - unmatchedResponses.Add(1) - conn.responses.pop() - } + http.flushResponses(conn) return } @@ -438,74 +458,92 @@ func (http *httpPlugin) correlate(conn *httpConnectionData) { func (http *httpPlugin) newTransaction(requ, resp *message) beat.Event { status := common.OK_STATUS - if resp.statusCode >= 400 { + if resp == nil { + status = common.ERROR_STATUS + if requ != nil { + requ.notes = append(requ.notes, "Unmatched request") + } + } else if resp.statusCode >= 400 { status = common.ERROR_STATUS } - - // resp_time in milliseconds - responseTime := int32(resp.ts.Sub(requ.ts).Nanoseconds() / 1e6) - - path, params, err := http.extractParameters(requ, requ.raw) - if err != nil { - logp.Warn("Fail to parse HTTP parameters: %v", err) + if requ == nil { + status = common.ERROR_STATUS + if resp != nil { + resp.notes = append(resp.notes, "Unmatched response") + } } - src := common.Endpoint{ - IP: requ.tcpTuple.SrcIP.String(), - Port: requ.tcpTuple.SrcPort, - Proc: string(requ.cmdlineTuple.Src), - } - dst := common.Endpoint{ - IP: requ.tcpTuple.DstIP.String(), - Port: requ.tcpTuple.DstPort, - Proc: string(requ.cmdlineTuple.Dst), - } - if requ.direction == tcp.TCPDirectionReverse { - src, dst = dst, src + httpDetails := common.MapStr{} + fields := common.MapStr{ + "type": "http", + "status": status, + "http": httpDetails, } - httpDetails := common.MapStr{ - "request": common.MapStr{ + var timestamp time.Time + + if requ != nil { + path, params, err := http.extractParameters(requ) + if err != nil { + logp.Warn("Fail to parse HTTP parameters: %v", err) + } + httpDetails["request"] = common.MapStr{ "params": params, "headers": http.collectHeaders(requ), - }, - "response": common.MapStr{ + } + fields["method"] = requ.method + fields["path"] = path + fields["query"] = fmt.Sprintf("%s %s", requ.method, path) + fields["bytes_in"] = requ.size + + fields["src"], fields["dst"] = requ.getEndpoints() + + http.setBody(httpDetails["request"].(common.MapStr), requ) + + timestamp = requ.ts + + if len(requ.notes) > 0 { + fields["notes"] = requ.notes + } + + if len(requ.realIP) > 0 { + fields["real_ip"] = requ.realIP + } + + if http.sendRequest { + fields["request"] = string(http.makeRawMessage(requ)) + } + } + + if resp != nil { + httpDetails["response"] = common.MapStr{ "code": resp.statusCode, "phrase": resp.statusPhrase, "headers": http.collectHeaders(resp), - }, - } - - http.setBody(httpDetails["request"].(common.MapStr), requ) - http.setBody(httpDetails["response"].(common.MapStr), resp) + } + http.setBody(httpDetails["response"].(common.MapStr), resp) + fields["bytes_out"] = resp.size - timestamp := requ.ts - fields := common.MapStr{ - "type": "http", - "status": status, - "responsetime": responseTime, - "method": requ.method, - "path": path, - "query": fmt.Sprintf("%s %s", requ.method, path), - "http": httpDetails, - "bytes_out": resp.size, - "bytes_in": requ.size, - "src": &src, - "dst": &dst, - } + if http.sendResponse { + fields["response"] = string(http.makeRawMessage(resp)) + } - if http.sendRequest { - fields["request"] = string(http.cutMessageBody(requ)) - } - if http.sendResponse { - fields["response"] = string(http.cutMessageBody(resp)) + if len(resp.notes) > 0 { + if fields["notes"] != nil { + fields["notes"] = append(fields["notes"].([]string), resp.notes...) 
+ } else { + fields["notes"] = resp.notes + } + } + if requ == nil { + timestamp = resp.ts + fields["src"], fields["dst"] = resp.getEndpoints() + } } - if len(requ.notes)+len(resp.notes) > 0 { - fields["notes"] = append(requ.notes, resp.notes...) - } - if len(requ.realIP) > 0 { - fields["real_ip"] = requ.realIP + // resp_time in milliseconds + if requ != nil && resp != nil { + fields["responsetime"] = int32(resp.ts.Sub(requ.ts).Nanoseconds() / 1e6) } return beat.Event{ @@ -514,6 +552,16 @@ func (http *httpPlugin) newTransaction(requ, resp *message) beat.Event { } } +func (http *httpPlugin) makeRawMessage(m *message) string { + var result []byte + result = append(result, m.rawHeaders...) + if m.sendBody { + result = append(result, m.body...) + } + // TODO: (go1.10) Use strings.Builder to avoid allocation/copying + return string(result) +} + func (http *httpPlugin) publishTransaction(event beat.Event) { if http.results == nil { return @@ -554,9 +602,8 @@ func (http *httpPlugin) collectHeaders(m *message) interface{} { } func (http *httpPlugin) setBody(result common.MapStr, m *message) { - body := string(http.extractBody(m)) - if len(body) > 0 { - result["body"] = body + if m.sendBody && len(m.body) > 0 { + result["body"] = string(m.body) } } @@ -583,57 +630,13 @@ func parseCookieValue(raw string) string { return raw } -func (http *httpPlugin) extractBody(m *message) []byte { - body := []byte{} - - if len(m.contentType) > 0 && http.shouldIncludeInBody(m.contentType) { - if len(m.chunkedBody) > 0 { - body = append(body, m.chunkedBody...) - } else { - if isDebug { - debugf("Body to include: [%s]", m.raw[m.bodyOffset:]) - } - body = append(body, m.raw[m.bodyOffset:]...) - } - } - - return body -} - -func (http *httpPlugin) cutMessageBody(m *message) []byte { - cutMsg := []byte{} - - // add headers always - cutMsg = m.raw[:m.bodyOffset] - - // add body - return append(cutMsg, http.extractBody(m)...) 
-} - -func (http *httpPlugin) shouldIncludeInBody(contenttype []byte) bool { - includedBodies := http.includeBodyFor - for _, include := range includedBodies { - if bytes.Contains(contenttype, []byte(include)) { - if isDebug { - debugf("Should Include Body = true Content-Type %s include_body %s", - contenttype, include) - } - return true - } - if isDebug { - debugf("Should Include Body = false Content-Type %s include_body %s", - contenttype, include) - } - } - return false -} - func (http *httpPlugin) hideHeaders(m *message) { if !m.isRequest || !http.redactAuthorization { return } - msg := m.raw + msg := m.rawHeaders + limit := len(msg) // byte64 != encryption, so obscure it in headers in case of Basic Authentication @@ -641,24 +644,24 @@ func (http *httpPlugin) hideHeaders(m *message) { authText := []byte("uthorization:") // [aA] case insensitive, also catches Proxy-Authorization: authHeaderStartX := m.headerOffset - authHeaderEndX := m.bodyOffset + authHeaderEndX := limit - for authHeaderStartX < m.bodyOffset { + for authHeaderStartX < limit { if isDebug { debugf("looking for authorization from %d to %d", authHeaderStartX, authHeaderEndX) } - startOfHeader := bytes.Index(msg[authHeaderStartX:m.bodyOffset], authText) + startOfHeader := bytes.Index(msg[authHeaderStartX:], authText) if startOfHeader >= 0 { authHeaderStartX = authHeaderStartX + startOfHeader - endOfHeader := bytes.Index(msg[authHeaderStartX:m.bodyOffset], []byte("\r\n")) + endOfHeader := bytes.Index(msg[authHeaderStartX:], constCRLF) if endOfHeader >= 0 { authHeaderEndX = authHeaderStartX + endOfHeader - if authHeaderEndX > m.bodyOffset { - authHeaderEndX = m.bodyOffset + if authHeaderEndX > limit { + authHeaderEndX = limit } if isDebug { @@ -670,8 +673,8 @@ func (http *httpPlugin) hideHeaders(m *message) { } } } - authHeaderStartX = authHeaderEndX + len("\r\n") - authHeaderEndX = m.bodyOffset + authHeaderStartX = authHeaderEndX + len(constCRLF) + authHeaderEndX = len(m.rawHeaders) } for _, header := range redactHeaders { @@ -679,8 +682,6 @@ func (http *httpPlugin) hideHeaders(m *message) { m.headers[header] = []byte("*") } } - - m.raw = msg } func (http *httpPlugin) hideSecrets(values url.Values) url.Values { @@ -700,7 +701,7 @@ func (http *httpPlugin) hideSecrets(values url.Values) url.Values { // extractParameters parses the URL and the form parameters and replaces the secrets // with the string xxxxx. The parameters containing secrets are defined in http.Hide_secrets. // Returns the Request URI path and the (adjusted) parameters. 
-func (http *httpPlugin) extractParameters(m *message, msg []byte) (path string, params string, err error) { +func (http *httpPlugin) extractParameters(m *message) (path string, params string, err error) { var values url.Values u, err := url.Parse(string(m.requestURI)) @@ -712,9 +713,9 @@ func (http *httpPlugin) extractParameters(m *message, msg []byte) (path string, paramsMap := http.hideSecrets(values) - if m.contentLength > 0 && bytes.Contains(m.contentType, []byte("urlencoded")) { + if m.contentLength > 0 && m.saveBody && bytes.Contains(m.contentType, []byte("urlencoded")) { - values, err = url.ParseQuery(string(msg[m.bodyOffset:])) + values, err = url.ParseQuery(string(m.body)) if err != nil { return } @@ -740,6 +741,33 @@ func (http *httpPlugin) isSecretParameter(key string) bool { return false } +func (http *httpPlugin) Expired(tuple *common.TCPTuple, private protos.ProtocolData) { + conn := getHTTPConnection(private) + if conn == nil { + return + } + if isDebug { + debugf("expired connection %s", tuple) + } + // terminate streams + for dir, s := range conn.streams { + // Do not send incomplete or empty messages + if s != nil && s.message != nil && s.message.headersReceived() { + if isDebug { + debugf("got message %+v", s.message) + } + http.handleHTTP(conn, s.message, tuple, uint8(dir)) + s.PrepareForNewMessage() + } + } + // correlate transactions + http.correlate(conn) + + // flush uncorrelated requests and responses + http.flushRequests(conn) + http.flushResponses(conn) +} + func (ml *messageList) append(msg *message) { if ml.tail == nil { ml.head = msg diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/http_parser.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/http_parser.go index 1f893811..e6c6f84c 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/http_parser.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/http_parser.go @@ -4,12 +4,14 @@ import ( "bytes" "errors" "fmt" + "strconv" "time" "unicode" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/streambuf" "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/packetbeat/protos/tcp" ) // Http Message @@ -20,7 +22,6 @@ type message struct { version version connection common.NetString chunkedLength int - chunkedBody []byte isRequest bool tcpTuple common.TCPTuple @@ -38,18 +39,21 @@ type message struct { contentLength int contentType common.NetString transferEncoding common.NetString + isChunked bool headers map[string]common.NetString size uint64 - //Raw Data - raw []byte + rawHeaders []byte - notes []string + // sendBody determines if the body must be sent along with the event + // because the content-type is included in the send_body_for setting. + sendBody bool + // saveBody determines if the body must be saved. It is set when sendBody + // is true or when the body type is form-urlencoded. 
+ saveBody bool + body []byte - //Offsets - start int - end int - bodyOffset int + notes []string next *message } @@ -64,10 +68,12 @@ type parser struct { } type parserConfig struct { - realIPHeader string - sendHeaders bool - sendAllHeaders bool - headersWhitelist map[string]bool + realIPHeader string + sendHeaders bool + sendAllHeaders bool + headersWhitelist map[string]bool + includeRequestBodyFor []string + includeResponseBodyFor []string } var ( @@ -130,7 +136,6 @@ func (parser *parser) parse(s *stream, extraMsgSize int) (bool, bool) { } func (*parser) parseHTTPLine(s *stream, m *message) (cont, ok, complete bool) { - m.start = s.parseOffset i := bytes.Index(s.data[s.parseOffset:], []byte("\r\n")) if i == -1 { return false, true, false @@ -141,7 +146,7 @@ func (*parser) parseHTTPLine(s *stream, m *message) (cont, ok, complete bool) { var version []byte var err error fline := s.data[s.parseOffset:i] - if len(fline) < 8 { + if len(fline) < 9 { if isDebug { debugf("First line too small") } @@ -176,9 +181,10 @@ func (*parser) parseHTTPLine(s *stream, m *message) (cont, ok, complete bool) { m.method = common.NetString(fline[:afterMethodIdx]) m.requestURI = common.NetString(fline[afterMethodIdx+1 : afterRequestURIIdx]) - if bytes.Equal(fline[afterRequestURIIdx+1:afterRequestURIIdx+len(constHTTPVersion)+1], constHTTPVersion) { + versionIdx := afterRequestURIIdx + len(constHTTPVersion) + 1 + if len(fline) > versionIdx && bytes.Equal(fline[afterRequestURIIdx+1:versionIdx], constHTTPVersion) { m.isRequest = true - version = fline[afterRequestURIIdx+len(constHTTPVersion)+1:] + version = fline[versionIdx:] } else { if isDebug { debugf("Couldn't understand HTTP version: %s", fline) @@ -243,8 +249,10 @@ func (parser *parser) parseHeaders(s *stream, m *message) (cont, ok, complete bo if len(s.data)-s.parseOffset >= 2 && bytes.Equal(s.data[s.parseOffset:s.parseOffset+2], []byte("\r\n")) { // EOH - s.parseOffset += 2 - m.bodyOffset = s.parseOffset + m.size = uint64(s.parseOffset + 2) + m.rawHeaders = s.data[:m.size] + s.data = s.data[m.size:] + s.parseOffset = 0 if !m.isRequest && ((100 <= m.statusCode && m.statusCode < 200) || m.statusCode == 204 || m.statusCode == 304) { //response with a 1xx, 204 , or 304 status code is always terminated @@ -252,12 +260,17 @@ func (parser *parser) parseHeaders(s *stream, m *message) (cont, ok, complete bo if isDebug { debugf("Terminate response, status code %d", m.statusCode) } - m.end = s.parseOffset - m.size = uint64(m.end - m.start) return false, true, true } - if bytes.Equal(m.transferEncoding, transferEncodingChunked) { + if m.isRequest { + m.sendBody = parser.shouldIncludeInBody(m.contentType, parser.config.includeRequestBodyFor) + } else { + m.sendBody = parser.shouldIncludeInBody(m.contentType, parser.config.includeResponseBodyFor) + } + m.saveBody = m.sendBody || (m.contentLength > 0 && bytes.Contains(m.contentType, []byte("urlencoded"))) + + if m.isChunked { // support for HTTP/1.1 Chunked transfer // Transfer-Encoding overrides the Content-Length if isDebug { @@ -272,8 +285,6 @@ func (parser *parser) parseHeaders(s *stream, m *message) (cont, ok, complete bo debugf("Empty content length, ignore body") } // Ignore body for request that contains a message body but not a Content-Length - m.end = s.parseOffset - m.size = uint64(m.end - m.start) return false, true, true } @@ -338,7 +349,7 @@ func (parser *parser) parseHeader(m *message, data []byte) (bool, bool, int) { } else if bytes.Equal(headerName, nameContentType) { m.contentType = headerVal } else if 
bytes.Equal(headerName, nameTransferEncoding) { - m.transferEncoding = common.NetString(headerVal) + m.isChunked = bytes.Equal(common.NetString(headerVal), transferEncodingChunked) } else if bytes.Equal(headerName, nameConnection) { m.connection = headerVal } @@ -375,28 +386,39 @@ func (parser *parser) parseHeader(m *message, data []byte) (bool, bool, int) { } func (*parser) parseBody(s *stream, m *message) (ok, complete bool) { - if isDebug { - debugf("parseBody body: %d", s.parseOffset) - } + nbytes := len(s.data) if !m.hasContentLength && (bytes.Equal(m.connection, constClose) || (isVersion(m.version, 1, 0) && !bytes.Equal(m.connection, constKeepAlive))) { + m.size += uint64(nbytes) + s.bodyReceived += nbytes + m.contentLength += nbytes + // HTTP/1.0 no content length. Add until the end of the connection if isDebug { - debugf("http conn close, received %d", len(s.data)-s.parseOffset) + debugf("http conn close, received %d", len(s.data)) } - s.bodyReceived += (len(s.data) - s.parseOffset) - m.contentLength += (len(s.data) - s.parseOffset) - s.parseOffset = len(s.data) + if m.saveBody { + m.body = append(m.body, s.data...) + } + s.data = nil return true, false - } else if len(s.data[s.parseOffset:]) >= m.contentLength-s.bodyReceived { - s.parseOffset += (m.contentLength - s.bodyReceived) - m.end = s.parseOffset - m.size = uint64(m.end - m.start) + } else if nbytes >= m.contentLength-s.bodyReceived { + wanted := m.contentLength - s.bodyReceived + if m.saveBody { + m.body = append(m.body, s.data[:wanted]...) + } + s.bodyReceived = m.contentLength + m.size += uint64(wanted) + s.data = s.data[wanted:] return true, true } else { - s.bodyReceived += (len(s.data) - s.parseOffset) - s.parseOffset = len(s.data) + if m.saveBody { + m.body = append(m.body, s.data...) + } + s.data = nil + s.bodyReceived += nbytes + m.size += uint64(nbytes) if isDebug { debugf("bodyReceived: %d", s.bodyReceived) } @@ -408,7 +430,7 @@ func (*parser) parseBody(s *stream, m *message) (ok, complete bool) { // those bytes. 
func (*parser) eatBody(s *stream, m *message, size int) (ok, complete bool) { if isDebug { - debugf("eatBody body: %d", s.parseOffset) + debugf("eatBody body") } if !m.hasContentLength && (bytes.Equal(m.connection, constClose) || (isVersion(m.version, 1, 0) && !bytes.Equal(m.connection, constKeepAlive))) { @@ -417,16 +439,18 @@ func (*parser) eatBody(s *stream, m *message, size int) (ok, complete bool) { if isDebug { debugf("http conn close, received %d", size) } + m.size += uint64(size) s.bodyReceived += size m.contentLength += size return true, false } else if size >= m.contentLength-s.bodyReceived { - s.bodyReceived += (m.contentLength - s.bodyReceived) - m.end = s.parseOffset - m.size = uint64(m.bodyOffset-m.start) + uint64(m.contentLength) + wanted := m.contentLength - s.bodyReceived + s.bodyReceived += wanted + m.size = uint64(len(m.rawHeaders) + m.contentLength) return true, true } else { s.bodyReceived += size + m.size += uint64(size) if isDebug { debugf("bodyReceived: %d", s.bodyReceived) } @@ -436,31 +460,32 @@ func (*parser) eatBody(s *stream, m *message, size int) (ok, complete bool) { func (*parser) parseBodyChunkedStart(s *stream, m *message) (cont, ok, complete bool) { // read hexa length - i := bytes.Index(s.data[s.parseOffset:], constCRLF) + i := bytes.Index(s.data, constCRLF) if i == -1 { return false, true, false } - line := string(s.data[s.parseOffset : s.parseOffset+i]) - _, err := fmt.Sscanf(line, "%x", &m.chunkedLength) + line := string(s.data[:i]) + chunkLength, err := strconv.ParseInt(line, 16, 32) if err != nil { logp.Warn("Failed to understand chunked body start line") return false, false, false } + m.chunkedLength = int(chunkLength) + + s.data = s.data[i+2:] //+ \r\n + m.size += uint64(i + 2) - s.parseOffset += i + 2 //+ \r\n if m.chunkedLength == 0 { - if len(s.data[s.parseOffset:]) < 2 { + if len(s.data) < 2 { s.parseState = stateBodyChunkedWaitFinalCRLF return false, true, false } - if s.data[s.parseOffset] != '\r' || s.data[s.parseOffset+1] != '\n' { + m.size += 2 + if s.data[0] != '\r' || s.data[1] != '\n' { logp.Warn("Expected CRLF sequence at end of message") return false, false, false } - s.parseOffset += 2 // skip final CRLF - - m.end = s.parseOffset - m.size = uint64(m.end - m.start) + s.data = s.data[2:] return false, true, true } s.bodyReceived = 0 @@ -470,43 +495,86 @@ func (*parser) parseBodyChunkedStart(s *stream, m *message) (cont, ok, complete } func (*parser) parseBodyChunked(s *stream, m *message) (cont, ok, complete bool) { - if len(s.data[s.parseOffset:]) >= m.chunkedLength-s.bodyReceived+2 /*\r\n*/ { + wanted := m.chunkedLength - s.bodyReceived + if len(s.data) >= wanted+2 /*\r\n*/ { // Received more data than expected - m.chunkedBody = append(m.chunkedBody, s.data[s.parseOffset:s.parseOffset+m.chunkedLength-s.bodyReceived]...) - s.parseOffset += (m.chunkedLength - s.bodyReceived + 2 /*\r\n*/) + if m.saveBody { + m.body = append(m.body, s.data[:wanted]...) + } + m.size += uint64(wanted + 2) + s.data = s.data[wanted+2:] m.contentLength += m.chunkedLength s.parseState = stateBodyChunkedStart return true, true, false } - if len(s.data[s.parseOffset:]) >= m.chunkedLength-s.bodyReceived { + if len(s.data) >= wanted { // we need need to wait for the +2, else we can crash on next call return false, true, false } // Received less data than expected - m.chunkedBody = append(m.chunkedBody, s.data[s.parseOffset:]...) - s.bodyReceived += (len(s.data) - s.parseOffset) - s.parseOffset = len(s.data) + if m.saveBody { + m.body = append(m.body, s.data...) 
+ } + s.bodyReceived += len(s.data) + m.size += uint64(len(s.data)) + s.data = nil return false, true, false } func (*parser) parseBodyChunkedWaitFinalCRLF(s *stream, m *message) (ok, complete bool) { - if len(s.data[s.parseOffset:]) < 2 { + if len(s.data) < 2 { return true, false } - if s.data[s.parseOffset] != '\r' || s.data[s.parseOffset+1] != '\n' { + m.size += 2 + if s.data[0] != '\r' || s.data[1] != '\n' { logp.Warn("Expected CRLF sequence at end of message") return false, false } - s.parseOffset += 2 // skip final CRLF - m.end = s.parseOffset - m.size = uint64(m.end - m.start) + s.data = s.data[2:] return true, true } +func (parser *parser) shouldIncludeInBody(contenttype []byte, capturedContentTypes []string) bool { + for _, include := range capturedContentTypes { + if bytes.Contains(contenttype, []byte(include)) { + if isDebug { + debugf("Should Include Body = true Content-Type %s include_body %s", + contenttype, include) + } + return true + } + } + if isDebug { + debugf("Should Include Body = false Content-Type %s", contenttype) + } + return false +} + +func (m *message) headersReceived() bool { + return m.headerOffset > 0 +} + +func (m *message) getEndpoints() (src *common.Endpoint, dst *common.Endpoint) { + src = &common.Endpoint{ + IP: m.tcpTuple.SrcIP.String(), + Port: m.tcpTuple.SrcPort, + Proc: string(m.cmdlineTuple.Src), + } + dst = &common.Endpoint{ + IP: m.tcpTuple.DstIP.String(), + Port: m.tcpTuple.DstPort, + Proc: string(m.cmdlineTuple.Dst), + } + if m.direction == tcp.TCPDirectionReverse { + src, dst = dst, src + } + return src, dst +} + func isVersion(v version, major, minor uint8) bool { return v.major == major && v.minor == minor } diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go index 980230a6..0bb81d0f 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go @@ -204,18 +204,17 @@ func TestHttpParser_eatBody(t *testing.T) { ok, complete := testParseStream(http, st, 0) assert.True(t, ok) assert.False(t, complete) - assert.Equal(t, st.bodyReceived, 10) + assert.Equal(t, 10, st.bodyReceived) ok, complete = testParseStream(http, st, 5) assert.True(t, ok) assert.False(t, complete) - assert.Equal(t, st.bodyReceived, 15) + assert.Equal(t, 15, st.bodyReceived) ok, complete = testParseStream(http, st, 5) assert.True(t, ok) assert.True(t, complete) - assert.Equal(t, st.bodyReceived, 20) - assert.Equal(t, st.message.end, len(data)) + assert.Equal(t, 20, st.bodyReceived) } func TestHttpParser_eatBody_connclose(t *testing.T) { @@ -511,12 +510,10 @@ func TestHttpParser_RequestResponseBody(t *testing.T) { "\r\n" data := data1 + data2 tp := newTestParser(nil, data) - msg, ok, complete := tp.parse() assert.True(t, ok) assert.True(t, complete) assert.Equal(t, 2, msg.contentLength) - assert.Equal(t, []byte(data1), tp.stream.data[tp.stream.message.start:tp.stream.message.end]) tp.stream.PrepareForNewMessage() tp.stream.message = &message{ts: time.Now()} @@ -623,38 +620,38 @@ func TestEatBodyChunked(t *testing.T) { if cont != false || ok != true || complete != false { t.Errorf("Wrong return values") } - assert.Equal(t, 0, st.parseOffset) + assert.Equal(t, 0, len(msg.body)) st.data = append(st.data, msgs[1]...) 
cont, ok, complete = parser.parseBodyChunkedStart(st, msg) assert.True(t, cont) assert.Equal(t, 3, msg.chunkedLength) - assert.Equal(t, 4, st.parseOffset) + assert.Equal(t, 0, len(msg.body)) assert.Equal(t, stateBodyChunked, st.parseState) cont, ok, complete = parser.parseBodyChunked(st, msg) assert.True(t, cont) assert.Equal(t, stateBodyChunkedStart, st.parseState) - assert.Equal(t, 9, st.parseOffset) + assert.Equal(t, 3, msg.contentLength) cont, ok, complete = parser.parseBodyChunkedStart(st, msg) assert.True(t, cont) assert.Equal(t, 3, msg.chunkedLength) - assert.Equal(t, 13, st.parseOffset) + assert.Equal(t, 3, msg.contentLength) assert.Equal(t, stateBodyChunked, st.parseState) cont, ok, complete = parser.parseBodyChunked(st, msg) assert.False(t, cont) assert.True(t, ok) assert.False(t, complete) - assert.Equal(t, 13, st.parseOffset) + assert.Equal(t, 3, msg.contentLength) assert.Equal(t, 0, st.bodyReceived) assert.Equal(t, stateBodyChunked, st.parseState) st.data = append(st.data, msgs[2]...) cont, ok, complete = parser.parseBodyChunked(st, msg) assert.True(t, cont) - assert.Equal(t, 18, st.parseOffset) + assert.Equal(t, 6, msg.contentLength) assert.Equal(t, stateBodyChunkedStart, st.parseState) cont, ok, complete = parser.parseBodyChunkedStart(st, msg) @@ -719,9 +716,6 @@ func TestEatBodyChunkedWaitCRLF(t *testing.T) { if ok != true || complete != true { t.Error("Wrong return values", ok, complete) } - if msg.end != 14 { - t.Error("Wrong message end", msg.end) - } } func TestHttpParser_requestURIWithSpace(t *testing.T) { @@ -753,8 +747,7 @@ func TestHttpParser_requestURIWithSpace(t *testing.T) { msg, ok, complete := tp.parse() assert.True(t, ok) assert.True(t, complete) - rawMsg := tp.stream.data[tp.stream.message.start:tp.stream.message.end] - path, params, err := http.extractParameters(msg, rawMsg) + path, params, err := http.extractParameters(msg) assert.Nil(t, err) assert.Equal(t, "/test", path) assert.Equal(t, string(msg.requestURI), "http://localhost:8080/test?password=two secret") @@ -789,8 +782,7 @@ func TestHttpParser_censorPasswordURL(t *testing.T) { msg, ok, complete := tp.parse() assert.True(t, ok) assert.True(t, complete) - rawMsg := tp.stream.data[tp.stream.message.start:tp.stream.message.end] - path, params, err := http.extractParameters(msg, rawMsg) + path, params, err := http.extractParameters(msg) assert.Nil(t, err) assert.Equal(t, "/test", path) assert.False(t, strings.Contains(params, "secret")) @@ -817,10 +809,10 @@ func TestHttpParser_censorPasswordPOST(t *testing.T) { assert.True(t, ok) assert.True(t, complete) - rawMsg := tp.stream.data[tp.stream.message.start:tp.stream.message.end] - path, params, err := http.extractParameters(msg, rawMsg) + path, params, err := http.extractParameters(msg) assert.Nil(t, err) assert.Equal(t, "/users/login", path) + assert.True(t, strings.Contains(params, "username=ME")) assert.False(t, strings.Contains(params, "secret")) } @@ -852,8 +844,7 @@ func TestHttpParser_censorPasswordGET(t *testing.T) { t.Errorf("Expecting a complete message") } - msg := st.data[st.message.start:st.message.end] - path, params, err := http.extractParameters(st.message, msg) + path, params, err := http.extractParameters(st.message) if err != nil { t.Errorf("Faile to parse parameters") } @@ -864,7 +855,7 @@ func TestHttpParser_censorPasswordGET(t *testing.T) { } if strings.Contains(params, "secret") { - t.Errorf("Failed to censor the password: %s", msg) + t.Errorf("Failed to censor the password: %s", string(st.message.rawHeaders)) } } @@ -893,9 +884,8 @@ 
func TestHttpParser_RedactAuthorization(t *testing.T) { ok, _ := testParseStream(http, st, 0) - st.message.raw = st.data[st.message.start:] http.hideHeaders(st.message) - msg := st.message.raw + msg := st.message.rawHeaders assert.True(t, ok) assert.Equal(t, "*", string(st.message.headers["authorization"])) @@ -929,9 +919,8 @@ func TestHttpParser_RedactAuthorization_raw(t *testing.T) { ok, complete := testParseStream(http, st, 0) - st.message.raw = st.data[st.message.start:] http.hideHeaders(st.message) - msg := st.message.raw + msg := st.message.rawHeaders if !ok { t.Errorf("Parsing returned error") @@ -965,9 +954,8 @@ func TestHttpParser_RedactAuthorization_Proxy_raw(t *testing.T) { ok, complete := testParseStream(http, st, 0) - st.message.raw = st.data[st.message.start:] http.hideHeaders(st.message) - msg := st.message.raw + msg := st.message.rawHeaders if !ok { t.Errorf("Parsing returned error") @@ -1161,6 +1149,85 @@ func TestHttpParser_composedHeaders(t *testing.T) { assert.Equal(t, "aCookie=yummy, anotherCookie=why%20not", string(header)) } +func TestHttpParser_includeBodyFor(t *testing.T) { + req := []byte("PUT /node HTTP/1.1\r\n" + + "Host: server\r\n" + + "Content-Length: 4\r\n" + + "Content-Type: application/x-foo\r\n" + + "\r\n" + + "body") + resp := []byte("HTTP/1.1 200 OK\r\n" + + "Content-Length: 5\r\n" + + "Content-Type: text/plain\r\n" + + "\r\n" + + "done.") + + var store eventStore + http := httpModForTests(&store) + http.parserConfig.includeRequestBodyFor = []string{"application/x-foo", "text/plain"} + http.parserConfig.includeResponseBodyFor = []string{"application/x-foo", "text/plain"} + + tcptuple := testCreateTCPTuple() + packet := protos.Packet{Payload: req} + private := protos.ProtocolData(&httpConnectionData{}) + private = http.Parse(&packet, tcptuple, 0, private) + http.ReceivedFin(tcptuple, 0, private) + + packet.Payload = resp + private = http.Parse(&packet, tcptuple, 1, private) + http.ReceivedFin(tcptuple, 1, private) + + trans := expectTransaction(t, &store) + assert.NotNil(t, trans) + hasKey, err := trans.HasKey("http.request.body") + if err != nil { + t.Fatal(err) + } + assert.True(t, hasKey) + contents, err := trans.GetValue("http.response.body") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, "done.", contents) +} + +func TestHttpParser_sendRequestResponse(t *testing.T) { + req := "POST / HTTP/1.1\r\n" + + "\r\n" + resp := "HTTP/1.1 404 Not Found\r\n" + + "Content-Length: 10\r\n" + + "\r\n" + respWithBody := resp + "not found" + + var store eventStore + http := httpModForTests(&store) + http.sendRequest = true + http.sendResponse = true + + tcptuple := testCreateTCPTuple() + packet := protos.Packet{Payload: []byte(req)} + private := protos.ProtocolData(&httpConnectionData{}) + private = http.Parse(&packet, tcptuple, 0, private) + http.ReceivedFin(tcptuple, 0, private) + + packet.Payload = []byte(respWithBody) + private = http.Parse(&packet, tcptuple, 1, private) + http.ReceivedFin(tcptuple, 1, private) + + trans := expectTransaction(t, &store) + assert.NotNil(t, trans) + contents, err := trans.GetValue("request") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, req, contents) + contents, err = trans.GetValue("response") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, resp, contents) +} + func testCreateTCPTuple() *common.TCPTuple { t := &common.TCPTuple{ IPLength: 4, @@ -1239,6 +1306,9 @@ func TestHttp_configsSettingAll(t *testing.T) { config.SendAllHeaders = true config.SplitCookie = true config.RealIPHeader = "X-Forwarded-For" + 
config.IncludeBodyFor = []string{"body"} + config.IncludeRequestBodyFor = []string{"req1", "req2"} + config.IncludeResponseBodyFor = []string{"resp1", "resp2", "resp3"} // Set config http.setFromConfig(&config) @@ -1254,6 +1324,8 @@ func TestHttp_configsSettingAll(t *testing.T) { assert.True(t, http.parserConfig.sendAllHeaders) assert.Equal(t, config.SplitCookie, http.splitCookie) assert.Equal(t, strings.ToLower(config.RealIPHeader), http.parserConfig.realIPHeader) + assert.Equal(t, append(config.IncludeBodyFor, config.IncludeRequestBodyFor...), http.parserConfig.includeRequestBodyFor) + assert.Equal(t, append(config.IncludeBodyFor, config.IncludeResponseBodyFor...), http.parserConfig.includeResponseBodyFor) } func TestHttp_configsSettingHeaders(t *testing.T) { @@ -1275,6 +1347,63 @@ func TestHttp_configsSettingHeaders(t *testing.T) { } } +func TestHttp_includeBodies(t *testing.T) { + reqTp := "PUT /node HTTP/1.1\r\n" + + "Host: server\r\n" + + "Content-Length: 12\r\n" + + "Content-Type: %s\r\n" + + "\r\n" + + "request_body" + respTp := "HTTP/1.1 200 OK\r\n" + + "Content-Length: 5\r\n" + + "Content-Type: %s\r\n" + + "\r\n" + + "done." + var store eventStore + http := httpModForTests(&store) + config := defaultConfig + config.IncludeBodyFor = []string{"both"} + config.IncludeRequestBodyFor = []string{"req1", "req2"} + config.IncludeResponseBodyFor = []string{"resp1", "resp2", "resp3"} + http.setFromConfig(&config) + + tcptuple := testCreateTCPTuple() + + for idx, testCase := range []struct { + requestCt, responseCt string + hasRequest, hasResponse bool + }{ + {"none", "none", false, false}, + {"both", "other", true, false}, + {"other", "both", false, true}, + {"both", "both", true, true}, + {"req1", "none", true, false}, + {"none", "req1", false, false}, + {"req2", "resp1", true, true}, + {"none", "resp2", false, true}, + {"resp3", "req2", false, false}, + } { + msg := fmt.Sprintf("test case %d (%s, %s)", idx, testCase.requestCt, testCase.responseCt) + req := fmt.Sprintf(reqTp, testCase.requestCt) + resp := fmt.Sprintf(respTp, testCase.responseCt) + + packet := protos.Packet{Payload: []byte(req)} + private := protos.ProtocolData(&httpConnectionData{}) + private = http.Parse(&packet, tcptuple, 0, private) + + packet.Payload = []byte(resp) + private = http.Parse(&packet, tcptuple, 1, private) + http.ReceivedFin(tcptuple, 1, private) + + trans := expectTransaction(t, &store) + assert.NotNil(t, trans) + hasKey, _ := trans.HasKey("http.request.body") + assert.Equal(t, testCase.hasRequest, hasKey, msg) + hasKey, _ = trans.HasKey("http.response.body") + assert.Equal(t, testCase.hasResponse, hasKey, msg) + } +} + func benchmarkHTTPMessage(b *testing.B, data []byte) { http := httpModForTests(nil) parser := newParser(&http.parserConfig) @@ -1394,3 +1523,33 @@ func BenchmarkHttpSimpleTransaction(b *testing.B) { http.ReceivedFin(tcptuple, 1, private) } } + +func BenchmarkHttpLargeResponseBody(b *testing.B) { + const PacketSize = 1024 + const BodySize = 10 * 1024 * PacketSize + const numPackets = BodySize / PacketSize + bodyPayload := &protos.Packet{Payload: make([]byte, PacketSize)} + for i := 0; i < PacketSize; i++ { + bodyPayload.Payload[i] = byte(0x30 + (i % 10)) + } + + http := httpModForTests(nil) + tcptuple := testCreateTCPTuple() + header := fmt.Sprintf("HTTP/1.1 200 OK\r\n"+ + "Host: some.server\r\n"+ + "Connection: Close\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n", BodySize) + + for i := 0; i < b.N; i++ { + headPkt := protos.Packet{Payload: []byte(header)} + private := 
protos.ProtocolData(&httpConnectionData{}) + private = http.Parse(&headPkt, tcptuple, 0, private) + + for j := 0; j < numPackets; j++ { + private = http.Parse(bodyPayload, tcptuple, 0, private) + } + http.ReceivedFin(tcptuple, 1, private) + } + b.ReportAllocs() +} diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_parser.go b/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_parser.go index 6567c250..4631d518 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_parser.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_parser.go @@ -341,6 +341,9 @@ func (d *decoder) readDocument() (bson.M, error) { start := d.i documentLength, err := d.readInt32() d.i = start + documentLength + if len(d.in) < d.i { + return nil, errors.New("document out of bounds") + } documentMap := bson.M{} diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_structs.go b/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_structs.go index 2fbf0b96..990de1ba 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_structs.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_structs.go @@ -2,6 +2,7 @@ package mongodb // Represent a mongodb message being parsed import ( + "fmt" "time" "github.com/elastic/beats/libbeat/common" @@ -113,7 +114,10 @@ func validOpcode(o opCode) bool { } func (o opCode) String() string { - return opCodeNames[o] + if name, found := opCodeNames[o]; found { + return name + } + return fmt.Sprintf("(value=%d)", int32(o)) } func awaitsReply(c opCode) bool { diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_test.go b/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_test.go index edbebb23..4cc36fc6 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_test.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/mongodb/mongodb_test.go @@ -333,3 +333,41 @@ func TestMaxDocSize(t *testing.T) { assert.Equal(t, "\"1234 ...\n\"123\"\n\"12\"", res["response"]) } + +func TestOpCodeNames(t *testing.T) { + for _, testData := range []struct { + code int32 + expected string + }{ + {1, "OP_REPLY"}, + {-1, "(value=-1)"}, + } { + assert.Equal(t, testData.expected, opCode(testData.code).String()) + } +} + +// Test for a (recovered) panic parsing document length in request/response messages +func TestDocumentLengthBoundsChecked(t *testing.T) { + logp.TestingSetup(logp.WithSelectors("mongodb", "mongodbdetailed")) + + _, mongodb := mongodbModForTests() + + // request and response from tests/pcaps/mongo_one_row.pcap + reqData, err := hex.DecodeString( + // Request message with out of bounds document + "320000000a000000ffffffffd4070000" + + "00000000746573742e72667374617572" + + "616e7473000000000001000000" + + // Document length (including itself) + "06000000" + + // Document (1 byte instead of 2) + "00") + assert.Nil(t, err) + + tcptuple := testTCPTuple() + req := protos.Packet{Payload: reqData} + private := protos.ProtocolData(new(mongodbConnectionData)) + + private = mongodb.Parse(&req, tcptuple, 0, private) + assert.NotNil(t, private, "mongodb parser recovered from a panic") +} diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/registry.go b/vendor/github.com/elastic/beats/packetbeat/protos/registry.go index b2ba6a2d..0b501d62 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/registry.go +++ 
b/vendor/github.com/elastic/beats/packetbeat/protos/registry.go @@ -50,6 +50,15 @@ type UDPPlugin interface { ParseUDP(pkt *Packet) } +// ExpirationAwareTCPPlugin is a TCPPlugin that also provides the Expired() +// method. No need to use this type directly, just implement the method. +type ExpirationAwareTCPPlugin interface { + TCPPlugin + + // Expired is called when the TCP stream is expired due to connection timeout. + Expired(tuple *common.TCPTuple, private ProtocolData) +} + // Protocol identifier. type Protocol uint16 diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/tcp/tcp.go b/vendor/github.com/elastic/beats/packetbeat/protos/tcp/tcp.go index 00bd577f..ea1cb0bf 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/tcp/tcp.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/tcp/tcp.go @@ -2,6 +2,7 @@ package tcp import ( "fmt" + "sync" "time" "github.com/elastic/beats/libbeat/common" @@ -22,10 +23,21 @@ const ( ) type TCP struct { - id uint32 - streams *common.Cache - portMap map[uint16]protos.Protocol - protocols protos.Protocols + id uint32 + streams *common.Cache + portMap map[uint16]protos.Protocol + protocols protos.Protocols + expiredConns expirationQueue +} + +type expiredConnection struct { + mod protos.ExpirationAwareTCPPlugin + conn *TCPConnection +} + +type expirationQueue struct { + mutex sync.Mutex + conns []expiredConnection } type Processor interface { @@ -132,6 +144,8 @@ func (tcp *TCP) Process(id *flows.FlowID, tcphdr *layers.TCP, pkt *protos.Packet // protocol modules. defer logp.Recover("Process tcp exception") + tcp.expiredConns.notifyAll() + stream, created := tcp.getStream(pkt) if stream.conn == nil { return @@ -298,10 +312,12 @@ func NewTCP(p protos.Protocols) (*TCP, error) { tcp := &TCP{ protocols: p, portMap: portMap, - streams: common.NewCache( - protos.DefaultTransactionExpiration, - protos.DefaultTransactionHashSize), } + tcp.streams = common.NewCacheWithRemovalListener( + protos.DefaultTransactionExpiration, + protos.DefaultTransactionHashSize, + tcp.removalListener) + tcp.streams.StartJanitor(protos.DefaultTransactionExpiration) if isDebug { debugf("tcp", "Port map: %v", portMap) @@ -309,3 +325,40 @@ func NewTCP(p protos.Protocols) (*TCP, error) { return tcp, nil } + +func (tcp *TCP) removalListener(_ common.Key, value common.Value) { + conn := value.(*TCPConnection) + mod := conn.tcp.protocols.GetTCP(conn.protocol) + if mod != nil { + awareMod, ok := mod.(protos.ExpirationAwareTCPPlugin) + if ok { + tcp.expiredConns.add(awareMod, conn) + } + } +} + +func (ec *expiredConnection) notify() { + ec.mod.Expired(&ec.conn.tcptuple, ec.conn.data) +} + +func (eq *expirationQueue) add(mod protos.ExpirationAwareTCPPlugin, conn *TCPConnection) { + eq.mutex.Lock() + eq.conns = append(eq.conns, expiredConnection{ + mod: mod, + conn: conn, + }) + eq.mutex.Unlock() +} + +func (eq *expirationQueue) getExpired() (conns []expiredConnection) { + eq.mutex.Lock() + conns, eq.conns = eq.conns, nil + eq.mutex.Unlock() + return conns +} + +func (eq *expirationQueue) notifyAll() { + for _, expiration := range eq.getExpired() { + expiration.notify() + } +} diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/tls/algos.go b/vendor/github.com/elastic/beats/packetbeat/protos/tls/algos.go index 35584361..0a53e10e 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/tls/algos.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/tls/algos.go @@ -176,6 +176,13 @@ var cipherSuites = map[cipherSuite]string{ 0x00C5: 
"TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256", 0x00FF: "TLS_EMPTY_RENEGOTIATION_INFO_SCSV", + + 0x1301: "TLS_AES_128_GCM_SHA256", + 0x1302: "TLS_AES_256_GCM_SHA384", + 0x1303: "TLS_CHACHA20_POLY1305_SHA256", + 0x1304: "TLS_AES_128_CCM_SHA256", + 0x1305: "TLS_AES_128_CCM_8_SHA256", + 0x5600: "TLS_FALLBACK_SCSV", 0xC001: "TLS_ECDH_ECDSA_WITH_NULL_SHA", diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/files/ThriftTest.thrift b/vendor/github.com/elastic/beats/packetbeat/tests/files/ThriftTest.thrift similarity index 100% rename from vendor/github.com/elastic/beats/packetbeat/tests/system/files/ThriftTest.thrift rename to vendor/github.com/elastic/beats/packetbeat/tests/files/ThriftTest.thrift diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/files/shared.thrift b/vendor/github.com/elastic/beats/packetbeat/tests/files/shared.thrift similarity index 100% rename from vendor/github.com/elastic/beats/packetbeat/tests/system/files/shared.thrift rename to vendor/github.com/elastic/beats/packetbeat/tests/files/shared.thrift diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/files/tutorial.thrift b/vendor/github.com/elastic/beats/packetbeat/tests/files/tutorial.thrift similarity index 100% rename from vendor/github.com/elastic/beats/packetbeat/tests/system/files/tutorial.thrift rename to vendor/github.com/elastic/beats/packetbeat/tests/files/tutorial.thrift diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/config/packetbeat.yml.j2 b/vendor/github.com/elastic/beats/packetbeat/tests/system/config/packetbeat.yml.j2 index 513e94ac..95b02acc 100644 --- a/vendor/github.com/elastic/beats/packetbeat/tests/system/config/packetbeat.yml.j2 +++ b/vendor/github.com/elastic/beats/packetbeat/tests/system/config/packetbeat.yml.j2 @@ -81,6 +81,7 @@ packetbeat.protocols: ] {%- endif %} {%- if http_max_message_size %} max_message_size: {{ http_max_message_size }} {%- endif %} +{%- if http_transaction_timeout %} transaction_timeout: {{ http_transaction_timeout }} {%- endif %} - type: memcache ports: [{{ memcache_ports|default([11211])|join(", ") }}] diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/packetbeat.py b/vendor/github.com/elastic/beats/packetbeat/tests/system/packetbeat.py index 2d142ba8..d58b6ad1 100644 --- a/vendor/github.com/elastic/beats/packetbeat/tests/system/packetbeat.py +++ b/vendor/github.com/elastic/beats/packetbeat/tests/system/packetbeat.py @@ -29,7 +29,8 @@ def run_packetbeat(self, pcap, output="packetbeat.log", extra_args=[], debug_selectors=[], - exit_code=0): + exit_code=0, + real_time=False): """ Executes packetbeat on an input pcap file. Waits for the process to finish before returning to @@ -41,11 +42,13 @@ def run_packetbeat(self, pcap, args = [cmd] + if not real_time: + args.extend(["-t"]) + args.extend([ "-e", "-I", os.path.join(self.beat_path + "/tests/system/pcaps", pcap), "-c", os.path.join(self.working_dir, config), - "-t", "-systemTest", "-test.coverprofile", os.path.join(self.working_dir, "coverage.cov"), ]) @@ -72,7 +75,7 @@ def run_packetbeat(self, pcap, return actual_exit_code def start_packetbeat(self, - cmd="../../packetbeat.test", + cmd=None, config="packetbeat.yml", output="packetbeat.log", extra_args=[], @@ -82,6 +85,9 @@ def start_packetbeat(self, caller is responsible for stopping / waiting for the Proc instance. 
""" + if cmd is None: + cmd = self.beat_path + "/packetbeat.test" + args = [cmd, "-e", "-c", os.path.join(self.working_dir, config), diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_post.pcap b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_post.pcap index e757a845..828c55c3 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_post.pcap and b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_post.pcap differ diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_unmatched.pcap b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_unmatched.pcap new file mode 100644 index 00000000..f556accc Binary files /dev/null and b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_unmatched.pcap differ diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_unmatched_timeout.pcap b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_unmatched_timeout.pcap new file mode 100644 index 00000000..b9bedaf8 Binary files /dev/null and b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_unmatched_timeout.pcap differ diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_url_params.pcap b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_url_params.pcap index b8bb30e8..96682ef7 100644 Binary files a/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_url_params.pcap and b/vendor/github.com/elastic/beats/packetbeat/tests/system/pcaps/http_url_params.pcap differ diff --git a/vendor/github.com/elastic/beats/packetbeat/tests/system/test_0065_unmatched_http.py b/vendor/github.com/elastic/beats/packetbeat/tests/system/test_0065_unmatched_http.py new file mode 100644 index 00000000..57a33d63 --- /dev/null +++ b/vendor/github.com/elastic/beats/packetbeat/tests/system/test_0065_unmatched_http.py @@ -0,0 +1,60 @@ +from packetbeat import BaseTest + + +def check_event(event, expected): + for key in expected: + assert key in event, "key '{0}' not found in event".format(key) + assert event[key] == expected[key],\ + "key '{0}' has value '{1}', expected '{2}'".format(key, + event[key], + expected[key]) + + +class Test(BaseTest): + + def test_unmatched_response(self): + """ + Unmatched response in stream + """ + + self.render_config_template( + http_ports=[8080], + ) + self.run_packetbeat(pcap="http_unmatched.pcap", + debug_selectors=["http", "httpdetailed"]) + objs = self.read_output() + + assert len(objs) == 2 + + check_event(objs[0], { + "type": "http", + "status": "Error", + "http.response.code": 404, + "notes": ["Unmatched response"]}) + + check_event(objs[1], { + "type": "http", + "http.response.code": 200, + "http.request.headers": {"content-length": 0}, + "status": "OK"}) + + def test_unmatched_request(self): + """ + Unmatched request due to timeout (15s) + """ + + self.render_config_template( + http_ports=[8080], + http_transaction_timeout="1s", + ) + self.run_packetbeat(pcap="http_unmatched_timeout.pcap", + debug_selectors=["http", "httpdetailed"], + real_time=True) + objs = self.read_output() + print objs + assert len(objs) == 1 + check_event(objs[0], { + "type": "http", + "status": "Error", + "query": "GET /something", + "notes": ["Unmatched request"]}) diff --git a/vendor/github.com/elastic/beats/script/update_golang_x.py b/vendor/github.com/elastic/beats/script/update_golang_x.py new file mode 100644 index 00000000..939fe05b --- /dev/null +++ 
b/vendor/github.com/elastic/beats/script/update_golang_x.py @@ -0,0 +1,40 @@ + +import json +import os +import argparse +import subprocess + + +def update(pkg_name): + """Call govendor on the targeted golang/x packages""" + + vendor_file = os.path.join('vendor', 'vendor.json') + target = 'golang.org/x/{}'.format(pkg_name) + + with open(vendor_file) as content: + deps = json.load(content) + packages = [dep['path'] for dep in deps['package'] if dep['path'].startswith(target)] + revision = '@{revision}'.format(revision=args.revision) if args.revision else '' + packages = ['{pkg}{revision}'.format(pkg=pkg, revision=revision) for pkg in packages] + cmd = ['govendor', 'fetch'] + packages + if args.verbose: + print(' '.join(cmd)) + subprocess.check_call(cmd) + + +def get_parser(): + """Creates parser to parse script params + """ + parser = argparse.ArgumentParser(description="Update golang.org/x/ in vendor folder") + parser.add_argument('-q', '--quiet', dest='verbose', action='store_false', help='work quietly') + parser.add_argument('--revision', help='update deps to this revision', default='') + parser.add_argument('name', help='name of the golang.org/x/ package. Can be empty', default='', nargs='?') + return parser + + +if __name__ == "__main__": + + parser = get_parser() + args = parser.parse_args() + + update(args.name) diff --git a/vendor/github.com/elastic/beats/testing/environments/5.0.0-cgroups.yml b/vendor/github.com/elastic/beats/testing/environments/5.0.0-cgroups.yml deleted file mode 100644 index 00d3cf98..00000000 --- a/vendor/github.com/elastic/beats/testing/environments/5.0.0-cgroups.yml +++ /dev/null @@ -1,31 +0,0 @@ -# This should test the environment with the latest snapshots -# This is based on base.yml - -version: '2' -services: - elasticsearch: - build: - context: ./docker/elasticsearch - dockerfile: Dockerfile-5.0.0-alpha5 - command: elasticsearch -Enetwork.host=0.0.0.0 -Ediscovery.zen.minimum_master_nodes=1 -Ebootstrap.ignore_system_bootstrap_checks=true - - logstash: - build: - context: ./docker/logstash - dockerfile: Dockerfile-5.0.0-alpha5 - - kibana: - build: - context: ./docker/kibana - dockerfile: Dockerfile-5.0.0-alpha5 - - metricbeat: - build: - context: ./docker/metricbeat - dockerfile: Dockerfile-5.0.0-cgroups - links: - - elasticsearch - volumes: - - "/proc:/hostfs/proc:ro" - - "/sys/fs/cgroup:/hostfs/sys/fs/cgroup" - command: -system.hostfs=/hostfs -E output.elasticsearch.hosts=elasticsearch -E metricbeat.modules.0.cgroups=true diff --git a/vendor/github.com/elastic/beats/testing/environments/5x.yml b/vendor/github.com/elastic/beats/testing/environments/5x.yml index bac0cdf1..31d4412b 100644 --- a/vendor/github.com/elastic/beats/testing/environments/5x.yml +++ b/vendor/github.com/elastic/beats/testing/environments/5x.yml @@ -3,7 +3,7 @@ version: '2.1' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:5.6.3 + image: docker.elastic.co/elasticsearch/elasticsearch:5.6.9 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] environment: @@ -14,17 +14,15 @@ services: - "xpack.security.enabled=false" logstash: - build: - context: docker/logstash - dockerfile: Dockerfile - args: - ELASTIC_VERSION: 5.6.3 - DOWNLOAD_URL: https://artifacts.elastic.co/downloads - environment: - - ES_HOST=elasticsearch + image: docker.elastic.co/logstash/logstash:5.6.9 + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] + volumes: + - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro + - 
./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:5.6.3 + image: docker.elastic.co/kibana/kibana:5.6.9 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:5601"] retries: 6 diff --git a/vendor/github.com/elastic/beats/testing/environments/Makefile b/vendor/github.com/elastic/beats/testing/environments/Makefile index 116f90ea..19037aaf 100644 --- a/vendor/github.com/elastic/beats/testing/environments/Makefile +++ b/vendor/github.com/elastic/beats/testing/environments/Makefile @@ -3,7 +3,7 @@ BASE_COMMAND=docker-compose -f ${ENV} -f local.yml start: # This is run every time to make sure the environment is up-to-date - ${BASE_COMMAND} build + ${BASE_COMMAND} build --pull --force-rm ${BASE_COMMAND} run beat bash stop: diff --git a/vendor/github.com/elastic/beats/testing/environments/args.yml b/vendor/github.com/elastic/beats/testing/environments/args.yml index f8f39338..5f1aa3e1 100644 --- a/vendor/github.com/elastic/beats/testing/environments/args.yml +++ b/vendor/github.com/elastic/beats/testing/environments/args.yml @@ -6,5 +6,5 @@ services: build: args: DOWNLOAD_URL: https://snapshots.elastic.co/downloads - ELASTIC_VERSION: 6.2.3-SNAPSHOT - CACHE_BUST: 20180208 + ELASTIC_VERSION: 6.3.2-SNAPSHOT + CACHE_BUST: 20180615 diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile index 2920021d..8b1c1e34 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile +++ b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/Dockerfile @@ -1,5 +1,4 @@ # Copy of https://github.com/elastic/elasticsearch-docker/blob/master/build/elasticsearch/Dockerfile -#FROM docker.elastic.co/elasticsearch/elasticsearch-alpine-base:latest FROM centos:7 MAINTAINER Elastic Docker Team @@ -7,7 +6,7 @@ ARG ELASTIC_VERSION ARG DOWNLOAD_URL ARG ES_JAVA_OPTS ARG CACHE_BUST=1 -ARG XPACK +ARG IMAGE_FLAVOR=x-pack ENV ELASTIC_CONTAINER true ENV PATH /usr/share/elasticsearch/bin:$PATH @@ -19,30 +18,29 @@ RUN groupadd -g 1000 elasticsearch && adduser -u 1000 -g 1000 -d /usr/share/elas WORKDIR /usr/share/elasticsearch -# Download/extract defined ES version. busybox tar can't strip leading dir. -RUN curl -L -o elasticsearch-${ELASTIC_VERSION}.tar.gz ${DOWNLOAD_URL}/elasticsearch/elasticsearch-${ELASTIC_VERSION}.tar.gz?c=${CACHE_BUST} && \ - EXPECTED_SHA=$(wget -O - ${DOWNLOAD_URL}/elasticsearch/elasticsearch-${ELASTIC_VERSION}.tar.gz.sha512 | awk '{print $1}') && \ - test $EXPECTED_SHA == $(sha512sum elasticsearch-${ELASTIC_VERSION}.tar.gz | awk '{print $1}') && \ - tar zxf elasticsearch-${ELASTIC_VERSION}.tar.gz && \ +# Download/extract the defined ES version. +COPY download.sh /download.sh +RUN /download.sh $DOWNLOAD_URL $ELASTIC_VERSION $CACHE_BUST && rm /download.sh + +RUN tar zxf elasticsearch-${ELASTIC_VERSION}.tar.gz && \ chown -R elasticsearch:elasticsearch elasticsearch-${ELASTIC_VERSION} && \ mv elasticsearch-${ELASTIC_VERSION}/* . 
&& \ rmdir elasticsearch-${ELASTIC_VERSION} && \ rm elasticsearch-${ELASTIC_VERSION}.tar.gz -RUN set -ex && for esdirs in config data logs; do \ +RUN set -e && for esdirs in config data logs; do \ mkdir -p "$esdirs"; \ chown -R elasticsearch:elasticsearch "$esdirs"; \ done USER elasticsearch -# Install xpack -RUN if [ ${XPACK} = "1" ]; then elasticsearch-plugin install --batch ${DOWNLOAD_URL}/packs/x-pack/x-pack-${ELASTIC_VERSION}.zip?c=${CACHE_BUST}; fi -RUN elasticsearch-plugin install --batch ${DOWNLOAD_URL}/elasticsearch-plugins/ingest-user-agent/ingest-user-agent-${ELASTIC_VERSION}.zip?c=${CACHE_BUST} -RUN elasticsearch-plugin install --batch ${DOWNLOAD_URL}/elasticsearch-plugins/ingest-geoip/ingest-geoip-${ELASTIC_VERSION}.zip?c=${CACHE_BUST} +# Install plugins. +RUN elasticsearch-plugin install --batch ${DOWNLOAD_URL}/elasticsearch-plugins/ingest-user-agent/ingest-user-agent-${ELASTIC_VERSION}.zip +RUN elasticsearch-plugin install --batch ${DOWNLOAD_URL}/elasticsearch-plugins/ingest-geoip/ingest-geoip-${ELASTIC_VERSION}.zip # Set bootstrap password (for when security is used) -RUN if [ ${XPACK} = "1" ]; then elasticsearch-keystore create; echo "changeme" | elasticsearch-keystore add -x 'bootstrap.password'; fi +RUN if [ "${IMAGE_FLAVOR}" = "x-pack" ]; then elasticsearch-keystore create; echo "changeme" | elasticsearch-keystore add -x 'bootstrap.password'; fi COPY config/elasticsearch.yml config/ COPY config/log4j2.properties config/ @@ -52,6 +50,9 @@ USER root RUN chown elasticsearch:elasticsearch config/elasticsearch.yml config/log4j2.properties bin/es-docker && \ chmod 0750 bin/es-docker +# Enable a trial license for testing ML and Alerting. +RUN if [ "${IMAGE_FLAVOR}" = "x-pack" ]; then echo "xpack.license.self_generated.type: trial" >> config/elasticsearch.yml; fi + USER elasticsearch CMD ["/bin/bash", "bin/es-docker"] diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/download.sh b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/download.sh new file mode 100755 index 00000000..2610741a --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/elasticsearch/download.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -eo pipefail + +if [ -z ${DOWNLOAD_URL+x} ]; then echo "DOWNLOAD_URL is unset"; exit 1; fi +if [ -z ${ELASTIC_VERSION+x} ]; then echo "ELASTIC_VERSION is unset"; exit 1; fi +if [ -z ${IMAGE_FLAVOR+x} ]; then echo "IMAGE_FLAVOR is unset"; exit 1; fi + +url=${DOWNLOAD_URL}/elasticsearch/elasticsearch-oss/elasticsearch-oss-${ELASTIC_VERSION}.tar.gz +if [ "${IMAGE_FLAVOR}" = "x-pack" ]; then + url=${DOWNLOAD_URL}/elasticsearch/elasticsearch-${ELASTIC_VERSION}.tar.gz +fi + +# Download. +curl -s -L -o elasticsearch-${ELASTIC_VERSION}.tar.gz $url + +# Validate SHA512. 
+expected_sha=$(curl -s -L $url.sha512 | awk '{print $1}') +observed_sha=$(sha512sum elasticsearch-${ELASTIC_VERSION}.tar.gz | awk '{print $1}') +test "${expected_sha}" == "${observed_sha}" diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile index 447cfa15..416bae46 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile +++ b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/Dockerfile @@ -3,12 +3,11 @@ FROM centos:7 LABEL maintainer "Elastic Docker Team " EXPOSE 5601 - ### Beats specific args #### -ARG DOWNLOAD_URL=https://snapshots.elastic.co/downloads -ARG ELASTIC_VERSION=6.2.2-SNAPSHOT +ARG DOWNLOAD_URL +ARG ELASTIC_VERSION ARG CACHE_BUST=1 -ARG XPACK=1 +ARG IMAGE_FLAVOR=x-pack # Healthcheck create by beats team RUN yum install update -y epel-release && yum install -y jq @@ -19,7 +18,10 @@ HEALTHCHECK --interval=1s --retries=600 CMD curl -f http://localhost:5601/api/st RUN yum update -y && yum install -y fontconfig freetype && yum clean all WORKDIR /usr/share/kibana -RUN curl -Ls ${DOWNLOAD_URL}/kibana/kibana-${ELASTIC_VERSION}-linux-x86_64.tar.gz?c=${CACHE_BUST} | tar --strip-components=1 -zxf - && \ +COPY download.sh /download.sh +RUN /download.sh $DOWNLOAD_URL $ELASTIC_VERSION $CACHE_BUST && rm /download.sh +RUN tar --strip-components=1 -zxf kibana-${ELASTIC_VERSION}-linux-x86_64.tar.gz && \ + rm kibana-${ELASTIC_VERSION}-linux-x86_64.tar.gz && \ ln -s /usr/share/kibana /opt/kibana ENV ELASTIC_CONTAINER true @@ -32,9 +34,6 @@ COPY config/kibana-x-pack.yml /usr/share/kibana/config/kibana.yml # variables and translate them to Kibana CLI options. COPY bin/kibana-docker /usr/local/bin/ -# Add a self-signed SSL certificate for use in examples. -#COPY ssl/kibana.example.org.* /usr/share/kibana/config/ - # Provide a non-root user to run the process. RUN groupadd --gid 1000 kibana && \ useradd --uid 1000 --gid 1000 \ @@ -42,8 +41,5 @@ RUN groupadd --gid 1000 kibana && \ kibana USER kibana -# Beats specific check for XPACK to have both variables in one -RUN if [ ${XPACK} = "1" ]; then NODE_OPTIONS="--max-old-space-size=4096" bin/kibana-plugin install ${DOWNLOAD_URL}/kibana-plugins/x-pack/x-pack-${ELASTIC_VERSION}.zip?c=${CACHE_BUST}; fi - CMD ["/bin/bash", "/usr/local/bin/kibana-docker"] diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/kibana/download.sh b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/download.sh new file mode 100755 index 00000000..5e8a423d --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/kibana/download.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -eo pipefail + +if [ -z ${DOWNLOAD_URL+x} ]; then echo "DOWNLOAD_URL is unset"; exit 1; fi +if [ -z ${ELASTIC_VERSION+x} ]; then echo "ELASTIC_VERSION is unset"; exit 1; fi +if [ -z ${IMAGE_FLAVOR+x} ]; then echo "IMAGE_FLAVOR is unset"; exit 1; fi + +url=${DOWNLOAD_URL}/kibana/kibana-oss/kibana-oss-${ELASTIC_VERSION}-linux-x86_64.tar.gz +if [ "${IMAGE_FLAVOR}" = "x-pack" ]; then + url=${DOWNLOAD_URL}/kibana/kibana-${ELASTIC_VERSION}-linux-x86_64.tar.gz +fi + +# Download. +curl -s -L -o kibana-${ELASTIC_VERSION}-linux-x86_64.tar.gz $url + +# Validate SHA512. 
+expected_sha=$(curl -s -L $url.sha512 | awk '{print $1}') +observed_sha=$(sha512sum kibana-${ELASTIC_VERSION}-linux-x86_64.tar.gz | awk '{print $1}') +test "${expected_sha}" == "${observed_sha}" diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile index eaa88ed4..8a55dda5 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/Dockerfile @@ -1,31 +1,52 @@ -FROM java:8-jre - -RUN apt-get update && \ - apt-get install -y netcat +FROM centos:7 +LABEL maintainer "Elastic Docker Team " +# Beats variables. ARG DOWNLOAD_URL ARG ELASTIC_VERSION ARG CACHE_BUST=1 +ARG IMAGE_FLAVOR=x-pack + +# Install Java and the "which" command, which is needed by Logstash's shell +# scripts. +RUN yum update -y && yum install -y java-1.8.0-openjdk-devel which && \ + yum clean all + +# Provide a non-root user to run the process. +RUN groupadd --gid 1000 logstash && \ + adduser --uid 1000 --gid 1000 \ + --home-dir /usr/share/logstash --no-create-home \ + logstash + +# Add Logstash itself. +COPY download.sh /download.sh +RUN /download.sh $DOWNLOAD_URL $ELASTIC_VERSION $CACHE_BUST && rm /download.sh +RUN tar zxf logstash-${ELASTIC_VERSION}.tar.gz -C /usr/share && \ + mv /usr/share/logstash-${ELASTIC_VERSION} /usr/share/logstash && \ + chown --recursive logstash:logstash /usr/share/logstash/ && \ + ln -s /usr/share/logstash /opt/logstash + +WORKDIR /usr/share/logstash + +ENV ELASTIC_CONTAINER true +ENV PATH=/usr/share/logstash/bin:$PATH + +# Provide a minimal configuration, so that simple invocations will provide +# a good experience. +ADD config/pipelines.yml config/pipelines.yml +ADD config/logstash-${IMAGE_FLAVOR}.yml config/logstash.yml +ADD config/log4j2.properties config/ +ADD pipeline/default.conf pipeline/logstash.conf +ADD pki /etc/pki +RUN chown --recursive logstash:logstash config/ pipeline/ + +# Ensure Logstash gets a UTF-8 locale by default. +ENV LANG='en_US.UTF-8' LC_ALL='en_US.UTF-8' + +HEALTHCHECK --interval=1s --retries=600 CMD curl -f http://localhost:9600/_node/stats -ENV URL ${DOWNLOAD_URL}/logstash/logstash-${ELASTIC_VERSION}.tar.gz -ENV PATH $PATH:/opt/logstash-${ELASTIC_VERSION}/bin - -# As all snapshot builds have the same url, the image is cached. 
The date at then can be used to invalidate the image -RUN set -x && \ - cd /opt && \ - wget -qO logstash.tar.gz $URL?${CACHE_BUST} && \ - tar xzf logstash.tar.gz - +EXPOSE 5044 5055 9600 -COPY logstash.conf.tmpl /logstash.conf.tmpl COPY docker-entrypoint.sh /entrypoint.sh - -COPY pki /etc/pki - -HEALTHCHECK --interval=1s --retries=600 CMD nc -z localhost 5044 - ENTRYPOINT ["/entrypoint.sh"] - -EXPOSE 5044 5055 9600 - -CMD logstash -f /logstash.conf --log.level=debug --config.debug --http.host=0.0.0.0 +CMD logstash diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/log4j2.properties b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/log4j2.properties new file mode 100644 index 00000000..36bc4514 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/log4j2.properties @@ -0,0 +1,16 @@ +status = error +name = LogstashPropertiesConfig + +appender.console.type = Console +appender.console.name = plain_console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +appender.json_console.type = Console +appender.json_console.name = json_console +appender.json_console.layout.type = JSONLayout +appender.json_console.layout.compact = true +appender.json_console.layout.eventEol = true + +rootLogger.level = ${sys:ls.log.level} +rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/logstash-oss.yml b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/logstash-oss.yml new file mode 100644 index 00000000..342d19af --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/logstash-oss.yml @@ -0,0 +1 @@ +http.host: "0.0.0.0" diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/logstash-x-pack.yml b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/logstash-x-pack.yml new file mode 100644 index 00000000..7c25c459 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/logstash-x-pack.yml @@ -0,0 +1,4 @@ +http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.url: http://elasticsearch:9200 +xpack.monitoring.elasticsearch.username: logstash_system +xpack.monitoring.elasticsearch.password: changeme diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/pipelines.yml b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/pipelines.yml new file mode 100644 index 00000000..aed22ce7 --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/config/pipelines.yml @@ -0,0 +1,6 @@ +# This file is where you define your pipelines. You can define multiple. 
+# For more information on multiple pipelines, see the documentation: +# https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html + +- pipeline.id: main + path.config: "/usr/share/logstash/pipeline" diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh index 020c3c9a..71fa83de 100755 --- a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/docker-entrypoint.sh @@ -15,7 +15,6 @@ readParams() { # Use default ports if not specified. : ${ES_PORT:=9200} - : ${REDIS_PORT:=6379} } es_url() { @@ -58,13 +57,8 @@ waitForElasticsearch() { exit 1 } -updateConfigFile() { - sed -e "s/hosts.*/hosts => [\"$ES_HOST:$ES_PORT\"]/" /logstash.conf.tmpl > /logstash.conf -} - # Main readParams -updateConfigFile waitForElasticsearch exec "$@" diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/download.sh b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/download.sh new file mode 100755 index 00000000..868183bd --- /dev/null +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/download.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -eo pipefail + +if [ -z ${DOWNLOAD_URL+x} ]; then echo "DOWNLOAD_URL is unset"; exit 1; fi +if [ -z ${ELASTIC_VERSION+x} ]; then echo "ELASTIC_VERSION is unset"; exit 1; fi +if [ -z ${IMAGE_FLAVOR+x} ]; then echo "IMAGE_FLAVOR is unset"; exit 1; fi + +url=${DOWNLOAD_URL}/logstash/logstash-oss/logstash-oss-${ELASTIC_VERSION}.tar.gz +if [ "${IMAGE_FLAVOR}" = "x-pack" ]; then + url=${DOWNLOAD_URL}/logstash/logstash-${ELASTIC_VERSION}.tar.gz +fi + +# Download. +curl -s -L -o logstash-${ELASTIC_VERSION}.tar.gz $url + +# Validate SHA512. 
+expected_sha=$(curl -s -L $url.sha512 | awk '{print $1}') +observed_sha=$(sha512sum logstash-${ELASTIC_VERSION}.tar.gz | awk '{print $1}') +test "${expected_sha}" == "${observed_sha}" diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/logstash.conf.tmpl b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/pipeline/default.conf similarity index 86% rename from vendor/github.com/elastic/beats/testing/environments/docker/logstash/logstash.conf.tmpl rename to vendor/github.com/elastic/beats/testing/environments/docker/logstash/pipeline/default.conf index 26ae7751..d4b11370 100644 --- a/vendor/github.com/elastic/beats/testing/environments/docker/logstash/logstash.conf.tmpl +++ b/vendor/github.com/elastic/beats/testing/environments/docker/logstash/pipeline/default.conf @@ -15,9 +15,7 @@ input { output { elasticsearch { - hosts => [] - #user => "beats" - #password => "testing" + hosts => ["${ES_HOST:elasticsearch}:${ES_PORT:9200}"] index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" document_type => "%{[@metadata][type]}" } diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/metricbeat/Dockerfile-5.0.0-cgroups b/vendor/github.com/elastic/beats/testing/environments/docker/metricbeat/Dockerfile-5.0.0-cgroups deleted file mode 100644 index 0591f6b9..00000000 --- a/vendor/github.com/elastic/beats/testing/environments/docker/metricbeat/Dockerfile-5.0.0-cgroups +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Monica Sarbu - -ENV METRICBEAT_FILE=metricbeat-6.0.0-alpha1-SNAPSHOT-linux-x86_64 - -# Cache variable can be set during building to invalidate the build cache with `--build-arg CACHE=$(date +%s) .` -ARG CACHE=1 - -ADD https://beats-nightlies.s3.amazonaws.com/metricbeat/$METRICBEAT_FILE.tar.gz?${CACHE} /$METRICBEAT_FILE.tar.gz - -RUN tar -xzvf $METRICBEAT_FILE.tar.gz && \ - ln -s $METRICBEAT_FILE metricbeat - -EXPOSE 8080 -ENTRYPOINT ["/metricbeat/metricbeat", "-httpprof", "0.0.0.0:8080", "-c", "/metricbeat/metricbeat.yml", "-e", "-v"] diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/metricbeat/config/metricbeat.cgroups.yml b/vendor/github.com/elastic/beats/testing/environments/docker/metricbeat/config/metricbeat.cgroups.yml deleted file mode 100644 index d04c7d63..00000000 --- a/vendor/github.com/elastic/beats/testing/environments/docker/metricbeat/config/metricbeat.cgroups.yml +++ /dev/null @@ -1,103 +0,0 @@ -###################### Metricbeat Configuration Example ####################### - -# This file is an example configuration file highlighting only the most common -# options. The metricbeat.reference.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. 
-# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/metricbeat/index.html - -#========================== Modules configuration ============================ -metricbeat.modules: - -#------------------------------- System Module ------------------------------- -- module: system - metricsets: - # CPU stats - - cpu - - # System Load stats - - load - - # Per CPU core stats - #- core - - # IO stats - #- diskio - - # Per filesystem stats - - filesystem - - # File system summary stats - #- fsstat - - # Memory stats - - memory - - # Network stats - - network - - # Per process stats - - process - enabled: true - period: 10s - processes: ['.*'] - cgroups: true - - - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "elastic" - #password: "changeme" - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional SSL. By default is off. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is info. -# Available log levels are: critical, error, warning, info, debug -#logging.level: debug - -# At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", -# "publish", "service". -#logging.selectors: ["*"] diff --git a/vendor/github.com/elastic/beats/testing/environments/latest.yml b/vendor/github.com/elastic/beats/testing/environments/latest.yml index 88ebb42c..23ef67ff 100644 --- a/vendor/github.com/elastic/beats/testing/environments/latest.yml +++ b/vendor/github.com/elastic/beats/testing/environments/latest.yml @@ -3,7 +3,8 @@ version: '2.1' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:6.0.0 + # TODO: For 6.3 remove "-platinum". 
+ image: docker.elastic.co/elasticsearch/elasticsearch:6.3.0 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] environment: @@ -14,17 +15,15 @@ services: - "xpack.security.enabled=false" logstash: - build: - context: docker/logstash - dockerfile: Dockerfile - args: - ELASTIC_VERSION: 6.0.0 - DOWNLOAD_URL: https://artifacts.elastic.co/downloads - environment: - - ES_HOST=elasticsearch + image: docker.elastic.co/logstash/logstash:6.3.0 + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] + volumes: + - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro + - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:6.0.0 + image: docker.elastic.co/kibana/kibana:6.3.0 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:5601"] retries: 6 diff --git a/vendor/github.com/elastic/beats/testing/environments/snapshot-noxpack.yml b/vendor/github.com/elastic/beats/testing/environments/snapshot-noxpack.yml index cdd3ea2b..cd62dcd3 100644 --- a/vendor/github.com/elastic/beats/testing/environments/snapshot-noxpack.yml +++ b/vendor/github.com/elastic/beats/testing/environments/snapshot-noxpack.yml @@ -8,9 +8,9 @@ services: service: args build: context: ./docker/elasticsearch - dockerfile: Dockerfile-snapshot + dockerfile: Dockerfile args: - XPACK: 0 + IMAGE_FLAVOR: oss environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - "network.host=" @@ -25,7 +25,7 @@ services: context: ./docker/logstash dockerfile: Dockerfile args: - XPACK: 0 + IMAGE_FLAVOR: oss environment: - ES_HOST=elasticsearch @@ -35,4 +35,6 @@ services: service: args build: context: ./docker/kibana - dockerfile: Dockerfile-snapshot + dockerfile: Dockerfile + args: + IMAGE_FLAVOR: oss diff --git a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml index 1874182d..f01bb080 100644 --- a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml +++ b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml @@ -9,8 +9,6 @@ services: build: context: ./docker/elasticsearch dockerfile: Dockerfile - args: - XPACK: 1 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - "network.host=" @@ -35,5 +33,3 @@ services: build: context: ./docker/kibana dockerfile: Dockerfile - args: - XPACK: 1 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/LICENSE index 2a7cfd2b..c8364161 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2012-2013 Dave Collins +ISC License + +Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypass.go index 565bf589..8a4a6589 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that 
the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe package spew diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 457e4123..1fe3cf3d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe package spew diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/common.go index 14f02dc1..7c519ff4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/common.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/config.go index ee1ab07b..2e3d22f3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/config.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -64,9 +64,18 @@ type ConfigState struct { // inside these interface methods. As a result, this option relies on // access to the unsafe package, so it will not have any effect when // running in environments without access to the unsafe package such as - // Google App Engine or with the "disableunsafe" build tag specified. + // Google App Engine or with the "safe" build tag specified. 
DisablePointerMethods bool + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + // ContinueOnMethod specifies whether or not recursion should continue once // a custom error or Stringer interface is invoked. The default, false, // means it will print the results of invoking the custom error or Stringer diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/doc.go index 5be0c406..aacaac6f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -91,6 +91,15 @@ The following configuration options are available: which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/dump.go index a0ff95e2..df1d582a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) { d.w.Write(closeParenBytes) // Display pointer information. 
- if len(pointerChain) > 0 { + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { d.w.Write(openParenBytes) for i, addr := range pointerChain { if i > 0 { @@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) { case reflect.Map, reflect.String: valueLen = v.Len() } - if valueLen != 0 || valueCap != 0 { + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { d.w.Write(openParenBytes) if valueLen != 0 { d.w.Write(lenEqualsBytes) printInt(d.w, int64(valueLen), 10) } - if valueCap != 0 { + if !d.cs.DisableCapacities && valueCap != 0 { if valueLen != 0 { d.w.Write(spaceBytes) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/format.go index ecf3b80e..c49875ba 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/format.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/spew.go index d8233f54..32c0e338 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/spew.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/CHANGELOG.md index 70922cad..b57a42b9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/CHANGELOG.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/CHANGELOG.md @@ -2,6 +2,27 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/). +## [0.2.1] + +### Added + +- Added better error messages for when `NewAuditClient` fails due to the + Linux kernel not supporting auditing (CONFIG_AUDIT=n). #32 + +## [0.2.0] + +### Changed + +- auparse - Fixed parsing of apparmor AVC messages. #25 +- auparse - Update syscall and audit message type tables for Linux 4.16. +- aucoalesce - Cache UID/GID values for one minute. #24 + +## [0.1.1] + +- rules - Detect s390 or s390x as the runtime architecture (GOOS) and + automatically use the appropriate syscall name to number table without + requiring the rule to explicitly specify an arch (`-F arch=s390x`). #23 + ## [0.1.0] ### Changed diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/LICENSE deleted file mode 100644 index f2eb0024..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2017 Elasticsearch Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/LICENSE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/NOTICE.txt new file mode 100644 index 00000000..9e299aad --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/NOTICE.txt @@ -0,0 +1,5 @@ +Elastic go-libaudit +Copyright 2017-2018 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/coalesce.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/coalesce.go index b0b040b5..34e4986a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/coalesce.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/coalesce.go @@ -1,16 +1,19 @@ -// Copyright 2017-2018 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
// Package aucoalesce provides functions to coalesce compound audit messages // into a single event and normalize all message types with some common fields. @@ -429,7 +432,22 @@ func applyNormalization(event *Event) { syscall := event.Data["syscall"] norm = syscallNorms[syscall] } else { - norm = recordTypeNorms[event.Type.String()] + norms := recordTypeNorms[event.Type.String()] + switch len(norms) { + case 0: + // No normalization found. + case 1: + norm = norms[0] + default: + for _, n := range norms { + for _, f := range n.HasFields.Values { + if _, found := event.Data[f]; !found { + continue + } + } + norm = n + } + } } if norm == nil { event.Warnings = append(event.Warnings, errors.New("no normalization found for event")) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/event_type.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/event_type.go index 3019b3f2..6b3c4f3b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/event_type.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/event_type.go @@ -1,16 +1,19 @@ -// Copyright 2017 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package aucoalesce diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/id_lookup.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/id_lookup.go index 9ec325ce..0bd47255 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/id_lookup.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/id_lookup.go @@ -1,16 +1,19 @@ -// Copyright 2017-2018 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package aucoalesce @@ -21,11 +24,11 @@ import ( "time" ) -const cacheTimeout = 0 +const cacheTimeout = time.Minute var ( - userLookup = NewUserCache() - groupLookup = NewGroupCache() + userLookup = NewUserCache(cacheTimeout) + groupLookup = NewGroupCache(cacheTimeout) ) type stringItem struct { @@ -38,12 +41,18 @@ func (i *stringItem) isExpired() bool { } // UserCache is a cache of UID to username. -type UserCache map[string]stringItem +type UserCache struct { + expiration time.Duration + data map[string]stringItem +} -// NewUserCache returns a new UserCache. -func NewUserCache() UserCache { - return map[string]stringItem{ - "0": {timeout: time.Unix(math.MaxInt64, 0), value: "root"}, +// NewUserCache returns a new UserCache. UserCache is not thread-safe. +func NewUserCache(expiration time.Duration) UserCache { + return UserCache{ + expiration: expiration, + data: map[string]stringItem{ + "0": {timeout: time.Unix(math.MaxInt64, 0), value: "root"}, + }, } } @@ -55,28 +64,34 @@ func (c UserCache) LookupUID(uid string) string { return "" } - if item, found := c[uid]; found && !item.isExpired() { + if item, found := c.data[uid]; found && !item.isExpired() { return item.value } // Cache the value (even on error). user, err := user.LookupId(uid) if err != nil { - c[uid] = stringItem{timeout: time.Now().Add(cacheTimeout), value: ""} + c.data[uid] = stringItem{timeout: time.Now().Add(c.expiration), value: ""} return "" } - c[uid] = stringItem{timeout: time.Now().Add(cacheTimeout), value: user.Username} + c.data[uid] = stringItem{timeout: time.Now().Add(c.expiration), value: user.Username} return user.Username } // GroupCache is a cache of GID to group name. -type GroupCache map[string]stringItem +type GroupCache struct { + expiration time.Duration + data map[string]stringItem +} -// NewGroupCache returns a new GroupCache. -func NewGroupCache() GroupCache { - return map[string]stringItem{ - "0": {timeout: time.Unix(math.MaxInt64, 0), value: "root"}, +// NewGroupCache returns a new GroupCache. GroupCache is not thread-safe. +func NewGroupCache(expiration time.Duration) GroupCache { + return GroupCache{ + expiration: expiration, + data: map[string]stringItem{ + "0": {timeout: time.Unix(math.MaxInt64, 0), value: "root"}, + }, } } @@ -88,29 +103,36 @@ func (c GroupCache) LookupGID(gid string) string { return "" } - if item, found := c[gid]; found && !item.isExpired() { + if item, found := c.data[gid]; found && !item.isExpired() { return item.value } // Cache the value (even on error). 
group, err := user.LookupGroupId(gid) if err != nil { - c[gid] = stringItem{timeout: time.Now().Add(cacheTimeout), value: ""} + c.data[gid] = stringItem{timeout: time.Now().Add(c.expiration), value: ""} return "" } - c[gid] = stringItem{timeout: time.Now().Add(cacheTimeout), value: group.Name} + c.data[gid] = stringItem{timeout: time.Now().Add(c.expiration), value: group.Name} return group.Name } // ResolveIDs translates all uid and gid values to their associated names. -// This requires cgo on Linux. +// Prior to Go 1.9 this requires cgo on Linux. UID and GID values are cached +// for 60 seconds from the time they are read. func ResolveIDs(event *Event) { + ResolveIDsFromCaches(event, userLookup, groupLookup) +} + +// ResolveIDsFromCaches translates all uid and gid values to their associated +// names using the provided caches. Prior to Go 1.9 this requires cgo on Linux. +func ResolveIDsFromCaches(event *Event, users UserCache, groups GroupCache) { // Actor - if v := userLookup.LookupUID(event.Summary.Actor.Primary); v != "" { + if v := users.LookupUID(event.Summary.Actor.Primary); v != "" { event.Summary.Actor.Primary = v } - if v := userLookup.LookupUID(event.Summary.Actor.Secondary); v != "" { + if v := users.LookupUID(event.Summary.Actor.Secondary); v != "" { event.Summary.Actor.Secondary = v } @@ -118,11 +140,11 @@ func ResolveIDs(event *Event) { names := map[string]string{} for key, id := range event.User.IDs { if strings.HasSuffix(key, "uid") { - if v := userLookup.LookupUID(id); v != "" { + if v := users.LookupUID(id); v != "" { names[key] = v } } else if strings.HasSuffix(key, "gid") { - if v := groupLookup.LookupGID(id); v != "" { + if v := groups.LookupGID(id); v != "" { names[key] = v } } @@ -134,10 +156,10 @@ func ResolveIDs(event *Event) { // File owner/group if event.File != nil { if event.File.UID != "" { - event.File.Owner = userLookup.LookupUID(event.File.UID) + event.File.Owner = users.LookupUID(event.File.UID) } if event.File.GID != "" { - event.File.Group = groupLookup.LookupGID(event.File.GID) + event.File.Group = groups.LookupGID(event.File.GID) } } } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalizations.yaml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalizations.yaml index b82e01c1..622cf512 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalizations.yaml +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalizations.yaml @@ -370,7 +370,19 @@ normalizations: primary: scontext object: primary: tcontext + secondary: tclass record_types: AVC + has_fields: + - seresult +- + action: violated-apparmor-policy + object: + primary: operation + secondary: [requested_mask, denied_mask, capname] + what: policy + record_types: AVC + has_fields: + - apparmor - action: changed-group record_types: CHGRP_ID diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalize.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalize.go index 44d8fb6a..2004bcc9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalize.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/normalize.go @@ -1,16 +1,19 @@ -// Copyright 2017-2018 Elasticsearch Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package aucoalesce @@ -25,7 +28,7 @@ import ( var ( syscallNorms map[string]*Normalization - recordTypeNorms map[string]*Normalization + recordTypeNorms map[string][]*Normalization ) func init() { @@ -69,6 +72,7 @@ type Normalization struct { RecordTypes Strings `yaml:"record_types"` Syscalls Strings `yaml:"syscalls"` SourceIP Strings `yaml:"source_ip"` + HasFields Strings `yaml:"has_fields"` } type SubjectMapping struct { @@ -83,32 +87,33 @@ type ObjectMapping struct { PathIndex int `yaml:"path_index"` } -type HowMapping struct { - FieldName string `yaml:"field"` -} - -func LoadNormalizationConfig(b []byte) (syscalls map[string]*Normalization, recordTypes map[string]*Normalization, err error) { +func LoadNormalizationConfig(b []byte) (syscalls map[string]*Normalization, recordTypes map[string][]*Normalization, err error) { c := &NormalizationConfig{} if err := yaml.Unmarshal(b, c); err != nil { return nil, nil, err } syscalls = map[string]*Normalization{} - recordTypes = map[string]*Normalization{} + recordTypes = map[string][]*Normalization{} for i := range c.Normalizations { norm := c.Normalizations[i] for _, syscall := range norm.Syscalls.Values { if _, found := syscalls[syscall]; found { - return nil, nil, fmt.Errorf("duplication mappings for sycall %v", syscall) + return nil, nil, fmt.Errorf("duplication mappings for syscall %v", syscall) } syscalls[syscall] = &norm } for _, recordType := range norm.RecordTypes.Values { - if _, found := recordTypes[recordType]; found { - return nil, nil, fmt.Errorf("duplication mappings for record_type %v", recordType) + norms, found := recordTypes[recordType] + if found { + for _, n := range norms { + if len(n.HasFields.Values) == 0 { + return nil, nil, fmt.Errorf("duplication mappings for record_type %v without has_fields qualifier", recordType) + } + } } - recordTypes[recordType] = &norm + recordTypes[recordType] = append(norms, &norm) } } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/znormalize_data.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/znormalize_data.go index 108080f3..5f6c14be 100644 --- 
a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/znormalize_data.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/aucoalesce/znormalize_data.go @@ -1,19 +1,21 @@ -// mknormalize_data.go -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT - -// Copyright 2017-2018 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by mknormalize_data.go - DO NOT EDIT. package aucoalesce @@ -29,7 +31,7 @@ func asset(key string) ([]byte, error) { assets = map[string][]byte{} var value []byte - value, _ = base64.StdEncoding.DecodeString("---
# Macros declares some YAML anchors that can be referenced for some common
# object type normalizations like user-session, socket, or process.
macros:
- &defaults
  subject:
    primary: auid
    secondary: uid
  how: [exe, comm]

- &macro-user-session
  subject:
    primary: auid
    secondary: [acct, id, uid]
  object:
    primary: terminal
    secondary: [addr, hostname]
    what: user-session
  how: [exe, terminal]

- &macro-socket
  <<: *defaults
  object:
    primary: [addr, path]
    secondary: port
    what: socket

- &macro-process
  <<: *defaults
  object:
    primary: [cmd, exe, comm]
    secondary: pid
    what: process
  how: terminal

# Normalizations is a list of declarations specifying how to normalize the data
# contained in an event. The normalization can be applied based on the syscall
# name (e.g. connect, open) or based on the record type (e.g. USER_LOGIN).
# No two normalizations can apply to the same syscall or record type. This
# will result in a failure at load time.
#
# Each normalization should specify:
#   action - what happened
#   actor  - who did this or who triggered the event
#   object - what was the "thing" involved in the action (e.g. process, socket)
#   how    - how was the action performed (e.g. exe or terminal)
normalizations:
-
  action: opened-file
  object:
    what: file
  syscalls:
  - creat
  - fallocate
  - truncate
  - ftruncate
  - open
  - openat
  - readlink
  - readlinkat
-
  action: changed-file-attributes-of
  object:
    what: file
  syscalls:
  - setxattr
  - fsetxattr
  - lsetxattr
  - removexattr
  - fremovexattr
  - lremovexattr
-
  action: changed-file-permissions-of
  object:
    what: file
  syscalls:
  - chmod
  - fchmod
  - fchmodat
-
  action: changed-file-ownership-of
  object:
    what: file
  syscalls:
  - chown
  - fchown
  - fchownat
  - lchown
-
  action: loaded-kernel-module
  object:
    what: file
    primary: name
  record_types:
  - KERN_MODULE
  syscalls:
  - finit_module
  - init_module
-
  action: unloaded-kernel-module
  object:
    what: file
  syscalls:
  - delete_module
-
  action: created-directory
  object:
    what: file
    path_index: 1
  syscalls:
  - mkdir
  - mkdirat
-
  action: mounted
  object:
    what: filesystem
    path_index: 1
  syscalls:
  - mount
-
  action: renamed
  object:
    what: file
    path_index: 2
  syscalls:
  - rename
  - renameat
  - renameat2
-
  action: checked-metadata-of
  object:
    what: file
  syscalls:
  - access
  - faccessat
  - newfstatat
  - stat
  - fstat
  - lstat
  - stat64
  - getxattr
  - lgetxattr
  - fgetxattr
-
  action: checked-filesystem-metadata-of
  object:
    what: filesystem
  syscalls:
  - statfs
  - fstatfs
-
  action: symlinked
  object:
    what: file
  syscalls:
  - symlink
  - symlinkat
-
  action: unmounted
  object:
    what: filesystem
  syscalls:
  - umount2
-
  action: deleted
  object:
    what: file
  syscalls:
  - rmdir
  - unlink
  - unlinkat
-
  action: changed-timestamp-of
  object:
    what: file
  syscalls:
  - utime
  - utimes
  - futimesat
  - futimens
  - utimensat
-
  action: executed
  object:
    what: file
  syscalls:
  - execve
  - execveat
-
  action: listen-for-connections
  object:
    what: socket
  syscalls:
  - listen
-
  action: accepted-connection-from
  object:
    what: socket
  syscalls:
  - accept
  - accept4
-
  action: bound-socket
  object:
    what: socket
  syscalls:
  - bind
-
  action: connected-to
  object:
    what: socket
  syscalls:
  - connect
-
  action: received-from
  object:
    what: socket
  syscalls:
  - recvfrom
  - recvmsg
-
  action: sent-to
  object:
    what: socket
  syscalls:
  - sendto
  - sendmsg
-
  action: killed-pid
  object:
    what: process
  syscalls:
  - kill
  - tkill
  - tgkill
-
  action: changed-identity-of
  object:
    what: process
  how: syscall
  syscalls:
  - setuid
  - seteuid
  - setfsuid
  - setreuid
  - setresuid
  - setgid
  - setegid
  - setfsgid
  - setregid
  - setresgid
-
  action: changed-system-time
  object:
    what: system
  syscalls:
  - settimeofday
  - clock_settime
  - stime
  - adjtimex
-
  action: make-device
  object:
    what: file
  syscalls:
  - mknod
  - mknodat
-
  action: changed-system-name
  object:
    what: system
  syscalls:
  - sethostname
  - setdomainname
-
  action: allocated-memory
  object:
    what: memory
  syscalls:
  - mmap
  - brk
-
  action: adjusted-scheduling-policy-of
  object:
    what: process
  how: syscall
  syscalls:
  - sched_setparam
  - sched_setscheduler
  - sched_setattr
-
  action: caused-mac-policy-error
  object:
    what: system
  record_types: SELINUX_ERR
-
  action: loaded-firewall-rule-to
  object:
    primary: table
    what: firewall
  record_types: NETFILTER_CFG
-
  # Could be entered or exited based on prom field.
  action: changed-promiscuous-mode-on-device
  object:
    primary: dev
    what: network-device
  record_types: ANOM_PROMISCUOUS
-
  action: locked-account
  record_types: ACCT_LOCK
-
  action: unlocked-account
  record_types: ACCT_UNLOCK
-
  action: added-group-account-to
  object:
    primary: [id, acct]
    what: account
  record_types: ADD_GROUP
-
  action: added-user-account
  object:
    primary: [id, acct]
    what: account
  record_types: ADD_USER
-
  action: crashed-program
  object:
    primary: [comm, exe]
    secondary: pid
    what: process
  how: sig
  record_types: ANOM_ABEND
-
  action: attempted-execution-of-forbidden-program
  object:
    primary: cmd
    what: process
  how: terminal
  record_types: ANOM_EXEC
-
  action: used-suspcious-link
  record_types: ANOM_LINK
-
  <<: *macro-user-session
  action: failed-log-in-too-many-times-to
  record_types: ANOM_LOGIN_FAILURES
-
  <<: *macro-user-session
  action: attempted-log-in-from-unusual-place-to
  record_types: ANOM_LOGIN_LOCATION
-
  <<: *macro-user-session
  action: opened-too-many-sessions-to
  record_types: ANOM_LOGIN_SESSIONS
-
  <<: *macro-user-session
  action: attempted-log-in-during-unusual-hour-to
  record_types: ANOM_LOGIN_TIME
-
  action: tested-file-system-integrity-of
  object:
    primary: hostname
    what: filesystem
  record_types: ANOM_RBAC_INTEGRITY_FAIL
-
  action: violated-selinux-policy
  subject:
    primary: scontext
  object:
    primary: tcontext
  record_types: AVC
-
  action: changed-group
  record_types: CHGRP_ID
-
  action: changed-user-id
  record_types: CHUSER_ID
-
  action: changed-audit-configuration
  object:
    primary: [op, key, audit_enabled, audit_pid, audit_backlog_limit, audit_failure]
    what: audit-config
  record_types: CONFIG_CHANGE
-
  <<: *macro-user-session
  action: acquired-credentials
  record_types: CRED_ACQ
-
  <<: *macro-user-session
  action: disposed-credentials
  record_types: CRED_DISP
-
  <<: *macro-user-session
  action: refreshed-credentials
  record_types: CRED_REFR
-
  <<: *macro-user-session
  action: negotiated-crypto-key
  object:
    primary: fp
    secondary: [addr, hostname]
    what: user-session
  record_types: CRYPTO_KEY_USER
  source_ip: [addr]
-
  action: crypto-officer-logged-in
  record_types: CRYPTO_LOGIN
-
  action: crypto-officer-logged-out
  record_types: CRYPTO_LOGOUT
-
  <<: *macro-user-session
  action: started-crypto-session
  object:
    primary: addr
    secondary: [rport]
  record_types: CRYPTO_SESSION
  source_ip: [addr]
-
  action: access-result
  record_types: DAC_CHECK
-
  action: aborted-auditd-startup
  object:
    what: service
  record_types: DAEMON_ABORT
-
  action: remote-audit-connected
  object:
    what: service
  record_types: DAEMON_ACCEPT
-
  action: remote-audit-disconnected
  object:
    what: service
  record_types: DAEMON_CLOSE
-
  action: changed-auditd-configuration
  object:
    what: service
  record_types: DAEMON_CONFIG
-
  action: shutdown-audit
  object:
    what: service
  record_types: DAEMON_END
-
  action: audit-error
  object:
    what: service
  record_types: DAEMON_ERR
-
  action: reconfigured-auditd
  object:
    what: service
  record_types: DAEMON_RECONFIG
-
  action: resumed-audit-logging
  object:
    what: service
  record_types: DAEMON_RESUME
-
  action: rotated-audit-logs
  object:
    what: service
  record_types: DAEMON_ROTATE
-
  action: started-audit
  object:
    what: service
  record_types: DAEMON_START
-
  action: deleted-group-account-from
  object:
    primary: [id, acct]
    what: account
  record_types: DEL_GROUP
-
  action: deleted-user-account
  object:
    primary: [id, acct]
    what: account
  record_types: DEL_USER
-
  action: changed-audit-feature
  object:
    primary: feature
    what: system
  record_types: FEATURE_CHANGE
-
  action: relabeled-filesystem
  record_types: FS_RELABEL
-
  action: authenticated-to-group
  record_types: GRP_AUTH
-
  <<: *macro-user-session
  action: changed-group-password
  object:
    primary: acct
    what: user-session
  record_types: GRP_CHAUTHTOK
-
  action: modified-group-account
  object:
    primary: [id, acct]
    what: account
  record_types: GRP_MGMT
-
  action: initialized-audit-subsystem
  record_types: KERNEL
-
  action: modified-level-of
  object:
    primary: printer
    what: printer
  record_types: LABEL_LEVEL_CHANGE
-
  action: overrode-label-of
  object:
    what: mac-config
  record_types: LABEL_OVERRIDE
-
  object:
    what: mac-config
  record_types:
  - AUDIT_DEV_ALLOC
  - AUDIT_DEV_DEALLOC
  - AUDIT_FS_RELABEL
  - AUDIT_USER_MAC_POLICY_LOAD
  - AUDIT_USER_MAC_CONFIG_CHANGE
-
  action: changed-login-id-to
  subject:
    primary: [old_auid, old-auid]
    secondary: uid
  object:
    primary: auid
    what: user-session
  record_types: LOGIN
-
  action: mac-permission
  record_types: MAC_CHECK
-
  action: changed-selinux-boolean
  object:
    primary: bool
    what: mac-config
  record_types: MAC_CONFIG_CHANGE
-
  action: loaded-selinux-policy
  object:
    what: mac-config
  record_types: MAC_POLICY_LOAD
-
  action: changed-selinux-enforcement
  object:
    primary: enforcing
    what: mac-config
  record_types: MAC_STATUS
-
  action: assigned-user-role-to
  object:
    primary: [id, acct]
    what: account
  record_types: ROLE_ASSIGN
-
  action: modified-role
  record_types: ROLE_MODIFY
-
  action: removed-use-role-from
  object:
    primary: [id, acct]
    what: account
  record_types: ROLE_REMOVE
-
  action: violated-seccomp-policy
  object:
    primary: syscall
    what: process
  record_types: SECCOMP
-
  action: started-service
  object:
    primary: unit
    what: service
  record_types: SERVICE_START
-
  action: stopped-service
  object:
    primary: unit
    what: service
  record_types: SERVICE_STOP
-
  action: booted-system
  object:
    what: system
  record_types: SYSTEM_BOOT
-
  action: changed-to-runlevel
  object:
    primary: new-level
    what: system
  record_types: SYSTEM_RUNLEVEL
-
  action: shutdown-system
  object:
    what: system
  record_types: SYSTEM_SHUTDOWN
-
  action: sent-test
  record_types: TEST
-
  action: unknown
  record_types: TRUSTED_APP
-
  action: sent-message
  object:
    primary: addr
  record_types: USER
-
  <<: *macro-user-session
  action: was-authorized
  record_types: USER_ACCT
-
  <<: *macro-user-session
  action: authenticated
  record_types: USER_AUTH
-
  action: access-permission
  record_types: USER_AVC
-
  <<: *macro-user-session
  action: changed-password
  record_types: USER_CHAUTHTOK
-
  action: ran-command
  object:
    primary: cmd
    what: process
  record_types: USER_CMD
  description: >
    These messages are from user-space apps, like sudo, that log commands
    being run by a user. The uid contained in these messages is the user's UID at
    the time the command was run. It is not the "target" UID used to run the
    command, which is normally root.
-
  <<: *macro-user-session
  action: ended-session
  record_types: USER_END
-
  <<: *macro-user-session
  action: error
  record_types: USER_ERR
  source_ip: [addr]
-
  <<: *macro-user-session
  action: logged-in
  record_types: USER_LOGIN
  source_ip: [addr]
-
  <<: *macro-user-session
  action: logged-out
  record_types: USER_LOGOUT
-
  action: changed-mac-configuration
  record_types: USER_MAC_CONFIG_CHANGE
-
  action: loaded-mac-policy
  record_types: USER_MAC_POLICY_LOAD
-
  <<: *macro-user-session
  action: modified-user-account
  record_types: USER_MGMT
-
  <<: *macro-user-session
  action: changed-role-to
  object:
    primary: selected-context
    what: user-session
  record_types: USER_ROLE_CHANGE
-
  action: access-error
  record_types: USER_SELINUX_ERR
-
  <<: *macro-user-session
  action: started-session
  record_types: USER_START
  source_ip: [addr]
-
  action: changed-configuration
  object:
    primary: op
    what: system
  record_types: USYS_CONFIG
-
  action: issued-vm-control
  object:
    primary: op
    secondary: vm
    what: virtual-machine
  record_types: VIRT_CONTROL
-
  action: created-vm-image
  record_types: VIRT_CREATE
-
  action: deleted-vm-image
  record_types: VIRT_DESTROY
-
  action: checked-integrity-of
  record_types: VIRT_INTEGRITY_CHECK
-
  action: assigned-vm-id
  object:
    primary: vm
    what: virtual-machine
  record_types: VIRT_MACHINE_ID
-
  action: migrated-vm-from
  record_types: VIRT_MIGRATE_IN
-
  action: migrated-vm-to
  record_types: VIRT_MIGRATE_OUT
-
  action: assigned-vm-resource
  object:
    primary: resrc
    secondary: vm
    what: virtual-machine
  record_types: VIRT_RESOURCE
- action: typed
  object:
    primary: data
    what: keystrokes
  how: [comm, exe]
  record_types:
  - TTY
  - USER_TTY
") + value, _ = base64.StdEncoding.DecodeString("---
# Macros declares some YAML anchors that can be referenced for some common
# object type normalizations like user-session, socket, or process.
macros:
- &defaults
  subject:
    primary: auid
    secondary: uid
  how: [exe, comm]

- &macro-user-session
  subject:
    primary: auid
    secondary: [acct, id, uid]
  object:
    primary: terminal
    secondary: [addr, hostname]
    what: user-session
  how: [exe, terminal]

- &macro-socket
  <<: *defaults
  object:
    primary: [addr, path]
    secondary: port
    what: socket

- &macro-process
  <<: *defaults
  object:
    primary: [cmd, exe, comm]
    secondary: pid
    what: process
  how: terminal

# Normalizations is a list of declarations specifying how to normalize the data
# contained in an event. The normalization can be applied based on the syscall
# name (e.g. connect, open) or based on the record type (e.g. USER_LOGIN).
# No two normalizations can apply to the same syscall or record type. This
# will result in a failure at load time.
#
# Each normalization should specify:
#   action - what happened
#   actor  - who did this or who triggered the event
#   object - what was the "thing" involved in the action (e.g. process, socket)
#   how    - how was the action performed (e.g. exe or terminal)
normalizations:
-
  action: opened-file
  object:
    what: file
  syscalls:
  - creat
  - fallocate
  - truncate
  - ftruncate
  - open
  - openat
  - readlink
  - readlinkat
-
  action: changed-file-attributes-of
  object:
    what: file
  syscalls:
  - setxattr
  - fsetxattr
  - lsetxattr
  - removexattr
  - fremovexattr
  - lremovexattr
-
  action: changed-file-permissions-of
  object:
    what: file
  syscalls:
  - chmod
  - fchmod
  - fchmodat
-
  action: changed-file-ownership-of
  object:
    what: file
  syscalls:
  - chown
  - fchown
  - fchownat
  - lchown
-
  action: loaded-kernel-module
  object:
    what: file
    primary: name
  record_types:
  - KERN_MODULE
  syscalls:
  - finit_module
  - init_module
-
  action: unloaded-kernel-module
  object:
    what: file
  syscalls:
  - delete_module
-
  action: created-directory
  object:
    what: file
    path_index: 1
  syscalls:
  - mkdir
  - mkdirat
-
  action: mounted
  object:
    what: filesystem
    path_index: 1
  syscalls:
  - mount
-
  action: renamed
  object:
    what: file
    path_index: 2
  syscalls:
  - rename
  - renameat
  - renameat2
-
  action: checked-metadata-of
  object:
    what: file
  syscalls:
  - access
  - faccessat
  - newfstatat
  - stat
  - fstat
  - lstat
  - stat64
  - getxattr
  - lgetxattr
  - fgetxattr
-
  action: checked-filesystem-metadata-of
  object:
    what: filesystem
  syscalls:
  - statfs
  - fstatfs
-
  action: symlinked
  object:
    what: file
  syscalls:
  - symlink
  - symlinkat
-
  action: unmounted
  object:
    what: filesystem
  syscalls:
  - umount2
-
  action: deleted
  object:
    what: file
  syscalls:
  - rmdir
  - unlink
  - unlinkat
-
  action: changed-timestamp-of
  object:
    what: file
  syscalls:
  - utime
  - utimes
  - futimesat
  - futimens
  - utimensat
-
  action: executed
  object:
    what: file
  syscalls:
  - execve
  - execveat
-
  action: listen-for-connections
  object:
    what: socket
  syscalls:
  - listen
-
  action: accepted-connection-from
  object:
    what: socket
  syscalls:
  - accept
  - accept4
-
  action: bound-socket
  object:
    what: socket
  syscalls:
  - bind
-
  action: connected-to
  object:
    what: socket
  syscalls:
  - connect
-
  action: received-from
  object:
    what: socket
  syscalls:
  - recvfrom
  - recvmsg
-
  action: sent-to
  object:
    what: socket
  syscalls:
  - sendto
  - sendmsg
-
  action: killed-pid
  object:
    what: process
  syscalls:
  - kill
  - tkill
  - tgkill
-
  action: changed-identity-of
  object:
    what: process
  how: syscall
  syscalls:
  - setuid
  - seteuid
  - setfsuid
  - setreuid
  - setresuid
  - setgid
  - setegid
  - setfsgid
  - setregid
  - setresgid
-
  action: changed-system-time
  object:
    what: system
  syscalls:
  - settimeofday
  - clock_settime
  - stime
  - adjtimex
-
  action: make-device
  object:
    what: file
  syscalls:
  - mknod
  - mknodat
-
  action: changed-system-name
  object:
    what: system
  syscalls:
  - sethostname
  - setdomainname
-
  action: allocated-memory
  object:
    what: memory
  syscalls:
  - mmap
  - brk
-
  action: adjusted-scheduling-policy-of
  object:
    what: process
  how: syscall
  syscalls:
  - sched_setparam
  - sched_setscheduler
  - sched_setattr
-
  action: caused-mac-policy-error
  object:
    what: system
  record_types: SELINUX_ERR
-
  action: loaded-firewall-rule-to
  object:
    primary: table
    what: firewall
  record_types: NETFILTER_CFG
-
  # Could be entered or exited based on prom field.
  action: changed-promiscuous-mode-on-device
  object:
    primary: dev
    what: network-device
  record_types: ANOM_PROMISCUOUS
-
  action: locked-account
  record_types: ACCT_LOCK
-
  action: unlocked-account
  record_types: ACCT_UNLOCK
-
  action: added-group-account-to
  object:
    primary: [id, acct]
    what: account
  record_types: ADD_GROUP
-
  action: added-user-account
  object:
    primary: [id, acct]
    what: account
  record_types: ADD_USER
-
  action: crashed-program
  object:
    primary: [comm, exe]
    secondary: pid
    what: process
  how: sig
  record_types: ANOM_ABEND
-
  action: attempted-execution-of-forbidden-program
  object:
    primary: cmd
    what: process
  how: terminal
  record_types: ANOM_EXEC
-
  action: used-suspcious-link
  record_types: ANOM_LINK
-
  <<: *macro-user-session
  action: failed-log-in-too-many-times-to
  record_types: ANOM_LOGIN_FAILURES
-
  <<: *macro-user-session
  action: attempted-log-in-from-unusual-place-to
  record_types: ANOM_LOGIN_LOCATION
-
  <<: *macro-user-session
  action: opened-too-many-sessions-to
  record_types: ANOM_LOGIN_SESSIONS
-
  <<: *macro-user-session
  action: attempted-log-in-during-unusual-hour-to
  record_types: ANOM_LOGIN_TIME
-
  action: tested-file-system-integrity-of
  object:
    primary: hostname
    what: filesystem
  record_types: ANOM_RBAC_INTEGRITY_FAIL
-
  action: violated-selinux-policy
  subject:
    primary: scontext
  object:
    primary: tcontext
    secondary: tclass
  record_types: AVC
  has_fields:
  - seresult
-
  action: violated-apparmor-policy
  object:
    primary: operation
    secondary: [requested_mask, denied_mask, capname]
    what: policy
  record_types: AVC
  has_fields:
  - apparmor
-
  action: changed-group
  record_types: CHGRP_ID
-
  action: changed-user-id
  record_types: CHUSER_ID
-
  action: changed-audit-configuration
  object:
    primary: [op, key, audit_enabled, audit_pid, audit_backlog_limit, audit_failure]
    what: audit-config
  record_types: CONFIG_CHANGE
-
  <<: *macro-user-session
  action: acquired-credentials
  record_types: CRED_ACQ
-
  <<: *macro-user-session
  action: disposed-credentials
  record_types: CRED_DISP
-
  <<: *macro-user-session
  action: refreshed-credentials
  record_types: CRED_REFR
-
  <<: *macro-user-session
  action: negotiated-crypto-key
  object:
    primary: fp
    secondary: [addr, hostname]
    what: user-session
  record_types: CRYPTO_KEY_USER
  source_ip: [addr]
-
  action: crypto-officer-logged-in
  record_types: CRYPTO_LOGIN
-
  action: crypto-officer-logged-out
  record_types: CRYPTO_LOGOUT
-
  <<: *macro-user-session
  action: started-crypto-session
  object:
    primary: addr
    secondary: [rport]
  record_types: CRYPTO_SESSION
  source_ip: [addr]
-
  action: access-result
  record_types: DAC_CHECK
-
  action: aborted-auditd-startup
  object:
    what: service
  record_types: DAEMON_ABORT
-
  action: remote-audit-connected
  object:
    what: service
  record_types: DAEMON_ACCEPT
-
  action: remote-audit-disconnected
  object:
    what: service
  record_types: DAEMON_CLOSE
-
  action: changed-auditd-configuration
  object:
    what: service
  record_types: DAEMON_CONFIG
-
  action: shutdown-audit
  object:
    what: service
  record_types: DAEMON_END
-
  action: audit-error
  object:
    what: service
  record_types: DAEMON_ERR
-
  action: reconfigured-auditd
  object:
    what: service
  record_types: DAEMON_RECONFIG
-
  action: resumed-audit-logging
  object:
    what: service
  record_types: DAEMON_RESUME
-
  action: rotated-audit-logs
  object:
    what: service
  record_types: DAEMON_ROTATE
-
  action: started-audit
  object:
    what: service
  record_types: DAEMON_START
-
  action: deleted-group-account-from
  object:
    primary: [id, acct]
    what: account
  record_types: DEL_GROUP
-
  action: deleted-user-account
  object:
    primary: [id, acct]
    what: account
  record_types: DEL_USER
-
  action: changed-audit-feature
  object:
    primary: feature
    what: system
  record_types: FEATURE_CHANGE
-
  action: relabeled-filesystem
  record_types: FS_RELABEL
-
  action: authenticated-to-group
  record_types: GRP_AUTH
-
  <<: *macro-user-session
  action: changed-group-password
  object:
    primary: acct
    what: user-session
  record_types: GRP_CHAUTHTOK
-
  action: modified-group-account
  object:
    primary: [id, acct]
    what: account
  record_types: GRP_MGMT
-
  action: initialized-audit-subsystem
  record_types: KERNEL
-
  action: modified-level-of
  object:
    primary: printer
    what: printer
  record_types: LABEL_LEVEL_CHANGE
-
  action: overrode-label-of
  object:
    what: mac-config
  record_types: LABEL_OVERRIDE
-
  object:
    what: mac-config
  record_types:
  - AUDIT_DEV_ALLOC
  - AUDIT_DEV_DEALLOC
  - AUDIT_FS_RELABEL
  - AUDIT_USER_MAC_POLICY_LOAD
  - AUDIT_USER_MAC_CONFIG_CHANGE
-
  action: changed-login-id-to
  subject:
    primary: [old_auid, old-auid]
    secondary: uid
  object:
    primary: auid
    what: user-session
  record_types: LOGIN
-
  action: mac-permission
  record_types: MAC_CHECK
-
  action: changed-selinux-boolean
  object:
    primary: bool
    what: mac-config
  record_types: MAC_CONFIG_CHANGE
-
  action: loaded-selinux-policy
  object:
    what: mac-config
  record_types: MAC_POLICY_LOAD
-
  action: changed-selinux-enforcement
  object:
    primary: enforcing
    what: mac-config
  record_types: MAC_STATUS
-
  action: assigned-user-role-to
  object:
    primary: [id, acct]
    what: account
  record_types: ROLE_ASSIGN
-
  action: modified-role
  record_types: ROLE_MODIFY
-
  action: removed-use-role-from
  object:
    primary: [id, acct]
    what: account
  record_types: ROLE_REMOVE
-
  action: violated-seccomp-policy
  object:
    primary: syscall
    what: process
  record_types: SECCOMP
-
  action: started-service
  object:
    primary: unit
    what: service
  record_types: SERVICE_START
-
  action: stopped-service
  object:
    primary: unit
    what: service
  record_types: SERVICE_STOP
-
  action: booted-system
  object:
    what: system
  record_types: SYSTEM_BOOT
-
  action: changed-to-runlevel
  object:
    primary: new-level
    what: system
  record_types: SYSTEM_RUNLEVEL
-
  action: shutdown-system
  object:
    what: system
  record_types: SYSTEM_SHUTDOWN
-
  action: sent-test
  record_types: TEST
-
  action: unknown
  record_types: TRUSTED_APP
-
  action: sent-message
  object:
    primary: addr
  record_types: USER
-
  <<: *macro-user-session
  action: was-authorized
  record_types: USER_ACCT
-
  <<: *macro-user-session
  action: authenticated
  record_types: USER_AUTH
-
  action: access-permission
  record_types: USER_AVC
-
  <<: *macro-user-session
  action: changed-password
  record_types: USER_CHAUTHTOK
-
  action: ran-command
  object:
    primary: cmd
    what: process
  record_types: USER_CMD
  description: >
    These messages are from user-space apps, like sudo, that log commands
    being run by a user. The uid contained in these messages is the user's UID at
    the time the command was run. It is not the "target" UID used to run the
    command, which is normally root.
-
  <<: *macro-user-session
  action: ended-session
  record_types: USER_END
-
  <<: *macro-user-session
  action: error
  record_types: USER_ERR
  source_ip: [addr]
-
  <<: *macro-user-session
  action: logged-in
  record_types: USER_LOGIN
  source_ip: [addr]
-
  <<: *macro-user-session
  action: logged-out
  record_types: USER_LOGOUT
-
  action: changed-mac-configuration
  record_types: USER_MAC_CONFIG_CHANGE
-
  action: loaded-mac-policy
  record_types: USER_MAC_POLICY_LOAD
-
  <<: *macro-user-session
  action: modified-user-account
  record_types: USER_MGMT
-
  <<: *macro-user-session
  action: changed-role-to
  object:
    primary: selected-context
    what: user-session
  record_types: USER_ROLE_CHANGE
-
  action: access-error
  record_types: USER_SELINUX_ERR
-
  <<: *macro-user-session
  action: started-session
  record_types: USER_START
  source_ip: [addr]
-
  action: changed-configuration
  object:
    primary: op
    what: system
  record_types: USYS_CONFIG
-
  action: issued-vm-control
  object:
    primary: op
    secondary: vm
    what: virtual-machine
  record_types: VIRT_CONTROL
-
  action: created-vm-image
  record_types: VIRT_CREATE
-
  action: deleted-vm-image
  record_types: VIRT_DESTROY
-
  action: checked-integrity-of
  record_types: VIRT_INTEGRITY_CHECK
-
  action: assigned-vm-id
  object:
    primary: vm
    what: virtual-machine
  record_types: VIRT_MACHINE_ID
-
  action: migrated-vm-from
  record_types: VIRT_MIGRATE_IN
-
  action: migrated-vm-to
  record_types: VIRT_MIGRATE_OUT
-
  action: assigned-vm-resource
  object:
    primary: resrc
    secondary: vm
    what: virtual-machine
  record_types: VIRT_RESOURCE
- action: typed
  object:
    primary: data
    what: keystrokes
  how: [comm, exe]
  record_types:
  - TTY
  - USER_TTY
") assets["normalizationData"] = value } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/audit.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/audit.go index 69f8c0e5..5baf2651 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/audit.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/audit.go @@ -1,16 +1,19 @@ -// Copyright 2017 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. // +build linux @@ -102,7 +105,12 @@ func newAuditClient(netlinkGroups uint32, resp io.Writer) (*AuditClient, error) netlink, err := NewNetlinkClient(syscall.NETLINK_AUDIT, netlinkGroups, buf, resp) if err != nil { - return nil, err + switch err { + case syscall.EINVAL, syscall.EPROTONOSUPPORT, syscall.EAFNOSUPPORT: + return nil, errors.Wrap(err, "audit not supported by kernel") + default: + return nil, errors.Wrap(err, "failed to open audit netlink socket") + } } return &AuditClient{Netlink: netlink}, nil diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/auparse.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/auparse.go index a4b0f847..6953af5b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/auparse.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/auparse.go @@ -1,16 +1,19 @@ -// Copyright 2017 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package auparse @@ -250,9 +253,10 @@ var ( // value pairs. kvRegex = regexp.MustCompile(`([a-z0-9_-]+)=((?:[^"'\s]+)|'(?:\\'|[^'])*'|"(?:\\"|[^"])*")`) - // avcMessageRegex matches the beginning of AVC messages to parse the - // seresult and seperms parameters. Example: "avc: denied { read } for " - avcMessageRegex = regexp.MustCompile(`avc:\s+(\w+)\s+\{\s*(.*)\s*\}\s+for\s+`) + // avcMessageRegex matches the beginning of SELinux AVC messages to parse + // the seresult and seperms parameters. + // Example: "avc: denied { read } for " + selinuxAVCMessageRegex = regexp.MustCompile(`avc:\s+(\w+)\s+\{\s*(.*)\s*\}\s+for\s+`) ) // normalizeAuditMessage fixes some of the peculiarities of certain audit @@ -260,7 +264,14 @@ var ( func normalizeAuditMessage(typ AuditMessageType, msg string) (string, error) { switch typ { case AUDIT_AVC: - i := avcMessageRegex.FindStringSubmatchIndex(msg) + i := selinuxAVCMessageRegex.FindStringSubmatchIndex(msg) + if i == nil { + // It's a different type of AVC (e.g. AppArmor) and doesn't require + // normalization to make it parsable. + return msg, nil + } + + // This selinux AVC regex match should return three pairs. if len(i) != 3*2 { return "", errParseFailure } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/doc.go index b05809b1..9387043d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/doc.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/doc.go @@ -1,16 +1,19 @@ -// Copyright 2017 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. // Package auparse is a pure Go audit log parsing library. It can parse and // enrich audit messages from the Linux kernel. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/hex.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/hex.go index 744cbded..3af39a46 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/hex.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/hex.go @@ -1,16 +1,19 @@ -// Copyright 2018 Elasticsearch Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package auparse diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/mk_audit_arches.pl b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/mk_audit_arches.pl index 70de2d06..de12ae56 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/mk_audit_arches.pl +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/auparse/mk_audit_arches.pl @@ -17,7 +17,7 @@ my $command = "mk_audit_arches.pl ". join(' ', @ARGV); -`curl -s -O https://raw.githubusercontent.com/linux-audit/audit-kernel/v4.7/include/uapi/linux/audit.h`; +`curl -s -O https://raw.githubusercontent.com/torvalds/linux/v4.16/include/uapi/linux/audit.h`; open(GCC, "gcc -E -dD audit.h |") || die "can't run gcc"; my @arches; @@ -36,22 +36,24 @@ open (FILE, "> $outfile") || die "problem opening $outfile\n"; print FILE < + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/array.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/array.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/array.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/array.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/basetype_string.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/basetype_string.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/basetype_string.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/basetype_string.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/decode.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/decode.go similarity index 95% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/decode.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/decode.go index ac608760..b93f7cf5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/decode.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/decode.go @@ -3,7 +3,7 @@ package cborl import ( "io" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Decoder struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/defs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/defs.go similarity index 93% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/defs.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/defs.go index e203f546..bbdc1ac4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/defs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/defs.go @@ -1,6 +1,6 @@ package cborl -import "github.com/urso/go-structform/internal/unsafe" +import "github.com/elastic/go-structform/internal/unsafe" const ( majorUint uint8 = 0x00 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/error.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/error.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/error.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/error.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/parse.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/parse.go rename to 
vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/parse.go index 3cf5d0e1..70da7f48 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/parse.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/parse.go @@ -5,7 +5,7 @@ import ( "io" "math" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Parser struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/stack.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/stack.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/stack.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/stack.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/visitor.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/visitor.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/visitor.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/visitor.go index ddd14c02..fbe10a19 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/cborl/visitor.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/cborl/visitor.go @@ -5,7 +5,7 @@ import ( "io" "math" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Visitor struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/0gen.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/0gen.go similarity index 86% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/0gen.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/0gen.go index e456745e..e5e7b9be 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/0gen.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/0gen.go @@ -1,9 +1,5 @@ package gotype -// define mktmpl alias -//go:generate -command mktmpl go run ../internal/gen/gen_yaml.go -// go:generate -command mktmpl gen - //go:generate mktmpl -f -o fold_map_inline.generated.go fold_map_inline.yml //go:generate mktmpl -f -o fold_refl_sel.generated.go fold_refl_sel.yml //go:generate mktmpl -f -o stacks.generated.go stacks.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/defs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/defs.go similarity index 91% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/defs.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/defs.go index 9b4fc229..ea148f52 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/defs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/defs.go @@ -3,8 +3,8 @@ package gotype import ( "reflect" - "github.com/urso/go-structform" - "github.com/urso/go-structform/internal/unsafe" + "github.com/elastic/go-structform" + "github.com/elastic/go-structform/internal/unsafe" ) type options struct { diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/error.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/error.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/error.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/error.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold.go index 9b6a3750..41357200 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold.go @@ -3,7 +3,7 @@ package gotype import ( "reflect" - "github.com/urso/go-structform" + "github.com/elastic/go-structform" ) type foldFn func(c *foldContext, v interface{}) error diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_arr.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_arr.go similarity index 98% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_arr.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_arr.go index 19e9db23..82cc5181 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_arr.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_arr.go @@ -3,7 +3,7 @@ package gotype import ( "reflect" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) var ( diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_inline.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_inline.go similarity index 96% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_inline.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_inline.go index 0280cd8d..f4e71e8a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_inline.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_inline.go @@ -3,8 +3,8 @@ package gotype import ( "reflect" - "github.com/urso/go-structform" - "github.com/urso/go-structform/visitors" + "github.com/elastic/go-structform" + "github.com/elastic/go-structform/visitors" ) // getReflectFoldMapKeys implements inline fold of a map[string]X type, diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_map.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map.go similarity index 98% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_map.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map.go index 281615fc..2d26d3b2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_map.go +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map.go @@ -3,7 +3,7 @@ package gotype import ( "reflect" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) var ( diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_map_inline.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map_inline.generated.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_map_inline.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map_inline.generated.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_map_inline.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map_inline.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_map_inline.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map_inline.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_primitives.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_primitives.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_primitives.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_primitives.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_refl_sel.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_refl_sel.generated.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_refl_sel.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_refl_sel.generated.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_refl_sel.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_refl_sel.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_refl_sel.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_refl_sel.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_reflect.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_reflect.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_reflect.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_reflect.go index 25e9c646..2724da16 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_reflect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_reflect.go @@ -6,7 +6,7 @@ import ( "unicode" "unicode/utf8" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type typeFoldRegistry struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_user.go 
b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_user.go similarity index 91% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_user.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_user.go index 880c7d60..a9013533 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/fold_user.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_user.go @@ -6,8 +6,8 @@ import ( "reflect" "unsafe" - structform "github.com/urso/go-structform" - stunsafe "github.com/urso/go-structform/internal/unsafe" + structform "github.com/elastic/go-structform" + stunsafe "github.com/elastic/go-structform/internal/unsafe" ) type userFoldFn func(unsafe.Pointer, structform.ExtVisitor) error diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/opts.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/opts.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/opts.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/opts.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/stacks.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.generated.go similarity index 98% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/stacks.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.generated.go index 922729bb..33398d2c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/stacks.generated.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.generated.go @@ -5,7 +5,7 @@ import ( "reflect" "unsafe" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type unfolderStack struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/stacks.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/stacks.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/symbols.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/symbols.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/symbols.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/symbols.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/tags.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/tags.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/tags.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/tags.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/types.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/types.yml similarity 
index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/types.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/types.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold.go similarity index 93% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold.go index b846c7a3..0c4976f1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold.go @@ -5,7 +5,7 @@ import ( "sync" "unsafe" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Unfolder struct { @@ -15,7 +15,7 @@ type Unfolder struct { type unfoldCtx struct { opts options - buf buffer + // buf buffer unfolder unfolderStack value reflectValueStack @@ -25,6 +25,14 @@ type unfoldCtx struct { idx idxStack keyCache symbolCache + + valueBuffer unfoldBuf +} + +type unfoldBuf struct { + arrays [][]byte + mapPrimitive []map[string]byte + mapAny []map[string]interface{} } type ptrUnfolder interface { @@ -86,8 +94,14 @@ func NewUnfolder(to interface{}) (*Unfolder, error) { u.idx.init() u.baseType.init() + u.valueBuffer = unfoldBuf{ + arrays: make([][]byte, 0, 4), + mapPrimitive: make([]map[string]byte, 0, 1), + mapAny: make([]map[string]interface{}, 0, 4), + } + // TODO: make allocation buffer size configurable - u.buf.init(1024) + // u.buf.init(1024) if to != nil { err := u.SetTarget(to) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_arr.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.generated.go similarity index 89% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_arr.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.generated.go index e89b5ae2..96b00fc1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_arr.generated.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.generated.go @@ -4,7 +4,7 @@ package gotype import ( "unsafe" - "github.com/urso/go-structform" + "github.com/elastic/go-structform" ) var ( @@ -2012,72 +2012,106 @@ func unfoldIfcFinishSubArray(ctx *unfoldCtx) (interface{}, error) { switch bt { case structform.AnyType: - ctx.buf.release() - return *(*[]interface{})(child), nil + value := *(*[]interface{})(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.BoolType: - ctx.buf.release() - return *(*[]bool)(child), nil + value := *(*[]bool)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.ByteType: - ctx.buf.release() - return *(*[]uint8)(child), nil + value := *(*[]uint8)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Float32Type: - ctx.buf.release() - return *(*[]float32)(child), nil + value := *(*[]float32)(child) + last := 
len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Float64Type: - ctx.buf.release() - return *(*[]float64)(child), nil + value := *(*[]float64)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Int16Type: - ctx.buf.release() - return *(*[]int16)(child), nil + value := *(*[]int16)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Int32Type: - ctx.buf.release() - return *(*[]int32)(child), nil + value := *(*[]int32)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Int64Type: - ctx.buf.release() - return *(*[]int64)(child), nil + value := *(*[]int64)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Int8Type: - ctx.buf.release() - return *(*[]int8)(child), nil + value := *(*[]int8)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.IntType: - ctx.buf.release() - return *(*[]int)(child), nil + value := *(*[]int)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.StringType: - ctx.buf.release() - return *(*[]string)(child), nil + value := *(*[]string)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Uint16Type: - ctx.buf.release() - return *(*[]uint16)(child), nil + value := *(*[]uint16)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Uint32Type: - ctx.buf.release() - return *(*[]uint32)(child), nil + value := *(*[]uint32)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Uint64Type: - ctx.buf.release() - return *(*[]uint64)(child), nil + value := *(*[]uint64)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.Uint8Type: - ctx.buf.release() - return *(*[]uint8)(child), nil + value := *(*[]uint8)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.UintType: - ctx.buf.release() - return *(*[]uint)(child), nil + value := *(*[]uint)(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil case structform.ZeroType: - ctx.buf.release() - return *(*[]interface{})(child), nil + value := *(*[]interface{})(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil default: return nil, errTODO() @@ -2088,222 +2122,171 @@ func makeArrayPtr(ctx *unfoldCtx, l int, bt structform.BaseType) (interface{}, u switch bt { case structform.AnyType: - sz := unsafe.Sizeof([]interface{}{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]interface{})(ptr) - - 
if l > 0 { - *to = make([]interface{}, l) - } - unfolder := newUnfolderArrIfc() return to, ptr, unfolder case structform.BoolType: - sz := unsafe.Sizeof([]bool{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]bool)(ptr) - - if l > 0 { - *to = make([]bool, l) - } - unfolder := newUnfolderArrBool() return to, ptr, unfolder case structform.ByteType: - sz := unsafe.Sizeof([]uint8{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]uint8)(ptr) - - if l > 0 { - *to = make([]uint8, l) - } - unfolder := newUnfolderArrUint8() return to, ptr, unfolder case structform.Float32Type: - sz := unsafe.Sizeof([]float32{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]float32)(ptr) - - if l > 0 { - *to = make([]float32, l) - } - unfolder := newUnfolderArrFloat32() return to, ptr, unfolder case structform.Float64Type: - sz := unsafe.Sizeof([]float64{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]float64)(ptr) - - if l > 0 { - *to = make([]float64, l) - } - unfolder := newUnfolderArrFloat64() return to, ptr, unfolder case structform.Int16Type: - sz := unsafe.Sizeof([]int16{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]int16)(ptr) - - if l > 0 { - *to = make([]int16, l) - } - unfolder := newUnfolderArrInt16() return to, ptr, unfolder case structform.Int32Type: - sz := unsafe.Sizeof([]int32{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]int32)(ptr) - - if l > 0 { - *to = make([]int32, l) - } - unfolder := newUnfolderArrInt32() return to, ptr, unfolder case structform.Int64Type: - sz := unsafe.Sizeof([]int64{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]int64)(ptr) - - if l > 0 { - *to = make([]int64, l) - } - unfolder := newUnfolderArrInt64() return to, ptr, unfolder case structform.Int8Type: - sz := unsafe.Sizeof([]int8{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]int8)(ptr) - - if l > 0 { - *to = make([]int8, l) - } - unfolder := newUnfolderArrInt8() return to, ptr, unfolder case structform.IntType: - sz := unsafe.Sizeof([]int{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]int)(ptr) - - if l > 0 { - *to = 
make([]int, l) - } - unfolder := newUnfolderArrInt() return to, ptr, unfolder case structform.StringType: - sz := unsafe.Sizeof([]string{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]string)(ptr) - - if l > 0 { - *to = make([]string, l) - } - unfolder := newUnfolderArrString() return to, ptr, unfolder case structform.Uint16Type: - sz := unsafe.Sizeof([]uint16{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]uint16)(ptr) - - if l > 0 { - *to = make([]uint16, l) - } - unfolder := newUnfolderArrUint16() return to, ptr, unfolder case structform.Uint32Type: - sz := unsafe.Sizeof([]uint32{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]uint32)(ptr) - - if l > 0 { - *to = make([]uint32, l) - } - unfolder := newUnfolderArrUint32() return to, ptr, unfolder case structform.Uint64Type: - sz := unsafe.Sizeof([]uint64{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]uint64)(ptr) - - if l > 0 { - *to = make([]uint64, l) - } - unfolder := newUnfolderArrUint64() return to, ptr, unfolder case structform.Uint8Type: - sz := unsafe.Sizeof([]uint8{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]uint8)(ptr) - - if l > 0 { - *to = make([]uint8, l) - } - unfolder := newUnfolderArrUint8() return to, ptr, unfolder case structform.UintType: - sz := unsafe.Sizeof([]uint{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]uint)(ptr) - - if l > 0 { - *to = make([]uint, l) - } - unfolder := newUnfolderArrUint() return to, ptr, unfolder case structform.ZeroType: - sz := unsafe.Sizeof([]interface{}{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]interface{})(ptr) - - if l > 0 { - *to = make([]interface{}, l) - } - unfolder := newUnfolderArrIfc() return to, ptr, unfolder diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_arr.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.yml similarity index 90% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_arr.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.yml index 3fc86759..62b0b27d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_arr.yml +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.yml @@ -120,8 +120,10 @@ templates.arrIfc: | 
switch bt { {{ range $bt, $gt := data.mapTypes }} case structform.{{ $bt }}: - ctx.buf.release() - return *(*[]{{ $gt }})(child), nil + value := *(*[]{{ $gt }})(child) + last := len(ctx.valueBuffer.arrays) - 1 + ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] + return value, nil {{ end }} default: return nil, errTODO() @@ -132,21 +134,17 @@ templates.arrIfc: | switch bt { {{ range $bt, $gt := data.mapTypes }} case structform.{{ $bt }}: - sz := unsafe.Sizeof([]{{ $gt }}{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.arrays) + ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) + arrPtr := &ctx.valueBuffer.arrays[idx] + ptr := unsafe.Pointer(arrPtr) to := (*[]{{ $gt }})(ptr) - - if l > 0 { - *to = make([]{{ $gt }}, l) - } - - {{ if or (eq $bt "AnyType") (eq $bt "ZeroType") }} + {{- if or (eq $bt "AnyType") (eq $bt "ZeroType") }} unfolder := newUnfolderArrIfc() {{ else }} unfolder := newUnfolderArr{{ $gt | capitalize }}() {{ end }} return to, ptr, unfolder - {{ end }} default: panic("invalid type code") diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_err.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.generated.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_err.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.generated.go index 05e51534..6e9677ff 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_err.generated.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.generated.go @@ -1,7 +1,7 @@ // This file has been generated from 'unfold_err.yml', do not edit package gotype -import structform "github.com/urso/go-structform" +import structform "github.com/elastic/go-structform" type unfolderErrArrayStart struct{} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_err.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_err.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_ignore.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.generated.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_ignore.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.generated.go index 555961cc..6ac5d1ef 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_ignore.generated.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.generated.go @@ -5,7 +5,7 @@ import ( "reflect" "unsafe" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type unfoldIgnoreValue struct{} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_ignore.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.yml similarity index 
100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_ignore.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_lookup.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_lookup.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_lookup_go.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup_go.generated.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_lookup_go.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup_go.generated.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_lookup_go.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup_go.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_lookup_go.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup_go.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_map.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.generated.go similarity index 89% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_map.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.generated.go index 38e35e75..092093ea 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_map.generated.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.generated.go @@ -4,7 +4,7 @@ package gotype import ( "unsafe" - "github.com/urso/go-structform" + "github.com/elastic/go-structform" ) var ( @@ -2057,72 +2057,106 @@ func unfoldIfcFinishSubMap(ctx *unfoldCtx) (interface{}, error) { switch bt { case structform.AnyType: - ctx.buf.release() - return *(*map[string]interface{})(child), nil + value := *(*map[string]interface{})(child) + last := len(ctx.valueBuffer.mapAny) - 1 + ctx.valueBuffer.mapAny = ctx.valueBuffer.mapAny[:last] + return value, nil case structform.BoolType: - ctx.buf.release() - return *(*map[string]bool)(child), nil + value := *(*map[string]bool)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.ByteType: - ctx.buf.release() - return *(*map[string]uint8)(child), nil + value := *(*map[string]uint8)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Float32Type: - ctx.buf.release() - return *(*map[string]float32)(child), nil + value := *(*map[string]float32)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = 
ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Float64Type: - ctx.buf.release() - return *(*map[string]float64)(child), nil + value := *(*map[string]float64)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Int16Type: - ctx.buf.release() - return *(*map[string]int16)(child), nil + value := *(*map[string]int16)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Int32Type: - ctx.buf.release() - return *(*map[string]int32)(child), nil + value := *(*map[string]int32)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Int64Type: - ctx.buf.release() - return *(*map[string]int64)(child), nil + value := *(*map[string]int64)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Int8Type: - ctx.buf.release() - return *(*map[string]int8)(child), nil + value := *(*map[string]int8)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.IntType: - ctx.buf.release() - return *(*map[string]int)(child), nil + value := *(*map[string]int)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.StringType: - ctx.buf.release() - return *(*map[string]string)(child), nil + value := *(*map[string]string)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Uint16Type: - ctx.buf.release() - return *(*map[string]uint16)(child), nil + value := *(*map[string]uint16)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Uint32Type: - ctx.buf.release() - return *(*map[string]uint32)(child), nil + value := *(*map[string]uint32)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Uint64Type: - ctx.buf.release() - return *(*map[string]uint64)(child), nil + value := *(*map[string]uint64)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.Uint8Type: - ctx.buf.release() - return *(*map[string]uint8)(child), nil + value := *(*map[string]uint8)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.UintType: - ctx.buf.release() - return *(*map[string]uint)(child), nil + value := *(*map[string]uint)(child) + last := len(ctx.valueBuffer.mapPrimitive) - 1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + return value, nil case structform.ZeroType: - ctx.buf.release() - return *(*map[string]interface{})(child), nil + value := *(*map[string]interface{})(child) + last := len(ctx.valueBuffer.mapAny) - 1 + ctx.valueBuffer.mapAny = ctx.valueBuffer.mapAny[:last] + return value, nil default: return nil, errTODO() @@ -2133,156 +2167,154 
@@ func makeMapPtr(ctx *unfoldCtx, l int, bt structform.BaseType) (interface{}, uns switch bt { case structform.AnyType: - sz := unsafe.Sizeof(map[string]interface{}{}) - ptr := ctx.buf.alloc(int(sz)) - to := (*map[string]interface{})(ptr) - + idx := len(ctx.valueBuffer.mapAny) + ctx.valueBuffer.mapAny = append(ctx.valueBuffer.mapAny, nil) + to := &ctx.valueBuffer.mapAny[idx] + ptr := unsafe.Pointer(to) unfolder := newUnfolderMapIfc() - return to, ptr, unfolder case structform.BoolType: - sz := unsafe.Sizeof(map[string]bool{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]bool)(ptr) - unfolder := newUnfolderMapBool() - return to, ptr, unfolder case structform.ByteType: - sz := unsafe.Sizeof(map[string]uint8{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]uint8)(ptr) - unfolder := newUnfolderMapUint8() - return to, ptr, unfolder case structform.Float32Type: - sz := unsafe.Sizeof(map[string]float32{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]float32)(ptr) - unfolder := newUnfolderMapFloat32() - return to, ptr, unfolder case structform.Float64Type: - sz := unsafe.Sizeof(map[string]float64{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]float64)(ptr) - unfolder := newUnfolderMapFloat64() - return to, ptr, unfolder case structform.Int16Type: - sz := unsafe.Sizeof(map[string]int16{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]int16)(ptr) - unfolder := newUnfolderMapInt16() - return to, ptr, unfolder case structform.Int32Type: - sz := unsafe.Sizeof(map[string]int32{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]int32)(ptr) - unfolder := newUnfolderMapInt32() - return to, ptr, unfolder case structform.Int64Type: - sz := unsafe.Sizeof(map[string]int64{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]int64)(ptr) - unfolder := newUnfolderMapInt64() - return to, ptr, unfolder case structform.Int8Type: - sz := unsafe.Sizeof(map[string]int8{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]int8)(ptr) - 
unfolder := newUnfolderMapInt8() - return to, ptr, unfolder case structform.IntType: - sz := unsafe.Sizeof(map[string]int{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]int)(ptr) - unfolder := newUnfolderMapInt() - return to, ptr, unfolder case structform.StringType: - sz := unsafe.Sizeof(map[string]string{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]string)(ptr) - unfolder := newUnfolderMapString() - return to, ptr, unfolder case structform.Uint16Type: - sz := unsafe.Sizeof(map[string]uint16{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]uint16)(ptr) - unfolder := newUnfolderMapUint16() - return to, ptr, unfolder case structform.Uint32Type: - sz := unsafe.Sizeof(map[string]uint32{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]uint32)(ptr) - unfolder := newUnfolderMapUint32() - return to, ptr, unfolder case structform.Uint64Type: - sz := unsafe.Sizeof(map[string]uint64{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]uint64)(ptr) - unfolder := newUnfolderMapUint64() - return to, ptr, unfolder case structform.Uint8Type: - sz := unsafe.Sizeof(map[string]uint8{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]uint8)(ptr) - unfolder := newUnfolderMapUint8() - return to, ptr, unfolder case structform.UintType: - sz := unsafe.Sizeof(map[string]uint{}) - ptr := ctx.buf.alloc(int(sz)) + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) to := (*map[string]uint)(ptr) - unfolder := newUnfolderMapUint() - return to, ptr, unfolder case structform.ZeroType: - sz := unsafe.Sizeof(map[string]interface{}{}) - ptr := ctx.buf.alloc(int(sz)) - to := (*map[string]interface{})(ptr) - + idx := len(ctx.valueBuffer.mapAny) + ctx.valueBuffer.mapAny = append(ctx.valueBuffer.mapAny, nil) + to := &ctx.valueBuffer.mapAny[idx] + ptr := unsafe.Pointer(to) unfolder := newUnfolderMapIfc() - return to, ptr, unfolder default: diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_map.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.yml similarity index 80% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_map.yml rename to 
vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.yml index 3c7987f4..2326b059 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_map.yml +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.yml @@ -116,8 +116,16 @@ templates.mapIfc: | switch bt { {{ range $bt, $gt := data.mapTypes }} case structform.{{ $bt }}: - ctx.buf.release() - return *(*map[string]{{ $gt }})(child), nil + value := *(*map[string]{{ $gt }})(child) + {{- if or (eq $bt "AnyType") (eq $bt "ZeroType") }} + last := len(ctx.valueBuffer.mapAny)-1 + ctx.valueBuffer.mapAny = ctx.valueBuffer.mapAny[:last] + {{ else }} + last := len(ctx.valueBuffer.mapPrimitive)-1 + ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] + {{ end -}} + return value, nil + {{ end }} default: return nil, errTODO() @@ -128,15 +136,23 @@ templates.mapIfc: | switch bt { {{ range $bt, $gt := data.mapTypes }} case structform.{{ $bt }}: - sz := unsafe.Sizeof(map[string]{{ $gt }}{}) - ptr := ctx.buf.alloc(int(sz)) - to := (*map[string]{{ $gt }})(ptr) - {{ if or (eq $bt "AnyType") (eq $bt "ZeroType") }} + {{- if or (eq $bt "AnyType") (eq $bt "ZeroType") }} + idx := len(ctx.valueBuffer.mapAny) + ctx.valueBuffer.mapAny = append(ctx.valueBuffer.mapAny, nil) + to := &ctx.valueBuffer.mapAny[idx] + ptr := unsafe.Pointer(to) unfolder := newUnfolderMapIfc() + return to, ptr, unfolder {{ else }} + idx := len(ctx.valueBuffer.mapPrimitive) + ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) + mapPtr := &ctx.valueBuffer.mapPrimitive[idx] + ptr := unsafe.Pointer(mapPtr) + to := (*map[string]{{ $gt }})(ptr) unfolder := newUnfolderMap{{ $gt | capitalize }}() + return to, ptr, unfolder {{ end }} - return to, ptr, unfolder + {{ end }} default: panic("invalid type code") diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_primitive.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.generated.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_primitive.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.generated.go index 48cf4752..b2302090 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_primitive.generated.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.generated.go @@ -4,7 +4,7 @@ package gotype import ( "unsafe" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) var ( diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_primitive.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_primitive.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.generated.go similarity index 99% rename from 
vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.generated.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.generated.go index d061587d..5979a2f3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.generated.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.generated.go @@ -4,7 +4,7 @@ package gotype import ( "reflect" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) func (u *unfolderReflSlice) OnNil(ctx *unfoldCtx) error { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.go similarity index 97% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.go index a43a5c7f..c3176c91 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.go @@ -4,7 +4,7 @@ import ( "reflect" "unsafe" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type liftedReflUnfolder struct{ unfolder ptrUnfolder } @@ -166,8 +166,7 @@ func (u *unfolderReflMapOnElem) prepare(ctx *unfoldCtx) reflect.Value { v := ptr.Elem() et := v.Type().Elem() - targetPtr := ctx.buf.alloc(int(et.Size())) - target := reflect.NewAt(et, targetPtr) + target := reflect.New(et) ctx.value.push(target) return target } @@ -180,7 +179,6 @@ func (u *unfolderReflMapOnElem) process(ctx *unfoldCtx) { m := ptr.Elem() m.SetMapIndex(reflect.ValueOf(ctx.key.pop()), v) - ctx.buf.release() ctx.unfolder.current = u.shared.waitKey } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_refl.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_struct.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_struct.go similarity index 96% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_struct.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_struct.go index afa4d896..840d60a8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_struct.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_struct.go @@ -8,7 +8,7 @@ import ( "unicode/utf8" "unsafe" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type unfolderStruct struct { @@ -166,8 +166,7 @@ func (u *unfolderStruct) OnKey(ctx *unfoldCtx, key string) error { } structPtr := ctx.ptr.current - fieldAddr := uintptr(structPtr) + field.offset - fieldPtr := unsafe.Pointer(fieldAddr) + fieldPtr := unsafe.Pointer(uintptr(structPtr) + field.offset) 
field.initState(ctx, fieldPtr) return nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_templates.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_templates.yml similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/unfold_templates.yml rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_templates.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/internal/unsafe/unsafe.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/internal/unsafe/unsafe.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/internal/unsafe/unsafe.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/internal/unsafe/unsafe.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/decode.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/decode.go similarity index 95% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/decode.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/decode.go index d2a9e2e5..83926ec8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/decode.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/decode.go @@ -3,7 +3,7 @@ package json import ( "io" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Decoder struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/defs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/defs.go similarity index 84% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/defs.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/defs.go index 469738e1..3d2d03bf 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/defs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/defs.go @@ -1,6 +1,6 @@ package json -import "github.com/urso/go-structform/internal/unsafe" +import "github.com/elastic/go-structform/internal/unsafe" var ( nullSymbol = []byte("null") diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/json.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/json.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/json.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/json.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/parse.go similarity index 93% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/parse.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/parse.go index b8b57142..29baee55 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/parse.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/parse.go @@ -3,13 +3,15 @@ package json import ( "bytes" 
"errors" + "fmt" "io" + "math" "strconv" "unicode" "unicode/utf16" "unicode/utf8" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Parser struct { @@ -28,9 +30,11 @@ type Parser struct { literalBuffer []byte literalBuffer0 [64]byte - isDouble bool - inEscape bool - required int + + inEscape bool + isDouble bool + + required int } var ( @@ -304,6 +308,8 @@ func (p *Parser) stepValue(b []byte, retState state) ([]byte, bool, error) { default: // parse number? + p.isDouble = false + isNumber := c == '-' || c == '+' || c == '.' || isDigit(c) if !isNumber { return b, false, errUnknownChar @@ -444,9 +450,10 @@ func (p *Parser) doString(b []byte) ([]byte, bool, bool, []byte, error) { buf = b[1:] } + inEscape := p.inEscape for i, c := range buf { - if p.inEscape { - p.inEscape = false + if inEscape { + inEscape = false continue } @@ -454,11 +461,11 @@ func (p *Parser) doString(b []byte) ([]byte, bool, bool, []byte, error) { done = true stop = i + delta break - } - if c == '\\' { - p.inEscape = true + } else if c == '\\' { + inEscape = true } } + p.inEscape = inEscape if !done { p.literalBuffer = append(p.literalBuffer, b...) @@ -672,7 +679,7 @@ func (p *Parser) reportNumber(b []byte, isDouble bool) error { } } else { var i int64 - if i, err = strconv.ParseInt(bytes2Str(b), 10, 64); err == nil { + if i, err = parseInt(b); err == nil { err = p.visitor.OnInt64(i) } } @@ -680,6 +687,51 @@ func (p *Parser) reportNumber(b []byte, isDouble bool) error { return err } +func parseInt(b []byte) (int64, error) { + neg := false + if b[0] == '+' { + b = b[1:] + } else if b[0] == '-' { + neg = true + b = b[1:] + } + + u, err := parseUint(b) + n := int64(u) + if neg { + n = -n + } + return n, err +} + +func parseUint(b []byte) (uint64, error) { + const cutoff = math.MaxUint64/10 + 1 + const maxVal = math.MaxUint64 + + var n uint64 + + for _, c := range b { + d := int(c) - '0' + if d < 0 || d > 9 { + return 0, fmt.Errorf("'%s' is no valid number", b) + } + + if n >= cutoff { + return 0, fmt.Errorf("number overflow parsing '%v'", b) + } + + n *= 10 + n1 := n + uint64(d) + if n1 < n || n1 > maxVal { + return 0, fmt.Errorf("number overflow parsing '%v'", b) + } + + n = n1 + } + + return n, nil +} + func (p *Parser) stepNULL(b []byte) ([]byte, bool, error) { b, done, err := p.stepKind(b, []byte("null"), errExpectedNull) if done { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/state_string.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/state_string.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/state_string.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/state_string.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/visitor.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/visitor.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/visitor.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/visitor.go index 7194f6f5..756fde75 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/json/visitor.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/json/visitor.go @@ -7,7 +7,7 @@ import ( "strconv" "unicode/utf8" - structform "github.com/urso/go-structform" + structform 
"github.com/elastic/go-structform" ) // Visitor implements the structform.Visitor interface, json encoding the diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/map.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/map.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/map.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/map.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/string.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/string.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/string.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/string.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/decode.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/decode.go similarity index 95% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/decode.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/decode.go index c884a2c3..3f043bb1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/decode.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/decode.go @@ -3,7 +3,7 @@ package ubjson import ( "io" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Decoder struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/defs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/defs.go similarity index 92% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/defs.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/defs.go index 8d2d5438..09289292 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/defs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/defs.go @@ -1,6 +1,6 @@ package ubjson -import "github.com/urso/go-structform/internal/unsafe" +import "github.com/elastic/go-structform/internal/unsafe" const ( noMarker byte = 0 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/parse.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/parse.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/parse.go index cb225be8..8d46f714 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/parse.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/parse.go @@ -6,7 +6,7 @@ import ( "io" "math" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Parser struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/stack.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/stack.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/stack.go rename to 
vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/stack.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/statestep_string.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/statestep_string.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/statestep_string.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/statestep_string.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/statetype_string.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/statetype_string.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/statetype_string.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/statetype_string.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/visitor.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/visitor.go similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/visitor.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/visitor.go index ce21e64f..e90b4f39 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/ubjson/visitor.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/ubjson/visitor.go @@ -6,7 +6,7 @@ import ( "math" "strconv" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type Visitor struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitor.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitor.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitor.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitor.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitors/expect_obj.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/expect_obj.go similarity index 98% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitors/expect_obj.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/expect_obj.go index ff4fdfa6..7b0413d0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitors/expect_obj.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/expect_obj.go @@ -3,7 +3,7 @@ package visitors import ( "errors" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type ExpectObjVisitor struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/nilVisitor.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/nilVisitor.go new file mode 100644 index 00000000..38f7c0f5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/nilVisitor.go @@ -0,0 +1,218 @@ +package visitors + +import structform "github.com/elastic/go-structform" + +type emptyVisitor struct { +} + +func NilVisitor() structform.Visitor { + return 
(*emptyVisitor)(nil) +} + +func (e *emptyVisitor) OnObjectStart(len int, baseType structform.BaseType) error { + return nil +} + +func (e *emptyVisitor) OnObjectFinished() error { + return nil +} + +func (e *emptyVisitor) OnKey(s string) error { + return nil +} + +func (e *emptyVisitor) OnArrayStart(len int, baseType structform.BaseType) error { + return nil +} + +func (e *emptyVisitor) OnArrayFinished() error { + return nil +} + +func (e *emptyVisitor) OnNil() error { + return nil +} + +func (e *emptyVisitor) OnBool(b bool) error { + return nil +} + +func (e *emptyVisitor) OnString(s string) error { + return nil +} + +func (e *emptyVisitor) OnInt8(i int8) error { + return nil +} + +func (e *emptyVisitor) OnInt16(i int16) error { + return nil +} + +func (e *emptyVisitor) OnInt32(i int32) error { + return nil +} + +func (e *emptyVisitor) OnInt64(i int64) error { + return nil +} + +func (e *emptyVisitor) OnInt(i int) error { + return nil +} + +func (e *emptyVisitor) OnByte(b byte) error { + return nil +} + +func (e *emptyVisitor) OnUint8(u uint8) error { + return nil +} + +func (e *emptyVisitor) OnUint16(u uint16) error { + return nil +} + +func (e *emptyVisitor) OnUint32(u uint32) error { + return nil +} + +func (e *emptyVisitor) OnUint64(u uint64) error { + return nil +} + +func (e *emptyVisitor) OnUint(u uint) error { + return nil +} + +func (e *emptyVisitor) OnFloat32(f float32) error { + return nil +} + +func (e *emptyVisitor) OnFloat64(f float64) error { + return nil +} + +func (e *emptyVisitor) OnBoolArray([]bool) error { + return nil +} + +func (e *emptyVisitor) OnStringArray([]string) error { + return nil +} + +func (e *emptyVisitor) OnInt8Array([]int8) error { + return nil +} + +func (e *emptyVisitor) OnInt16Array([]int16) error { + return nil +} + +func (e *emptyVisitor) OnInt32Array([]int32) error { + return nil +} + +func (e *emptyVisitor) OnInt64Array([]int64) error { + return nil +} + +func (e *emptyVisitor) OnIntArray([]int) error { + return nil +} + +func (e *emptyVisitor) OnBytes([]byte) error { + return nil +} + +func (e *emptyVisitor) OnUint8Array([]uint8) error { + return nil +} + +func (e *emptyVisitor) OnUint16Array([]uint16) error { + return nil +} + +func (e *emptyVisitor) OnUint32Array([]uint32) error { + return nil +} + +func (e *emptyVisitor) OnUint64Array([]uint64) error { + return nil +} + +func (e *emptyVisitor) OnUintArray([]uint) error { + return nil +} + +func (e *emptyVisitor) OnFloat32Array([]float32) error { + return nil +} + +func (e *emptyVisitor) OnFloat64Array([]float64) error { + return nil +} + +func (e *emptyVisitor) OnBoolObject(map[string]bool) error { + return nil +} + +func (e *emptyVisitor) OnStringObject(map[string]string) error { + return nil +} + +func (e *emptyVisitor) OnInt8Object(map[string]int8) error { + return nil +} + +func (e *emptyVisitor) OnInt16Object(map[string]int16) error { + return nil +} + +func (e *emptyVisitor) OnInt32Object(map[string]int32) error { + return nil +} + +func (e *emptyVisitor) OnInt64Object(map[string]int64) error { + return nil +} + +func (e *emptyVisitor) OnIntObject(map[string]int) error { + return nil +} + +func (e *emptyVisitor) OnUint8Object(map[string]uint8) error { + return nil +} + +func (e *emptyVisitor) OnUint16Object(map[string]uint16) error { + return nil +} + +func (e *emptyVisitor) OnUint32Object(map[string]uint32) error { + return nil +} + +func (e *emptyVisitor) OnUint64Object(map[string]uint64) error { + return nil +} + +func (e *emptyVisitor) OnUintObject(map[string]uint) error { + return 
nil +} + +func (e *emptyVisitor) OnFloat32Object(map[string]float32) error { + return nil +} + +func (e *emptyVisitor) OnFloat64Object(map[string]float64) error { + return nil +} + +func (e *emptyVisitor) OnStringRef(s []byte) error { + return nil +} + +func (e *emptyVisitor) OnKeyRef(s []byte) error { + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitors/stringer.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/stringer.go similarity index 98% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitors/stringer.go rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/stringer.go index d2e15a63..bef1db65 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/visitors/stringer.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/visitors/stringer.go @@ -3,7 +3,7 @@ package visitors import ( "fmt" - structform "github.com/urso/go-structform" + structform "github.com/elastic/go-structform" ) type StringConvVisitor struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/LICENSE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/NOTICE.txt new file mode 100644 index 00000000..b149204f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/NOTICE.txt @@ -0,0 +1,5 @@ +Elastic go-sysinfo +Copyright 2017-2018 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). 
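An aside on the json/parse.go hunk above: the new parseUint rejects any non-digit byte and guards against overflow twice, first by refusing to multiply once n reaches math.MaxUint64/10 + 1, then by checking that adding the digit did not wrap around. A minimal standalone sketch of the same guard follows; the package layout and the parseUintSketch name are illustrative, not part of the vendored code.

    package main

    import (
        "fmt"
        "math"
    )

    // parseUintSketch mirrors the overflow checks in the vendored parseUint:
    // stop before n*10 can wrap, then verify the digit addition did not wrap either.
    func parseUintSketch(b []byte) (uint64, error) {
        const cutoff = math.MaxUint64/10 + 1

        var n uint64
        for _, c := range b {
            d := int(c) - '0'
            if d < 0 || d > 9 {
                return 0, fmt.Errorf("'%s' is not a valid number", b)
            }
            if n >= cutoff {
                return 0, fmt.Errorf("number overflow parsing '%s'", b)
            }
            n *= 10
            n1 := n + uint64(d)
            if n1 < n {
                return 0, fmt.Errorf("number overflow parsing '%s'", b)
            }
            n = n1
        }
        return n, nil
    }

    func main() {
        fmt.Println(parseUintSketch([]byte("18446744073709551615"))) // max uint64, accepted
        fmt.Println(parseUintSketch([]byte("18446744073709551616"))) // one past max, rejected
    }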
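Similarly, on the new visitors/nilVisitor.go above: NilVisitor returns a structform.Visitor whose callbacks all succeed and discard their input, which makes it a convenient sink when only the well-formedness of an event stream matters (validation, benchmarks). A rough usage sketch, assuming the root package exposes an AnyType BaseType constant and accepts -1 as an unknown length; the visitor methods used are the ones defined in the file above.

    package main

    import (
        "fmt"

        structform "github.com/elastic/go-structform"
        "github.com/elastic/go-structform/visitors"
    )

    func main() {
        // Every callback on the nil visitor is a no-op that returns nil.
        v := visitors.NilVisitor()

        // Hand-drive the event stream for the object {"answer": 42}.
        // -1 is assumed to mean "length unknown"; AnyType is assumed to be the
        // generic base type exported by the root package.
        errs := []error{
            v.OnObjectStart(-1, structform.AnyType),
            v.OnKey("answer"),
            v.OnInt64(42),
            v.OnObjectFinished(),
        }
        for _, err := range errs {
            if err != nil {
                fmt.Println("unexpected error:", err)
                return
            }
        }
        fmt.Println("event stream accepted")
    }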
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/README.md new file mode 100644 index 00000000..5111c720 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/README.md @@ -0,0 +1,12 @@ +# go-sysinfo [WORK IN PROGRESS] + +[![Build Status](http://img.shields.io/travis/elastic/go-sysinfo.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/elastic/go-sysinfo +[godocs]: http://godoc.org/github.com/elastic/go-sysinfo + +go-sysinfo is a library for collecting system information. + +This project is a work in progress. + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go new file mode 100644 index 00000000..3e5794c3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package registry + +import ( + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/types" +) + +var ( + hostProvider HostProvider + processProvider ProcessProvider +) + +type HostProvider interface { + Host() (types.Host, error) +} + +type ProcessProvider interface { + Processes() ([]types.Process, error) + Process(pid int) (types.Process, error) + Self() (types.Process, error) +} + +func Register(provider interface{}) { + if h, ok := provider.(HostProvider); ok { + if hostProvider != nil { + panic(errors.Errorf("HostProvider already registered: %v", hostProvider)) + } + hostProvider = h + } + + if p, ok := provider.(ProcessProvider); ok { + if processProvider != nil { + panic(errors.Errorf("ProcessProvider already registered: %v", processProvider)) + } + processProvider = p + } +} + +func GetHostProvider() HostProvider { return hostProvider } +func GetProcessProvider() ProcessProvider { return processProvider } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin_amd64.go new file mode 100644 index 00000000..12368a94 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin_amd64.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package darwin + +import ( + "syscall" + + "github.com/pkg/errors" +) + +const hardwareMIB = "hw.machine" + +func Architecture() (string, error) { + arch, err := syscall.Sysctl(hardwareMIB) + if err != nil { + return "", errors.Wrap(err, "failed to get architecture") + } + + return arch, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin_amd64.go new file mode 100644 index 00000000..7fcab0e5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin_amd64.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build darwin,amd64,cgo + +package darwin + +import ( + "syscall" + "time" + + "github.com/pkg/errors" +) + +const kernBoottimeMIB = "kern.boottime" + +func BootTime() (time.Time, error) { + var tv syscall.Timeval + if err := sysctlByName(kernBoottimeMIB, &tv); err != nil { + return time.Time{}, errors.Wrap(err, "failed to get host uptime") + } + + bootTime := time.Unix(int64(tv.Sec), int64(tv.Usec)*int64(time.Microsecond)) + return bootTime, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/doc.go new file mode 100644 index 00000000..9249438a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package darwin implements the HostProvider and ProcessProvider interfaces +// for providing information about MacOS. +package darwin diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin_amd64.go new file mode 100644 index 00000000..a47969a4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin_amd64.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build darwin,amd64,cgo + +package darwin + +import ( + "os" + "time" + + "github.com/joeshaw/multierror" + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/providers/shared" + "github.com/elastic/go-sysinfo/types" +) + +func init() { + registry.Register(darwinSystem{}) +} + +type darwinSystem struct{} + +func (s darwinSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) CPUTime() (*types.CPUTimes, error) { + cpu, err := getHostCPULoadInfo() + if err != nil { + return nil, errors.Wrap(err, "failed to get host CPU usage") + } + + ticksPerSecond := time.Duration(getClockTicks()) + + return &types.CPUTimes{ + Timestamp: time.Now(), + User: time.Duration(cpu.User) * time.Second / ticksPerSecond, + System: time.Duration(cpu.System) * time.Second / ticksPerSecond, + Idle: time.Duration(cpu.Idle) * time.Second / ticksPerSecond, + Nice: time.Duration(cpu.Nice) * time.Second / ticksPerSecond, + }, nil +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + mem := &types.HostMemoryInfo{Timestamp: time.Now()} + + // Total physical memory. + if err := sysctlByName("hw.memsize", &mem.Total); err != nil { + return nil, errors.Wrap(err, "failed to get total physical memory") + } + + // Page size for computing byte totals. 
+ pageSizeBytes, err := getPageSize() + if err != nil { + return nil, errors.Wrap(err, "failed to get page size") + } + + // Virtual Memory Statistics + vmStat, err := getHostVMInfo64() + if err != nil { + return nil, errors.Wrap(err, "failed to get virtual memory statistics") + } + + // Swap + swap, err := getSwapUsage() + if err != nil { + return nil, errors.Wrap(err, "failed to get swap usage") + } + + inactiveBytes := uint64(vmStat.Inactive_count) * pageSizeBytes + purgeableBytes := uint64(vmStat.Purgeable_count) * pageSizeBytes + mem.Metrics = map[string]uint64{ + "active_bytes": uint64(vmStat.Active_count) * pageSizeBytes, + "compressed_bytes": uint64(vmStat.Compressor_page_count) * pageSizeBytes, + "compressions_bytes": uint64(vmStat.Compressions) * pageSizeBytes, // Cumulative compressions. + "copy_on_write_faults": vmStat.Cow_faults, + "decompressions_bytes": uint64(vmStat.Decompressions) * pageSizeBytes, // Cumulative decompressions. + "external_bytes": uint64(vmStat.External_page_count) * pageSizeBytes, // File Cache / File-backed pages + "inactive_bytes": inactiveBytes, + "internal_bytes": uint64(vmStat.Internal_page_count) * pageSizeBytes, // App Memory / Anonymous + "page_ins_bytes": uint64(vmStat.Pageins) * pageSizeBytes, + "page_outs_bytes": uint64(vmStat.Pageouts) * pageSizeBytes, + "purgeable_bytes": purgeableBytes, + "purged_bytes": uint64(vmStat.Purges) * pageSizeBytes, + "reactivated_bytes": uint64(vmStat.Reactivations) * pageSizeBytes, + "speculative_bytes": uint64(vmStat.Speculative_count) * pageSizeBytes, + "swap_ins_bytes": uint64(vmStat.Swapins) * pageSizeBytes, + "swap_outs_bytes": uint64(vmStat.Swapouts) * pageSizeBytes, + "throttled_bytes": uint64(vmStat.Throttled_count) * pageSizeBytes, + "translation_faults": vmStat.Faults, + "uncompressed_bytes": uint64(vmStat.Total_uncompressed_pages_in_compressor) * pageSizeBytes, + "wired_bytes": uint64(vmStat.Wire_count) * pageSizeBytes, + "zero_filled_bytes": uint64(vmStat.Zero_fill_count) * pageSizeBytes, + } + + // From Activity Monitor: Memory Used = App Memory (internal) + Wired + Compressed + // https://support.apple.com/en-us/HT201538 + mem.Used = uint64(vmStat.Internal_page_count+vmStat.Wire_count+vmStat.Compressor_page_count) * pageSizeBytes + mem.Free = uint64(vmStat.Free_count) * pageSizeBytes + mem.Available = mem.Free + inactiveBytes + purgeableBytes + mem.VirtualTotal = swap.Total + mem.VirtualUsed = swap.Used + mem.VirtualFree = swap.Available + + return mem, nil +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if errors.Cause(err) != types.ErrNotImplemented { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return &multierror.MultiError{Errors: r.errs} + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = v +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } 
+ h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + // TODO: call gethostuuid(uuid [16]byte, timespec) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin_amd64.go new file mode 100644 index 00000000..59a81b07 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin_amd64.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package darwin + +import ( + "syscall" + + "github.com/pkg/errors" +) + +const kernelReleaseMIB = "kern.osrelease" + +func KernelVersion() (string, error) { + version, err := syscall.Sysctl(kernelReleaseMIB) + if err != nil { + return "", errors.Wrap(err, "failed to get kernel version") + } + + return version, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin_amd64.go new file mode 100644 index 00000000..f61b05e3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin_amd64.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build darwin,amd64,cgo + +package darwin + +import ( + "github.com/pkg/errors" +) + +const hwMemsizeMIB = "hw.memsize" + +func MemTotal() (uint64, error) { + var size uint64 + if err := sysctlByName(hwMemsizeMIB, &size); err != nil { + return 0, errors.Wrap(err, "failed to get mem total") + } + + return size, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go new file mode 100644 index 00000000..b066f55a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package darwin + +import ( + "io/ioutil" + "strconv" + "strings" + + "github.com/pkg/errors" + "howett.net/plist" + + "github.com/elastic/go-sysinfo/types" +) + +const ( + systemVersionPlist = "/System/Library/CoreServices/SystemVersion.plist" + + plistProductName = "ProductName" + plistProductVersion = "ProductVersion" + plistProductBuildVersion = "ProductBuildVersion" +) + +func OperatingSystem() (*types.OSInfo, error) { + data, err := ioutil.ReadFile(systemVersionPlist) + if err != nil { + return nil, errors.Wrap(err, "failed to read plist file") + } + + return getOSInfo(data) +} + +func getOSInfo(data []byte) (*types.OSInfo, error) { + attrs := map[string]string{} + if _, err := plist.Unmarshal(data, &attrs); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal plist data") + } + + productName, found := attrs[plistProductName] + if !found { + return nil, errors.Errorf("plist key %v not found", plistProductName) + } + + version, found := attrs[plistProductVersion] + if !found { + return nil, errors.Errorf("plist key %v not found", plistProductVersion) + } + + build, found := attrs[plistProductBuildVersion] + if !found { + return nil, errors.Errorf("plist key %v not found", plistProductBuildVersion) + } + + var major, minor, patch int + for i, v := range strings.SplitN(version, ".", 3) { + switch i { + case 0: + major, _ = strconv.Atoi(v) + case 1: + minor, _ = strconv.Atoi(v) + case 2: + patch, _ = strconv.Atoi(v) + default: + break + } + } + + return &types.OSInfo{ + Family: "darwin", + Platform: "darwin", + Name: productName, + Version: version, + Major: major, + Minor: minor, + Patch: patch, + Build: build, + }, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin_amd64.go new file mode 100644 index 00000000..be649598 --- /dev/null +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin_amd64.go @@ -0,0 +1,207 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build darwin,amd64,cgo + +package darwin + +// #cgo LDFLAGS:-lproc +// #include +// #include +import "C" + +import ( + "bytes" + "encoding/binary" + "os" + "time" + "unsafe" + + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/types" +) + +//go:generate sh -c "go tool cgo -godefs defs_darwin.go > ztypes_darwin_amd64.go" + +func (s darwinSystem) Processes() ([]types.Process, error) { + return nil, nil +} + +func (s darwinSystem) Process(pid int) (types.Process, error) { + p := process{pid: pid} + + return &p, nil +} + +func (s darwinSystem) Self() (types.Process, error) { + return s.Process(os.Getpid()) +} + +type process struct { + pid int + cwd string + exe string + args []string + env map[string]string + task procTaskAllInfo + vnode procVnodePathInfo +} + +func (p *process) Info() (types.ProcessInfo, error) { + if err := getProcTaskAllInfo(p.pid, &p.task); err != nil { + return types.ProcessInfo{}, err + } + + if err := getProcVnodePathInfo(p.pid, &p.vnode); err != nil { + return types.ProcessInfo{}, err + } + + if err := kern_procargs(p.pid, p); err != nil { + return types.ProcessInfo{}, err + } + + return types.ProcessInfo{ + Name: int8SliceToString(p.task.Pbsd.Pbi_name[:]), + PID: p.pid, + PPID: int(p.task.Pbsd.Pbi_ppid), + CWD: int8SliceToString(p.vnode.Cdir.Path[:]), + Exe: p.exe, + Args: p.args, + StartTime: time.Unix(int64(p.task.Pbsd.Pbi_start_tvsec), + int64(p.task.Pbsd.Pbi_start_tvusec)*int64(time.Microsecond)), + }, nil +} + +func (p *process) Environment() (map[string]string, error) { + return p.env, nil +} + +func (p *process) CPUTime() types.CPUTimes { + return types.CPUTimes{ + Timestamp: time.Now(), + User: time.Duration(p.task.Ptinfo.Total_user), + System: time.Duration(p.task.Ptinfo.Total_system), + } +} + +func (p *process) Memory() types.MemoryInfo { + return types.MemoryInfo{ + Timestamp: time.Now(), + Virtual: p.task.Ptinfo.Virtual_size, + Resident: p.task.Ptinfo.Resident_size, + Metrics: map[string]uint64{ + "page_ins": uint64(p.task.Ptinfo.Pageins), + "page_faults": uint64(p.task.Ptinfo.Faults), + }, + } +} + +func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if n != size { + return errors.New("failed to read process info with proc_pidinfo") + } + + return nil +} + +func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDVNODEPATHINFO, 0, ptr, size) + if 
n != size { + return errors.New("failed to read vnode info with proc_pidinfo") + } + + return nil +} + +var nullTerminator = []byte{0} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, p *process) error { + mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} + var data []byte + if err := sysctl(mib, &data); err != nil { + return nil + } + buf := bytes.NewBuffer(data) + + // argc + var argc int32 + if err := binary.Read(buf, binary.LittleEndian, &argc); err != nil { + return err + } + + // exe + lines := bytes.Split(buf.Bytes(), nullTerminator) + p.exe = string(lines[0]) + lines = lines[1:] + + // skip nulls + for len(lines) > 0 { + if len(lines[0]) == 0 { + lines = lines[1:] + continue + } + break + } + + // args + for i := 0; i < int(argc); i++ { + p.args = append(p.args, string(lines[0])) + lines = lines[1:] + } + + // env vars + env := make(map[string]string, len(lines)) + for _, l := range lines { + if len(l) == 0 { + break + } + parts := bytes.SplitN(l, []byte{'='}, 2) + if len(parts) != 2 { + return errors.New("failed to parse") + } + key := string(parts[0]) + value := string(parts[1]) + env[key] = value + } + p.env = env + + return nil +} + +func int8SliceToString(s []int8) string { + buf := bytes.NewBuffer(make([]byte, len(s))) + buf.Reset() + + for _, b := range s { + if b == 0 { + break + } + buf.WriteByte(byte(b)) + } + return buf.String() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin_amd64.go new file mode 100644 index 00000000..6847b63d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin_amd64.go @@ -0,0 +1,240 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build darwin,amd64,cgo + +package darwin + +/* +#cgo LDFLAGS:-lproc +#include +#include +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "sync" + "syscall" + "unsafe" + + "github.com/pkg/errors" +) + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. +var _zero uintptr + +// Buffer Pool + +var bufferPool = sync.Pool{ + New: func() interface{} { + return &poolMem{ + buf: make([]byte, argMax), + } + }, +} + +type poolMem struct { + buf []byte + pool *sync.Pool +} + +func getPoolMem() *poolMem { + pm := bufferPool.Get().(*poolMem) + pm.buf = pm.buf[0:cap(pm.buf)] + pm.pool = &bufferPool + return pm +} + +func (m *poolMem) Release() { m.pool.Put(m) } + +// Common errors. 
+ +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return e +} + +func _sysctl(mib []C.int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// Translate "kern.hostname" to []_C_int{0,1,2,3}. +func nametomib(name string) (mib []C.int, err error) { + const siz = unsafe.Sizeof(mib[0]) + + // NOTE(rsc): It seems strange to set the buffer to have + // size CTL_MAXNAME+2 but use only CTL_MAXNAME + // as the size. I don't know why the +2 is here, but the + // kernel uses +2 for its own implementation of this function. + // I am scared that if we don't include the +2 here, the kernel + // will silently write 2 words farther than we specify + // and we'll get memory corruption. + var buf [C.CTL_MAXNAME + 2]C.int + n := uintptr(C.CTL_MAXNAME) * siz + + p := (*byte)(unsafe.Pointer(&buf[0])) + bytes, err := syscall.ByteSliceFromString(name) + if err != nil { + return nil, err + } + + // Magic sysctl: "setting" 0.3 to a string name + // lets you read back the array of integers form. + if err = _sysctl([]C.int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { + return nil, err + } + return buf[0 : n/siz], nil +} + +func sysctl(mib []C.int, value interface{}) error { + mem := getPoolMem() + defer mem.Release() + + size := uintptr(len(mem.buf)) + if err := _sysctl(mib, &mem.buf[0], &size, nil, 0); err != nil { + return err + } + data := mem.buf[0:size] + + switch v := value.(type) { + case *[]byte: + out := make([]byte, len(data)) + copy(out, data) + *v = out + return nil + default: + return binary.Read(bytes.NewReader(data), binary.LittleEndian, v) + } +} + +func sysctlByName(name string, out interface{}) error { + mib, err := nametomib(name) + if err != nil { + return err + } + + return sysctl(mib, out) +} + +type cpuUsage struct { + User uint32 + System uint32 + Idle uint32 + Nice uint32 +} + +func getHostCPULoadInfo() (*cpuUsage, error) { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpu cpuUsage + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpu)), + &count) + + if status != C.KERN_SUCCESS { + return nil, errors.Errorf("host_statistics returned status %d", status) + } + + return &cpu, nil +} + +// getClockTicks returns the number of click ticks in one jiffie. 
+func getClockTicks() int { + return int(C.sysconf(C._SC_CLK_TCK)) +} + +func getHostVMInfo64() (*vmStatistics64Data, error) { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO64_COUNT + + var vmStat vmStatistics64Data + status := C.host_statistics64( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO64, + C.host_info_t(unsafe.Pointer(&vmStat)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics64 returned status %d", status) + } + + return &vmStat, nil +} + +func getPageSize() (uint64, error) { + var pageSize vmSize + status := C.host_page_size( + C.host_t(C.mach_host_self()), + (*C.vm_size_t)(unsafe.Pointer(&pageSize))) + if status != C.KERN_SUCCESS { + return 0, errors.Errorf("host_page_size returned status %d", status) + } + + return uint64(pageSize), nil +} + +// From sysctl.h - xsw_usage. +type swapUsage struct { + Total uint64 + Available uint64 + Used uint64 + PageSize uint64 +} + +const vmSwapUsageMIB = "vm.swapusage" + +func getSwapUsage() (*swapUsage, error) { + var swap swapUsage + if err := sysctlByName(vmSwapUsageMIB, &swap); err != nil { + return nil, err + } + return &swap, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/ztypes_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/ztypes_darwin_amd64.go new file mode 100644 index 00000000..ff1855ce --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/darwin/ztypes_darwin_amd64.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package darwin + +type processState uint32 + +const ( + stateSIDL processState = iota + 1 + stateRun + stateSleep + stateStop + stateZombie +) + +const argMax = 0x40000 + +type bsdInfo struct { + Pbi_flags uint32 + Pbi_status uint32 + Pbi_xstatus uint32 + Pbi_pid uint32 + Pbi_ppid uint32 + Pbi_uid uint32 + Pbi_gid uint32 + Pbi_ruid uint32 + Pbi_rgid uint32 + Pbi_svuid uint32 + Pbi_svgid uint32 + Rfu_1 uint32 + Pbi_comm [16]int8 + Pbi_name [32]int8 + Pbi_nfiles uint32 + Pbi_pgid uint32 + Pbi_pjobc uint32 + E_tdev uint32 + E_tpgid uint32 + Pbi_nice int32 + Pbi_start_tvsec uint64 + Pbi_start_tvusec uint64 +} + +type procTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + +type procTaskAllInfo struct { + Pbsd bsdInfo + Ptinfo procTaskInfo +} + +type vinfoStat struct { + Dev uint32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Atime int64 + Atimensec int64 + Mtime int64 + Mtimensec int64 + Ctime int64 + Ctimensec int64 + Birthtime int64 + Birthtimensec int64 + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Rdev uint32 + Qspare [2]int64 +} + +type fsid struct { + Val [2]int32 +} + +type vnodeInfo struct { + Stat vinfoStat + Type int32 + Pad int32 + Fsid fsid +} + +type vnodeInfoPath struct { + Vi vnodeInfo + Path [1024]int8 +} + +type procVnodePathInfo struct { + Cdir vnodeInfoPath + Rdir vnodeInfoPath +} + +type vmStatisticsData struct { + Free_count uint32 + Active_count uint32 + Inactive_count uint32 + Wire_count uint32 + Zero_fill_count uint32 + Reactivations uint32 + Pageins uint32 + Pageouts uint32 + 
Faults uint32 + Cow_faults uint32 + Lookups uint32 + Hits uint32 + Purgeable_count uint32 + Purges uint32 + Speculative_count uint32 +} + +type vmStatistics64Data struct { + Free_count uint32 + Active_count uint32 + Inactive_count uint32 + Wire_count uint32 + Zero_fill_count uint64 + Reactivations uint64 + Pageins uint64 + Pageouts uint64 + Faults uint64 + Cow_faults uint64 + Lookups uint64 + Hits uint64 + Purges uint64 + Purgeable_count uint32 + Speculative_count uint32 + Decompressions uint64 + Compressions uint64 + Swapins uint64 + Swapouts uint64 + Compressor_page_count uint32 + Throttled_count uint32 + External_page_count uint32 + Internal_page_count uint32 + Total_uncompressed_pages_in_compressor uint64 +} + +type vmSize uint64 + +const ( + cpuStateUser = 0x0 + cpuStateSystem = 0x1 + cpuStateIdle = 0x2 + cpuStateNice = 0x3 +) + +type hostCPULoadInfo struct { + Ticks [4]uint32 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go new file mode 100644 index 00000000..a07fb936 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "syscall" + + "github.com/pkg/errors" +) + +func Architecture() (string, error) { + var uname syscall.Utsname + if err := syscall.Uname(&uname); err != nil { + return "", errors.Wrap(err, "architecture") + } + + data := make([]byte, 0, len(uname.Machine)) + for _, v := range uname.Machine { + if v == 0 { + break + } + data = append(data, byte(v)) + } + + return string(data), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go new file mode 100644 index 00000000..96e7d003 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "sync" + "time" + + "github.com/prometheus/procfs" +) + +var ( + bootTimeValue time.Time // Cached boot time. + bootTimeLock sync.Mutex // Lock that guards access to bootTime. +) + +func bootTime(fs procfs.FS) (time.Time, error) { + bootTimeLock.Lock() + defer bootTimeLock.Unlock() + + if !bootTimeValue.IsZero() { + return bootTimeValue, nil + } + + stat, err := fs.NewStat() + if err != nil { + return time.Time{}, err + } + + bootTimeValue = time.Unix(int64(stat.BootTime), 0) + return bootTimeValue, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go new file mode 100644 index 00000000..f0d23d66 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "strconv" + + "github.com/elastic/go-sysinfo/types" +) + +// capabilityNames is mapping of capability constant values to names. 
+// +// Generated with: +// curl -s https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/capability.h | \ +// grep -P '^#define CAP_\w+\s+\d+' | perl -pe 's/#define (\w+)\s+(\d+)/\2: "\1",/g' +var capabilityNames = map[int]string{ + 0: "chown", + 1: "dac_override", + 2: "dac_read_search", + 3: "fowner", + 4: "fsetid", + 5: "kill", + 6: "setgid", + 7: "setuid", + 8: "setpcap", + 9: "linux_immutable", + 10: "net_bind_service", + 11: "net_broadcast", + 12: "net_admin", + 13: "net_raw", + 14: "ipc_lock", + 15: "ipc_owner", + 16: "sys_module", + 17: "sys_rawio", + 18: "sys_chroot", + 19: "sys_ptrace", + 20: "sys_pacct", + 21: "sys_admin", + 22: "sys_boot", + 23: "sys_nice", + 24: "sys_resource", + 25: "sys_time", + 26: "sys_tty_config", + 27: "mknod", + 28: "lease", + 29: "audit_write", + 30: "audit_control", + 31: "setfcap", + 32: "mac_override", + 33: "mac_admin", + 34: "syslog", + 35: "wake_alarm", + 36: "block_suspend", + 37: "audit_read", +} + +func capabilityName(num int) string { + name, found := capabilityNames[num] + if found { + return name + } + + return strconv.Itoa(num) +} + +func readCapabilities(content []byte) (*types.CapabilityInfo, error) { + var cap types.CapabilityInfo + + err := parseKeyValue(content, ":", func(key, value []byte) error { + var err error + switch string(key) { + case "CapInh": + cap.Inheritable, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapPrm": + cap.Permitted, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapEff": + cap.Effective, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapBnd": + cap.Bounding, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapAmb": + cap.Ambient, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + } + return nil + }) + + return &cap, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go new file mode 100644 index 00000000..3ab01911 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bufio" + "bytes" + "io/ioutil" + + "github.com/pkg/errors" +) + +const procOneCgroup = "/proc/1/cgroup" + +// IsContainerized returns true if this process is containerized. 
+func IsContainerized() (bool, error) { + data, err := ioutil.ReadFile(procOneCgroup) + if err != nil { + return false, errors.Wrap(err, "failed to read process cgroups") + } + + return isContainerizedCgroup(data) +} + +func isContainerizedCgroup(data []byte) (bool, error) { + s := bufio.NewScanner(bytes.NewReader(data)) + for n := 0; s.Scan(); n++ { + line := s.Bytes() + if len(line) == 0 || line[len(line)-1] == '/' { + continue + } + + if bytes.HasSuffix(line, []byte("init.scope")) { + return false, nil + } + } + + return true, s.Err() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/doc.go new file mode 100644 index 00000000..3b564c8e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package linux implements the HostProvider and ProcessProvider interfaces +// for providing information about Linux. +package linux diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go new file mode 100644 index 00000000..75730139 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
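The cgroup heuristic above treats PID 1 as the tell: on a normal systemd host /proc/1/cgroup contains an entry ending in init.scope (or bare "/" paths), while inside a container the paths point at something like /docker/<id>. A test-style sketch of isContainerizedCgroup, assuming it sits in a _test.go file in the same package; both cgroup samples are made up for illustration.

package linux

import "testing"

// Sketch of how the cgroup heuristic behaves for two illustrative
// /proc/1/cgroup contents (sample data, not captured from a real host).
func TestIsContainerizedCgroupSketch(t *testing.T) {
	hostCgroup := []byte("12:cpu,cpuacct:/\n1:name=systemd:/init.scope\n")
	containerCgroup := []byte("12:cpu,cpuacct:/docker/2fc1b3\n1:name=systemd:/docker/2fc1b3\n")

	if v, err := isContainerizedCgroup(hostCgroup); err != nil || v {
		t.Errorf("expected not containerized, got %v (err=%v)", v, err)
	}
	if v, err := isContainerizedCgroup(containerCgroup); err != nil || !v {
		t.Errorf("expected containerized, got %v (err=%v)", v, err)
	}
}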
+ +package linux + +import ( + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/joeshaw/multierror" + "github.com/pkg/errors" + "github.com/prometheus/procfs" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/providers/shared" + "github.com/elastic/go-sysinfo/types" +) + +func init() { + registry.Register(newLinuxSystem("")) +} + +type linuxSystem struct { + procFS procfs.FS +} + +func newLinuxSystem(hostFS string) linuxSystem { + return linuxSystem{ + procFS: procfs.FS(filepath.Join(hostFS, procfs.DefaultMountPoint)), + } +} + +func (s linuxSystem) Host() (types.Host, error) { + return newHost(s.procFS) +} + +type host struct { + procFS procfs.FS + stat procfs.Stat + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + content, err := ioutil.ReadFile(h.procFS.Path("meminfo")) + if err != nil { + return nil, err + } + + return parseMemInfo(content) +} + +func (h *host) CPUTime() (*types.CPUTimes, error) { + stat, err := h.procFS.NewStat() + if err != nil { + return nil, err + } + + return &types.CPUTimes{ + Timestamp: time.Now(), + User: time.Duration(stat.CPUTotal.User * float64(time.Second)), + System: time.Duration(stat.CPUTotal.System * float64(time.Second)), + Idle: time.Duration(stat.CPUTotal.Idle * float64(time.Second)), + IOWait: time.Duration(stat.CPUTotal.Iowait * float64(time.Second)), + IRQ: time.Duration(stat.CPUTotal.IRQ * float64(time.Second)), + Nice: time.Duration(stat.CPUTotal.Nice * float64(time.Second)), + SoftIRQ: time.Duration(stat.CPUTotal.SoftIRQ * float64(time.Second)), + Steal: time.Duration(stat.CPUTotal.Steal * float64(time.Second)), + }, nil +} + +func newHost(fs procfs.FS) (*host, error) { + stat, err := fs.NewStat() + if err != nil { + return nil, errors.Wrap(err, "failed to read proc stat") + } + + h := &host{stat: stat, procFS: fs} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.containerized(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if errors.Cause(err) != types.ErrNotImplemented { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return &multierror.MultiError{Errors: r.errs} + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := bootTime(h.procFS) + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) containerized(h *host) { + v, err := IsContainerized() + if r.addErr(err) { + return + } + h.info.Containerized = &v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = v +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) 
{ + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go new file mode 100644 index 00000000..82b6019a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "syscall" + + "github.com/pkg/errors" +) + +func KernelVersion() (string, error) { + var uname syscall.Utsname + if err := syscall.Uname(&uname); err != nil { + return "", errors.Wrap(err, "kernel version") + } + + data := make([]byte, 0, len(uname.Release)) + for _, v := range uname.Release { + if v == 0 { + break + } + data = append(data, byte(v)) + } + + return string(data), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go new file mode 100644 index 00000000..296acde5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
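newHost above runs one small probe per host field and funnels failures through reader.addErr, which drops types.ErrNotImplemented (an optional probe on this platform) and accumulates everything else into a single multierror. A self-contained sketch of that error-accumulation pattern; the collector name and sentinel error below are made up for illustration, not taken from the library.

package main

import (
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("not implemented") // stand-in for types.ErrNotImplemented

type collector struct{ errs []error }

// add records err unless it is the "not implemented" sentinel. It reports
// whether the caller should skip assigning the probed value.
func (c *collector) add(err error) bool {
	if err != nil {
		if err != errNotImplemented {
			c.errs = append(c.errs, err)
		}
		return true
	}
	return false
}

func (c *collector) Err() error {
	if len(c.errs) > 0 {
		return fmt.Errorf("%d probe(s) failed: %v", len(c.errs), c.errs)
	}
	return nil
}

func main() {
	c := &collector{}
	c.add(errNotImplemented)               // optional probe: silently ignored
	c.add(errors.New("cannot read /proc")) // real failure: kept
	c.add(nil)                             // success: nothing recorded
	fmt.Println(c.Err())                   // 1 probe(s) failed: [cannot read /proc]
}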
+ +package linux + +import ( + "bytes" + "io/ioutil" + "os" + + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/types" +) + +func MachineID() (string, error) { + id, err := ioutil.ReadFile("/etc/machine-id") + if os.IsNotExist(err) { + return "", types.ErrNotImplemented + } + id = bytes.TrimSpace(id) + return string(id), errors.Wrap(err, "failed to read machine-id") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go new file mode 100644 index 00000000..2a330d69 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bytes" + "strconv" + "time" + + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/types" +) + +func parseMemInfo(content []byte) (*types.HostMemoryInfo, error) { + memInfo := &types.HostMemoryInfo{ + Timestamp: time.Now().UTC(), + Metrics: map[string]uint64{}, + } + + hasAvailable := false + err := parseKeyValue(content, ":", func(key, value []byte) error { + num, err := parseBytesOrNumber(value) + if err != nil { + return errors.Wrapf(err, "failed to parse %v value of %v", string(key), string(value)) + } + + k := string(key) + switch k { + case "MemTotal": + memInfo.Total = num + case "MemAvailable": + hasAvailable = true + memInfo.Available = num + case "MemFree": + memInfo.Free = num + case "SwapTotal": + memInfo.VirtualTotal = num + case "SwapFree": + memInfo.VirtualFree = num + default: + memInfo.Metrics[k] = num + } + + return nil + }) + if err != nil { + return nil, err + } + + memInfo.Used = memInfo.Total - memInfo.Free + memInfo.VirtualUsed = memInfo.VirtualTotal - memInfo.VirtualFree + + // MemAvailable was added in kernel 3.14. + if !hasAvailable { + // Linux uses this for the calculation (but we are using a simpler calculation). 
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 + memInfo.Available = memInfo.Free + memInfo.Metrics["Buffers"] + memInfo.Metrics["Cached"] + } + + return memInfo, nil +} + +func parseBytesOrNumber(data []byte) (uint64, error) { + parts := bytes.Fields(data) + + if len(parts) == 0 { + return 0, errors.New("empty value") + } + + num, err := strconv.ParseUint(string(parts[0]), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "failed to parse value") + } + + var multiplier uint64 = 1 + if len(parts) >= 2 { + switch string(parts[1]) { + case "kB": + multiplier = 1024 + default: + return 0, errors.Errorf("unhandled unit %v", string(parts[1])) + } + } + + return num * multiplier, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go new file mode 100644 index 00000000..0575848e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go @@ -0,0 +1,263 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bufio" + "bytes" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/types" +) + +const ( + osRelease = "/etc/os-release" + lsbRelease = "/etc/lsb-release" + distribRelease = "/etc/*-release" + versionGrok = `(?P(?P[0-9]+)\.?(?P[0-9]+)?\.?(?P\w+)?)(?: \((?P\w+)\))?` +) + +var ( + // distribReleaseRegexp parses the /etc/-release file. See man lsb-release. + distribReleaseRegexp = regexp.MustCompile(`(?P[\w]+).* ` + versionGrok) + + // versionRegexp parses version numbers (e.g. 6 or 6.1 or 6.1.0 or 6.1.0_20150102). + versionRegexp = regexp.MustCompile(versionGrok) +) + +var familyMap = map[string][]string{ + "redhat": {"redhat", "fedora", "centos", "scientific", "oraclelinux", "amazon"}, + "debian": {"debian", "ubuntu"}, + "suse": {"suse", "sles", "opensuse"}, +} + +var platformToFamilyMap map[string]string + +func init() { + platformToFamilyMap = map[string]string{} + for family, platformList := range familyMap { + for _, platform := range platformList { + platformToFamilyMap[platform] = family + } + } +} + +func OperatingSystem() (*types.OSInfo, error) { + return getOSInfo("") +} + +func getOSInfo(baseDir string) (*types.OSInfo, error) { + osInfo, err := getOSRelease(baseDir) + if err != nil { + // Fallback + return findDistribRelease(baseDir) + } + + // For the redhat family, enrich version info with data from + // /etc/[distrib]-release because the minor and patch info isn't always + // present in os-release. 
+ if osInfo.Family != "redhat" { + return osInfo, nil + } + + distInfo, err := findDistribRelease(baseDir) + if err != nil { + return osInfo, err + } + osInfo.Major = distInfo.Major + osInfo.Minor = distInfo.Minor + osInfo.Patch = distInfo.Patch + osInfo.Codename = distInfo.Codename + return osInfo, nil +} + +func getOSRelease(baseDir string) (*types.OSInfo, error) { + lsbRel, _ := ioutil.ReadFile(filepath.Join(baseDir, lsbRelease)) + + osRel, err := ioutil.ReadFile(filepath.Join(baseDir, osRelease)) + if err != nil { + return nil, err + } + if len(osRel) == 0 { + return nil, errors.Errorf("%v is empty", osRelease) + } + + return parseOSRelease(append(lsbRel, osRel...)) +} + +func parseOSRelease(content []byte) (*types.OSInfo, error) { + fields := map[string]string{} + + s := bufio.NewScanner(bytes.NewReader(content)) + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + + // Skip blank lines and comments. + if len(line) == 0 || bytes.HasPrefix(line, []byte("#")) { + continue + } + + parts := bytes.SplitN(s.Bytes(), []byte("="), 2) + if len(parts) != 2 { + continue + } + + key := string(bytes.TrimSpace(parts[0])) + val := string(bytes.TrimSpace(parts[1])) + fields[key] = val + + // Trim quotes. + val, err := strconv.Unquote(val) + if err == nil { + fields[key] = strings.TrimSpace(val) + } + } + + if s.Err() != nil { + return nil, s.Err() + } + + return makeOSInfo(fields) +} + +func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { + os := &types.OSInfo{ + Platform: osRelease["ID"], + Name: osRelease["NAME"], + Version: osRelease["VERSION"], + Build: osRelease["BUILD_ID"], + Codename: osRelease["VERSION_CODENAME"], + } + + if os.Codename == "" { + // Some OSes uses their own CODENAME keys (e.g UBUNTU_CODENAME) or we + // can get the DISTRIB_CODENAME value from the lsb-release data. + for k, v := range osRelease { + if strings.Contains(k, "CODENAME") { + os.Codename = v + break + } + } + } + + if os.Platform == "" { + // Fallback to the first word of the NAME field. + parts := strings.SplitN(os.Name, " ", 2) + if len(parts) > 0 { + os.Platform = strings.ToLower(parts[0]) + } + } + + if os.Version != "" { + // Try parsing info from the version. + keys := versionRegexp.SubexpNames() + for i, m := range versionRegexp.FindStringSubmatch(os.Version) { + switch keys[i] { + case "major": + os.Major, _ = strconv.Atoi(m) + case "minor": + os.Minor, _ = strconv.Atoi(m) + case "patch": + os.Patch, _ = strconv.Atoi(m) + case "codename": + if os.Codename == "" { + os.Codename = m + } + } + } + } + + os.Family = platformToFamilyMap[strings.ToLower(os.Platform)] + return os, nil +} + +func findDistribRelease(baseDir string) (*types.OSInfo, error) { + matches, err := filepath.Glob(filepath.Join(baseDir, distribRelease)) + if err != nil { + return nil, err + } + for _, path := range matches { + if strings.HasSuffix(path, osRelease) || strings.HasSuffix(path, lsbRelease) { + continue + } + + info, err := os.Lstat(path) + if err != nil || !info.Mode().IsRegular() || info.Size() == 0 { + continue + } + + return getDistribRelease(path) + } + + return nil, errors.New("no /etc/-release file found") +} + +func getDistribRelease(file string) (*types.OSInfo, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + parts := bytes.SplitN(data, []byte("\n"), 2) + if len(parts) != 2 { + return nil, errors.Errorf("failed to parse %v", file) + } + + // Use distrib as platform name. 
+ var platform string + if parts := strings.SplitN(filepath.Base(file), "-", 2); len(parts) > 0 { + platform = strings.ToLower(parts[0]) + } + + return parseDistribRelease(platform, parts[0]) +} + +func parseDistribRelease(platform string, content []byte) (*types.OSInfo, error) { + var ( + line = string(bytes.TrimSpace(content)) + keys = distribReleaseRegexp.SubexpNames() + os = &types.OSInfo{Platform: platform} + ) + + for i, m := range distribReleaseRegexp.FindStringSubmatch(line) { + switch keys[i] { + case "name": + os.Name = m + case "version": + os.Version = m + case "major": + os.Major, _ = strconv.Atoi(m) + case "minor": + os.Minor, _ = strconv.Atoi(m) + case "patch": + os.Patch, _ = strconv.Atoi(m) + case "codename": + os.Version += " (" + m + ")" + os.Codename = m + } + } + + os.Family = platformToFamilyMap[strings.ToLower(os.Platform)] + return os, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go new file mode 100644 index 00000000..d2ca188b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go @@ -0,0 +1,212 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + "time" + + "github.com/prometheus/procfs" + + "github.com/elastic/go-sysinfo/types" +) + +const userHz = 100 + +func (s linuxSystem) Processes() ([]types.Process, error) { + procs, err := s.procFS.AllProcs() + if err != nil { + return nil, err + } + s.procFS.Path() + + processes := make([]types.Process, 0, len(procs)) + for _, proc := range procs { + processes = append(processes, &process{Proc: proc, fs: s.procFS}) + } + return processes, nil +} + +func (s linuxSystem) Process(pid int) (types.Process, error) { + proc, err := s.procFS.NewProc(pid) + if err != nil { + return nil, err + } + + return &process{Proc: proc, fs: s.procFS}, nil +} + +func (s linuxSystem) Self() (types.Process, error) { + proc, err := s.procFS.Self() + if err != nil { + return nil, err + } + + return &process{Proc: proc, fs: s.procFS}, nil +} + +type process struct { + procfs.Proc + fs procfs.FS + info *types.ProcessInfo +} + +func (p *process) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) 
+} + +func (p *process) CWD() (string, error) { + // TODO: add CWD to procfs + cwd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return cwd, err +} + +func (p *process) Info() (types.ProcessInfo, error) { + if p.info != nil { + return *p.info, nil + } + + stat, err := p.NewStat() + if err != nil { + return types.ProcessInfo{}, err + } + + exe, err := p.Executable() + if err != nil { + return types.ProcessInfo{}, err + } + + args, err := p.CmdLine() + if err != nil { + return types.ProcessInfo{}, err + } + + cwd, err := p.CWD() + if err != nil { + return types.ProcessInfo{}, err + } + + bootTime, err := bootTime(p.fs) + if err != nil { + return types.ProcessInfo{}, err + } + + p.info = &types.ProcessInfo{ + Name: stat.Comm, + PID: p.PID, + PPID: stat.PPID, + CWD: cwd, + Exe: exe, + Args: args, + StartTime: bootTime.Add(ticksToDuration(stat.Starttime)), + } + + return *p.info, nil +} + +func (p *process) Memory() types.MemoryInfo { + stat, err := p.NewStat() + if err != nil { + return types.MemoryInfo{} + } + + return types.MemoryInfo{ + Timestamp: time.Now(), + Resident: uint64(stat.ResidentMemory()), + Virtual: uint64(stat.VirtualMemory()), + } +} + +func (p *process) CPUTime() types.CPUTimes { + stat, err := p.NewStat() + if err != nil { + return types.CPUTimes{} + } + + fmt.Println("UTime", stat.UTime, "STime", stat.STime) + return types.CPUTimes{ + Timestamp: time.Now(), + User: ticksToDuration(uint64(stat.UTime)), + System: ticksToDuration(uint64(stat.STime)), + } +} + +func (p *process) FileDescriptors() ([]string, error) { + return p.Proc.FileDescriptorTargets() +} + +func (p *process) FileDescriptorCount() (int, error) { + return p.Proc.FileDescriptorsLen() +} + +func (p *process) Environment() (map[string]string, error) { + // TODO: add Environment to procfs + content, err := ioutil.ReadFile(p.path("environ")) + if err != nil { + return nil, err + } + + env := map[string]string{} + pairs := bytes.Split(content, []byte{0}) + for _, kv := range pairs { + parts := bytes.SplitN(kv, []byte{'='}, 2) + if len(parts) != 2 { + continue + } + + key := string(bytes.TrimSpace(parts[0])) + if key == "" { + continue + } + + env[key] = string(parts[1]) + } + + return env, nil +} + +func (p *process) Seccomp() (*types.SeccompInfo, error) { + content, err := ioutil.ReadFile(p.path("status")) + if err != nil { + return nil, err + } + + return readSeccompFields(content) +} + +func (p *process) Capabilities() (*types.CapabilityInfo, error) { + content, err := ioutil.ReadFile(p.path("status")) + if err != nil { + return nil, err + } + + return readCapabilities(content) +} + +func ticksToDuration(ticks uint64) time.Duration { + seconds := float64(ticks) / float64(userHz) * float64(time.Second) + return time.Duration(int64(seconds)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go new file mode 100644 index 00000000..bc40c072 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "strconv" + + "github.com/elastic/go-sysinfo/types" +) + +type SeccompMode uint8 + +const ( + SeccompModeDisabled SeccompMode = iota + SeccompModeStrict + SeccompModeFilter +) + +func (m SeccompMode) String() string { + switch m { + case SeccompModeDisabled: + return "disabled" + case SeccompModeStrict: + return "strict" + case SeccompModeFilter: + return "filter" + default: + return strconv.Itoa(int(m)) + } +} + +func readSeccompFields(content []byte) (*types.SeccompInfo, error) { + var seccomp types.SeccompInfo + + err := parseKeyValue(content, ":", func(key, value []byte) error { + switch string(key) { + case "Seccomp": + mode, err := strconv.ParseUint(string(value), 10, 8) + if err != nil { + return err + } + seccomp.Mode = SeccompMode(mode).String() + case "NoNewPrivs": + noNewPrivs, err := strconv.ParseBool(string(value)) + if err != nil { + return err + } + seccomp.NoNewPrivs = &noNewPrivs + } + return nil + }) + + return &seccomp, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go new file mode 100644 index 00000000..90e4506e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
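readSeccompFields above pulls the Seccomp and NoNewPrivs lines out of /proc/<pid>/status via parseKeyValue. A test-style sketch, assuming it lives in a _test.go file in the same package; the status snippet is trimmed down and illustrative.

package linux

import "testing"

// Sketch: feed readSeccompFields a reduced /proc/<pid>/status snippet
// (values are illustrative) and check the decoded fields.
func TestReadSeccompFieldsSketch(t *testing.T) {
	status := []byte("Name:\tbash\nNoNewPrivs:\t1\nSeccomp:\t2\n")

	info, err := readSeccompFields(status)
	if err != nil {
		t.Fatal(err)
	}
	if info.Mode != "filter" {
		t.Errorf("expected filter mode, got %q", info.Mode)
	}
	if info.NoNewPrivs == nil || !*info.NoNewPrivs {
		t.Error("expected NoNewPrivs to be true")
	}
}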
+ +package linux + +import ( + "bufio" + "bytes" + "io/ioutil" + "strconv" + + "github.com/pkg/errors" +) + +func parseKeyValue(content []byte, separator string, callback func(key, value []byte) error) error { + sc := bufio.NewScanner(bytes.NewReader(content)) + for sc.Scan() { + parts := bytes.SplitN(sc.Bytes(), []byte(separator), 2) + if len(parts) != 2 { + continue + } + + if err := callback(parts[0], bytes.TrimSpace(parts[1])); err != nil { + return err + } + } + + return sc.Err() +} + +func findValue(filename, separator, key string) (string, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + + var line []byte + sc := bufio.NewScanner(bytes.NewReader(content)) + for sc.Scan() { + if bytes.HasPrefix(sc.Bytes(), []byte(key)) { + line = sc.Bytes() + break + } + } + if len(line) == 0 { + return "", errors.Errorf("%v not found", key) + } + + parts := bytes.SplitN(line, []byte(separator), 2) + if len(parts) != 2 { + return "", errors.Errorf("unexpected line format for '%v'", string(line)) + } + + return string(bytes.TrimSpace(parts[1])), nil +} + +func decodeBitMap(s string, lookupName func(int) string) ([]string, error) { + mask, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + var names []string + for i := 0; i < 64; i++ { + bit := mask & (1 << uint(i)) + if bit > 0 { + names = append(names, lookupName(i)) + } + } + + return names, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go new file mode 100644 index 00000000..2f19455d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
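decodeBitMap above interprets a hexadecimal bit mask, such as the Cap* lines in /proc/<pid>/status, and maps each set bit through a lookup function like capabilityName. A small test-style sketch in the same package; the mask value is chosen for illustration.

package linux

import (
	"strings"
	"testing"
)

// Sketch: 0x21 has bits 0 and 5 set, which correspond to chown and kill
// in the capabilityNames table above.
func TestDecodeBitMapSketch(t *testing.T) {
	names, err := decodeBitMap("0000000000000021", capabilityName)
	if err != nil {
		t.Fatal(err)
	}
	if got := strings.Join(names, ","); got != "chown,kill" {
		t.Errorf("unexpected capabilities: %s", got)
	}
}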
+ +package shared + +import ( + "net" +) + +func Network() (ips, macs []string, err error) { + ifcs, err := net.Interfaces() + if err != nil { + return nil, nil, err + } + + ips = make([]string, 0, len(ifcs)) + macs = make([]string, 0, len(ifcs)) + for _, ifc := range ifcs { + addrs, err := ifc.Addrs() + if err != nil { + return nil, nil, err + } + for _, addr := range addrs { + ips = append(ips, addr.String()) + } + + mac := ifc.HardwareAddr.String() + if mac != "" { + macs = append(macs, mac) + } + } + + return ips, macs, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go new file mode 100644 index 00000000..fee528c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "github.com/elastic/go-windows" +) + +func Architecture() (string, error) { + systemInfo, err := windows.GetNativeSystemInfo() + if err != nil { + return "", err + } + + return systemInfo.ProcessorArchitecture.String(), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go new file mode 100644 index 00000000..a6d5a3e4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
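shared.Network above returns every interface address as reported by net.Addr.String, which for *net.IPNet is CIDR notation, together with the non-empty MAC addresses. A minimal usage sketch, assuming the import path used throughout this vendor tree.

package main

import (
	"fmt"

	"github.com/elastic/go-sysinfo/providers/shared"
)

func main() {
	ips, macs, err := shared.Network()
	if err != nil {
		panic(err)
	}
	fmt.Println("ips:", ips)   // e.g. [127.0.0.1/8 192.168.1.10/24]
	fmt.Println("macs:", macs) // e.g. [aa:bb:cc:dd:ee:ff]
}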
+ +package windows + +import ( + "time" + + "github.com/elastic/go-windows" + "github.com/pkg/errors" +) + +func BootTime() (time.Time, error) { + msSinceBoot, err := windows.GetTickCount64() + if err != nil { + return time.Time{}, errors.Wrap(err, "failed to get boot time") + } + + // According to GetTickCount64 the resolution is limited to between 10 to 16 + // milliseconds so truncate the time as to not mislead anyone about the + // resolution. + bootTime := time.Now().Add(-1 * time.Duration(msSinceBoot) * time.Millisecond) + bootTime = bootTime.Truncate(10 * time.Millisecond) + return bootTime, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/doc.go new file mode 100644 index 00000000..c5f27c4a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package windows implements the HostProvider and ProcessProvider interfaces +// for providing information about Windows. +package windows diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go new file mode 100644 index 00000000..a70230d3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
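BootTime above computes the boot time as the current time minus GetTickCount64 milliseconds and truncates the result, since the tick counter is only accurate to roughly 10 to 16 ms and a sub-millisecond timestamp would overstate the precision. A small sketch of that calculation with a made-up uptime; the helper name is illustrative.

package main

import (
	"fmt"
	"time"
)

// bootTimeFromUptime mirrors the calculation above: now minus uptime,
// truncated so the result does not pretend to sub-10ms precision.
func bootTimeFromUptime(now time.Time, uptime time.Duration) time.Time {
	return now.Add(-uptime).Truncate(10 * time.Millisecond)
}

func main() {
	now := time.Date(2018, 4, 2, 10, 0, 0, 123456789, time.UTC)
	fmt.Println(bootTimeFromUptime(now, 90*time.Second))
	// 2018-04-02 09:58:30.12 +0000 UTC (everything below 10 ms is dropped)
}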
+ +package windows + +import ( + "os" + "time" + + "github.com/elastic/go-windows" + "github.com/joeshaw/multierror" + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/providers/shared" + "github.com/elastic/go-sysinfo/types" +) + +func init() { + registry.Register(windowsSystem{}) +} + +type windowsSystem struct{} + +func (s windowsSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) CPUTime() (*types.CPUTimes, error) { + idle, kernel, user, err := windows.GetSystemTimes() + if err != nil { + return nil, err + } + + return &types.CPUTimes{ + Timestamp: time.Now(), + System: kernel, + User: user, + Idle: idle, + }, nil +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + mem, err := windows.GlobalMemoryStatusEx() + if err != nil { + return nil, err + } + + return &types.HostMemoryInfo{ + Timestamp: time.Now(), + Total: mem.TotalPhys, + Used: mem.TotalPhys - mem.AvailPhys, + Free: mem.AvailPhys, + Available: mem.AvailPhys, + VirtualTotal: mem.TotalPageFile, + VirtualUsed: mem.TotalPageFile - mem.AvailPageFile, + VirtualFree: mem.AvailPageFile, + }, nil +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if errors.Cause(err) != types.ErrNotImplemented { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return &multierror.MultiError{Errors: r.errs} + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = v +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/kernel_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/kernel_windows.go new file mode 100644 index 00000000..375ab427 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/kernel_windows.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "github.com/elastic/go-windows" +) + +const windowsKernelExe = `C:\Windows\System32\ntoskrnl.exe` + +func KernelVersion() (string, error) { + versionData, err := windows.GetFileVersionInfo(windowsKernelExe) + if err != nil { + return "", err + } + + fileVersion, err := versionData.QueryValue("FileVersion") + if err == nil { + return fileVersion, nil + } + + // Make a second attempt through the fixed version info. + info, err := versionData.FixedFileInfo() + if err != nil { + return "", err + } + return info.ProductVersion(), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go new file mode 100644 index 00000000..df69d6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "github.com/pkg/errors" + "golang.org/x/sys/windows/registry" +) + +func MachineID() (string, error) { + return getMachineGUID() +} + +func getMachineGUID() (string, error) { + const key = registry.LOCAL_MACHINE + const path = `SOFTWARE\Microsoft\Cryptography` + const name = "MachineGuid" + + k, err := registry.OpenKey(key, path, registry.READ|registry.WOW64_64KEY) + if err != nil { + return "", errors.Wrapf(err, `failed to open HKLM\%v`, path) + } + + guid, _, err := k.GetStringValue(name) + if err != nil { + return "", errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + } + + return guid, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go new file mode 100644 index 00000000..7f803ddb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package windows
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/windows/registry"
+
+	"github.com/elastic/go-sysinfo/types"
+)
+
+func OperatingSystem() (*types.OSInfo, error) {
+	const key = registry.LOCAL_MACHINE
+	const path = `SOFTWARE\Microsoft\Windows NT\CurrentVersion`
+	const flags = registry.READ | registry.WOW64_64KEY
+
+	k, err := registry.OpenKey(key, path, flags)
+	if err != nil {
+		return nil, errors.Wrapf(err, `failed to open HKLM\%v`, path)
+	}
+
+	osInfo := &types.OSInfo{
+		Family:   "windows",
+		Platform: "windows",
+	}
+	name := "ProductName"
+	osInfo.Name, _, err = k.GetStringValue(name)
+	if err != nil {
+		return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name)
+	}
+
+	// Newer versions (Win 10 and 2016) have CurrentMajor/CurrentMinor.
+	major, _, majorErr := k.GetIntegerValue("CurrentMajorVersionNumber")
+	minor, _, minorErr := k.GetIntegerValue("CurrentMinorVersionNumber")
+	if majorErr == nil && minorErr == nil {
+		osInfo.Major = int(major)
+		osInfo.Minor = int(minor)
+		osInfo.Version = fmt.Sprintf("%d.%d", major, minor)
+	} else {
+		name = "CurrentVersion"
+		osInfo.Version, _, err = k.GetStringValue(name)
+		if err != nil {
+			return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name)
+		}
+		parts := strings.SplitN(osInfo.Version, ".", 3)
+		for i, p := range parts {
+			switch i {
+			case 0:
+				osInfo.Major, _ = strconv.Atoi(p)
+			case 1:
+				osInfo.Minor, _ = strconv.Atoi(p)
+			}
+		}
+	}
+
+	name = "CurrentBuild"
+	osInfo.Build, _, err = k.GetStringValue(name)
+	if err != nil {
+		return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name)
+	}
+
+	// Update Build Revision (optional)
+	name = "UBR"
+	updateBuildRevision, _, err := k.GetIntegerValue(name)
+	if err != nil && err != registry.ErrNotExist {
+		return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name)
+	} else {
+		osInfo.Build = fmt.Sprintf("%v.%d", osInfo.Build, updateBuildRevision)
+	}
+
+	return osInfo, nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/system.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/system.go
new file mode 100644
index 00000000..990c69f1
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/system.go
@@ -0,0 +1,85 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package sysinfo + +import ( + "runtime" + + "github.com/elastic/go-sysinfo/internal/registry" + "github.com/elastic/go-sysinfo/types" + + // Register host and process providers. + _ "github.com/elastic/go-sysinfo/providers/darwin" + _ "github.com/elastic/go-sysinfo/providers/linux" + _ "github.com/elastic/go-sysinfo/providers/windows" +) + +// Go returns information about the Go runtime. +func Go() types.GoInfo { + return types.GoInfo{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + MaxProcs: runtime.GOMAXPROCS(0), + Version: runtime.Version(), + } +} + +// Host returns information about host on which this process is running. If +// host information collection is not implemented for this platform then +// types.ErrNotImplemented is returned. +func Host() (types.Host, error) { + provider := registry.GetHostProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Host() +} + +// Process returns a types.Process object representing the process associated +// with the given PID. The types.Process object can be used to query information +// about the process. If process information collection is not implemented for +// this platform then types.ErrNotImplemented is returned. +func Process(pid int) (types.Process, error) { + provider := registry.GetProcessProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Process(pid) +} + +// Processes return a list of all processes. If process information collection +// is not implemented for this platform then types.ErrNotImplemented is +// returned. +func Processes() ([]types.Process, error) { + provider := registry.GetProcessProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Processes() +} + +// Self return a types.Process object representing this process. If process +// information collection is not implemented for this platform then +// types.ErrNotImplemented is returned. +func Self() (types.Process, error) { + provider := registry.GetProcessProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Self() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/errors.go new file mode 100644 index 00000000..138ea3e9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/errors.go @@ -0,0 +1,22 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
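// Editor's sketch (illustrative, not part of the vendored patch): how a caller
// might use the go-sysinfo entry points defined in system.go above. Only
// functions and types introduced in this diff are assumed (sysinfo.Host,
// Host.Info, Host.Memory, types.ErrNotImplemented).
package main

import (
	"encoding/json"
	"fmt"

	sysinfo "github.com/elastic/go-sysinfo"
)

func main() {
	host, err := sysinfo.Host()
	if err != nil {
		// On platforms without a registered provider this is types.ErrNotImplemented.
		panic(err)
	}

	// HostInfo marshals directly thanks to the JSON tags declared in types/host.go.
	data, _ := json.MarshalIndent(host.Info(), "", "  ")
	fmt.Println(string(data))

	if mem, err := host.Memory(); err == nil {
		fmt.Printf("memory used: %d of %d bytes\n", mem.Used, mem.Total)
	}
}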
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import "github.com/pkg/errors" + +var ErrNotImplemented = errors.New("unimplemented") diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/go.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/go.go new file mode 100644 index 00000000..3f18f2d6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/go.go @@ -0,0 +1,25 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +type GoInfo struct { + OS string `json:"os"` + Arch string `json:"arch"` + MaxProcs int `json:"max_procs"` + Version string `json:"version"` +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/host.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/host.go new file mode 100644 index 00000000..1124ee5f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/host.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import "time" + +type Host interface { + Info() HostInfo + Memory() (*HostMemoryInfo, error) + CPUTime() (*CPUTimes, error) +} + +type HostInfo struct { + Architecture string `json:"architecture"` // Hardware architecture (e.g. x86_64, arm, ppc, mips). + BootTime time.Time `json:"boot_time"` // Host boot time. + Containerized *bool `json:"containerized,omitempty"` // Is the process containerized. + Hostname string `json:"hostname"` // Hostname + IPs []string `json:"ips,omitempty"` // List of all IPs. 
+ KernelVersion string `json:"kernel_version"` // Kernel version. + MACs []string `json:"mac_addresses"` // List of MAC addresses. + OS *OSInfo `json:"os"` // OS information. + Timezone string `json:"timezone"` // System timezone. + TimezoneOffsetSec int `json:"timezone_offset_sec"` // Timezone offset (seconds from UTC). + UniqueID string `json:"id,omitempty"` // Unique ID of the host (optional). +} + +func (host HostInfo) Uptime() time.Duration { + return time.Since(host.BootTime) +} + +type OSInfo struct { + Family string `json:"family"` // OS Family (e.g. redhat, debian, freebsd, windows). + Platform string `json:"platform"` // OS platform (e.g. centos, ubuntu, windows). + Name string `json:"name"` // OS Name (e.g. Mac OS X, CentOS). + Version string `json:"version"` // OS version (e.g. 10.12.6). + Major int `json:"major"` // Major release version. + Minor int `json:"minor"` // Minor release version. + Patch int `json:"patch"` // Patch release version. + Build string `json:"build,omitempty"` // Build (e.g. 16G1114). + Codename string `json:"codename,omitempty"` // OS codename (e.g. jessie). +} + +type LoadAverage interface { + LoadAverage() LoadAverageInfo +} + +type LoadAverageInfo struct { + One float64 `json:"one_min"` + Five float64 `json:"five_min"` + Fifteen float64 `json:"fifteen_min"` +} + +// HostMemoryInfo (all values are specified in bytes). +type HostMemoryInfo struct { + Timestamp time.Time `json:"timestamp"` // Time at which samples were collected. + Total uint64 `json:"total_bytes"` // Total physical memory. + Used uint64 `json:"used_bytes"` // Total - Free + Available uint64 `json:"available_bytes"` // Amount of memory available without swapping. + Free uint64 `json:"free_bytes"` // Amount of memory not used by the system. + VirtualTotal uint64 `json:"virtual_total_bytes"` // Total virtual memory. + VirtualUsed uint64 `json:"virtual_used_bytes"` // VirtualTotal - VirtualFree + VirtualFree uint64 `json:"virtual_free_bytes"` // Virtual memory that is not used. + Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/process.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/process.go new file mode 100644 index 00000000..b1008e77 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/process.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
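// Editor's sketch (illustrative, not part of the vendored patch): small helpers
// deriving display values from the HostInfo and HostMemoryInfo types above.
// The package and function names are hypothetical; only the go-sysinfo types
// shown in this diff are assumed.
package hostreport

import (
	"fmt"
	"time"

	"github.com/elastic/go-sysinfo/types"
)

// memoryUsedPct returns Used/Total as a percentage, guarding against Total == 0.
func memoryUsedPct(m *types.HostMemoryInfo) float64 {
	if m == nil || m.Total == 0 {
		return 0
	}
	return 100 * float64(m.Used) / float64(m.Total)
}

// describeHost combines Uptime() with the OS identity fields.
func describeHost(info types.HostInfo) string {
	os := "unknown OS"
	if info.OS != nil {
		os = fmt.Sprintf("%s %s (build %s)", info.OS.Name, info.OS.Version, info.OS.Build)
	}
	return fmt.Sprintf("%s, up %s, %s", info.Hostname, info.Uptime().Round(time.Second), os)
}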
+ +package types + +import "time" + +type Process interface { + Info() (ProcessInfo, error) +} + +type ProcessInfo struct { + Name string `json:"name"` + PID int `json:"pid"` + PPID int `json:"ppid"` + CWD string `json:"cwd"` + Exe string `json:"exe"` + Args []string `json:"args"` + StartTime time.Time `json:"start_time"` +} + +type Environment interface { + Environment() (map[string]string, error) +} + +type FileDescriptor interface { + FileDescriptors() ([]string, error) + FileDescriptorCount() (int, error) +} + +type CPUTimer interface { + CPUTime() CPUTimes +} + +type Memory interface { + Memory() MemoryInfo +} + +type CPUTimes struct { + Timestamp time.Time `json:"timestamp"` // Time at which samples were collected. + User time.Duration `json:"user"` + System time.Duration `json:"system"` + Idle time.Duration `json:"idle,omitempty"` + IOWait time.Duration `json:"iowait,omitempty"` + IRQ time.Duration `json:"irq,omitempty"` + Nice time.Duration `json:"nice,omitempty"` + SoftIRQ time.Duration `json:"soft_irq,omitempty"` + Steal time.Duration `json:"steal,omitempty"` +} + +func (cpu CPUTimes) Total() time.Duration { + return cpu.User + cpu.System + cpu.Idle + cpu.IOWait + cpu.IRQ + cpu.Nice + + cpu.SoftIRQ + cpu.Steal +} + +type MemoryInfo struct { + Timestamp time.Time `json:"timestamp"` // Time at which samples were collected. + Resident uint64 `json:"resident_bytes"` + Virtual uint64 `json:"virtual_bytes"` + Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. +} + +type SeccompInfo struct { + Mode string `json:"mode"` + NoNewPrivs *bool `json:"no_new_privs,omitempty"` // Added in kernel 4.10. +} + +type CapabilityInfo struct { + Inheritable []string `json:"inheritable"` + Permitted []string `json:"permitted"` + Effective []string `json:"effective"` + Bounding []string `json:"bounding"` + Ambient []string `json:"ambient"` +} + +type Capabilities interface { + Capabilities() (*CapabilityInfo, error) +} + +type Seccomp interface { + Seccomp() (*SeccompInfo, error) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/LICENSE similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/LICENSE rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/LICENSE diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/README.md new file mode 100644 index 00000000..94a6a4b6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/README.md @@ -0,0 +1 @@ +# txfile \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/alloc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/alloc.go new file mode 100644 index 00000000..eb74fa18 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/alloc.go @@ -0,0 +1,765 @@ +package txfile + +import ( + "math" + + "github.com/elastic/go-txfile/internal/invariant" +) + +// file global allocator state +type ( + + // allocator manages the on-disk page allocation. Pages in the allocator can + // be either part of the meta-area or data-area. Users allocate pages from + // the data-area only. The meta-area keeps pages available for in file + // meta-data like overwrite pages and freelists. 
The meta-area allocates + // pages from the data-area, if required. The meta-area grows by always doubling + // the amount of pages in the meta-area. + // For allocations one must get an instance to the dataAllocator, + // walAllocator or metaAllocator respectively. Each allocator provides + // slightly different allocation strategies. + // The walAllocator is used for contents overwrite pages, while the + // metaAllocator is used to allocate pages for for serializing the overwrite + // mapping and freelispages for for serializing the overwrite mapping and + // freelist. + allocator struct { + // configuration + maxPages uint + maxSize uint + pageSize uint + + // meta area + meta allocArea + metaTotal uint // total number of pages reserved by meta area + + // data area + data allocArea + + // allocator file metadata + freelistRoot PageID + freelistPages regionList // page ids used to store the free list + } + + allocArea struct { + endMarker PageID + freelist freelist + } + + // custom allocator implementations, sharing the global allocator state + dataAllocator allocator // allocate from data area + walAllocator allocator // allocate WAL overwrite pages from beginning of meta area + metaAllocator allocator // allocate meta pages from end of meta area + + // metaManager manages the data and meta regions, by moving regions + // between those areas. The manager is used by walAllocator and metaAllocator + // only. + metaManager allocator +) + +//transaction local allocation state +type ( + // txAllocState is used by write transactions, to record changes to the file + // allocation state. The file global allocator state is modified within the + // write transaction. txAllocState acts as undo/redo-log for the in-memory + // allocation state. + // Page frees are only recorded within the transaction. No pages are returned + // to the allocator, so to ensure a page freed can not be allocated. This + // guarantees freed pages can not be overwritten in the current transaction + // (keep most recent transaction intact). + txAllocState struct { + manager txAreaManageState + data txAllocArea + meta txAllocArea + options txAllocOptions // per transaction allocation options + } + + txAllocArea struct { + endMarker PageID + allocated pageSet // allocated pages from freelist + new pageSet // allocated pages from end of file + freed pageSet // set of pages freed within transaction + } + + txAreaManageState struct { + moveToMeta regionList // list regions moved from data area to meta area + } + + // txAllocOptions keeps track of user options passed to the transaction. + txAllocOptions struct { + overflowAreaEnabled bool // enable allocating pages with ID > maxPages for metadata + metaGrowPercentage int // limit of meta area in use, so to allocate new pages into the meta area + } +) + +// allocCommitState keeps track of the new allocator state during the commit. +// These changes must be recorded for now, as the final allocator state must +// not be updated in memory until after the transaction has been commited to +// the file. +type allocCommitState struct { + tx *txAllocState + updated bool // set if updates to allocator within current transaction + allocRegions regionList // meta pages allocated to write new freelist too + metaList regionList // new meta area freelist + dataList regionList // new data area freelist + overflowFreed uint // number of pages in overflow region to be returned +} + +// noLimit indicates the data/meta-area can grow without any limits. 
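// Editor's sketch (illustrative, not part of the vendored patch): the allocator
// above tracks free pages as run-length regions (first page ID plus count)
// rather than individual IDs. This self-contained toy shows the idea; the real
// region/regionList types in this package carry additional behaviour.
package regionsketch

type pageID uint64

// pageRegion is a hypothetical stand-in for txfile's internal region type.
type pageRegion struct {
	id    pageID // first page of the region
	count uint32 // number of consecutive pages
}

// mergeAdjacent collapses neighbouring entries of a sorted region list,
// e.g. [{2,3} {5,1} {9,2}] becomes [{2,4} {9,2}].
func mergeAdjacent(in []pageRegion) []pageRegion {
	out := in[:0]
	for _, r := range in {
		if n := len(out); n > 0 && out[n-1].id+pageID(out[n-1].count) == r.id {
			out[n-1].count += r.count // extend the previous run
			continue
		}
		out = append(out, r)
	}
	return out
}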
+const noLimit uint = maxUint + +const defaultMetaGrowPercentage = 80 + +// allocator +// --------- + +func (a *allocator) DataAllocator() *dataAllocator { return (*dataAllocator)(a) } +func (a *allocator) WALPageAllocator() *walAllocator { return (*walAllocator)(a) } +func (a *allocator) MetaAllocator() *metaAllocator { return (*metaAllocator)(a) } +func (a *allocator) metaManager() *metaManager { return (*metaManager)(a) } + +func (a *allocator) makeTxAllocState(withOverflow bool, growPercentage int) txAllocState { + if growPercentage <= 0 { + growPercentage = defaultMetaGrowPercentage + } + + return txAllocState{ + data: txAllocArea{ + endMarker: a.data.endMarker, + }, + meta: txAllocArea{ + endMarker: a.meta.endMarker, + }, + options: txAllocOptions{ + overflowAreaEnabled: withOverflow, + metaGrowPercentage: growPercentage, + }, + } +} + +func (a *allocator) fileCommitPrepare(st *allocCommitState, tx *txAllocState) { + st.tx = tx + st.updated = tx.Updated() + if st.updated { + a.MetaAllocator().FreeRegions(tx, a.freelistPages) + } +} + +func (a *allocator) fileCommitAlloc(st *allocCommitState) error { + if !st.updated { + return nil + } + + dataFreed := st.tx.data.freed.Regions() + metaFreed := st.tx.meta.freed.Regions() + + // Predict number of meta pages required to store new freelist, + // by iterating all region entries and take the potential encoding size + // into account. As allocation might force a region from the data area + // being moved (or split) into the meta area, we add more dummy region + // with enforced max size. So the allocator can move pages between + // meta and data if required. + // This method over-estimates the number of required pages, as + // we will have to allocate pages from the metaFree lists end + // after the estimator finishes. + prediction := prepareFreelistEncPagePrediction(freePageHeaderSize, a.pageSize) + prediction.AddRegions(dataFreed) + prediction.AddRegions(metaFreed) + prediction.AddRegions(a.data.freelist.regions) + prediction.AddRegions(a.meta.freelist.regions) + if prediction.count > 0 { + // only add extra pages if we need to write the meta page + prediction.AddRegion(region{id: 1, count: math.MaxUint32}) + prediction.AddRegion(region{id: 1, count: math.MaxUint32}) + } + + // alloc regions for writing the new freelist + var allocRegions regionList + if n := prediction.count; n > 0 { + allocRegions = a.MetaAllocator().AllocRegions(st.tx, n) + if allocRegions == nil { + return errOutOfMemory + } + } + + // Compute new freelist. As consecutive regions are merged the + // resulting list might require less pages + newDataList := mergeRegionLists(a.data.freelist.regions, dataFreed) + newMetaList := mergeRegionLists(a.meta.freelist.regions, metaFreed) + + st.allocRegions = allocRegions + st.dataList = newDataList + + // remove pages from end of overflow area from meta freelist + adjust end marker + st.metaList, st.overflowFreed = releaseOverflowPages(newMetaList, a.maxPages, a.meta.endMarker) + return nil +} + +// releaseOverflowPages removes pages at the end of a region list as long as +// the current end marker is bigger then the maximum number of allowed pages +// and the freelist contains some continuous regions up to endMarker. 
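// Editor's sketch (illustrative, not part of the vendored patch): a worked
// example for releaseOverflowPages below. It assumes region.Range() reports an
// exclusive end, matching endMarker being the next page to be allocated; the
// function name is hypothetical.
func exampleReleaseOverflowPages() {
	list := regionList{
		{id: 10, count: 5}, // pages 10..14, inside the regular area
		{id: 24, count: 6}, // pages 24..29, beyond maxPages -> overflow area
	}

	// With maxPages=22 and endMarker=30 the trailing region is dropped entirely,
	// while the region below the limit is kept untouched.
	kept, freed := releaseOverflowPages(list, 22, PageID(30))
	_ = kept  // [{id:10 count:5}]
	_ = freed // 6 pages handed back from the overflow area
}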
+func releaseOverflowPages( + list regionList, + maxPages uint, endMarker PageID, +) (regionList, uint) { + overflowStart, overflowEnd := PageID(maxPages), endMarker + if maxPages == 0 || overflowStart >= overflowEnd { + return list, 0 + } + + var freed uint + for i := len(list) - 1; i != -1; i-- { + start, end := list[i].Range() + if end < overflowEnd { + break + } + + if start < overflowStart { + // split + list[i].count = uint32(overflowStart - start) + freed += uint(end - overflowStart) + overflowEnd = overflowStart + } else { + // remove range + overflowEnd = start + freed += uint(list[i].count) + list = list[:i] + } + } + + return list, freed +} + +func (a *allocator) fileCommitSerialize( + st *allocCommitState, + onPage func(id PageID, buf []byte) error, +) error { + if !st.updated || len(st.allocRegions) == 0 { + return nil + } + return writeFreeLists(st.allocRegions, a.pageSize, st.metaList, st.dataList, onPage) +} + +func (a *allocator) fileCommitMeta(meta *metaPage, st *allocCommitState) { + if st.updated { + var freelistRoot PageID + if len(st.allocRegions) > 0 { + freelistRoot = st.allocRegions[0].id + } + meta.freelist.Set(freelistRoot) + + dataEndMarker := a.data.endMarker + metaEndMarker := a.meta.endMarker + if st.overflowFreed > 0 { + metaEndMarker -= PageID(st.overflowFreed) + if metaEndMarker > dataEndMarker { + dataEndMarker = metaEndMarker + } + } + + meta.dataEndMarker.Set(dataEndMarker) + meta.metaEndMarker.Set(metaEndMarker) + meta.metaTotal.Set(uint64(a.metaTotal - st.overflowFreed)) + } +} + +func (a *allocator) Commit(st *allocCommitState) { + if st.updated { + a.freelistPages = st.allocRegions + if len(st.allocRegions) > 0 { + a.freelistRoot = st.allocRegions[0].id + } else { + a.freelistRoot = 0 + } + + a.data.commit(st.dataList) + a.meta.commit(st.metaList) + a.metaTotal -= st.overflowFreed + } +} + +func (a *allocator) Rollback(st *txAllocState) { + // restore meta area + a.meta.rollback(&st.meta) + for _, reg := range st.manager.moveToMeta { + a.meta.freelist.RemoveRegion(reg) + a.metaTotal -= uint(reg.count) + + if reg.id < st.data.endMarker { + reg.EachPage(st.data.allocated.Add) + } + } + + // restore data area + a.data.rollback(&st.data) +} + +func (a *allocArea) commit(regions regionList) { + a.freelist.regions = regions + a.freelist.avail = regions.CountPages() +} + +func (a *allocArea) rollback(st *txAllocArea) { + for id := range st.allocated { + if id >= st.endMarker { + delete(st.allocated, id) + } + } + a.freelist.AddRegions(st.allocated.Regions()) + a.endMarker = st.endMarker +} + +// metaManager +// ----------- + +func (mm *metaManager) DataAllocator() *dataAllocator { + return (*dataAllocator)(mm) +} + +func (mm *metaManager) Avail(st *txAllocState) uint { + dataAvail := mm.DataAllocator().Avail(st) + if dataAvail == noLimit || st.options.overflowAreaEnabled { + return noLimit + } + + return mm.meta.freelist.Avail() + dataAvail +} + +func (mm *metaManager) Ensure(st *txAllocState, n uint) bool { + total := mm.metaTotal + avail := mm.meta.freelist.Avail() + used := total - avail + targetUsed := used + n + + invariant.Check(total >= avail, "invalid meta total page count") + + tracef("ensure(%v): total=%v, avail=%v, used=%v, targetUsed=%v\n", + n, total, avail, used, targetUsed) + + pctGrow := st.options.metaGrowPercentage + pctShrink := pctGrow / 2 + + szMinMeta, szMaxMeta := metaAreaTargetQuota(total, targetUsed, pctShrink, pctGrow) + traceln(" target quota: ", szMinMeta, szMaxMeta) + + invariant.Check(szMaxMeta >= szMinMeta, "required page 
count must grow") + + if szMaxMeta == total { + // we still have enough memory in the meta area -> return success + + // TODO: allow 'ensure' to shrink the meta area + return true + } + + invariant.Check(szMaxMeta > total, "expected new page count exceeding allocated pages") + + // try to move regions from data area into the meta area: + requiredMax := szMaxMeta - total + if mm.tryGrow(st, requiredMax, false) { + return true + } + + // Can not grow until 'requiredMax' -> try to grow up to requiredMin, + // potentially allocating pages from the overflow area + requiredMin := szMinMeta - total + if mm.tryGrow(st, requiredMin, st.options.overflowAreaEnabled) { + return true + } + + // out of memory + return false +} + +func (mm *metaManager) tryGrow( + st *txAllocState, + count uint, + withOverflow bool, +) bool { + da := mm.DataAllocator() + avail := da.Avail(st) + + tracef("try grow meta area pages=%v, avail=%v\n", count, avail) + + if count == 0 { + return true + } + + if avail < count { + if !withOverflow { + traceln("can not grow meta area yet") + return false + } + + da.AllocRegionsWith(st, avail, func(reg region) { + st.manager.moveToMeta.Add(reg) + mm.metaTotal += uint(reg.count) + mm.meta.freelist.AddRegion(reg) + }) + + // allocate from overflow area + required := count - avail + if required > 0 { + traceln("try to grow overflow area") + } + allocFromArea(&st.meta, &mm.meta.endMarker, required, func(reg region) { + // st.manager.fromOverflow.Add(reg) + mm.metaTotal += uint(reg.count) + mm.meta.freelist.AddRegion(reg) + }) + if mm.maxPages == 0 && mm.data.endMarker < mm.meta.endMarker { + mm.data.endMarker = mm.meta.endMarker + } + + return true + } + + // Enough memory available in data area. Try to allocate continuous region first + reg := da.AllocContinuousRegion(st, count) + if reg.id != 0 { + st.manager.moveToMeta.Add(reg) + mm.metaTotal += uint(reg.count) + mm.meta.freelist.AddRegion(reg) + return true + } + + // no continuous memory block -> allocate single regions + n := da.AllocRegionsWith(st, count, func(reg region) { + st.manager.moveToMeta.Add(reg) + mm.metaTotal += uint(reg.count) + mm.meta.freelist.AddRegion(reg) + }) + return n == count +} + +func (mm *metaManager) Free(st *txAllocState, id PageID) { + // mark page as freed for now + st.meta.freed.Add(id) +} + +func metaAreaTargetQuota( + total, used uint, + shrinkPercentage, growPercentage int, +) (min, max uint) { + min = used + max = uint(nextPowerOf2(uint64(used))) + if max < total { + max = total + } + + usage := 100 * float64(used) / float64(max) + + // grow 'max' by next power of 2, if used area would exceed growPercentage + needsGrow := usage > float64(growPercentage) + + // If memory is to be freed (max < total), still grow 'max' by next power of + // 2 (so not to free too much memory at once), if used area in new meta area + // would exceed shrinkPercentage. 
+ // => percentage of used area in new meta area will be shrinkPercentage/2 + needsGrow = needsGrow || (max < total && usage > float64(shrinkPercentage)) + + if min < total { + min = total + } + + if needsGrow { + max = max * 2 + } + return min, max +} + +// dataAllocator +// ------------- + +func (a *dataAllocator) Avail(_ *txAllocState) uint { + if a.maxPages == 0 { + return noLimit + } + return a.maxPages - uint(a.data.endMarker) + a.data.freelist.Avail() +} + +func (a *dataAllocator) AllocContinuousRegion( + st *txAllocState, + n uint, +) region { + avail := a.Avail(st) + if avail < n { + return region{} + } + + reg := allocContFromFreelist(&a.data.freelist, &st.data, allocFromBeginning, n) + if reg.id != 0 { + return reg + } + + avail = a.maxPages - uint(a.data.endMarker) + if avail < n { + // out of memory + return region{} + } + + allocFromArea(&st.data, &a.data.endMarker, n, func(r region) { reg = r }) + if a.meta.endMarker < a.data.endMarker { + a.meta.endMarker = a.data.endMarker + } + return reg +} + +func (a *dataAllocator) AllocRegionsWith( + st *txAllocState, + n uint, + fn func(region), +) uint { + avail := a.Avail(st) + if avail < n { + return 0 + } + + // Enough space available -> allocate all pages. + count := n + + // 1. allocate subset of regions from freelist + n -= allocFromFreelist(&a.data.freelist, &st.data, allocFromBeginning, n, fn) + if n > 0 { + // 2. allocate from yet unused data area + allocFromArea(&st.data, &a.data.endMarker, n, fn) + if a.meta.endMarker < a.data.endMarker { + a.meta.endMarker = a.data.endMarker + } + } + return count +} + +func (a *dataAllocator) Free(st *txAllocState, id PageID) { + traceln("free page:", id) + + if id < 2 || id >= a.data.endMarker { + panic(errOutOfBounds) + } + + if !st.data.new.Has(id) { + // fast-path, page has not been allocated in current transaction + st.data.freed.Add(id) + return + } + + // page has been allocated in current transaction -> return to allocator for immediate re-use + a.data.freelist.AddRegion(region{id: id, count: 1}) + + if st.data.endMarker >= id { + // allocation from within old data region + return + } + + // allocation was from past the old end-marker. Check if we can shrink the + // end marker again + regions := a.data.freelist.regions + last := len(regions) - 1 + start, end := regions[last].Range() + if end < a.data.endMarker { + // in middle of new data region -> can not adjust end marker -> keep update to freelist + return + } + + if st.data.endMarker > start { + start = st.data.endMarker + count := uint(end - start) + regions[last].count -= uint32(count) + a.data.freelist.avail -= count + } else { + a.data.freelist.avail -= uint(regions[last].count) + a.data.freelist.regions = regions[:last] + } + a.data.endMarker = start +} + +// walAllocator +// ------------ + +func (a *walAllocator) metaManager() *metaManager { return (*metaManager)(a) } + +func (a *walAllocator) Avail(st *txAllocState) uint { + return a.metaManager().Avail(st) +} + +func (a *walAllocator) Alloc(st *txAllocState) PageID { + mm := a.metaManager() + if !mm.Ensure(st, 1) { + return 0 + } + + // Use AllocContinuousRegion to find smallest fitting region + // to allocate from. 
+ reg := a.meta.freelist.AllocContinuousRegion(allocFromBeginning, 1) + if reg.id == 0 { + return 0 + } + st.meta.allocated.Add(reg.id) + return reg.id +} + +func (a *walAllocator) AllocRegionsWith(st *txAllocState, n uint, fn func(region)) uint { + mm := a.metaManager() + if !mm.Ensure(st, n) { + return 0 + } + + return allocFromFreelist(&a.meta.freelist, &st.meta, allocFromBeginning, n, fn) +} + +func (a *walAllocator) Free(st *txAllocState, id PageID) { + a.metaManager().Free(st, id) +} + +// metaAllocator +// ------------ + +func (a *metaAllocator) metaManager() *metaManager { return (*metaManager)(a) } + +func (a *metaAllocator) Avail(st *txAllocState) uint { + return a.metaManager().Avail(st) +} + +func (a *metaAllocator) AllocRegionsWith( + st *txAllocState, + n uint, + fn func(region), +) uint { + mm := a.metaManager() + if !mm.Ensure(st, n) { + return 0 + } + + return allocFromFreelist(&a.meta.freelist, &st.meta, allocFromEnd, n, fn) +} + +func (a *metaAllocator) AllocRegions(st *txAllocState, n uint) regionList { + reg := make(regionList, 0, n) + if n := a.AllocRegionsWith(st, n, reg.Add); n == 0 { + return nil + } + return reg +} + +func (a *metaAllocator) Free(st *txAllocState, id PageID) { + a.metaManager().Free(st, id) +} + +func (a *metaAllocator) FreeAll(st *txAllocState, ids idList) { + for _, id := range ids { + a.Free(st, id) + } +} + +func (a *metaAllocator) FreeRegions(st *txAllocState, regions regionList) { + regions.EachPage(func(id PageID) { + a.Free(st, id) + }) +} + +// tx allocation state methods +// --------------------------- + +func (s *txAllocState) Updated() bool { + return s.meta.Updated() || s.data.Updated() +} + +func (s *txAllocArea) Updated() bool { + return !s.allocated.Empty() || !s.new.Empty() || !s.freed.Empty() +} + +// allocator state (de-)serialization +// ---------------------------------- + +func readAllocatorState(a *allocator, f *File, meta *metaPage, opts Options) error { + if a.maxSize > 0 { + a.maxPages = a.maxSize / a.pageSize + } + + a.data.endMarker = meta.dataEndMarker.Get() + a.meta.endMarker = meta.metaEndMarker.Get() + a.metaTotal = uint(meta.metaTotal.Get()) + + a.freelistRoot = meta.freelist.Get() + if a.freelistRoot == 0 { + return nil + } + + var metaList, dataList freelist + ids, err := readFreeList(f.mmapedPage, a.freelistRoot, func(isMeta bool, region region) { + lst := &dataList + if isMeta { + lst = &metaList + } + + lst.avail += uint(region.count) + lst.regions.Add(region) + }) + if err != nil { + return err + } + + dataList.regions.Sort() + dataList.regions.MergeAdjacent() + metaList.regions.Sort() + metaList.regions.MergeAdjacent() + + a.data.freelist = dataList + a.meta.freelist = metaList + a.freelistPages = ids.Regions() + return nil +} + +// allocator helpers/utilities +// --------------------------- + +// allocFromFreelist allocates up to 'max' pages from the free list. 
+// The number of allocated pages is returned +func allocFromFreelist( + f *freelist, + area *txAllocArea, + order *allocOrder, + max uint, + fn func(region), +) uint { + count := max + if f.avail < count { + count = f.avail + } + + f.AllocRegionsWith(order, count, func(region region) { + region.EachPage(area.allocated.Add) + fn(region) + }) + return count +} + +func allocContFromFreelist( + f *freelist, + area *txAllocArea, + order *allocOrder, + n uint, +) region { + region := f.AllocContinuousRegion(order, n) + if region.id != 0 { + region.EachPage(area.new.Add) + } + return region +} + +func allocFromArea(area *txAllocArea, marker *PageID, count uint, fn func(region)) { + // region can be max 2<<32 -> allocate in loop + id := *marker + for count > 0 { + n := count + if n > math.MaxUint32 { + n = math.MaxUint32 + } + + region := region{id: id, count: uint32(n)} + region.EachPage(area.new.Add) + fn(region) + + id += PageID(n) + count -= n + } + *marker = id +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/errors.go new file mode 100644 index 00000000..49ced4a7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/errors.go @@ -0,0 +1,32 @@ +package txfile + +import "errors" + +var ( + // file meta page validation errors + + errMagic = errors.New("invalid magic number") + errVersion = errors.New("invalid version number") + errChecksum = errors.New("checksum mismatch") + + // file sizing errors + + errMmapTooLarge = errors.New("mmap too large") + errFileSizeTooLage = errors.New("max file size to large for this system") + errInvalidFileSize = errors.New("invalid file size") + + // page access/allocation errors + + errOutOfBounds = errors.New("out of bounds page id") + errOutOfMemory = errors.New("out of memory") + errFreedPage = errors.New("trying to access an already freed page") + errPageFlushed = errors.New("page is already flushed") + errTooManyBytes = errors.New("contents exceeds page size") + errNoPageData = errors.New("accessing page without contents") + errFreeDirtyPage = errors.New("freeing dirty page") + + // transaction errors + + errTxFinished = errors.New("transaction has already been closed") + errTxReadonly = errors.New("readonly transaction") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/file.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/file.go new file mode 100644 index 00000000..4c18696d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/file.go @@ -0,0 +1,478 @@ +package txfile + +import ( + "fmt" + "math" + "math/bits" + "os" + "sync" + "unsafe" + + "github.com/elastic/go-txfile/internal/cleanup" + "github.com/elastic/go-txfile/internal/invariant" +) + +// File provides transactional support to pages of a file. A file is split into +// pages of type PageSize. Pages within the file are only accessible by page IDs +// from within active transactions. +type File struct { + path string + file vfsFile + locks lock + wg sync.WaitGroup // local async workers wait group + writer writer + allocator allocator + wal waLog + + // mmap info + mapped []byte + + // meta pages + meta [2]*metaPage + metaActive int +} + +// Options provides common file options used when opening or creating a file. +type Options struct { + // MaxSize sets the maximum file size in bytes. This should be a multiple of PageSize. 
+ // If it's not a multiple of PageSize, the actual files maximum size is rounded downwards + // to the next multiple of PageSize. + // A value of 0 indicates the file can grow without limits. + MaxSize uint64 + + // PageSize sets the files page size on file creation. PageSize is ignored if + // the file already exists. + // If PageSize is not configured, the OSes main memory page size is selected. + PageSize uint32 + + // Prealloc disk space if MaxSize is set. + Prealloc bool + + // Open file in readonly mode. + Readonly bool +} + +// Open opens or creates a new transactional file. +// Open tries to create the file, if the file does not exist yet. Returns an +// error if file access fails, file can not be locked or file meta pages are +// found to be invalid. +func Open(path string, mode os.FileMode, opts Options) (*File, error) { + file, err := openOSFile(path, mode) + if err != nil { + return nil, err + } + + initOK := false + defer cleanup.IfNot(&initOK, cleanup.IgnoreError(file.Close)) + + // Create exclusive lock on the file and initialize the file state. + var f *File + if err = file.Lock(true, true); err == nil { + // initialize the file + f, err = openWith(file, opts) + } + if err != nil { + return nil, err + } + + initOK = true + + tracef("open file: %p (%v)\n", f, path) + traceMetaPage(f.getMetaPage()) + return f, nil +} + +// openWith implements the actual opening sequence, including file +// initialization and validation. +func openWith(file vfsFile, opts Options) (*File, error) { + sz, err := file.Size() + if err != nil { + return nil, err + } + + fileExists := sz > 0 + if !fileExists { + if err := initNewFile(file, opts); err != nil { + return nil, err + } + } + + meta, err := readValidMeta(file) + if err != nil { + return nil, err + } + + pageSize := meta.pageSize.Get() + maxSize := meta.maxSize.Get() + if maxSize == 0 && opts.MaxSize > 0 { + maxSize = opts.MaxSize + } + + if maxSize > uint64(maxUint) { + return nil, errFileSizeTooLage + } + + return newFile(file, opts, uint(maxSize), uint(pageSize)) +} + +// newFile creates and initializes a new File. File state is initialized +// from file and internal workers will be started. +func newFile(file vfsFile, opts Options, maxSize, pageSize uint) (*File, error) { + + f := &File{ + file: file, + path: file.Name(), + allocator: allocator{ + maxSize: maxSize, + pageSize: pageSize, + }, + } + f.locks.init() + + if err := f.mmap(); err != nil { + return nil, err + } + initOK := false + defer cleanup.IfNot(&initOK, cleanup.IgnoreError(f.munmap)) + + if err := f.init(opts); err != nil { + return nil, err + } + + invariant.CheckNot(f.allocator.maxSize != 0 && f.allocator.maxPages == 0, + "page limit not configured on allocator") + + // create asynchronous writer + f.writer.Init(file, f.allocator.pageSize) + f.wg.Add(1) + go func() { + defer f.wg.Done() + f.writer.Run() + }() + + initOK = true + return f, nil +} + +// init initializes the File state from most recent valid meta-page. 
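// Editor's sketch (illustrative, not part of the vendored patch): opening a
// txfile store with the Open and Options APIs defined above. The transaction
// methods (Commit/Rollback/Close) are only referenced by comments in this
// file, so their exact signatures are assumptions here.
package main

import (
	"log"

	"github.com/elastic/go-txfile"
)

func main() {
	f, err := txfile.Open("queue.dat", 0600, txfile.Options{
		MaxSize:  64 << 20, // 64 MiB limit; rounded down to a multiple of PageSize
		PageSize: 4096,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tx := f.Begin() // read-write transaction, holds the reserved lock
	// ... allocate and write pages via tx ...
	if err := tx.Commit(); err != nil { // Commit (or Rollback/Close) releases the lock
		log.Fatal(err)
	}
}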
+func (f *File) init(opts Options) error {
+	// validate meta pages and set active meta page id
+	var metaErr [2]error
+	metaErr[0] = f.meta[0].Validate()
+	metaErr[1] = f.meta[1].Validate()
+	switch {
+	case metaErr[0] != nil && metaErr[1] != nil:
+		return metaErr[0]
+	case metaErr[0] == nil && metaErr[1] != nil:
+		f.metaActive = 0
+	case metaErr[0] != nil && metaErr[1] == nil:
+		f.metaActive = 1
+	default:
+		// both meta pages valid, choose page with highest transaction number
+		tx0 := f.meta[0].txid.Get()
+		tx1 := f.meta[1].txid.Get()
+		if tx0 == tx1 {
+			panic("meta pages with same transaction id")
+		}
+
+		if int64(tx0-tx1) > 0 { // if tx0 > tx1
+			f.metaActive = 0
+		} else {
+			f.metaActive = 1
+		}
+	}
+
+	// reference active meta page for initializing internal structures
+	meta := f.meta[f.metaActive]
+
+	if err := readWALMapping(&f.wal, f.mmapedPage, meta.wal.Get()); err != nil {
+		return err
+	}
+
+	return readAllocatorState(&f.allocator, f, meta, opts)
+}
+
+// Close closes the file, after all active transactions have finished. After
+// closing a file, no more transactions can be started.
+func (f *File) Close() error {
+	// zero out f on exit -> using f after close should generate a panic
+	defer func() { *f = File{} }()
+
+	tracef("start file shutdown: %p\n", f)
+	defer tracef("file closed: %p\n", f)
+
+	// get reserved lock, such that no write transactions can be started
+	f.locks.Reserved().Lock()
+	defer f.locks.Reserved().Unlock()
+
+	// get pending lock, such that no new read transaction can be started
+	f.locks.Pending().Lock()
+	defer f.locks.Pending().Unlock()
+
+	// get exclusive lock, waiting for active read transactions to be finished
+	f.locks.Exclusive().Lock()
+	defer f.locks.Exclusive().Unlock()
+
+	// no other active transactions -> close file
+	f.munmap()
+	f.writer.Stop()
+
+	err := f.file.Close()
+
+	// wait for workers to stop
+	f.wg.Wait()
+
+	return err
+}
+
+// Begin creates a new read-write transaction. The transaction returned
+// does hold the Reserved Lock on the file. Use Close, Rollback, or Commit to
+// release the lock.
+func (f *File) Begin() *Tx {
+	return f.BeginWith(TxOptions{Readonly: false})
+}
+
+// BeginReadonly creates a new readonly transaction. The transaction returned
+// does hold the Shared Lock on the file. Use Close() to release the lock.
+func (f *File) BeginReadonly() *Tx {
+	return f.BeginWith(TxOptions{Readonly: true})
+}
+
+// BeginWith creates a new readonly or read-write transaction, with additional
+// transaction settings.
+func (f *File) BeginWith(settings TxOptions) *Tx {
+	tracef("request new transaction (readonly: %v)\n", settings.Readonly)
+	lock := f.locks.TxLock(settings.Readonly)
+	lock.Lock()
+	tracef("init new transaction (readonly: %v)\n", settings.Readonly)
+	tx := newTx(f, lock, settings)
+	tracef("begin transaction: %p (readonly: %v)\n", tx, settings.Readonly)
+	return tx
+}
+
+// PageSize returns the file's page size in bytes.
+func (f *File) PageSize() int {
+	return int(f.allocator.pageSize)
+}
+
+// Offset computes a file offset from PageID and offset within the current
+// page.
+func (f *File) Offset(id PageID, offset uintptr) uintptr {
+	sz := uintptr(f.allocator.pageSize)
+	if offset >= sz {
+		panic("offset not within page boundary")
+	}
+	return offset + uintptr(id)*uintptr(f.allocator.pageSize)
+}
+
+// SplitOffset splits a file offset into a page ID for accessing the page and
+// an offset within the page.
+func (f *File) SplitOffset(offset uintptr) (PageID, uintptr) { + sz := uintptr(f.allocator.pageSize) + id := PageID(offset / sz) + off := offset - ((offset / sz) * sz) + return id, off +} + +// mmapUpdate updates the mmaped states. +// A go-routine updating the mmaped aread, must hold all locks on the file. +func (f *File) mmapUpdate() (err error) { + if err = f.munmap(); err == nil { + err = f.mmap() + } + return +} + +// mmap maps the files contents and updates internal pointers into the mmaped memory area. +func (f *File) mmap() error { + fileSize, err := f.file.Size() + if err != nil { + return err + } + + if fileSize < 0 { + return errInvalidFileSize + } + + maxSize := f.allocator.maxSize + if em := uint(f.allocator.meta.endMarker); maxSize > 0 && em > f.allocator.maxPages { + maxSize = em * f.allocator.pageSize + } + pageSize := f.allocator.pageSize + sz, err := computeMmapSize(uint(fileSize), maxSize, uint(pageSize)) + if err != nil { + return err + } + + // map file + buf, err := f.file.MMap(int(sz)) + if err != nil { + return err + } + + f.mapped = buf + f.meta[0] = castMetaPage(buf[0:]) + f.meta[1] = castMetaPage(buf[pageSize:]) + + return nil +} + +// munmap unmaps the file and sets internal mapping to nil. +func (f *File) munmap() error { + err := f.file.MUnmap(f.mapped) + f.mapped = nil + return err +} + +// mmapedPage finds the mmaped page contents by the given pageID. +// The byte buffer can only be used for reading. +func (f *File) mmapedPage(id PageID) []byte { + pageSize := uint64(f.allocator.pageSize) + start := uint64(id) * pageSize + end := start + pageSize + if uint64(len(f.mapped)) < end { + return nil + } + + return f.mapped[start:end] +} + +// initNewFile initializes a new, yet empty Files metapages. +func initNewFile(file vfsFile, opts Options) error { + var flags uint32 + if opts.MaxSize > 0 && opts.Prealloc { + flags |= metaFlagPrealloc + if err := file.Truncate(int64(opts.MaxSize)); err != nil { + return fmt.Errorf("truncation failed with %v", err) + } + } + + pageSize := opts.PageSize + if opts.PageSize == 0 { + pageSize = uint32(os.Getpagesize()) + if pageSize < minPageSize { + pageSize = minPageSize + } + } + if !isPowerOf2(uint64(pageSize)) { + return fmt.Errorf("pageSize %v is no power of 2", pageSize) + } + if pageSize < minPageSize { + return fmt.Errorf("pageSize must be > %v", minPageSize) + } + + // create buffer to hold contents for the four initial pages: + // 1. meta page 0 + // 2. meta page 1 + // 3. free list page + buf := make([]byte, pageSize*3) + + // write meta pages + for i := 0; i < 2; i++ { + pg := castMetaPage(buf[int(pageSize)*i:]) + pg.Init(flags, pageSize, opts.MaxSize) + pg.txid.Set(uint64(1 - i)) + pg.dataEndMarker.Set(2) // endMarker is index of next to be allocated page at end of file + pg.Finalize() + } + + // write initial pages to disk + err := writeAt(file, buf, 0) + if err == nil { + err = file.Sync() + } + + if err != nil { + return fmt.Errorf("initializing data file failed with %v", err) + } + return nil +} + +// readValidMeta tries to read a valid meta page from the file. +// The first valid meta page encountered is returned. 
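// Editor's note (illustrative, not part of the vendored patch): Offset and
// SplitOffset above are inverse page-arithmetic helpers. With a 4096-byte page
// size:
//
//	f.Offset(3, 100)     == 3*4096 + 100 == 12388
//	f.SplitOffset(12388) == (PageID(3), uintptr(100))
//
// SplitOffset recovers (id, offset) for any in-page offset smaller than the
// page size.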
+func readValidMeta(f vfsFile) (metaPage, error) { + meta, err := readMeta(f, 0) + if err != nil { + return meta, err + } + + if err := meta.Validate(); err != nil { + // try next metapage + offset := meta.pageSize.Get() + if meta, err = readMeta(f, int64(offset)); err != nil { + return meta, err + } + return meta, meta.Validate() + } + return meta, nil +} + +func readMeta(f vfsFile, off int64) (metaPage, error) { + var buf [unsafe.Sizeof(metaPage{})]byte + _, err := f.ReadAt(buf[:], off) + return *castMetaPage(buf[:]), err +} + +// computeMmapSize determines the page count in multiple of pages. +// Up to 1GB, the mmaped file area is double (starting at 64KB) on every grows. +// That is, exponential grows with values of 64KB, 128KB, 512KB, 1024KB, and so on. +// Once 1GB is reached, the mmaped area is always a multiple of 1GB. +func computeMmapSize(minSize, maxSize, pageSize uint) (uint, error) { + const ( + initBits uint = 16 // 2 ^ 16 Bytes + initSize = 1 << initBits // 64KB + sz1GB = 1 << 30 + doubleLimit = sz1GB // upper limit when to stop doubling the mmaped area + ) + + var maxMapSize uint + if math.MaxUint32 == maxUint { + maxMapSize = 2 * sz1GB + } else { + tmp := uint64(0x1FFFFFFFFFFF) + maxMapSize = uint(tmp) + } + + if maxSize != 0 { + // return maxSize as multiple of pages. Round downwards in case maxSize + // is not multiple of pages + + if minSize > maxSize { + maxSize = minSize + } + + sz := ((maxSize + pageSize - 1) / pageSize) * pageSize + if sz < initSize { + return 0, fmt.Errorf("max size of %v bytes is too small", maxSize) + } + + return sz, nil + } + + if minSize < doubleLimit { + // grow by next power of 2, starting at 64KB + initBits := uint(16) // 64KB min + power2Bits := uint(64 - bits.LeadingZeros64(uint64(minSize))) + if power2Bits < initBits { + power2Bits = initBits + } + return 1 << power2Bits, nil + } + + // allocate number of 1GB blocks to fulfill minSize + sz := ((minSize + (sz1GB - 1)) / sz1GB) * sz1GB + if sz > maxMapSize { + return 0, errMmapTooLarge + } + + // ensure we have a multiple of pageSize + sz = ((sz + pageSize - 1) / pageSize) * pageSize + + return sz, nil +} + +// getMetaPage returns a pointer to the meta-page of the last valid transaction +// found. +func (f *File) getMetaPage() *metaPage { + return f.meta[f.metaActive] +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/freelist.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/freelist.go new file mode 100644 index 00000000..e6628127 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/freelist.go @@ -0,0 +1,457 @@ +package txfile + +import ( + "math" + "sort" + + "github.com/elastic/go-txfile/internal/invariant" + "github.com/elastic/go-txfile/internal/iter" +) + +// freelist manages freed pages within an area. The freelist uses +// run-length-encoding, compining multiple pages into one region, so to reduce +// memory usage in memory, as well as when serializing the freelist. The +// freelist guarantees pages are sorted by PageID. Depending on allocOrder, +// pages with smallest/biggest PageID will be allocated first. +type freelist struct { + avail uint + regions regionList +} + +// freelistEncPagePrediction is used to predict the number of meta-pages required +// to serialize the freelist. +// The prediction might over-estimate the number of pages required, which is +// perfectly fine, as long as we don't under-estimate (which would break +// serialization -> transaction fail). 
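// Editor's sketch (illustrative, not part of the vendored patch): sample values
// produced by computeMmapSize above on a 64-bit platform with 4096-byte pages
// and no MaxSize configured. The function name is hypothetical.
func exampleComputeMmapSize() {
	sz, _ := computeMmapSize(100<<10, 0, 4096) // 100 KiB requested
	_ = sz                                     // -> 1<<17 = 131072 bytes (next power of two)

	sz, _ = computeMmapSize(3<<30, 0, 4096) // 3 GiB requested, past the doubling limit
	_ = sz                                  // -> 3<<30 bytes (multiples of 1 GiB)
}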
+type freelistEncPagePrediction struct { + count uint + payloadSize, avail uint +} + +// allocOrder provides freelist access strategies. +type allocOrder struct { + // freelist iteration order + iter iter.Fn + + // reportRange provides iteration limits for reporting allocated regions in + // order (by PageID). + reportRange func(last, len int) (int, int) + + // allocFromRegion split the region into allocated and leftover region. + allocFromRegion func(reg region, N uint32) (region, region) + + // keepRange determines which pages to keep/remove from the freelist, after + // allocation. + keepRange func(last, len int, partial bool) (int, int) +} + +var ( + allocFromBeginning = &allocOrder{ + iter: iter.Forward, + reportRange: func(last, len int) (int, int) { + return 0, last + 1 + }, + allocFromRegion: func(reg region, N uint32) (region, region) { + return region{id: reg.id, count: N}, region{id: reg.id + PageID(N), count: reg.count - N} + }, + keepRange: func(last, len int, partial bool) (int, int) { + if partial { + return last, len + } + return last + 1, len + }, + } + + allocFromEnd = &allocOrder{ + iter: iter.Reversed, + reportRange: func(last, len int) (int, int) { + return last, len + }, + allocFromRegion: func(reg region, N uint32) (region, region) { + return region{id: reg.id + PageID(reg.count-N), count: N}, region{id: reg.id, count: reg.count - N} + }, + keepRange: func(last, len int, partial bool) (int, int) { + if partial { + return 0, last + 1 + } + return 0, last + }, + } +) + +// Avail returns number of pages available in the current freelist. +func (f *freelist) Avail() uint { + return f.avail +} + +// AllocAllRegionsWith allocates all regions in the freelist. +// The list will be empty afterwards. +func (f *freelist) AllocAllRegionsWith(fn func(region)) { + for _, r := range f.AllocAllRegions() { + fn(r) + } +} + +// AllocAllRegions allocates all regions in the freelist. +// The list will be empty afterwards. +func (f *freelist) AllocAllRegions() regionList { + regions := f.regions + f.avail = 0 + f.regions = nil + return regions +} + +// AllocContinuousRegion tries to find a contiuous set of pages in the freelist. +// The best-fitting (or smallest) region having at least n pages will be used +// for allocation. +// Returns an empty region, if no continuous space could be found. 
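// Editor's sketch (illustrative, not part of the vendored patch): best-fit
// behaviour of AllocContinuousRegion below. The function name is hypothetical
// and the freelist is constructed directly for illustration only.
func exampleAllocContinuous() {
	fl := freelist{
		avail:   11,
		regions: regionList{{id: 2, count: 3}, {id: 10, count: 8}},
	}

	// A request for 4 continuous pages skips the 3-page region and carves the
	// allocation out of the smallest region that still fits.
	got := fl.AllocContinuousRegion(allocFromBeginning, 4)
	_ = got // {id:10 count:4}; the freelist keeps {2,3} plus the leftover {14,4}
}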
+func (f *freelist) AllocContinuousRegion(order *allocOrder, n uint) region { + if f.avail < n || (f.avail == n && len(f.regions) > 1) { + return region{} + } + + if n > math.MaxUint32 { // continuous regions max out at 4GB + return region{} + } + + bestFit := -1 + bestSz := uint(math.MaxUint32) + for i, end, next := order.iter(len(f.regions)); i != end; i = next(i) { + count := uint(f.regions[i].count) + if n <= count && count < bestSz { + bestFit = i + bestSz = count + if bestSz == n { + break + } + } + } + + if bestFit < 0 { + // no continuous region found + return region{} + } + + // allocate best fitting region from list + i := bestFit + selected := f.regions[i] + allocated, rest := order.allocFromRegion(selected, uint32(n)) + + invariant.Check(allocated.count == uint32(n), "allocation mismatch") + invariant.Check(allocated.count+rest.count == selected.count, "region split page count mismatch") + + if rest.count == 0 { + // remove entry + copy(f.regions[i:], f.regions[i+1:]) + f.regions = f.regions[:len(f.regions)-1] + } else { + f.regions[i] = rest + } + + f.avail -= uint(allocated.count) + return allocated +} + +// AllocRegionsWith allocates up n potentially non-continuous pages from the +// freelist. No page will be allocated, if n succeeds the number of available +// pages. +func (f *freelist) AllocRegionsWith(order *allocOrder, n uint, fn func(region)) { + if n == 0 { + return + } + + var ( + last int // last region to allocate from + L = len(f.regions) + N = n // number of pages to be allocated from 'last' region + ) + + if N > f.avail { + // not enough space -> return early + return + } + + // Collect indices of regions to be allocated from. + for i, end, next := order.iter(L); i != end; i = next(i) { + count := uint(f.regions[i].count) + if count >= N { + last = i + break + } + N -= count + } + + // Compute region split on last region to be allocated from. + selected := f.regions[last] + allocated, leftover := order.allocFromRegion(selected, uint32(N)) + + invariant.Check(allocated.count == uint32(N), "allocation mismatch") + invariant.Check(allocated.count+leftover.count == selected.count, "region split page count mismatch") + + // Implicitely update last allocated region to match the allocation size + // and report all regions allocated. + f.regions[last] = allocated + for i, end := order.reportRange(last, L); i != end; i++ { + fn(f.regions[i]) + } + + // update free regions + f.regions[last] = leftover + start, end := order.keepRange(last, L, leftover.count != 0) + f.regions = f.regions[start:end] + f.avail -= n +} + +// AddRegions merges a new list of regions with the freelist. The regions +// in the list must be sorted. +func (f *freelist) AddRegions(list regionList) { + count := list.CountPages() + if count > 0 { + f.regions = mergeRegionLists(f.regions, list) + f.avail += count + } +} + +// AddRegion inserts a new region into the freelist. AddRegion ensures the new +// region is sorted within the freelist, potentially merging the new region +// with existing regions. +// Note: The region to be added MUST NOT overlap with existing regions. +func (f *freelist) AddRegion(reg region) { + if len(f.regions) == 0 { + f.regions = regionList{reg} + f.avail += uint(reg.count) + return + } + + i := sort.Search(len(f.regions), func(i int) bool { + _, end := f.regions[i].Range() + return reg.id < end + }) + + total := uint(reg.count) + switch { + case len(f.regions) <= i: // add to end of region list? 
+ last := len(f.regions) - 1 + if regionsMergable(f.regions[last], reg) { + f.regions[last] = mergeRegions(f.regions[last], reg) + } else { + f.regions.Add(reg) + } + case i == 0: // add to start of region list? + if regionsMergable(reg, f.regions[0]) { + f.regions[0] = mergeRegions(reg, f.regions[0]) + } else { + f.regions = append(f.regions, region{}) + copy(f.regions[1:], f.regions) + f.regions[0] = reg + } + default: // insert in middle of region list + // try to merge region with already existing regions + mergeBefore := regionsMergable(f.regions[i-1], reg) + if mergeBefore { + reg = mergeRegions(f.regions[i-1], reg) + } + mergeAfter := regionsMergable(reg, f.regions[i]) + if mergeAfter { + reg = mergeRegions(reg, f.regions[i]) + } + + // update region list + switch { + case mergeBefore && mergeAfter: // combine adjacent regions -> shrink list + f.regions[i-1] = reg + copy(f.regions[i:], f.regions[i+1:]) + f.regions = f.regions[:len(f.regions)-1] + case mergeBefore: + f.regions[i-1] = reg + case mergeAfter: + f.regions[i] = reg + default: // no adjacent entries -> grow list + f.regions = append(f.regions, region{}) + copy(f.regions[i+1:], f.regions[i:]) + f.regions[i] = reg + } + } + + f.avail += total +} + +// RemoveRegion removes all pages from the freelist, that are found within +// the input region. +func (f *freelist) RemoveRegion(removed region) { + i := sort.Search(len(f.regions), func(i int) bool { + _, end := f.regions[i].Range() + return removed.id <= end + }) + if i < 0 || i >= len(f.regions) { + return + } + + current := &f.regions[i] + if current.id == removed.id && current.count == removed.count { + // fast path: entry can be completely removed + f.regions = append(f.regions[:i], f.regions[i+1:]...) + f.avail -= uint(removed.count) + return + } + + var total uint + removedStart, removedEnd := removed.Range() + for removedStart < removedEnd && i < len(f.regions) { + current := &f.regions[i] + + if removedStart < current.id { + // Gap: advance removedStart, so to deal with holes when removing the all regions + // matching the input region + removedStart = current.id + continue + } + + count := uint32(removedEnd - removedStart) + if removedStart == current.id { + if current.count < count { + count = current.count + } + + // remove entry: + current.id = current.id + PageID(count) + current.count -= count + if current.count == 0 { + // remove region from freelist -> i will point to next region if + // `removed` overlaps 2 non-merged regions + f.regions = append(f.regions[:i], f.regions[i+1:]...) 
+ } else { + // overlapping region, but old region must be preserved: + i++ + } + + removedStart += PageID(count) + total += uint(count) + } else { + // split current region in removedStart + keep := uint32(removedStart - current.id) + leftover := region{ + id: removedStart, + count: current.count - keep, + } + current.count = keep + + // remove sub-region from leftover + if leftover.count < count { + count = leftover.count + } + leftover.id += PageID(count) + leftover.count -= count + + total += uint(count) + removedStart += PageID(count) + i++ // advance to next region + + // insert new entry into regionList if removed did remove region in + // middle of old region + if leftover.count > 0 { + f.regions = append(f.regions, region{}) + copy(f.regions[i+1:], f.regions[i:]) + f.regions[i] = leftover + break // no more region to split from + } + } + } + + f.avail -= total +} + +// (de-)serialization + +func readFreeList( + access func(PageID) []byte, + root PageID, + fn func(bool, region), +) (idList, error) { + if root == 0 { + return nil, nil + } + + rootPage := access(root) + if rootPage == nil { + return nil, errOutOfBounds + } + + var metaPages idList + for pageID := root; pageID != 0; { + metaPages.Add(pageID) + node, payload := castFreePage(access(pageID)) + if node == nil { + return nil, errOutOfBounds + } + + pageID = node.next.Get() + entries := node.count.Get() + tracef("free list node: (next: %v, entries: %v)", pageID, entries) + + for ; entries > 0; entries-- { + isMeta, reg, n := decodeRegion(payload) + payload = payload[n:] + fn(isMeta, reg) + } + } + + return metaPages, nil +} + +func writeFreeLists( + to regionList, + pageSize uint, + metaList, dataList regionList, + onPage func(id PageID, buf []byte) error, +) error { + allocPages := to.PageIDs() + writer := newPagingWriter(allocPages, pageSize, 0, onPage) + + var writeErr error + writeList := func(isMeta bool, lst regionList) { + if writeErr != nil { + return + } + + for _, reg := range lst { + var buf [maxRegionEncSz]byte + n := encodeRegion(buf[:], isMeta, reg) + if err := writer.Write(buf[:n]); err != nil { + writeErr = err + return + } + } + } + + writeList(true, metaList) + writeList(false, dataList) + if writeErr != nil { + return writeErr + } + + return writer.Flush() +} + +func prepareFreelistEncPagePrediction(header int, pageSize uint) freelistEncPagePrediction { + return freelistEncPagePrediction{payloadSize: pageSize - uint(header)} +} + +func (f *freelistEncPagePrediction) Estimate() uint { + return f.count +} + +func (f *freelistEncPagePrediction) AddRegion(reg region) { + sz := uint(regionEncodingSize(reg)) + if f.avail < sz { + f.count++ + f.avail = f.payloadSize + } + f.avail -= sz +} + +func (f *freelistEncPagePrediction) AddRegions(lst regionList) { + for _, reg := range lst { + f.AddRegion(reg) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/idlist.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/idlist.go new file mode 100644 index 00000000..af18dbea --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/idlist.go @@ -0,0 +1,41 @@ +package txfile + +import "sort" + +type idList []PageID + +func (l *idList) Add(id PageID) { + *l = append(*l, id) +} + +func (l idList) ToSet() pageSet { + L := len(l) + if L == 0 { + return nil + } + + s := make(pageSet, L) + for _, id := range l { + s.Add(id) + } + return s +} + +func (l idList) Sort() { + sort.Slice(l, func(i, j int) bool { + return l[i] < l[j] + }) +} + +func 
(l idList) Regions() regionList {
+	if len(l) == 0 {
+		return nil
+	}
+
+	regions := make(regionList, len(l))
+	for i, id := range l {
+		regions[i] = region{id: id, count: 1}
+	}
+	optimizeRegionList(&regions)
+	return regions
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/cleanup/cleanup.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/cleanup/cleanup.go
new file mode 100644
index 00000000..7cea6ca0
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/cleanup/cleanup.go
@@ -0,0 +1,58 @@
+// Package cleanup provides helpers for common cleanup patterns on defer.
+
+// Use the helpers with `defer`. For example, use IfNot with `defer` so that
+// cleanup functions will be executed if `check` is false, no matter whether an
+// error has been returned or a panic has occurred.
+//
+//     initOK := false
+//     defer cleanup.IfNot(&initOK, func() {
+//         cleanup
+//     })
+//
+//     ... // init structures...
+//
+//     initOK = true // notify handler cleanup code must not be executed
+//
+package cleanup
+
+// If will run the cleanup function if the bool value is true.
+func If(check *bool, cleanup func()) {
+	if *check {
+		cleanup()
+	}
+}
+
+// IfNot will run the cleanup function if the bool value is false.
+func IfNot(check *bool, cleanup func()) {
+	if !(*check) {
+		cleanup()
+	}
+}
+
+// IfPred will run the cleanup function if pred returns true.
+func IfPred(pred func() bool, cleanup func()) {
+	if pred() {
+		cleanup()
+	}
+}
+
+// IfNotPred will run the cleanup function if pred returns false.
+func IfNotPred(pred func() bool, cleanup func()) {
+	if !pred() {
+		cleanup()
+	}
+}
+
+// WithError returns a cleanup function calling a custom handler if an error occurred.
+func WithError(fn func(error), cleanup func() error) func() {
+	return func() {
+		if err := cleanup(); err != nil {
+			fn(err)
+		}
+	}
+}
+
+// IgnoreError silently ignores errors in the cleanup function.
+func IgnoreError(cleanup func() error) func() {
+	return func() { _ = cleanup() }
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/cleanup/multi.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/cleanup/multi.go
new file mode 100644
index 00000000..280010b9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/cleanup/multi.go
@@ -0,0 +1,29 @@
+package cleanup
+
+// FailClean keeps track of functions to be executed if FailClean did
+// not receive a success signal.
+type FailClean struct {
+	success bool
+	fns     []func()
+}
+
+// Signal sends a success or fail signal to FailClean.
+func (f *FailClean) Signal(success bool) {
+	f.success = success
+}
+
+// Add adds another cleanup handler. The last added handler will be run first.
+func (f *FailClean) Add(fn func()) {
+	f.fns = append(f.fns, fn)
+}
+
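A sketch of the intended multi-step setup pattern for FailClean, pieced together from the method comments above; openInput and openOutput are hypothetical helpers, not part of the package, and the sketch assumes it runs inside go-txfile (the cleanup package is internal):

func setupPipeline() (err error) {
	var fail cleanup.FailClean
	defer fail.Cleanup() // runs registered handlers unless Signal(true) was called

	in, err := openInput() // hypothetical helper
	if err != nil {
		return err
	}
	fail.Add(func() { in.Close() })

	out, err := openOutput() // hypothetical helper
	if err != nil {
		return err
	}
	fail.Add(func() { out.Close() })

	fail.Signal(true) // success: keep the resources open
	return nil
}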
+// Cleanup runs all cleanup handlers in reverse order.
+func (f *FailClean) Cleanup() {
+	if f.success {
+		return
+	}
+
+	for i := len(f.fns) - 1; i >= 0; i-- {
+		f.fns[i]()
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/invariant/invariant.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/invariant/invariant.go
new file mode 100644
index 00000000..fed16395
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/invariant/invariant.go
@@ -0,0 +1,54 @@
+// Package invariant provides helpers for checking and panicking on faulty invariants.
+package invariant
+
+import "fmt"
+
+// Check panics with the provided message in case b is false.
+func Check(b bool, msg string) {
+	if b {
+		return
+	}
+
+	if msg == "" {
+		panic("failing invariant")
+	}
+	panic(msg)
+}
+
+// Checkf panics in case b is false. Checkf accepts a fmt.Sprintf
+// compatible format string with parameters.
+func Checkf(b bool, msgAndArgs ...interface{}) {
+	if b {
+		return
+	}
+
+	switch len(msgAndArgs) {
+	case 0:
+		panic("failing invariant")
+	case 1:
+		panic(msgAndArgs[0].(string))
+	default:
+		panic(fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...))
+	}
+}
+
+// CheckNot panics with the provided message in case b is true.
+func CheckNot(b bool, msg string) {
+	Check(!b, msg)
+}
+
+// CheckNotf panics in case b is true. CheckNotf accepts a fmt.Sprintf
+// compatible format string with parameters.
+func CheckNotf(b bool, msgAndArgs ...interface{}) {
+	Checkf(!b, msgAndArgs...)
+}
+
+// Unreachable marks some code sequence that must never be executed.
+func Unreachable(msg string) {
+	panic(msg)
+}
+
+// Unreachablef marks some code sequence that must never be executed.
+func Unreachablef(f string, vs ...interface{}) {
+	panic(fmt.Sprintf(f, vs...))
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/iter/iter.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/iter/iter.go
new file mode 100644
index 00000000..0c54a211
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/iter/iter.go
@@ -0,0 +1,15 @@
+// Package iter provides functions for common array iteration strategies.
+package iter
+
+// Fn is the type for range-based iterators.
+type Fn func(len int) (begin, end int, next func(int) int)
+
+// Forward returns limits and next function for forward iteration.
+func Forward(l int) (begin, end int, next func(int) int) {
+	return 0, l, func(i int) int { return i + 1 }
+}
+
+// Reversed returns limits and next function for reverse iteration.
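The iterator contract used throughout the freelist code is the (begin, end, next) triple returned by Forward above and Reversed just below. A minimal sketch of both directions; iter is an internal package, so in practice it is only importable from inside go-txfile:

package main

import (
	"fmt"

	"github.com/elastic/go-txfile/internal/iter"
)

func main() {
	vals := []string{"a", "b", "c"}

	for i, end, next := iter.Forward(len(vals)); i != end; i = next(i) {
		fmt.Println(vals[i]) // a, b, c
	}

	for i, end, next := iter.Reversed(len(vals)); i != end; i = next(i) {
		fmt.Println(vals[i]) // c, b, a
	}
}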
+func Reversed(l int) (begin, end int, next func(int) int) { + return l - 1, -1, func(i int) int { return i - 1 } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/tracelog/tracelog.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/tracelog/tracelog.go new file mode 100644 index 00000000..c023cf05 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/internal/tracelog/tracelog.go @@ -0,0 +1,44 @@ +package tracelog + +import ( + "fmt" + "os" + "strings" +) + +type Logger interface { + Println(...interface{}) + Printf(string, ...interface{}) +} + +type stderrLogger struct{} + +type nilLogger struct{} + +func Get(selector string) Logger { + if isEnabled(selector) { + return (*stderrLogger)(nil) + } + return (*nilLogger)(nil) +} + +func isEnabled(selector string) bool { + v := os.Getenv("TRACE_SELECTOR") + if v == "" { + return true + } + + selectors := strings.Split(v, ",") + for _, sel := range selectors { + if selector == strings.TrimSpace(sel) { + return true + } + } + return false +} + +func (*nilLogger) Println(...interface{}) {} +func (*nilLogger) Printf(string, ...interface{}) {} + +func (*stderrLogger) Println(vs ...interface{}) { fmt.Fprintln(os.Stderr, vs...) } +func (*stderrLogger) Printf(s string, vs ...interface{}) { fmt.Fprintf(os.Stderr, s, vs...) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/layout.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/layout.go new file mode 100644 index 00000000..7b02b95e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/layout.go @@ -0,0 +1,184 @@ +package txfile + +import ( + "fmt" + "hash/fnv" + "reflect" + "unsafe" + + bin "github.com/urso/go-bin" +) + +// on disk page layout for writing and parsing + +// primitive types: +type ( + u8 = bin.U8le + u16 = bin.U16le + u32 = bin.U32le + u64 = bin.U64le + i8 = bin.I8le + i16 = bin.I16le + i32 = bin.I32le + i64 = bin.I64le + + pgID u64 +) + +// Special page at beginning of file. +// A file holds to meta pages at the beginning of the file. A metaPage is +// updated after a write transaction has been completed. On error during +// transactions or when updating the metaPage, the old metaPage will still be +// valid, technically ignoring all contents written by the transactions active +// while the program/id did crash/fail. +type metaPage struct { + magic u32 + version u32 + pageSize u32 + maxSize u64 // maximum file size + flags u32 + root pgID // ID of first page to look for data. 
+ txid u64 // page transaction ID + freelist pgID // pointer to user area freelist + wal pgID // write-ahead-log root + dataEndMarker pgID // end marker of user-area page + metaEndMarker pgID // file end marker + metaTotal u64 // total number of pages in meta area + checksum u32 +} + +type metaBuf [unsafe.Sizeof(metaPage{})]byte + +const ( + metaFlagPrealloc = 1 << 0 // indicates the complete file has been preallocated +) + +type listPage struct { + next pgID // pointer to next entry + count u32 // number of entries in current page +} + +type freePage = listPage +type walPage = listPage + +const ( + metaPageHeaderSize = int(unsafe.Sizeof(metaPage{})) + listPageHeaderSize = int(unsafe.Sizeof(listPage{})) + walPageHeaderSize = int(unsafe.Sizeof(walPage{})) + freePageHeaderSize = int(unsafe.Sizeof(freePage{})) +) + +const magic uint32 = 0xBEA77AEB +const version uint32 = 1 + +func init() { + checkPacked := func(t reflect.Type) error { + off := uintptr(0) + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Offset != off { + return fmt.Errorf("field %v offset mismatch (expected=%v, actual=%v)", + f.Name, off, f.Offset) + } + off += f.Type.Size() + } + return nil + } + + // check compiler really generates packed structes. Required, so file can be + // accesed from within different architectures + checksum based on raw bytes + // contents are correct. + checkPacked(reflect.TypeOf(metaPage{})) + checkPacked(reflect.TypeOf(freePage{})) + checkPacked(reflect.TypeOf(walPage{})) +} + +func castMetaPage(b []byte) (p *metaPage) { castPageTo(&p, b); return } + +func (m *metaPage) Init(flags uint32, pageSize uint32, maxSize uint64) { + m.magic.Set(magic) + m.version.Set(version) + m.pageSize.Set(pageSize) + m.maxSize.Set(maxSize) + m.flags.Set(flags) + m.root.Set(0) + m.freelist.Set(0) + m.wal.Set(0) + m.dataEndMarker.Set(0) +} + +func (m *metaPage) Finalize() { + m.checksum.Set(m.computeChecksum()) +} + +func (m *metaPage) Validate() error { + if m.magic.Get() != magic { + return errMagic + } + if m.version.Get() != version { + return errVersion + } + if m.checksum.Get() != m.computeChecksum() { + return errChecksum + } + + return nil +} + +func (b *metaBuf) cast() *metaPage { return castMetaPage((*b)[:]) } + +func (m *metaPage) computeChecksum() uint32 { + h := fnv.New32a() + type metaHashContent [unsafe.Offsetof(metaPage{}.checksum)]byte + contents := *(*metaHashContent)(unsafe.Pointer(m)) + _, _ = h.Write(contents[:]) + return h.Sum32() +} + +func (id *pgID) Len() int { return id.access().Len() } +func (id *pgID) Get() PageID { return PageID(id.access().Get()) } +func (id *pgID) Set(v PageID) { id.access().Set(uint64(v)) } +func (id *pgID) access() *u64 { return (*u64)(id) } + +func castU8(b []byte) (u *u8) { mapMem(&u, b); return } +func castU16(b []byte) (u *u16) { mapMem(&u, b); return } +func castU32(b []byte) (u *u32) { mapMem(&u, b); return } +func castU64(b []byte) (u *u64) { mapMem(&u, b); return } + +func castListPage(b []byte) (node *listPage, data []byte) { + if castPageTo(&node, b); node != nil { + data = b[unsafe.Sizeof(listPage{}):] + } + return +} + +func castFreePage(b []byte) (node *freePage, data []byte) { + return castListPage(b) +} + +func castWalPage(b []byte) (node *walPage, data []byte) { + return castListPage(b) +} + +func mapMem(to interface{}, b []byte) { + bin.UnsafeCastStruct(to, b) +} + +func castPageTo(to interface{}, b []byte) { + mapMem(to, b) +} + +func traceMetaPage(meta *metaPage) { + traceln("meta page:") + traceln(" version:", meta.version.Get()) + 
traceln(" pagesize:", meta.pageSize.Get()) + traceln(" maxsize:", meta.maxSize.Get()) + traceln(" root:", meta.root.Get()) + traceln(" txid:", meta.txid.Get()) + traceln(" freelist:", meta.freelist.Get()) + traceln(" wal:", meta.wal.Get()) + traceln(" data end:", meta.dataEndMarker.Get()) + traceln(" meta end:", meta.metaEndMarker.Get()) + traceln(" meta total:", meta.metaTotal.Get()) + traceln(" checksum:", meta.checksum.Get()) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/lock.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/lock.go new file mode 100644 index 00000000..b54cf2ee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/lock.go @@ -0,0 +1,119 @@ +package txfile + +import "sync" + +// lock provides the file locking primitives for use within the current +// process. File locking as provided by lock, is not aware of other processes +// accessing the file. +// +// Lock types: +// - Shared: Shared locks are used by readonly transactions. Multiple readonly +// transactions can co-exist with one active write transaction. +// - Reserved: Write transactions take the 'Reserved' lock on a file, +// such that no other concurrent write transaction can exist. +// The Shared lock can still be locked by concurrent readers. +// - Pending: The Pending lock is used by write transactions to signal +// a write transaction is currently being committed. +// The Shared lock can still be used by readonly transactions, +// but no new readonly transaction can be started after +// the Pending lock has been acquired. +// - Exclusive: Once the exclusive lock is acquired by a write transaction, +// No other active transactions/locks exist on the file. +// +// Each Locktype can be accessed using `(*lock).()`. Each lock type +// implements a `Lock` and `Unlock` method. +// +// Note: Shared file access should be protected using `flock`. +type lock struct { + mu sync.Mutex + + // conditions + mutexes + shared *sync.Cond + exclusive *sync.Cond + reserved sync.Mutex + + // state + sharedCount uint + pendingSet bool +} + +type sharedLock lock +type reservedLock lock +type pendingLock lock +type exclusiveLock lock + +func newLock() *lock { + l := &lock{} + l.init() + return l +} + +func (l *lock) init() { + l.shared = sync.NewCond(&l.mu) + l.exclusive = sync.NewCond(&l.mu) +} + +// TxLock returns the standard Locker for the given transaction type. +func (l *lock) TxLock(readonly bool) sync.Locker { + if readonly { + return l.Shared() + } + return l.Reserved() +} + +// Shared returns the files shared locker. +func (l *lock) Shared() *sharedLock { return (*sharedLock)(l) } + +// Reserved returns the files reserved locker. +func (l *lock) Reserved() *reservedLock { return (*reservedLock)(l) } + +// Pending returns the files pending locker. +func (l *lock) Pending() *pendingLock { return (*pendingLock)(l) } + +// Pending returns the files exclusive locker. 
+func (l *lock) Exclusive() *exclusiveLock { return (*exclusiveLock)(l) } + +func (l *sharedLock) Lock() { waitCond(l.shared, l.check, l.inc) } +func (l *sharedLock) Unlock() { withLocker(&l.mu, l.dec) } +func (l *sharedLock) check() bool { return !l.pendingSet } +func (l *sharedLock) inc() { l.sharedCount++ } +func (l *sharedLock) dec() { + l.sharedCount-- + if l.sharedCount == 0 { + l.exclusive.Signal() + } +} + +func (l *reservedLock) Lock() { l.reserved.Lock() } +func (l *reservedLock) Unlock() { l.reserved.Unlock() } + +func (l *pendingLock) Lock() { + l.mu.Lock() + l.pendingSet = true + l.mu.Unlock() +} +func (l *pendingLock) Unlock() { + l.mu.Lock() + l.pendingSet = false + l.mu.Unlock() + l.shared.Broadcast() +} + +func (l *exclusiveLock) Lock() { waitCond(l.exclusive, l.check, func() {}) } +func (l *exclusiveLock) Unlock() {} +func (l *exclusiveLock) check() bool { return l.sharedCount == 0 } + +func waitCond(c *sync.Cond, check func() bool, upd func()) { + withLocker(c.L, func() { + for !check() { + c.Wait() + } + upd() + }) +} + +func withLocker(l sync.Locker, fn func()) { + l.Lock() + defer l.Unlock() + fn() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/meta_sizing.py b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/meta_sizing.py new file mode 100644 index 00000000..4f561d3c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/meta_sizing.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python + +import argparse +import pprint + + +list_header = 8 + 4 # next pointer + page entry count + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('-p', '--pagesize', dest='pagesize', type=long, default=4096) + parser.add_argument('-s', '--maxsize', dest='maxsize', type=long, default=1 << 30) + parser.add_argument('-w', '--wal', dest='wal', type=long, default=1000) + args = parser.parse_args() + + stats = compute_stats(args.pagesize, args.maxsize, args.wal) + pprint.pprint(stats, indent=2) + + +def compute_stats(page_size, max_size, wal_entries): + # multiply by 2, as next transaction might require same amount + # of pages + + max_pages = max_size / page_size + + stats = { + "pagesize": page_size, + "max_size": max_size, + "max_pages": max_pages, + "wal_entries": wal_entries, + } + + wal_meta = wal_mapping_pages(page_size, wal_entries) + stats['wal_meta'] = 2 * wal_meta + stats['wal_meta_bytes'] = 2 * wal_meta * page_size + stats['wal_meta_bytes_io_per_tx'] = wal_meta * page_size + + freelist = freelist_pages(page_size, max_pages) + stats['freelist_pages'] = 2 * freelist + stats['freelist_bytes'] = 2 * freelist * page_size + stats['freelist_bytes_io_per_tx'] = freelist * page_size + + file_header = 2 + stats['file header'] = file_header + + count = wal_meta + wal_entries + 2 * freelist + file_header + stats['min_meta_pages'] = count + + # meta allocator grows in power of 2 + meta_pages = next_power_of_2(count) + internal_frag = meta_pages - count + data_pages = max_pages - meta_pages + + stats['meta_pages'] = meta_pages + stats['data_pages'] = data_pages + stats['meta_bytes'] = meta_pages * page_size + stats['data_bytes'] = data_pages * page_size + stats['internal_fragmentation'] = internal_frag + stats['meta_percentage'] = 100.0 * float(meta_pages) / float(max_pages) + stats['data_percentage'] = 100.0 * float(data_pages) / float(max_pages) + stats['frag_percentage'] = 100.0 * float(internal_frag) / float(max_pages) + + return stats + + +def pages(entries, entries_per_page): + return 
(entries + (entries_per_page - 1)) / entries_per_page + + +def freelist_pages(page_size, max_pages): + """Compute max number of freelist pages required. + Assumes full fragmentation, such that every second page is free. + Due to run-length-encoding of freelist entries, this assumption gets us + the max number of freelist entries.""" + + # estimate of max number of free pages with full fragmentation + entries = (max_pages + 1) / 2 + + avail = page_size - list_header + entries_per_page = avail / 8 # 8 byte per entry + + return pages(entries, entries_per_page) + + +def wal_mapping_pages(page_size, entries): + """Compute number of required pages for the wal id mapping""" + entries_per_page = (page_size - list_header) / 14 # 14 byte per entry + return pages(entries, entries_per_page) + + +def next_power_of_2(x): + return 1 << (x-1).bit_length() + + +if __name__ == "__main__": + main() diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/page.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/page.go new file mode 100644 index 00000000..8b26781f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/page.go @@ -0,0 +1,238 @@ +package txfile + +// Page provides access to an on disk page. +// Pages can only be overwritten from within a read-write Transaction. +// Writes are be buffered until transaction commit, such that other but the +// current transaction will not be able to see file changes. +type Page struct { + id PageID // Original PageID for user access. + ondiskID PageID // On disk PageID. If contents is loaded from overwrite page, ondiskID != id + + tx *Tx // Parent transaction. + bytes []byte // Page contents. + flags pageFlags +} + +// PageID used to reference a file pages. +type PageID uint64 + +type pageFlags struct { + new bool // page has been allocated. No on-disk contents. + freed bool // page has been freed within current transaction. + flushed bool // page has already been flushed. No more writing possible. + cached bool // original page contents is copied in memory and can be overwritten. + dirty bool // page is marked as dirty and will be written on commit +} + +const minPageSize = 1024 + +// newPage creates a new page context within the current transaction. +func newPage(tx *Tx, id PageID) *Page { + return &Page{id: id, ondiskID: id, tx: tx} +} + +// ID returns the pages PageID. The ID can be used to store a reference +// to this page, for use within another transaction. +func (p *Page) ID() PageID { return p.id } + +// Readonly checks if the page is accessed in readonly mode. +func (p *Page) Readonly() bool { return p.tx.Readonly() } + +// Writable checks if the page can be written to. +func (p *Page) Writable() bool { return !p.Readonly() } + +// Dirty reports if the page is marked as dirty and needs to be flushed on +// commit. +func (p *Page) Dirty() bool { return p.flags.dirty } + +// MarkDirty marks a page as dirty. MarkDirty should only be used if +// in-place modification to the pages buffer have been made, after use of Load(). +func (p *Page) MarkDirty() error { + if err := p.canWrite(); err != nil { + return err + } + + p.flags.dirty = true + return nil +} + +// Free marks a page as free. Freeing a dirty page will return an error. +// The page will be returned to the allocator when the transaction commits. 
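A short sketch of the typical read-modify-write cycle on a page inside a write transaction, using the methods defined in this file; it assumes `txfile` is imported, and the byte written is arbitrary:

func touchPage(tx *txfile.Tx, id txfile.PageID) error {
	page, err := tx.Page(id)
	if err != nil {
		return err
	}

	// Copy the on-disk contents into a private, writable buffer.
	if err := page.Load(); err != nil {
		return err
	}

	buf, err := page.Bytes()
	if err != nil {
		return err
	}
	buf[0] = 0x2a // in-place modification of the cached buffer

	// In-place edits are only persisted if the page is marked dirty.
	return page.MarkDirty()
}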
+func (p *Page) Free() error { + if err := p.canWrite(); err != nil { + return err + } + if p.flags.dirty { + return errFreeDirtyPage + } + + p.tx.freePage(p.id) + if p.id != p.ondiskID { + p.tx.freeWALID(p.id, p.ondiskID) + } + + p.flags.freed = true + return nil +} + +// Bytes returns the page its contents. +// One can only modify the buffer in write transaction, if Load() or SetBytes() +// have been called before Bytes(). Otherwise a non-recoverable BUS panic might +// be triggerd (program will be killed by OS). +// Bytes returns an error if the page has just been allocated (no backing buffer) +// or the transaction is already been closed. +// Use SetBytes() or Load(), to initialize the buffer of a newly allocated page. +func (p *Page) Bytes() ([]byte, error) { + if err := p.canRead(); err != nil { + return nil, err + } + if p.bytes == nil && p.flags.new { + return nil, errNoPageData + } + + return p.getBytes() +} + +func (p *Page) getBytes() ([]byte, error) { + if p.bytes == nil { + bytes := p.tx.access(p.ondiskID) + if bytes == nil { + return nil, errOutOfBounds + } + + p.bytes = bytes + } + + return p.bytes, nil +} + +// Load reads the pages original contents into a cached memory buffer, allowing +// for in-place modifications to the page. Load returns and error, if used from +// within a readonly transaction. +// If the page has been allocated from within the current transaction, a new +// temporary buffer will be allocated. +// After load, the write-buffer can be accessed via Bytes(). After modifications to the buffer, +// one must use MarkDirty(), so the page will be flushed on commit. +func (p *Page) Load() error { + if err := p.canWrite(); err != nil { + return err + } + + return p.loadBytes() +} + +func (p *Page) loadBytes() error { + if p.flags.cached { + return nil + } + + if p.flags.new { + p.flags.cached = true + p.bytes = make([]byte, p.tx.PageSize()) + return nil + } + + if p.flags.dirty { + p.flags.cached = true + return nil + } + + // copy original contents into writable buffer (page needs to be marked dirty if contents is overwritten) + orig, err := p.getBytes() + if err != nil { + return err + } + tmp := make([]byte, len(orig)) + copy(tmp, orig) + p.bytes = tmp + p.flags.cached = true + + return nil +} + +// SetBytes sets the new contents of a page. If the size of contents is less +// then the files page size, the original contents must be read. If the length +// of contents matches the page size, a reference to the contents buffer will +// be held. To enforce a copy, use Load(), Bytes(), copy() and MarkDirty(). +func (p *Page) SetBytes(contents []byte) error { + if err := p.canWrite(); err != nil { + return err + } + + pageSize := p.tx.PageSize() + if len(contents) > pageSize { + return errTooManyBytes + } + + if len(contents) < pageSize { + if err := p.loadBytes(); err != nil { + return err + } + copy(p.bytes, contents) + } else { + p.bytes = contents + } + + p.flags.dirty = true + return nil +} + +// Flush flushes the page write buffer, if the page is marked as dirty. +// The page its contents must not be changed after calling Flush, as the flush +// is executed asynchronously in the background. +// Dirty pages will be automatically flushed on commit. 
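The write-only counterpart, overwriting a page wholesale with SetBytes and optionally flushing early; a sketch assuming `txfile` is imported. Whether the buffer is referenced or copied depends on its length, as described above:

func overwritePage(tx *txfile.Tx, id txfile.PageID, contents []byte) error {
	page, err := tx.Page(id)
	if err != nil {
		return err
	}

	// len(contents) == tx.PageSize(): the buffer is referenced directly.
	// len(contents) <  tx.PageSize(): the original page is loaded and only
	// the prefix is overwritten. Both paths mark the page dirty.
	if err := page.SetBytes(contents); err != nil {
		return err
	}

	// Optional: hand the buffer to the background writer early; dirty pages
	// are flushed on commit anyway. The buffer must not be modified afterwards.
	return page.Flush()
}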
+func (p *Page) Flush() error { + if err := p.canWrite(); err != nil { + return err + } + + return p.doFlush() +} + +func (p *Page) doFlush() error { + if !p.flags.dirty || p.flags.flushed { + return nil + } + + if !p.flags.new { + if p.id == p.ondiskID { + walID := p.tx.allocWALID(p.id) + if walID == 0 { + return errOutOfMemory + } + p.ondiskID = walID + } else { + // page already in WAL -> free WAL page and write into original page + p.tx.freeWALID(p.id, p.ondiskID) + p.ondiskID = p.id + } + } + + p.flags.flushed = true + p.tx.scheduleWrite(p.ondiskID, p.bytes) + return nil +} + +func (p *Page) canRead() error { + if !p.tx.Active() { + return errTxFinished + } + if p.flags.freed { + return errFreedPage + } + return nil +} + +func (p *Page) canWrite() error { + if err := p.tx.canWrite(); err != nil { + return err + } + + if p.flags.freed { + return errFreedPage + } + if p.flags.flushed { + return errPageFlushed + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pageset.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pageset.go new file mode 100644 index 00000000..33e26cf2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pageset.go @@ -0,0 +1,49 @@ +package txfile + +type pageSet map[PageID]struct{} + +func (s *pageSet) Add(id PageID) { + if *s == nil { + *s = pageSet{} + } + (*s)[id] = struct{}{} +} + +func (s pageSet) Has(id PageID) bool { + if s != nil { + _, exists := s[id] + return exists + } + return false +} + +func (s pageSet) Empty() bool { return s.Count() == 0 } + +func (s pageSet) Count() int { return len(s) } + +func (s pageSet) IDs() idList { + L := len(s) + if L == 0 { + return nil + } + + l, i := make(idList, L), 0 + for id := range s { + l[i], i = id, i+1 + } + return l +} + +func (s pageSet) Regions() regionList { + if len(s) == 0 { + return nil + } + + regions, i := make(regionList, len(s)), 0 + for id := range s { + regions[i], i = region{id: id, count: 1}, i+1 + } + optimizeRegionList(®ions) + + return regions +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/access.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/access.go new file mode 100644 index 00000000..4459a31b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/access.go @@ -0,0 +1,103 @@ +package pq + +import "github.com/elastic/go-txfile" + +// Access provides transaction support and access to pages and queue header. +// It wraps the Delegate for providing a common interface for working with +// transactions and files. +type access struct { + Delegate + rootID txfile.PageID + rootOff int +} + +func makeAccess(delegate Delegate) (access, error) { + rootID, rootOff := delegate.Root() + if rootID == 0 { + return access{}, errNoQueueRoot + } + + return access{ + Delegate: delegate, + rootID: rootID, + rootOff: int(rootOff), + }, nil +} + +// ReadRoot reads the root page into an array. +// ReadRoot create a short lived read transaction for accessing and copying the +// queue root. +func (a *access) ReadRoot() ([SzRoot]byte, error) { + var buf [SzRoot]byte + + tx := a.BeginRead() + defer tx.Close() + + return buf, withPage(tx, a.rootID, func(page []byte) error { + n := copy(buf[:], page[a.rootOff:]) + if n < SzRoot { + return errIncompleteQueueRoot + } + return nil + }) +} + +// RootPage accesses the queue root page from within the passed transaction. 
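A small in-package sketch of how the queue locates and inspects its root via these helpers; the Delegate d is assumed to be initialized elsewhere, and traceln is the pq package's trace helper:

func dumpQueueVersion(d Delegate) error {
	a, err := makeAccess(d)
	if err != nil {
		return err // e.g. errNoQueueRoot if no root page has been set yet
	}

	root, err := a.ReadRoot() // short-lived read tx, copies SzRoot bytes
	if err != nil {
		return err
	}

	hdr := castQueueRootPage(root[:])
	traceln("queue version:", hdr.version.Get())
	return nil
}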
+func (a *access) RootPage(tx *txfile.Tx) (*txfile.Page, error) { + return tx.Page(a.rootID) +} + +// LoadRootPage accesses the queue root page from within the passed write +// transaction. +// The Root page it's content is loaded into the write buffer for manipulations. +// The page returned is not marked as dirty yet. +func (a *access) LoadRootPage(tx *txfile.Tx) (*txfile.Page, *queuePage, error) { + var hdr *queuePage + page, err := a.RootPage(tx) + if err == nil { + err = page.Load() + if err == nil { + buf, _ := page.Bytes() + hdr = castQueueRootPage(buf[a.rootOff:]) + } + } + + return page, hdr, err +} + +// RootHdr returns a pointer to the queue root header. The pointer to the +// header is only valid as long as the transaction is still active. +func (a *access) RootHdr(tx *txfile.Tx) (hdr *queuePage, err error) { + err = withPage(tx, a.rootID, func(buf []byte) error { + hdr = castQueueRootPage(buf[a.rootOff:]) + return nil + }) + return +} + +// ParsePosition parses an on disk position, providing page id, page offset and +// event id in a more accessible format. +func (a *access) ParsePosition(p *pos) position { + page, off := a.SplitOffset(uintptr(p.offset.Get())) + if page != 0 && off == 0 { + off = uintptr(a.PageSize()) + } + + return position{ + page: page, + off: int(off), + id: p.id.Get(), + } +} + +// WritePosition serializes a position into it's on-disk representation. +func (a *access) WritePosition(to *pos, pos position) { + pageOff := pos.off + if pageOff == a.PageSize() { + pageOff = 0 // use 0 to mark page offset as end-of-page + } + + off := a.Offset(pos.page, uintptr(pageOff)) + to.offset.Set(uint64(off)) + to.id.Set(pos.id) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/ack.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/ack.go new file mode 100644 index 00000000..704d5f4c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/ack.go @@ -0,0 +1,297 @@ +package pq + +import ( + "github.com/elastic/go-txfile" + "github.com/elastic/go-txfile/internal/invariant" +) + +// acker is used to asynchronously ack and remove events from the queue. +type acker struct { + accessor *access + active bool + + totalEventCount uint + totalFreedPages uint + + ackCB func(events, pages uint) +} + +// ackState records the changes required to finish the ACK step. +type ackState struct { + free []txfile.PageID // Collect page ids to be freed. + head position // New queue head, pointing to first event in first available page + read position // New on-disk read pointer, pointing to first not-yet ACKed event. +} + +func newAcker( + accessor *access, + cb func(uint, uint), +) (*acker, error) { + return &acker{ + active: true, + accessor: accessor, + ackCB: cb, + }, nil +} + +func (a *acker) close() { + a.active = false +} + +// handle processes an ACK by freeing pages and +// updating the head and read positions in the queue root. +// So to not interfere with concurrent readers potentially updating pointers +// or adding new contents to a page, the last event page in the queue will never +// be freed. Still the read pointer might point past the last page. 
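The two position helpers above are symmetric. A quick round trip illustrates the end-of-page convention; the concrete values are made up:

func positionRoundTrip(a *access) {
	in := position{page: 7, off: a.PageSize(), id: 42}

	var onDisk pos
	a.WritePosition(&onDisk, in) // an end-of-page offset is stored as 0

	out := a.ParsePosition(&onDisk)
	// out.page == 7, out.off == a.PageSize(), out.id == 42
	_ = out
}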
+func (a *acker) handle(n uint) error { + if n == 0 { + return nil + } + + if !a.active { + return errClosed + } + + traceln("acker: pq ack events:", n) + + state, err := a.initACK(n) + if err != nil { + return err + } + + // start write transaction to free pages and update the next read offset in + // the queue root + tx := a.accessor.BeginCleanup() + defer tx.Close() + + traceln("acker: free data pages:", len(state.free)) + for _, id := range state.free { + page, err := tx.Page(id) + if err != nil { + return err + } + + traceln("free page", id) + if err := page.Free(); err != nil { + return err + } + } + + // update queue header + hdrPage, hdr, err := a.accessor.LoadRootPage(tx) + if err != nil { + return err + } + a.accessor.WritePosition(&hdr.head, state.head) + a.accessor.WritePosition(&hdr.read, state.read) + hdr.inuse.Set(hdr.inuse.Get() - uint64(len(state.free))) + hdrPage.MarkDirty() + + traceQueueHeader(hdr) + + if err := tx.Commit(); err != nil { + return err + } + + a.totalEventCount += n + a.totalFreedPages += uint(len(state.free)) + tracef("Acked events. total events acked: %v, total pages freed: %v \n", a.totalEventCount, a.totalFreedPages) + + if a.ackCB != nil { + a.ackCB(n, uint(len(state.free))) + } + + return nil +} + +// initACK uses a read-transaction to collect pages to be removed from list and +// find offset of next read required to start reading the next un-acked event. +func (a *acker) initACK(n uint) (ackState, error) { + tx := a.accessor.BeginRead() + defer tx.Close() + + hdr, err := a.accessor.RootHdr(tx) + if err != nil { + return ackState{}, err + } + + headPos, startPos, endPos := a.queueRange(hdr) + startID := startPos.id + endID := startID + uint64(n) + if startPos.page == 0 { + return ackState{}, errACKEmptyQueue + } + if !idLessEq(endID, endPos.id) { + return ackState{}, errACKTooManyEvents + } + + c := makeTxCursor(tx, a.accessor, &cursor{ + page: headPos.page, + off: headPos.off, + pageSize: a.accessor.PageSize(), + }) + + // Advance through pages and collect ids of all pages to be freed. + // Free all pages, but the very last data page, so to not interfere with + // concurrent writes. + ids, cleanAll, err := a.collectFreePages(&c, endID) + if err != nil { + return ackState{}, err + } + + // find offset of next event to start reading from + var head, read position + if !cleanAll { + head, read, err = a.findNewStartPositions(&c, endID) + if err != nil { + return ackState{}, err + } + } else { + head = endPos + read = endPos + } + + return ackState{ + free: ids, + head: head, + read: read, + }, nil +} + +// queueRange finds the start and end positions of not yet acked events in the +// queue. +func (a *acker) queueRange(hdr *queuePage) (head, start, end position) { + start = a.accessor.ParsePosition(&hdr.read) + head = a.accessor.ParsePosition(&hdr.head) + if start.page == 0 { + start = head + } + + end = a.accessor.ParsePosition(&hdr.tail) + return +} + +// collectFreePages collects all pages to be freed. A page can be freed if all +// events within the page have been acked. We want to free all pages, but the +// very last data page, so to not interfere with concurrent writes. +// All pages up to endID will be collected. 
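To make the bounds check in initACK concrete, assume (made-up values) that the header currently reads read.id = 100 and tail.id = 110. Acking n = 4 events yields startID = 100 and endID = 104, which passes because idLessEq(104, 110); asking to ack 11 events would fail with errACKTooManyEvents, and acking on a queue whose read and head positions are both unset fails with errACKEmptyQueue.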
+func (a *acker) collectFreePages(c *txCursor, endID uint64) ([]txfile.PageID, bool, error) { + var ( + ids []txfile.PageID + firstID, lastID uint64 + cleanAll = false + ) + + for { + hdr, err := c.PageHeader() + if err != nil { + return nil, false, err + } + + // stop searching if endID is in the current page + dataOnlyPage := hdr.off.Get() == 0 // no event starts within this page + if !dataOnlyPage { + firstID, lastID = hdr.first.Get(), hdr.last.Get() + + // inc 'lastID', so to hold on current page if endID would point to next + // the page. This helps the reader, potentially pointing to the current + // page, if next page has not been committed when reading events. + lastID++ + + if idLessEq(firstID, endID) && idLessEq(endID, lastID) { + break + } + } + + // stop searching if current page is the last page. The last page must + // be active for the writer to add more events and link new pages. + lastPage := hdr.next.Get() == 0 + if lastPage { + cleanAll = true + invariant.Check(lastID+1 == endID, "last event ID and ack event id missmatch") + break + } + + // found intermediate page with ACKed events/contents + // -> add page id to freelist and advance to next page + ids = append(ids, c.cursor.page) + ok, err := c.AdvancePage() + if err != nil { + return nil, false, err + } + invariant.Check(ok, "page list linkage broken") + } + + return ids, cleanAll, nil +} + +// findNewStartPositions skips acked events, so to find the new head and read pointers to be set +// in the updated queue header. +func (a *acker) findNewStartPositions(c *txCursor, id uint64) (head, read position, err error) { + var hdr *eventPage + + hdr, err = c.PageHeader() + if err != nil { + return + } + + head = position{ + page: c.cursor.page, + off: int(hdr.off.Get()), + id: hdr.first.Get(), + } + + if id == head.id { + read = head + return + } + + // skip contents in current page until we did reach start of next event. + c.cursor.off = head.off + for currentID := head.id; currentID != id; currentID++ { + var evtHdr *eventHeader + evtHdr, err = c.ReadEventHeader() + if err != nil { + return + } + + err = c.Skip(int(evtHdr.sz.Get())) + if err != nil { + return + } + } + + read = position{ + page: c.cursor.page, + off: c.cursor.off, + id: id, + } + return +} + +// Active returns the total number of active, not yet ACKed events. +func (a *acker) Active() (uint, error) { + tx := a.accessor.BeginRead() + defer tx.Close() + + hdr, err := a.accessor.RootHdr(tx) + if err != nil { + return 0, err + } + + // Empty queue? + if hdr.tail.offset.Get() == 0 { + return 0, nil + } + + var start, end uint64 + + end = hdr.tail.id.Get() + if hdr.read.offset.Get() != 0 { + start = hdr.read.id.Get() + } else { + start = hdr.head.id.Get() + } + + return uint(end - start), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/buffer.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/buffer.go new file mode 100644 index 00000000..c63ed6ad --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/buffer.go @@ -0,0 +1,244 @@ +package pq + +import "github.com/elastic/go-txfile/internal/invariant" + +// buffer holds allocated and yet unallocated in-memory pages, for appending +// events to. 
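A sketch of how a single event lands in this write buffer, using the methods defined below; the real writer (not part of this excerpt) performs additional bookkeeping around flushing and ids:

func appendEvent(b *buffer, id uint64, payload []byte) {
	// Reserve space for the event header in front of the payload.
	hdr := b.ReserveHdr(szEventHeader)
	castEventHeader(hdr).sz.Set(uint32(len(payload)))

	// The payload may spill across several in-memory pages.
	b.Append(payload)

	// Close the event: record first/last ids and mark touched pages dirty.
	b.CommitEvent(id)
}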
+type buffer struct { + // in-memory pages + head, tail *page + + pool *pagePool + + // settings (values don't change after init) + pageSize int + hdrSize int + payloadSize int // effective page contents + + // page write state + avail int // available space before forcing flush + payload []byte // byte slice of available payload/bytes in the current page + page *page // current page + + // Event write state. Stores reference to start of current events, so we can + // put in the event header once the current event is finished. + eventHdrPage *page + eventHdrOffset int + eventHdrSize int +} + +func newBuffer(pool *pagePool, page *page, pages, pageSize, hdrSz int) *buffer { + payloadSz := pageSize - hdrSz + avail := payloadSz * pages + + b := &buffer{ + head: nil, + tail: nil, + pool: pool, + pageSize: pageSize, + hdrSize: hdrSz, + payloadSize: payloadSz, + avail: avail, + payload: nil, + page: nil, + eventHdrPage: nil, + eventHdrOffset: -1, + eventHdrSize: -1, + } + + if page != nil { + // init with end of on-disk list from former writes + b.head = page + b.tail = page + + contentsLength := int(page.Meta.EndOff) - b.hdrSize + b.avail -= contentsLength + b.payload = page.Data[page.Meta.EndOff:] + b.page = page + } + + return b +} + +// Avail returns amount of bytes available. Returns a value <0, if contents in +// buffer exceeds the high-water-marks. +func (b *buffer) Avail() int { + return b.avail +} + +// Append adds more bytes to the current event. Use `CommitEvent` to finalize the +// writing of the current event. +// If required append adds new unallocated pages to the write buffer. +func (b *buffer) Append(data []byte) { + for len(data) > 0 { + if len(b.payload) == 0 { + b.advancePage() + } + + n := copy(b.payload, data) + b.payload = b.payload[n:] + data = data[n:] + b.avail -= n + + tracef("writer: append %v bytes to (page: %v, off: %v)\n", n, b.page.Meta.ID, b.page.Meta.EndOff) + + b.page.Meta.EndOff += uint32(n) + } +} + +func (b *buffer) advancePage() { + // link new page into list + page := b.newPage() + if b.tail == nil { + b.head = page + b.tail = page + } else { + b.tail.Next = page + b.tail = page + } + + b.page = page + b.payload = page.Payload() + page.Meta.EndOff = uint32(szEventPageHeader) +} + +func (b *buffer) newPage() *page { + return b.pool.NewPage() +} + +func (b *buffer) releasePage(p *page) { + b.pool.Release(p) +} + +// ReserveHdr reserves space for the next event header in the write buffer. +// The start position in the buffer is tracked by the buffer, until the event is +// finished via CommitEvent. +func (b *buffer) ReserveHdr(n int) []byte { + if n > b.payloadSize { + return nil + } + + invariant.Check(b.eventHdrPage == nil, "can not reserve a new event header if recent event is not finished yet") + + // reserve n bytes in payload + if len(b.payload) < n { + b.advancePage() + } + + payloadWritten := b.payloadSize - len(b.payload) + b.eventHdrPage = b.page + b.eventHdrPage.Meta.EndOff += uint32(n) + b.eventHdrOffset = b.hdrSize + payloadWritten + b.eventHdrSize = n + b.payload = b.payload[n:] + b.avail -= n + + return b.ActiveEventHdr() +} + +// ActiveEventHdr returns the current event header bytes content for writing/reading. +func (b *buffer) ActiveEventHdr() []byte { + if b.eventHdrPage == nil { + return nil + } + + off := b.eventHdrOffset + return b.eventHdrPage.Data[off : off+b.eventHdrSize] +} + +// CommitEvent marks the current event being finished. Finalize pages +// and prepare for next event. 
+func (b *buffer) CommitEvent(id uint64) { + invariant.Check(b.eventHdrPage != nil, "no active event") + + page := b.eventHdrPage + meta := &page.Meta + if meta.FirstOff == 0 { + meta.FirstOff = uint32(b.eventHdrOffset) + meta.FirstID = id + } + meta.LastID = id + page.MarkDirty() + + // mark all event pages as dirty + for current := b.eventHdrPage; current != nil; current = current.Next { + current.MarkDirty() + } + // mark head as dirty if yet unlinked + if b.head != b.eventHdrPage && b.head.Next == b.eventHdrPage { + b.head.MarkDirty() + } + + b.eventHdrPage = nil + b.eventHdrOffset = -1 + b.eventHdrSize = -1 +} + +// Pages returns start and end page to be serialized. +// The `end` page must not be serialized +func (b *buffer) Pages() (start, end *page) { + if b.head == nil || !b.head.Dirty() { + return nil, nil + } + + if b.eventHdrPage == nil { + if b.tail.Dirty() { + return b.head, nil + } + for current := b.head; current != nil; current = current.Next { + if !current.Dirty() { + return b.head, current + } + } + + invariant.Unreachable("tail if list dirty and not dirty?") + } + + end = b.eventHdrPage + if end.Dirty() { + end = end.Next + } + return b.head, end +} + +// Reset removes all but the last page non-dirty page from the buffer. +// The last written page is still required for writing/linking new events/pages. +func (b *buffer) Reset(last *page) { + if b.head == nil { + return + } + + // Find last page not to be removed. A non-dirty page must not be removed + // if the next page is dirty, so to update the on-disk link. + // If no page is dirty, keep last page for linking. + pages := 0 + end := b.head + for current := b.head; current.Next != nil && current != b.eventHdrPage; current = current.Next { + if current.Next.Dirty() || current == last { + end = current + break + } + end = current.Next + pages++ + } + + tracef("reset pages (%v)\n", pages) + + invariant.Check(end != nil, "must not empty page list on reset") + + // release pages + spaceFreed := 0 + for page := b.head; page != end; { + freed := int(page.Meta.EndOff) - szEventPageHeader + tracef("writer: release page %v (%v)\n", page.Meta.ID, freed) + + next := page.Next + spaceFreed += freed + b.releasePage(page) + page = next + } + b.head = end + + // update memory usage counters + b.avail += spaceFreed +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/cursor.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/cursor.go new file mode 100644 index 00000000..f0564f93 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/cursor.go @@ -0,0 +1,172 @@ +package pq + +import "github.com/elastic/go-txfile" + +// cursor holds state for iterating events in the queue. +type cursor struct { + page txfile.PageID + off int + pageSize int +} + +// txCursor is used to advance a cursor within a transaction. +type txCursor struct { + *cursor + accessor *access + tx *txfile.Tx + page *txfile.Page +} + +// Nil checks if the cursor is pointing to a page. Returns true, if cursor is +// not pointing to any page in the queue. 
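Reading one event at a cursor position, sketched with the txCursor helpers from this file; the real reader additionally checks how far the queue has been committed before following links:

func readEvent(tx *txfile.Tx, a *access, c *cursor) ([]byte, error) {
	txc := makeTxCursor(tx, a, c)

	hdr, err := txc.ReadEventHeader() // advances the cursor past the header
	if err != nil {
		return nil, err
	}

	payload := make([]byte, hdr.sz.Get())
	if _, err := txc.Read(payload); err != nil {
		return nil, err
	}
	return payload, nil
}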
+func (c *cursor) Nil() bool { + return c.page == 0 +} + +func makeTxCursor(tx *txfile.Tx, accessor *access, cursor *cursor) txCursor { + return txCursor{ + tx: tx, + accessor: accessor, + page: nil, + cursor: cursor, + } +} + +func (c *txCursor) init() error { + if c.page != nil { + return nil + } + page, err := c.tx.Page(c.cursor.page) + if err != nil { + return err + } + + c.page = page + return nil +} + +// Read reads more bytes from the current event into b. If the end of the +// current event has reached, no bytes will be read. +func (c *txCursor) Read(b []byte) (int, error) { + if err := c.init(); err != nil { + return 0, err + } + + if c.Nil() { + return 0, nil + } + + to, err := c.readInto(b) + return len(b) - len(to), err +} + +// Skip skips the next n bytes. +func (c *txCursor) Skip(n int) error { + for n > 0 { + if c.PageBytes() == 0 { + ok, err := c.AdvancePage() + if err != nil { + return err + } + if !ok { + return errSeekPageFailed + } + } + + max := n + if L := c.PageBytes(); L < max { + max = L + } + c.cursor.off += max + n -= max + } + + return nil +} + +func (c *txCursor) readInto(to []byte) ([]byte, error) { + for len(to) > 0 { + // try to advance cursor to next page if last read did end at end of page + if c.PageBytes() == 0 { + ok, err := c.AdvancePage() + if !ok || err != nil { + return to, err + } + } + + var n int + err := c.WithBytes(func(b []byte) { n = copy(to, b) }) + to = to[n:] + c.cursor.off += n + if err != nil { + return to, err + } + } + + return to, nil +} + +func (c *txCursor) ReadEventHeader() (hdr *eventHeader, err error) { + err = c.WithBytes(func(b []byte) { + hdr = castEventHeader(b) + c.off += szEventHeader + }) + return +} + +func (c *txCursor) PageHeader() (hdr *eventPage, err error) { + err = c.WithHdr(func(h *eventPage) { + hdr = h + }) + return +} + +func (c *txCursor) AdvancePage() (ok bool, err error) { + err = c.WithHdr(func(hdr *eventPage) { + nextID := txfile.PageID(hdr.next.Get()) + tracef("advance page from %v -> %v\n", c.cursor.page, nextID) + ok = nextID != 0 + + if ok { + c.cursor.page = nextID + c.cursor.off = szEventPageHeader + c.page = nil + } + }) + return +} + +func (c *txCursor) WithPage(fn func([]byte)) error { + if err := c.init(); err != nil { + return err + } + + buf, err := c.page.Bytes() + if err != nil { + return err + } + + fn(buf) + return nil +} + +func (c *txCursor) WithHdr(fn func(*eventPage)) error { + return c.WithPage(func(b []byte) { + fn(castEventPageHeader(b)) + }) +} + +func (c *txCursor) WithBytes(fn func([]byte)) error { + return c.WithPage(func(b []byte) { + fn(b[c.off:]) + }) +} + +// PageBytes reports the amount of bytes still available in current page +func (c *cursor) PageBytes() int { + return c.pageSize - c.off +} + +func (c *cursor) Reset() { + *c = cursor{} +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/delegate.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/delegate.go new file mode 100644 index 00000000..aa919799 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/delegate.go @@ -0,0 +1,110 @@ +package pq + +import "github.com/elastic/go-txfile" + +// Delegate is used by the persistent queue to query common parameters and +// start transactions when required. +type Delegate interface { + // PageSize reports the page size to be used by the backing file. + PageSize() int + + // Root returns the queues root on file. 
+ Root() (txfile.PageID, uintptr) + + Offset(id txfile.PageID, offset uintptr) uintptr + + SplitOffset(uintptr) (txfile.PageID, uintptr) + + // BeginWrite must create a read-write transaction for use by the writer. + // The transaction will be used to allocate pages and flush the current write + // buffer. + BeginWrite() *txfile.Tx + + // BeginRead must return a readonly transaction. + BeginRead() *txfile.Tx + + // BeginCleanup must return a read-write transaction for the ACK handling to + // remove events. No new contents will be written, but pages will be freed + // and the queue root page being updated. + BeginCleanup() *txfile.Tx +} + +// standaloneDelegate wraps a txfile.File into a standalone queue only file. +// The delegate sets the files root to the queue header. +type standaloneDelegate struct { + file *txfile.File + root txfile.PageID +} + +// NewStandaloneDelegate creates a standaonle Delegate from an txfile.File +// instance. This function will allocate and initialize the queue root page. +func NewStandaloneDelegate(f *txfile.File) (Delegate, error) { + tx := f.Begin() + defer tx.Close() + + root := tx.Root() + if root == 0 { + var err error + + root, err = initQueueRoot(tx) + if err != nil { + return nil, err + } + } + + return &standaloneDelegate{file: f, root: root}, nil +} + +func initQueueRoot(tx *txfile.Tx) (txfile.PageID, error) { + page, err := tx.Alloc() + if err != nil { + return 0, err + } + + buf := MakeRoot() + if err := page.SetBytes(buf[:]); err != nil { + return 0, err + } + + tx.SetRoot(page.ID()) + return page.ID(), tx.Commit() +} + +// PageSize returns the files page size. +func (d *standaloneDelegate) PageSize() int { + return d.file.PageSize() +} + +// Root finds the queue root page and offset. +func (d *standaloneDelegate) Root() (txfile.PageID, uintptr) { + return d.root, 0 +} + +func (d *standaloneDelegate) Offset(id txfile.PageID, offset uintptr) uintptr { + return d.file.Offset(id, offset) +} + +func (d *standaloneDelegate) SplitOffset(offset uintptr) (txfile.PageID, uintptr) { + return d.file.SplitOffset(offset) +} + +// BeginWrite creates a new transaction for flushing the write buffers to disk. +func (d *standaloneDelegate) BeginWrite() *txfile.Tx { + return d.file.BeginWith(txfile.TxOptions{ + WALLimit: 3, + }) +} + +// BeginRead returns a readonly transaction. +func (d *standaloneDelegate) BeginRead() *txfile.Tx { + return d.file.BeginReadonly() +} + +// BeginCleanup creates a new write transaction configured for cleaning up used +// events/pages only. 
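A sketch of wiring the standalone delegate to an already opened file and peeking at the queue root; it assumes `fmt`, `txfile` and this pq package are imported, and the queue constructor that normally consumes the delegate is outside this excerpt:

func queueRootInfo(f *txfile.File) error {
	d, err := pq.NewStandaloneDelegate(f) // allocates the root page if missing
	if err != nil {
		return err
	}

	rootID, rootOff := d.Root()
	fmt.Println("queue root page:", rootID, "offset:", rootOff)

	// Inspect the root page through a short read transaction.
	tx := d.BeginRead()
	defer tx.Close()

	page, err := tx.Page(rootID)
	if err != nil {
		return err
	}
	buf, err := page.Bytes()
	if err != nil {
		return err
	}
	fmt.Println("bytes available after root offset:", len(buf)-int(rootOff))
	return nil
}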
+func (d *standaloneDelegate) BeginCleanup() *txfile.Tx {
+	return d.file.BeginWith(txfile.TxOptions{
+		EnableOverflowArea: true,
+		WALLimit:           3,
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/error.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/error.go
new file mode 100644
index 00000000..2d525a3d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/error.go
@@ -0,0 +1,15 @@
+package pq
+
+import "errors"
+
+var (
+	errNODelegate          = errors.New("delegate must not be nil")
+	errInvalidPagesize     = errors.New("invalid page size")
+	errClosed              = errors.New("queue closed")
+	errNoQueueRoot         = errors.New("no queue root")
+	errIncompleteQueueRoot = errors.New("incomplete queue root")
+	errInvalidVersion      = errors.New("invalid queue version")
+	errACKEmptyQueue       = errors.New("ack on empty queue")
+	errACKTooManyEvents    = errors.New("too many events have been acked")
+	errSeekPageFailed      = errors.New("failed to seek to next page")
+)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/layout.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/layout.go
new file mode 100644
index 00000000..85e6ed69
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/layout.go
@@ -0,0 +1,81 @@
+package pq
+
+import (
+	"unsafe"
+
+	bin "github.com/urso/go-bin"
+
+	"github.com/elastic/go-txfile"
+)
+
+// primitive types
+type (
+	u32 = bin.U32le
+	u64 = bin.U64le
+)
+
+// queuePage is the root structure of a persisted Queue instance.
+type queuePage struct {
+	version u32
+
+	// start/end of the singly linked event list and event ids
+	head pos // head points to first event in list
+	tail pos // tail points to next event to be written
+
+	// read points to next event to continue reading from
+	// if read == tail, all events have been read
+	read pos
+
+	inuse u64 // number of actively used data pages
+}
+
+type pos struct {
+	offset u64 // file offset of event
+	id     u64 // id of event
+}
+
+// eventPage forms a singly linked list of event pages, storing a number
+// of events per page.
+// If off == 0, the page holds event data only (no event starts in this page).
+type eventPage struct {
+	next  u64 // PageID of next eventPage
+	first u64 // event id of first event in current page
+	last  u64 // event id of last event in current page
+	off   u32 // offset of first event in current page
+}
+
+// eventHeader keeps track of the event size in bytes.
+// The event ID can be 'computed' by iterating the events in a page.
+type eventHeader struct {
+	sz u32
+}
+
+const (
+	queueVersion = 1
+
+	// SzRoot is the size of the queue header in bytes.
+ SzRoot = int(unsafe.Sizeof(queuePage{})) + + szEventPageHeader = int(unsafe.Sizeof(eventPage{})) + szEventHeader = int(unsafe.Sizeof(eventHeader{})) +) + +func castQueueRootPage(b []byte) (hdr *queuePage) { bin.UnsafeCastStruct(&hdr, b); return } + +func castEventPageHeader(b []byte) (hdr *eventPage) { bin.UnsafeCastStruct(&hdr, b); return } + +func castEventHeader(b []byte) (hdr *eventHeader) { bin.UnsafeCastStruct(&hdr, b); return } + +func traceQueueHeader(hdr *queuePage) { + traceln("queue header:") + traceln(" version:", hdr.version.Get()) + tracef(" head(%v, %v)\n", hdr.head.id.Get(), hdr.head.offset.Get()) + tracef(" tail(%v, %v)\n", hdr.tail.id.Get(), hdr.tail.offset.Get()) + tracef(" read(%v, %v)\n", hdr.read.id.Get(), hdr.read.offset.Get()) + traceln(" data pages", hdr.inuse.Get()) +} + +func tracePageHeader(id txfile.PageID, hdr *eventPage) { + tracef("event page %v (next=%v, first=%v, last=%v, off=%v)\n", + id, hdr.next.Get(), hdr.first.Get(), hdr.last.Get(), hdr.off.Get()) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/page.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/page.go new file mode 100644 index 00000000..03b6b7cc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/page.go @@ -0,0 +1,117 @@ +package pq + +import ( + "sync" + + "github.com/elastic/go-txfile" +) + +// page is used by the write buffer to keep page content and on-disk +// assignment. Pages with Meta.ID == 0 are not allocated on disk yet. +type page struct { + Next *page + + Meta pageMeta + Data []byte +} + +type pageMeta struct { + ID txfile.PageID + FirstID, LastID uint64 + FirstOff uint32 + EndOff uint32 + Flags pageFlags +} + +type pageFlags struct { + Dirty bool // indicates new event contents being written to this page +} + +type pagePool struct { + sync.Pool +} + +func newPagePool(pageSize int) *pagePool { + return &pagePool{sync.Pool{ + New: func() interface{} { + return &page{ + Data: make([]byte, pageSize), + } + }, + }} +} + +func (pp *pagePool) NewPage() *page { + return pp.get() +} + +func (pp *pagePool) NewPageWith(id txfile.PageID, contents []byte) *page { + p := pp.NewPage() + copy(p.Data, contents) + hdr := castEventPageHeader(contents) + p.Meta = pageMeta{ + ID: id, + FirstID: hdr.first.Get(), + LastID: hdr.last.Get(), + FirstOff: hdr.off.Get(), + } + return p +} + +func (pp *pagePool) get() *page { return pp.Pool.Get().(*page) } + +func (pp *pagePool) Release(p *page) { + p.Clear() + pp.Pool.Put(p) +} + +// Clear zeroes out a page object and the buffer page header, preparing the +// page object for being reused. +func (p *page) Clear() { + p.Meta = pageMeta{} + p.Next = nil + + // clear page header + for i := 0; i < szEventPageHeader; i++ { + p.Data[i] = 0 + } +} + +// Assigned checks if the page is represented by on on-disk page. +func (p *page) Assigned() bool { + return p.Meta.ID != 0 +} + +// Dirty checks if the page is dirty and must be flushed. +func (p *page) Dirty() bool { + return p.Meta.Flags.Dirty +} + +// MarkDirty marks a page as dirty. +func (p *page) MarkDirty() { + p.Meta.Flags.Dirty = true +} + +// UnmarkDirty marks a page as being in sync with the on-disk page. +func (p *page) UnmarkDirty() { + p.Meta.Flags.Dirty = false +} + +// SetNext write the next page ID into the page header. +func (p *page) SetNext(id txfile.PageID) { + hdr := castEventPageHeader(p.Data) + hdr.next.Set(uint64(id)) +} + +// Payload returns the slice of the page it's complete payload. 
+func (p *page) Payload() []byte {
+	return p.Data[szEventPageHeader:]
+}
+
+// UpdateHeader updates the on-disk page header to reflect the page meta-data.
+func (p *page) UpdateHeader() {
+	hdr := castEventPageHeader(p.Data)
+	hdr.first.Set(p.Meta.FirstID)
+	hdr.last.Set(p.Meta.LastID)
+	hdr.off.Set(p.Meta.FirstOff)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/pq.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/pq.go
new file mode 100644
index 00000000..3f0076bf
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/pq.go
@@ -0,0 +1,161 @@
+package pq
+
+import (
+	"github.com/elastic/go-txfile"
+)
+
+// Queue implements the on-disk queue data structure. The queue requires a
+// Delegate, so it can start transactions at any time. The Queue provides a
+// reader and a writer. While it is safe to use the Reader and Writer
+// concurrently, the Reader and Writer themselves are not thread-safe.
+type Queue struct {
+	accessor access
+
+	// TODO: add support for multiple named readers with separate ACK handling.
+
+	reader *Reader
+	writer *Writer
+	acker  *acker
+}
+
+type position struct {
+	page txfile.PageID
+	off  int
+	id   uint64
+}
+
+// Settings configures a queue when being instantiated with `New`.
+type Settings struct {
+	// Queue write buffer size. If a single event is bigger than the
+	// write buffer, the write buffer will grow with the event size. In this
+	// case the write buffer will be flushed and reset to its original size.
+	WriteBuffer uint
+
+	// Optional Flushed callback. Will be used to signal the number of events
+	// that have been successfully committed.
+	Flushed func(n uint)
+
+	// Optional ACK callback. Will be used to signal the number of events
+	// successfully ACKed and the number of pages freed.
+	ACKed func(event, pages uint)
+}
+
+// MakeRoot prepares the queue header (empty queue).
+// When creating a queue with `New`, the queue header must be available.
+// Still, a delegate is allowed to create the queue header lazily.
+func MakeRoot() [SzRoot]byte {
+	var buf [SzRoot]byte
+	qu := castQueueRootPage(buf[:])
+	qu.version.Set(queueVersion)
+	return buf
+}
+
+// New creates a new Queue. The delegate is required to access the file and
+// start transactions. An error is returned if the delegate is nil, the queue
+// header is invalid, some settings are invalid, or if some IO error occurred.
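For orientation, a minimal end-to-end use of this package might look as follows. This is a sketch only: it assumes a standalone queue file managed via NewStandaloneDelegate, leaves the remaining txfile.Options at their defaults, and abbreviates error handling; names such as run and events.dat are illustrative.

package example

import (
	"github.com/elastic/go-txfile"
	"github.com/elastic/go-txfile/pq"
)

func run() error {
	// Open (or create) the backing file; only the page size is set here.
	f, err := txfile.Open("events.dat", 0600, txfile.Options{PageSize: 4096})
	if err != nil {
		return err
	}
	defer f.Close()

	// The standalone delegate allocates the queue root page on first use.
	delegate, err := pq.NewStandaloneDelegate(f)
	if err != nil {
		return err
	}

	q, err := pq.New(delegate, pq.Settings{WriteBuffer: 16 * 4096})
	if err != nil {
		return err
	}
	defer q.Close()

	// Append one event: Write buffers the payload, Next ends the event,
	// Flush commits the write buffer to the file.
	w := q.Writer()
	if _, err := w.Write([]byte("hello queue")); err != nil {
		return err
	}
	if err := w.Next(); err != nil {
		return err
	}
	if err := w.Flush(); err != nil {
		return err
	}

	// Read the event back: Next reports its size, Read fills the buffer.
	r := q.Reader()
	sz, err := r.Next()
	if err != nil || sz <= 0 {
		return err
	}
	buf := make([]byte, sz)
	if _, err := r.Read(buf); err != nil {
		return err
	}

	// Acknowledge the event so its pages can eventually be reclaimed.
	return q.ACK(1)
}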
+func New(delegate Delegate, settings Settings) (*Queue, error) {
+	if delegate == nil {
+		return nil, errNODelegate
+	}
+
+	accessor, err := makeAccess(delegate)
+	if err != nil {
+		return nil, err
+	}
+
+	q := &Queue{accessor: accessor}
+
+	pageSize := delegate.PageSize()
+	pagePool := newPagePool(pageSize)
+
+	rootBuf, err := q.accessor.ReadRoot()
+	if err != nil {
+		return nil, err
+	}
+
+	root := castQueueRootPage(rootBuf[:])
+	if root.version.Get() != queueVersion {
+		return nil, errInvalidVersion
+	}
+
+	tracef("open queue: %p (pageSize: %v)\n", q, pageSize)
+	traceQueueHeader(root)
+
+	tail := q.accessor.ParsePosition(&root.tail)
+	writer, err := newWriter(&q.accessor, pagePool,
+		settings.WriteBuffer, tail, settings.Flushed)
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err := newReader(&q.accessor)
+	if err != nil {
+		return nil, err
+	}
+
+	acker, err := newAcker(&q.accessor, settings.ACKed)
+	if err != nil {
+		return nil, err
+	}
+
+	q.reader = reader
+	q.writer = writer
+	q.acker = acker
+	return q, nil
+}
+
+// Close will try to flush the current write buffer. After closing the queue,
+// no more reads or writes can be executed.
+func (q *Queue) Close() error {
+	tracef("close queue %p\n", q)
+	defer tracef("queue %p closed\n", q)
+
+	q.reader.close()
+	q.acker.close()
+	return q.writer.close()
+}
+
+// Pending returns the total number of enqueued, but unacked events.
+func (q *Queue) Pending() int {
+	tx := q.accessor.BeginRead()
+	defer tx.Close()
+
+	hdr, err := q.accessor.RootHdr(tx)
+	if err != nil {
+		return -1
+	}
+
+	head := q.accessor.ParsePosition(&hdr.read)
+	if head.page == 0 {
+		head = q.accessor.ParsePosition(&hdr.head)
+	}
+	tail := q.accessor.ParsePosition(&hdr.tail)
+
+	return int(tail.id - head.id)
+}
+
+// Writer returns the queue writer for inserting new events into the queue.
+// A queue has only a single writer instance, which is returned by Writer.
+// The writer is not thread safe.
+func (q *Queue) Writer() *Writer {
+	return q.writer
+}
+
+// Reader returns the queue reader for reading new events from the queue.
+// A queue has only a single reader instance.
+// The reader is not thread safe.
+func (q *Queue) Reader() *Reader {
+	return q.reader
+}
+
+// ACK signals to the queue that the oldest n events at the front of the queue
+// have been processed.
+// The queue will try to remove these asynchronously.
+func (q *Queue) ACK(n uint) error {
+	return q.acker.handle(n)
+}
+
+// Active returns the number of active, not yet ACKed events.
+func (q *Queue) Active() (uint, error) {
+	return q.acker.Active()
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/reader.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/reader.go
new file mode 100644
index 00000000..762e242d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/reader.go
@@ -0,0 +1,210 @@
+package pq
+
+import (
+	"github.com/elastic/go-txfile"
+	"github.com/elastic/go-txfile/internal/invariant"
+)
+
+// Reader is used to iterate events stored in the queue.
+type Reader struct {
+	accessor *access
+	state    readState
+	active   bool
+}
+
+type readState struct {
+	id    uint64
+	endID uint64 // id of the next, yet unwritten event.
+ eventBytes int // number of unread bytes in current event + + cursor cursor +} + +func newReader(accessor *access) (*Reader, error) { + return &Reader{ + active: true, + accessor: accessor, + state: readState{ + eventBytes: -1, + cursor: cursor{ + pageSize: accessor.PageSize(), + }, + }, + }, nil +} + +func (r *Reader) close() { + r.active = false +} + +// Available returns the number of unread events that can be read. +func (r *Reader) Available() uint { + if !r.active { + return 0 + } + + func() { + tx := r.accessor.BeginRead() + defer tx.Close() + r.updateQueueState(tx) + }() + + if r.state.cursor.Nil() { + return 0 + } + + return uint(r.state.endID - r.state.id) +} + +// Read reads the contents of the current event into the buffer. +// Returns 0 without reading if end of the current event has been reached. +// Use `Next` to skip/continue reading the next event. +func (r *Reader) Read(b []byte) (int, error) { + if !r.active { + return -1, errClosed + } + + if r.state.eventBytes <= 0 { + return 0, nil + } + + to, err := r.readInto(b) + return len(b) - len(to), err +} + +func (r *Reader) readInto(to []byte) ([]byte, error) { + tx := r.accessor.BeginRead() + defer tx.Close() + + n := r.state.eventBytes + if L := len(to); L < n { + n = L + } + + cursor := makeTxCursor(tx, r.accessor, &r.state.cursor) + for n > 0 { + consumed, err := cursor.Read(to[:n]) + to = to[consumed:] + n -= consumed + r.state.eventBytes -= consumed + + if err != nil { + return to, err + } + } + + // end of event -> advance to next event + var err error + if r.state.eventBytes == 0 { + r.state.eventBytes = -1 + r.state.id++ + + // As page is already in memory, use current transaction to try to skip to + // next page if no more new event fits into current page. + if cursor.PageBytes() < szEventHeader { + cursor.AdvancePage() + } + } + + return to, err +} + +// Next advances to the next event to be read. The event size in bytes is +// returned. A size of 0 is reported if no more event is available in the +// queue. +func (r *Reader) Next() (int, error) { + if !r.active { + return -1, errClosed + } + + tx := r.accessor.BeginRead() + defer tx.Close() + + cursor := makeTxCursor(tx, r.accessor, &r.state.cursor) + + // in event? Skip contents + if r.state.eventBytes > 0 { + err := cursor.Skip(r.state.eventBytes) + if err != nil { + return 0, err + } + + r.state.eventBytes = -1 + r.state.id++ + } + + // end of buffered queue state. Update state and check if we did indeed reach + // the end of the queue. + if cursor.Nil() || !idLess(r.state.id, r.state.endID) { + err := r.updateQueueState(tx) + if err != nil { + return 0, err + } + + // end of queue + if cursor.Nil() || !idLess(r.state.id, r.state.endID) { + return 0, nil + } + } + + // Advance page and initialize cursor if event header does not fit into + // current page. + if cursor.PageBytes() < szEventHeader { + // cursor was not advanced by last read. The acker will not have deleted + // the current page -> try to advance now. + ok, err := cursor.AdvancePage() + if err != nil { + return 0, err + } + invariant.Check(ok, "page list linkage broken") + + hdr, err := cursor.PageHeader() + if err != nil { + return 0, err + } + + id := hdr.first.Get() + off := int(hdr.off.Get()) + invariant.Check(r.state.id == id, "page start event id mismatch") + invariant.CheckNot(off == 0, "page event offset missing") + r.state.cursor.off = off + } + + // Initialize next event read by determining event size. 
+ hdr, err := cursor.ReadEventHeader() + if err != nil { + return 0, err + } + L := int(hdr.sz.Get()) + r.state.eventBytes = L + return L, nil +} + +func (r *Reader) updateQueueState(tx *txfile.Tx) error { + root, err := r.accessor.RootHdr(tx) + if err != nil { + return err + } + + // Initialize cursor, if queue was empty on previous (without any pages). + if r.state.cursor.Nil() { + head := r.findReadStart(root) + tail := r.accessor.ParsePosition(&root.tail) + + r.state.id = head.id + r.state.cursor.page = head.page + r.state.cursor.off = head.off + r.state.endID = tail.id + } else { + r.state.endID = root.tail.id.Get() + } + return nil +} + +func (r *Reader) findReadStart(root *queuePage) position { + head := r.accessor.ParsePosition(&root.read) + if head.page != 0 { + return head + } + return r.accessor.ParsePosition(&root.head) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace.go new file mode 100644 index 00000000..2acef0de --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace.go @@ -0,0 +1,10 @@ +package pq + +type tracer interface { + Println(...interface{}) + Printf(string, ...interface{}) +} + +var ( + logTracer tracer +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace_disabled.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace_disabled.go new file mode 100644 index 00000000..4bdc19a8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace_disabled.go @@ -0,0 +1,9 @@ +// +build !tracing + +package pq + +func pushTracer(t tracer) {} +func popTracer() {} + +func traceln(vs ...interface{}) {} +func tracef(fmt string, vs ...interface{}) {} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace_enabled.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace_enabled.go new file mode 100644 index 00000000..75340aac --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/trace_enabled.go @@ -0,0 +1,36 @@ +// +build tracing + +package pq + +import ( + "github.com/elastic/go-txfile/internal/tracelog" +) + +var ( + tracers []tracer + activeTracer tracer +) + +func init() { + logTracer = tracelog.Get("pq") + activeTracer = logTracer +} + +func pushTracer(t tracer) { + tracers = append(tracers, activeTracer) + activeTracer = t +} + +func popTracer() { + i := len(tracers) - 1 + activeTracer = tracers[i] + tracers = tracers[:i] +} + +func traceln(vs ...interface{}) { + activeTracer.Println(vs...) +} + +func tracef(s string, vs ...interface{}) { + activeTracer.Printf(s, vs...) 
+} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/util.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/util.go new file mode 100644 index 00000000..5d7351e3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/util.go @@ -0,0 +1,39 @@ +package pq + +import "github.com/elastic/go-txfile" + +func getPage(tx *txfile.Tx, id txfile.PageID) ([]byte, error) { + page, err := tx.Page(id) + if err != nil { + return nil, err + } + + return page.Bytes() +} + +func withPage(tx *txfile.Tx, id txfile.PageID, fn func([]byte) error) error { + page, err := getPage(tx, id) + if err != nil { + return err + } + return fn(page) +} + +func readPageByID(accessor *access, pool *pagePool, id txfile.PageID) (*page, error) { + tx := accessor.BeginRead() + defer tx.Close() + + var page *page + return page, withPage(tx, id, func(buf []byte) error { + page = pool.NewPageWith(id, buf) + return nil + }) +} + +func idLess(a, b uint64) bool { + return int64(a-b) < 0 +} + +func idLessEq(a, b uint64) bool { + return a == b || idLess(a, b) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/writer.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/writer.go new file mode 100644 index 00000000..27498891 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/pq/writer.go @@ -0,0 +1,327 @@ +package pq + +import ( + "github.com/elastic/go-txfile" + "github.com/elastic/go-txfile/internal/cleanup" +) + +// Writer is used to push new events onto the queue. +// The writer uses a write buffer, which is flushed once the buffer is full +// or if Flush is called. +// Only complete events are flushed. If an event is bigger then the configured write buffer, +// the write buffer will grow with the event size. 
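As a sketch of the per-event protocol (assuming a *Writer obtained from Queue.Writer; the JSON encoding and the appendJSON helper are illustrative, not part of this package):

package example

import (
	"encoding/json"

	"github.com/elastic/go-txfile/pq"
)

// appendJSON encodes v as one JSON document and appends it to the queue as a
// single event: Write streams the encoded bytes, Next closes the event frame,
// and Flush makes the buffered event durable.
func appendJSON(w *pq.Writer, v interface{}) error {
	if err := json.NewEncoder(w).Encode(v); err != nil {
		return err
	}
	if err := w.Next(); err != nil {
		return err
	}
	return w.Flush()
}

Flushing after every event is shown for clarity only; since the buffer is also flushed automatically once it fills up, Flush can be called less frequently in practice.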
+type Writer struct { + active bool + + accessor *access + flushCB func(uint) + + state writeState +} + +type writeState struct { + buf *buffer + + activeEventCount uint // count number of finished events since last flush + totalEventCount uint + totalAllocPages uint + + eventID uint64 + eventBytes int +} + +const defaultMinPages = 5 + +func newWriter( + accessor *access, + pagePool *pagePool, + writeBuffer uint, + end position, + flushCB func(uint), +) (*Writer, error) { + pageSize := accessor.PageSize() + if pageSize <= 0 { + return nil, errInvalidPagesize + } + + pages := int(writeBuffer) / pageSize + if pages <= defaultMinPages { + pages = defaultMinPages + } + + var tail *page + if end.page != 0 { + traceln("writer load endpage: ", end) + + page := end.page + off := end.off + + var err error + tail, err = readPageByID(accessor, pagePool, page) + if err != nil { + return nil, err + } + + tail.Meta.EndOff = uint32(off) + } + + w := &Writer{ + active: true, + accessor: accessor, + state: writeState{ + buf: newBuffer(pagePool, tail, pages, pageSize, szEventPageHeader), + eventID: end.id, + }, + flushCB: flushCB, + } + + // init buffer with 'first' event to be written + w.state.buf.ReserveHdr(szEventHeader) + return w, nil +} + +func (w *Writer) close() error { + if !w.active { + return nil + } + + err := w.doFlush() + if err != nil { + return err + } + + w.active = false + w.state.buf = nil + return err +} + +func (w *Writer) Write(p []byte) (int, error) { + if !w.active { + return 0, errClosed + } + + if w.state.buf.Avail() <= len(p) { + if err := w.doFlush(); err != nil { + return 0, err + } + } + + w.state.buf.Append(p) + w.state.eventBytes += len(p) + + return len(p), nil +} + +// Next is used to indicate the end of the current event. +// If write is used with a streaming encoder, the buffers +// of the actual writer must be flushed before calling Next on this writer. +// Upon next, the queue writer will add the event framing header and footer. +func (w *Writer) Next() error { + if !w.active { + return errClosed + } + + // finalize current event in buffer and prepare next event + hdr := castEventHeader(w.state.buf.ActiveEventHdr()) + hdr.sz.Set(uint32(w.state.eventBytes)) + w.state.buf.CommitEvent(w.state.eventID) + w.state.buf.ReserveHdr(szEventHeader) + w.state.eventBytes = 0 + w.state.eventID++ + w.state.activeEventCount++ + + // check if we need to flush + if w.state.buf.Avail() <= szEventHeader { + if err := w.doFlush(); err != nil { + return err + } + } + + return nil +} + +// Flush flushes the write buffer. Returns an error if the queue is closed, +// some error occurred or no more space is available in the file. +func (w *Writer) Flush() error { + if !w.active { + return errClosed + } + return w.doFlush() +} + +func (w *Writer) doFlush() error { + start, end := w.state.buf.Pages() + if start == nil || start == end { + return nil + } + + traceln("writer flush", w.state.activeEventCount) + + // unallocated points to first page in list that must be allocated. All + // pages between unallocated and end require a new page to be allocated. 
+ var unallocated *page + for current := start; current != end; current = current.Next { + if !current.Assigned() { + unallocated = current + break + } + } + + tx := w.accessor.BeginWrite() + defer tx.Close() + + rootPage, queueHdr, err := w.accessor.LoadRootPage(tx) + if err != nil { + return err + } + + traceQueueHeader(queueHdr) + + ok := false + allocN, err := allocatePages(tx, unallocated, end) + if err != nil { + return err + } + linkPages(start, end) + defer cleanup.IfNot(&ok, func() { unassignPages(unallocated, end) }) + + traceln("write queue pages") + last, err := flushPages(tx, start, end) + if err != nil { + return err + } + + // update queue root + w.updateRootHdr(queueHdr, start, last, allocN) + rootPage.MarkDirty() + + err = tx.Commit() + if err != nil { + return err + } + + // mark write as success -> no error-cleanup required + ok = true + + // remove dirty flag from all published pages + for current := start; current != end; current = current.Next { + current.UnmarkDirty() + } + + w.state.buf.Reset(last) + + activeEventCount := w.state.activeEventCount + w.state.totalEventCount += activeEventCount + w.state.totalAllocPages += uint(allocN) + + traceln("Write buffer flushed. Total events: %v, total pages allocated: %v", + w.state.totalEventCount, + w.state.totalAllocPages) + + w.state.activeEventCount = 0 + if w.flushCB != nil { + w.flushCB(activeEventCount) + } + + return nil +} + +func (w *Writer) updateRootHdr(hdr *queuePage, start, last *page, allocated int) { + if hdr.head.offset.Get() == 0 { + w.accessor.WritePosition(&hdr.head, position{ + page: start.Meta.ID, + off: int(start.Meta.FirstOff), + id: start.Meta.FirstID, + }) + } + + hdr.inuse.Set(hdr.inuse.Get() + uint64(allocated)) + + endOff := int(last.Meta.EndOff) + if last == w.state.buf.eventHdrPage { + endOff = w.state.buf.eventHdrOffset + } + + w.accessor.WritePosition(&hdr.tail, position{ + page: last.Meta.ID, + off: endOff, + id: w.state.eventID, + }) + + traceln("writer: update queue header") + traceQueueHeader(hdr) +} + +func allocatePages(tx *txfile.Tx, start, end *page) (int, error) { + if start == nil { + return 0, nil + } + + allocN := 0 + for current := start; current != end; current = current.Next { + allocN++ + } + + tracef("allocate %v queue pages\n", allocN) + + txPages, err := tx.AllocN(allocN) + if err != nil { + return 0, err + } + + // assign new page IDs + for current, i := start, 0; current != end; current, i = current.Next, i+1 { + current.Meta.ID = txPages[i].ID() + } + + return allocN, nil +} + +// unassignPages removes page assignments from all pages between start and end, +// so to mark these pages as 'not allocated'. +func unassignPages(start, end *page) { + for current := start; current != end; current = current.Next { + current.Meta.ID = 0 + } +} + +// Update page headers to point to next page in the list. +func linkPages(start, end *page) { + for current := start; current.Next != end; current = current.Next { + tracef("link page %v -> %v\n", current.Meta.ID, current.Next.Meta.ID) + current.SetNext(current.Next.Meta.ID) + } +} + +// flushPages flushes all pages in the list of pages and returns the last page +// being flushed. 
+func flushPages(tx *txfile.Tx, start, end *page) (*page, error) { + last := start + for current := start; current != end; current = current.Next { + last = current + + err := flushPage(tx, current) + if err != nil { + return nil, err + } + } + + return last, nil +} + +func flushPage(tx *txfile.Tx, page *page) error { + page.UpdateHeader() + tracePageHeader(page.Meta.ID, castEventPageHeader(page.Data)) + + diskPage, err := tx.Page(page.Meta.ID) + if err != nil { + return err + } + + err = diskPage.SetBytes(page.Data) + if err != nil { + return err + } + + return diskPage.Flush() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/region.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/region.go new file mode 100644 index 00000000..a20a2f14 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/region.go @@ -0,0 +1,234 @@ +package txfile + +import ( + "sort" +) + +// region values represent a continuous set of pages. +type region struct { + id PageID + count uint32 +} + +type regionList []region + +// PageIDs represent pages. The minimal page size is 512Bytes + +// all contents in a file must be addressable by offset. This gives us +// 9 bytes to store additional flags or value in the entry. + +const ( + maxRegionEncSz = 8 + 4 + entryBits = 9 // region entry header size in bits + entryOverflow = (1 << 8) - 1 // overflow marker == all counter bits set + entryOverflowMarker = uint64(entryOverflow) << (64 - entryBits) + entryCounterMask = uint32(((1 << (entryBits - 1)) - 1)) + entryMetaFlag = 1 << 63 // indicates the region holding pages used by the meta-area +) + +func (l regionList) Len() int { + return len(l) +} + +func (l *regionList) Add(reg region) { + *l = append(*l, reg) +} + +func (l regionList) Sort() { + sort.Slice(l, func(i, j int) bool { + return l[i].Before(l[j]) + }) +} + +func (l *regionList) MergeAdjacent() { + if len(*l) <= 1 { + return + } + + tmp := (*l)[:1] + i := 0 + for _, r := range (*l)[1:] { + if regionsMergable(tmp[i], r) { + tmp[i] = mergeRegions(tmp[i], r) + } else { + tmp = append(tmp, r) + i = i + 1 + } + } + *l = tmp +} + +func (l regionList) CountPagesUpTo(id PageID) (count uint) { + for _, reg := range l { + if reg.id >= id { + break + } + + start, end := reg.Range() + if end > id { + end = id + } + count += uint(end - start) + } + return +} + +func (l regionList) CountPages() (count uint) { + for _, reg := range l { + count += uint(reg.count) + } + return +} + +func (l regionList) EachPage(fn func(PageID)) { + for _, reg := range l { + reg.EachPage(fn) + } +} + +func (l regionList) EachRegion(fn func(region)) { + for _, reg := range l { + fn(reg) + } +} + +func (l regionList) PageIDs() (ids idList) { + l.EachPage(ids.Add) + return +} + +func (r region) Start() PageID { return r.id } +func (r region) End() PageID { return r.id + PageID(r.count) } +func (r region) Range() (PageID, PageID) { return r.Start(), r.End() } +func (r region) InRange(id PageID) bool { return r.Start() <= id && id < r.End() } + +func (r region) SplitAt(id PageID) region { + start, end := r.Range() + if id <= start || end < id { + return region{} + } + + if end > id { + end = id + } + + return region{id: start, count: uint32(end - start)} +} + +func (r region) EachPage(fn func(PageID)) { + for id, end := r.Range(); id != end; id++ { + fn(id) + } +} + +func (r region) Before(other region) bool { + return r.id < other.id +} + +func (r region) Precedes(other region) bool { + return r.id+PageID(r.count) 
== other.id +} + +func mergeRegions(a, b region) region { + return region{id: a.id, count: a.count + b.count} +} + +// mergeRegionLists merges 2 sorter regionLists into a new sorted region list. +// Adjacent regions will be merged into a single region as well. +func mergeRegionLists(a, b regionList) regionList { + L := len(a) + len(b) + if L == 0 { + return nil + } + + final := make(regionList, 0, L) + for len(a) > 0 && len(b) > 0 { + if a[0].Before(b[0]) { + final, a = append(final, a[0]), a[1:] + } else { + final, b = append(final, b[0]), b[1:] + } + } + + // copy leftover elements + final = append(final, a...) + final = append(final, b...) + + final.MergeAdjacent() + + return final +} + +// regionsMergable checks region a directly precedes regions b and +// the region counter will not overflow. +func regionsMergable(a, b region) bool { + if !a.Before(b) { + a, b = b, a + } + return a.Precedes(b) && (a.count+b.count) > a.count +} + +// optimizeRegionList sorts and merges adjacent regions. +func optimizeRegionList(reg *regionList) { + initLen := reg.Len() + reg.Sort() + reg.MergeAdjacent() + if l := reg.Len(); initLen > l { + tmp := make(regionList, l, l) + copy(tmp, *reg) + *reg = tmp + } +} + +// (de-)serialization +// ------------------ + +func regionEncodingSize(r region) int { + if r.count < entryOverflow { + return (&u64{}).Len() + } + return (&u64{}).Len() + (&u32{}).Len() +} + +func encodeRegion(buf []byte, isMeta bool, reg region) int { + flag := uint64(0) + if isMeta { + flag = entryMetaFlag + } + + payload := buf + entry := castU64(payload) + payload = payload[entry.Len():] + + if reg.count < entryOverflow { + count := uint64(reg.count) << (64 - entryBits) + entry.Set(flag | count | uint64(reg.id)) + } else { + count := castU32(payload) + payload = payload[count.Len():] + + entry.Set(flag | entryOverflowMarker | uint64(reg.id)) + count.Set(reg.count) + } + + return len(buf) - len(payload) +} + +func decodeRegion(buf []byte) (bool, region, int) { + payload := buf + entry := castU64(payload) + value := entry.Get() + payload = payload[entry.Len():] + + id := PageID((value << entryBits) >> entryBits) + isMeta := (entryMetaFlag & value) == entryMetaFlag + count := uint32(value>>(64-entryBits)) & entryCounterMask + switch count { + case 0: + count = 1 + case entryOverflow: + extra := castU32(payload) + count, payload = extra.Get(), payload[extra.Len():] + } + + return isMeta, region{id: id, count: count}, len(buf) - len(payload) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace.go new file mode 100644 index 00000000..c6c062a6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace.go @@ -0,0 +1,10 @@ +package txfile + +type tracer interface { + Println(...interface{}) + Printf(string, ...interface{}) +} + +var ( + logTracer tracer +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace_disabled.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace_disabled.go new file mode 100644 index 00000000..00bb45c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace_disabled.go @@ -0,0 +1,9 @@ +// +build !tracing + +package txfile + +func pushTracer(t tracer) {} +func popTracer() {} + +func traceln(vs ...interface{}) {} +func tracef(fmt string, vs ...interface{}) {} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace_enabled.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace_enabled.go new file mode 100644 index 00000000..04c1e292 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/trace_enabled.go @@ -0,0 +1,36 @@ +// +build tracing + +package txfile + +import ( + "github.com/elastic/go-txfile/internal/tracelog" +) + +var ( + tracers []tracer + activeTracer tracer +) + +func init() { + logTracer = tracelog.Get("txfile") + activeTracer = logTracer +} + +func pushTracer(t tracer) { + tracers = append(tracers, activeTracer) + activeTracer = t +} + +func popTracer() { + i := len(tracers) - 1 + activeTracer = tracers[i] + tracers = tracers[:i] +} + +func traceln(vs ...interface{}) { + activeTracer.Println(vs...) +} + +func tracef(s string, vs ...interface{}) { + activeTracer.Printf(s, vs...) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/tx.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/tx.go new file mode 100644 index 00000000..69eded22 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/tx.go @@ -0,0 +1,624 @@ +package txfile + +import ( + "fmt" + "sync" + + "github.com/elastic/go-txfile/internal/cleanup" +) + +// Tx provides access to pages in a File. +// A transaction MUST always be closed, so to guarantee locks being released as +// well. +type Tx struct { + flags txFlags + file *File + lock sync.Locker + writeSync *txWriteSync + rootID PageID + dataEndID PageID + + // pages accessed by the transaction + pages map[PageID]*Page + + // allocation/free state + alloc txAllocState + + // scheduled WAL updates + wal txWalState +} + +// TxOptions adds some per transaction options user can set. +type TxOptions struct { + // Readonly transaction. + Readonly bool + + // Allow write transaction to allocate meta pages from overflow area. + // Potentially increasing the file size past the configured max size. + // This setting should only be used to guarantee progress when having a + // transaction only freeing pages. + // Later transactions will try to release pages from the overflow area and + // truncate the file, such that we have a chance to operate within max-size + // limits again. + EnableOverflowArea bool + + // MetaAreaGrowPercentage sets the percentage of meta pages in use, until + // the meta-area grows again. The value must be between 0 and 100. + // The default value is 80%. + MetaAreaGrowPercentage int + + // Number of pages in wal overwrite log to automatically trigger + // CheckpointWAL on commit. + WALLimit uint +} + +type txFlags struct { + readonly bool + active bool + checkpoint bool // mark wal checkpoint has been applied +} + +func newTx(file *File, lock sync.Locker, settings TxOptions) *Tx { + meta := file.getMetaPage() + + rootID := meta.root.Get() + dataEndMarker := meta.dataEndMarker.Get() + + tx := &Tx{ + flags: txFlags{ + readonly: settings.Readonly, + active: true, + }, + file: file, + lock: lock, + rootID: rootID, + dataEndID: dataEndMarker, + + pages: map[PageID]*Page{}, + } + + if !settings.Readonly { + tx.writeSync = newTxWriteSync() + tx.alloc = file.allocator.makeTxAllocState( + settings.EnableOverflowArea, + settings.MetaAreaGrowPercentage, + ) + tx.wal = file.wal.makeTxWALState(settings.WALLimit) + } + + return tx +} + +// Writable returns true if the transaction supports file modifications. 
+func (tx *Tx) Writable() bool {
+	return !tx.flags.readonly
+}
+
+// Readonly returns true if no modifications to pages are allowed. Trying to
+// write to a readonly page might result in a non-recoverable panic.
+func (tx *Tx) Readonly() bool {
+	return tx.flags.readonly
+}
+
+// Active returns true if the transaction can still be used to access pages.
+// A transaction becomes inactive after Close, Commit or Rollback.
+// Errors within a transaction might inactivate the transaction as well.
+// When encountering errors, one should check whether the transaction can still be used.
+func (tx *Tx) Active() bool {
+	return tx.flags.active
+}
+
+// PageSize returns the file page size.
+func (tx *Tx) PageSize() int {
+	return int(tx.file.allocator.pageSize)
+}
+
+// Root returns the data root page id. This ID must be set via SetRoot
+// to indicate the start of application data to later transactions.
+// On new files, the default root is 0, as no application data are stored yet.
+func (tx *Tx) Root() PageID {
+	return tx.rootID
+}
+
+// SetRoot sets the new root page id, indicating the new start of application
+// data. SetRoot should be called by the first write transaction, when the file
+// is first created.
+func (tx *Tx) SetRoot(id PageID) {
+	tx.rootID = id
+}
+
+// RootPage returns the application data root page, if the root id has been set
+// in the past. Returns nil, if no root page is set.
+func (tx *Tx) RootPage() (*Page, error) {
+	if tx.rootID < 2 {
+		return nil, nil
+	}
+	return tx.Page(tx.rootID)
+}
+
+// Rollback rolls back and closes the current transaction. Rollback returns an
+// error if the transaction has already been closed by Close, Rollback or
+// Commit.
+func (tx *Tx) Rollback() error {
+	tracef("rollback transaction: %p\n", tx)
+	return tx.finishWith(tx.rollbackChanges)
+}
+
+// Commit commits the current transaction to file. The commit step needs to
+// take the Exclusive Lock, waiting for readonly transactions to be Closed.
+// Returns an error if the transaction has already been closed by Close,
+// Rollback or Commit.
+func (tx *Tx) Commit() error {
+	tracef("commit transaction: %p\n", tx)
+	return tx.finishWith(tx.commitChanges)
+}
+
+// Close closes the transaction, releasing any locks held by the transaction.
+// It is safe to call Close multiple times. Close on an inactive transaction
+// will be ignored.
+// A non-committed read-write transaction will be rolled back on close.
+// To guarantee the File and locking state remain valid, even on panic or early
+// return on error, one should also defer the Close operation on new transactions.
+// For example:
+//
+//    tx := f.Begin()
+//    defer tx.Close()
+//
+//    err := some operation
+//    if err != nil {
+//      return err
+//    }
+//
+//    return tx.Commit()
+//
+func (tx *Tx) Close() error {
+	tracef("close transaction: %p\n", tx)
+	if !tx.flags.active {
+		return nil
+	}
+	return tx.finishWith(tx.rollbackChanges)
+}
+
+// CheckpointWAL copies the contents of all overwrite pages back into the
+// original pages. Only already committed pages from older transactions will be
+// overwritten. Checkpointing only copies the contents and marks the overwrite
+// pages as freed. The final transaction Commit is required, to propagate the
+// WAL mapping changes to all other transactions.
+// Dirty pages are not overwritten. Manual checkpointing should be executed at
+// the end of a transaction, right before committing, to reduce writes if the
+// contents are going to be overwritten anyway.
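A sketch of that pattern, assuming a write transaction tx whose page updates have already been scheduled:

	// Fold WAL pages back into their original pages and free them, then
	// publish the checkpoint together with the other changes of this tx.
	if err := tx.CheckpointWAL(); err != nil {
		return err
	}
	return tx.Commit()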
+func (tx *Tx) CheckpointWAL() error { + if err := tx.canWrite(); err != nil { + return err + } + return tx.doCheckpointWAL() +} + +func (tx *Tx) doCheckpointWAL() error { + if tx.flags.checkpoint { + return nil + } + + // collect page ids that would have an old WAL page + // entry still alive after this transaction. + ids := make([]PageID, 0, len(tx.file.wal.mapping)) + walIDS := make([]PageID, 0, len(tx.file.wal.mapping)) + for id, walID := range tx.file.wal.mapping { + page := tx.pages[id] + if page != nil { + if page.flags.dirty { + // wal pages of dirty pages will be freed on flush -> do not copy + continue + } + } + + ids = append(ids, id) + walIDS = append(walIDS, walID) + } + + if len(ids) == 0 { + return nil + } + + // XXX: Some OS/filesystems might lock up when writing to file + // from mmapped area. + // -> Copy contents into temporary buffer, such that + // write operations are not backed by mmapped pages from same file. + pageSize := int(tx.PageSize()) + writeBuffer := make([]byte, pageSize*len(ids)) + for i := range ids { + id, walID := ids[i], walIDS[i] + + contents := tx.file.mmapedPage(walID) + if contents == nil { + panic("invalid WAL mapping") + } + + tracef("checkpoint copy from WAL page %v -> %v\n", walID, id) + + n := copy(writeBuffer, contents) + buf := writeBuffer[:n] + writeBuffer = writeBuffer[n:] + + tx.file.writer.Schedule(tx.writeSync, id, buf) + tx.freeWALID(id, walID) + } + + tx.flags.checkpoint = true + return nil +} + +func (tx *Tx) finishWith(fn func() error) error { + if !tx.flags.active { + return errTxFinished + } + defer tx.close() + + if !tx.flags.readonly { + return fn() + } + return nil +} + +func (tx *Tx) close() { + tx.flags.active = false + tx.pages = nil + tx.alloc = txAllocState{} + tx.wal = txWalState{} + tx.writeSync = nil + tx.file = nil + tx.lock.Unlock() +} + +func (tx *Tx) commitChanges() error { + commitOK := false + defer cleanup.IfNot(&commitOK, cleanup.IgnoreError(tx.rollbackChanges)) + + err := tx.tryCommitChanges() + if commitOK = err == nil; !commitOK { + return err + } + + traceMetaPage(tx.file.getMetaPage()) + return err +} + +// tryCommitChanges attempts to write flush all pages written and update the +// files state by writing the new meta data and finally the meta page. +// So to keep the most recent transaction successfully committed usable/consistent, +// tryCommitChanges is not allowed to re-use any pages freed within this transaction. +// +// rough commit sequence: +// 1. get pending lock, so no new readers can be started +// 2. flush all dirty pages. +// - dirty pages overwriting existing contents will, will allocate +// a new WAL page to be written to +// - If dirty page already has an WAL page, overwrite the original page and +// return WAL page to allocator +// 3. if WAL was updated (pages added/removed): +// - free pages holding the old WAL mapping +// - write new WAL mapping +// 4. if pages have been freed/allocated: +// - free pages holding the old free list entries +// - write new free list +// 5. fsync, to ensure all updates have been executed before updating the meta page +// 6. acquire esclusive lock -> no more readers/writers accessing the file +// 6. update the meta page +// 7. fsync +// 8. update internal structures +// 9. 
release locks +func (tx *Tx) tryCommitChanges() error { + pending, exclusive := tx.file.locks.Pending(), tx.file.locks.Exclusive() + + var newMetaBuf metaBuf + newMeta := newMetaBuf.cast() + *newMeta = *tx.file.getMetaPage() // init new meta header from current active meta header + newMeta.txid.Set(1 + newMeta.txid.Get()) // inc txid + newMeta.root.Set(tx.rootID) // update data root + + // give concurrent read transactions a chance to complete, but don't allow + // for new read transactions to start while executing the commit + pending.Lock() + defer pending.Unlock() + + // On function exit wait on writer to finish outstanding operations, in case + // we have to return early on error. On success, this is basically a no-op. + defer tx.writeSync.Wait() + + // Flush pages. + if err := tx.Flush(); err != nil { + return fmt.Errorf("dirty pages flushing failed with %v", err) + } + + // 1. finish Tx state updates and free file pages used to hold meta pages + csWAL, err := tx.commitPrepareWAL() + if err != nil { + return err + } + + var csAlloc allocCommitState + tx.file.allocator.fileCommitPrepare(&csAlloc, &tx.alloc) + + // 2. allocate new file pages for new meta data to be written + if err := tx.file.wal.fileCommitAlloc(tx, &csWAL); err != nil { + return err + } + csAlloc.updated = csAlloc.updated || len(csWAL.allocRegions) > 0 + + if err := tx.file.allocator.fileCommitAlloc(&csAlloc); err != nil { + return err + } + + // 3. serialize page mappings and new freelist + err = tx.file.wal.fileCommitSerialize(&csWAL, uint(tx.PageSize()), tx.scheduleWrite) + if err != nil { + return err + } + + err = tx.file.allocator.fileCommitSerialize(&csAlloc, tx.scheduleWrite) + if err != nil { + return err + } + + // 4. sync all new contents and metadata before updating the ondisk meta page. + tx.file.writer.Sync(tx.writeSync) + + // 5. finalize on-disk transaction be writing new meta page. + tx.file.wal.fileCommitMeta(newMeta, &csWAL) + tx.file.allocator.fileCommitMeta(newMeta, &csAlloc) + newMeta.Finalize() + metaID := 1 - tx.file.metaActive + tx.file.writer.Schedule(tx.writeSync, PageID(metaID), newMetaBuf[:]) + tx.file.writer.Sync(tx.writeSync) + + // 6. wait for all pages beeing written and synced, + // before updating in memory state. + if err := tx.writeSync.Wait(); err != nil { + return err + } + + // At this point the transaction has been completed on file level. + // Update internal structures as well, so future transactions + // will use the new serialized transaction state. + + // We have only one active write transaction + freelist is not shared with read transactions + // -> update freelist state before waiting for the exclusive lock to be available + tx.file.allocator.Commit(&csAlloc) + + // Wait for all read transactions to finish before updating global references + // to new contents. + exclusive.Lock() + defer exclusive.Unlock() + + // Update the WAL mapping. + tx.file.wal.Commit(&csWAL) + + // Switch the files active meta page to meta page being written. + tx.file.metaActive = metaID + + // check + apply mmap update. If we fail here, the file and internal + // state is already updated + valid. 
+ // But mmap failed on us -> fatal error + endMarker := tx.file.allocator.data.endMarker + if metaEnd := tx.file.allocator.meta.endMarker; metaEnd > endMarker { + endMarker = metaEnd + } + fileSize := uint(endMarker) * tx.file.allocator.pageSize + if int(fileSize) > len(tx.file.mapped) { + err = tx.file.mmapUpdate() + } + + traceln("tx stats:") + traceln(" available data pages:", tx.file.allocator.DataAllocator().Avail(nil)) + traceln(" available meta pages:", tx.file.allocator.meta.freelist.Avail()) + traceln(" total meta pages:", tx.file.allocator.metaTotal) + traceln(" freelist pages:", len(tx.file.allocator.freelistPages)) + traceln(" wal mapping pages:", len(tx.file.wal.metaPages)) + traceln(" max pages:", tx.file.allocator.maxPages) + traceln(" wal mapped pages:", len(tx.file.wal.mapping)) + + return nil +} + +func (tx *Tx) commitPrepareWAL() (walCommitState, error) { + var st walCommitState + + tx.file.wal.fileCommitPrepare(&st, &tx.wal) + if st.checkpoint { + if err := tx.doCheckpointWAL(); err != nil { + return st, err + } + } + + if st.updated { + tx.metaAllocator().FreeRegions(&tx.alloc, tx.file.wal.metaPages) + } + return st, nil +} + +func (tx *Tx) access(id PageID) []byte { + return tx.file.mmapedPage(id) +} + +func (tx *Tx) scheduleWrite(id PageID, buf []byte) error { + tx.file.writer.Schedule(tx.writeSync, id, buf) + return nil +} + +// rollbackChanges undoes all changes scheduled. +// Potentially changes to be undone: +// 1. WAL: +// - mapping is only updated after ACK. +// - pages have been allocated from meta area -> only restore freelists +// 2. Allocations: +// - restore freelists, by returning allocated page +// ids < old endmarker to freelists +// - restore old end markers. +// - move pages allocated into meta area back into data area +// 3. File: +// - With page flushing or transaction failing late during commit, +// file might have been grown. +// => +// - Truncate file only if pages in overflow area have been allocated. +// - If maxSize == 0, truncate file to old end marker. +func (tx *Tx) rollbackChanges() error { + tx.file.allocator.Rollback(&tx.alloc) + + maxPages := tx.file.allocator.maxPages + if maxPages == 0 { + return nil + } + + // compute endmarker from before running the last transaction + endMarker := tx.file.allocator.meta.endMarker + if dataEnd := tx.file.allocator.data.endMarker; dataEnd > endMarker { + endMarker = dataEnd + } + + sz, err := tx.file.file.Size() + if err != nil { + // getting file size failed. State is valid, but we can not truncate :/ + return err + } + + truncateSz := uint(endMarker) * tx.file.allocator.pageSize + if uint(sz) > uint(truncateSz) { + return tx.file.file.Truncate(int64(truncateSz)) + } + + return nil +} + +// Page accesses a page by ID. Accessed pages are cached. Retrieving a page +// that has already been accessed, will return a pointer to the same Page object. +// Returns an error if the id is known to be invalid or the page has already +// been freed. 
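For example, reading the application root page under a readonly transaction might look like this (a sketch; f is an already opened *File and error handling is abbreviated):

	tx := f.BeginReadonly()
	defer tx.Close()

	page, err := tx.RootPage() // nil if SetRoot has never been called
	if err != nil || page == nil {
		return err
	}

	buf, err := page.Bytes() // raw page contents
	if err != nil {
		return err
	}
	_ = buf // application-specific decoding goes here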
+func (tx *Tx) Page(id PageID) (*Page, error) { + inBounds := id >= 2 + if tx.flags.readonly { + inBounds = inBounds && id < tx.dataEndID + } else { + inBounds = inBounds && id < tx.file.allocator.data.endMarker + } + if !inBounds { + return nil, errOutOfBounds + } + + if tx.alloc.data.freed.Has(id) || tx.alloc.meta.freed.Has(id) { + return nil, errFreedPage + } + + if p := tx.pages[id]; p != nil { + return p, nil + } + + page := newPage(tx, id) + if walID := tx.file.wal.Get(id); walID != 0 { + page.ondiskID = walID + } + + tx.pages[id] = page + return page, nil +} + +// Alloc allocates a new writable page with yet empty contents. +// Use Load(), Bytes and MarkDirty(), or SetBytes() to fill the page with +// new contents. +// Returns an error if the transaction is readonly or no more space is available. +func (tx *Tx) Alloc() (page *Page, err error) { + if err := tx.canWrite(); err != nil { + return nil, err + } + + err = tx.allocPagesWith(1, func(p *Page) { page = p }) + return +} + +// AllocN allocates n potentially non-contious, yet empty pages. +// Returns an error if the transaction is readonly or no more space is available. +func (tx *Tx) AllocN(n int) (pages []*Page, err error) { + if err := tx.canWrite(); err != nil { + return nil, err + } + + if n <= 0 { + return nil, nil + } + + pages, i := make([]*Page, n), 0 + err = tx.allocPagesWith(n, func(page *Page) { + pages[i], i = page, i+1 + }) + if err != nil { + return nil, err + } + return pages, nil +} + +func (tx *Tx) dataAllocator() *dataAllocator { + return tx.file.allocator.DataAllocator() +} + +func (tx *Tx) metaAllocator() *metaAllocator { + return tx.file.allocator.MetaAllocator() +} + +func (tx *Tx) walAllocator() *walAllocator { + return tx.file.allocator.WALPageAllocator() +} + +func (tx *Tx) allocPagesWith(n int, fn func(*Page)) error { + count := tx.dataAllocator().AllocRegionsWith(&tx.alloc, uint(n), func(reg region) { + reg.EachPage(func(id PageID) { + page := newPage(tx, id) + page.flags.new = true + tx.pages[id] = page + fn(page) + }) + }) + if count == 0 { + return errOutOfMemory + } + return nil +} + +func (tx *Tx) freePage(id PageID) { + tx.dataAllocator().Free(&tx.alloc, id) +} + +func (tx *Tx) allocWALID(orig PageID) PageID { + id := tx.walAllocator().Alloc(&tx.alloc) + if id != 0 { + tx.wal.Set(orig, id) + } + return id +} + +func (tx *Tx) freeWALID(id, walID PageID) { + tx.walAllocator().Free(&tx.alloc, walID) + tx.wal.Release(id) +} + +// Flush flushes all dirty pages within the transaction. +func (tx *Tx) Flush() error { + if err := tx.canWrite(); err != nil { + return err + } + + for _, page := range tx.pages { + if err := page.doFlush(); err != nil { + return err + } + } + return nil +} + +func (tx *Tx) canWrite() error { + if !tx.flags.active { + return errTxFinished + } + if tx.flags.readonly { + return errTxReadonly + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/txfiletest/txfiletest.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/txfiletest/txfiletest.go new file mode 100644 index 00000000..aa1b78b2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/txfiletest/txfiletest.go @@ -0,0 +1,91 @@ +// Package txfiletest provides utilities for testing on top of txfile. +package txfiletest + +import ( + "io/ioutil" + "os" + "path" + + "github.com/elastic/go-txfile" + "github.com/elastic/go-txfile/internal/cleanup" +) + +// TestFile wraps a txfile.File structure for testing. 
+type TestFile struct { + *txfile.File + t testT + Path string + opts txfile.Options +} + +type testT interface { + Error(...interface{}) + Fatal(...interface{}) +} + +// SetupTestFile creates a new testfile in a temporary directory. +// The teardown function will remove the directory and the temporary file. +func SetupTestFile(t testT, opts txfile.Options) (tf *TestFile, teardown func()) { + if opts.PageSize == 0 { + opts.PageSize = 4096 + } + + ok := false + path, cleanPath := SetupPath(t, "") + defer cleanup.IfNot(&ok, cleanPath) + + tf = &TestFile{Path: path, t: t, opts: opts} + tf.Open() + + ok = true + return tf, func() { + tf.Close() + cleanPath() + } +} + +// Reopen tries to close and open the file again. +func (f *TestFile) Reopen() { + f.Close() + f.Open() +} + +// Close the test file. +func (f *TestFile) Close() { + if f.File != nil { + if err := f.File.Close(); err != nil { + f.t.Fatal("close failed on reopen") + } + f.File = nil + } +} + +// Open opens the file if it has been closed. +// The File pointer will be changed. +func (f *TestFile) Open() { + if f.File != nil { + return + } + + tmp, err := txfile.Open(f.Path, os.ModePerm, f.opts) + if err != nil { + f.t.Fatal("reopen failed") + } + f.File = tmp +} + +// SetupPath creates a temporary directory for testing. +// Use the teardown function to remove the directory again. +func SetupPath(t testT, file string) (dir string, teardown func()) { + dir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + + if file == "" { + file = "test.dat" + } + return path.Join(dir, file), func() { + os.RemoveAll(dir) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/util.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/util.go new file mode 100644 index 00000000..61f044dc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/util.go @@ -0,0 +1,141 @@ +package txfile + +import ( + "math/bits" +) + +// pagingWriter supports writing entries into a linked (pre-allocated) list of +// pages. 
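For instance, an internal caller can spread a sequence of encoded entries across a set of pre-allocated meta pages roughly like this (a sketch only; ids, entries and scheduleWrite stand in for caller-provided values, scheduleWrite playing the role of a callback such as Tx.scheduleWrite):

	w := newPagingWriter(ids, pageSize, 0, func(id PageID, buf []byte) error {
		return scheduleWrite(id, buf) // hand each finished page to the file writer
	})
	for _, entry := range entries { // each entry is one encoded []byte record
		if err := w.Write(entry); err != nil {
			return err
		}
	}
	return w.Flush() // finalize and emit the remaining pages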
+type pagingWriter struct { + ids idList + buf []byte + pageSize uint + extraHeader uint + + onPage func(id PageID, buf []byte) error + + // current page state + i int + off uint + hdr *listPage + page []byte + payload []byte + count uint32 +} + +const maxUint uint = ^uint(0) + +func newPagingWriter( + ids idList, + pageSize uint, + extraHeader uint, + onPage func(id PageID, buf []byte) error, +) *pagingWriter { + if len(ids) == 0 { + return nil + } + + buf := make([]byte, len(ids)*int(pageSize)) + + // prelink all pages, in case some are not written to + off := 0 + for _, id := range ids[1:] { + hdr, _ := castListPage(buf[off:]) + hdr.next.Set(id) + off += int(pageSize) + } + + w := &pagingWriter{ + ids: ids, + buf: buf, + pageSize: pageSize, + extraHeader: extraHeader, + onPage: onPage, + } + w.prepareNext() + return w +} + +func (w *pagingWriter) Write(entry []byte) error { + if w == nil { + return nil + } + + if len(w.payload) < len(entry) { + if err := w.flushCurrent(); err != nil { + return err + } + } + + n := copy(w.payload, entry) + w.payload = w.payload[n:] + w.count++ + return nil +} + +func (w *pagingWriter) Flush() error { + if w == nil { + return nil + } + + if err := w.finalizePage(); err != nil { + return err + } + + for w.i < len(w.ids) { + // update to next page + if err := w.prepareNext(); err != nil { + return err + } + + if err := w.finalizePage(); err != nil { + return err + } + } + + return nil +} + +func (w *pagingWriter) flushCurrent() (err error) { + if err = w.finalizePage(); err == nil { + err = w.prepareNext() + } + return +} + +func (w *pagingWriter) finalizePage() error { + w.hdr.count.Set(w.count) + if w.onPage != nil { + if err := w.onPage(w.ids[w.i], w.page); err != nil { + return err + } + } + + w.count = 0 + w.off += w.pageSize + w.i++ + return nil +} + +func (w *pagingWriter) prepareNext() error { + if w.i >= len(w.ids) { + return errOutOfMemory + } + w.page = w.buf[w.off : w.off+w.pageSize] + w.hdr, w.payload = castListPage(w.page) + w.payload = w.payload[w.extraHeader:] + return nil +} + +func isPowerOf2(v uint64) bool { + // an uint is a power of two if exactly one bit is set -> + return v > 0 && (v&(v-1)) == 0 +} + +// nextPowerOf2 computes the next power of two value of `u`, such that +// nextPowerOf2(u) > u +// The input value must not have the highest bit being set. 
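For example, nextPowerOf2(4) == 8, nextPowerOf2(5) == 8, and nextPowerOf2(0) == 1, while isPowerOf2 reports true for 1, 2, 4, 8, ... and false for 0 or 6.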
+func nextPowerOf2(u uint64) uint64 { + b := uint64(bits.LeadingZeros64(u)) + return uint64(1) << (64 - b) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs.go new file mode 100644 index 00000000..3fbb9ed5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs.go @@ -0,0 +1,42 @@ +package txfile + +import ( + "io" + "os" +) + +type vfsFile interface { + io.Closer + io.WriterAt + io.ReaderAt + + Name() string + Size() (int64, error) + Sync() error + Truncate(int64) error + + Lock(exclusive, blocking bool) error + Unlock() error + + MMap(sz int) ([]byte, error) + MUnmap([]byte) error +} + +type osFile struct { + *os.File + state osFileState +} + +func openOSFile(path string, mode os.FileMode) (*osFile, error) { + flags := os.O_RDWR | os.O_CREATE + f, err := os.OpenFile(path, flags, mode) + return &osFile{File: f}, err +} + +func (o *osFile) Size() (int64, error) { + stat, err := o.File.Stat() + if err != nil { + return -1, err + } + return stat.Size(), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs_unix.go new file mode 100644 index 00000000..ecea5ab0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs_unix.go @@ -0,0 +1,33 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package txfile + +import ( + "golang.org/x/sys/unix" +) + +type osFileState struct{} + +func (f *osFile) MMap(sz int) ([]byte, error) { + return unix.Mmap(int(f.Fd()), 0, int(sz), unix.PROT_READ, unix.MAP_SHARED) +} + +func (f *osFile) MUnmap(b []byte) error { + return unix.Munmap(b) +} + +func (f *osFile) Lock(exclusive, blocking bool) error { + flags := unix.LOCK_SH + if exclusive { + flags = unix.LOCK_EX + } + if !blocking { + flags |= unix.LOCK_NB + } + + return unix.Flock(int(f.Fd()), flags) +} + +func (f *osFile) Unlock() error { + return unix.Flock(int(f.Fd()), unix.LOCK_UN) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs_windows.go new file mode 100644 index 00000000..e5369657 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/vfs_windows.go @@ -0,0 +1,98 @@ +package txfile + +import ( + "fmt" + "os" + "reflect" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/theckman/go-flock" +) + +type osFileState struct { + mmapHandle windows.Handle + lock *flock.Flock +} + +const ( + lockExt = ".lock" +) + +func (f *osFile) MMap(sz int) ([]byte, error) { + szhi, szlo := uint32(sz>>32), uint32(sz) + hdl, err := windows.CreateFileMapping(windows.Handle(f.Fd()), nil, windows.PAGE_READONLY, szhi, szlo, nil) + if hdl == 0 { + return nil, os.NewSyscallError("CreateFileMapping", err) + } + + // map memory + addr, err := windows.MapViewOfFile(hdl, windows.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + windows.CloseHandle(hdl) + return nil, os.NewSyscallError("MapViewOfFile", err) + } + + f.state.mmapHandle = hdl + + slice := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(addr), + Len: sz, + Cap: sz})) + return slice, nil +} + +func (f *osFile) MUnmap(b []byte) error { + err1 := windows.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0]))) + b = nil + + err2 := windows.CloseHandle(f.state.mmapHandle) + 
f.state.mmapHandle = 0 + + if err1 != nil { + return os.NewSyscallError("UnmapViewOfFile", err1) + } else if err2 != nil { + return os.NewSyscallError("CloseHandle", err2) + } + return nil +} + +func (f *osFile) Lock(exclusive, blocking bool) error { + if f.state.lock != nil { + return fmt.Errorf("file %v is already locked", f.Name()) + } + + var ok bool + var err error + lock := flock.NewFlock(f.Name() + lockExt) + if blocking { + err = lock.Lock() + ok = err != nil + } else { + ok, err = lock.TryLock() + } + + if err != nil { + return err + } + if !ok { + return fmt.Errorf("file %v can not be locked right now", f.Name()) + } + + f.state.lock = lock + return nil +} + +func (f *osFile) Unlock() error { + if f.state.lock == nil { + return fmt.Errorf("file %v is not locked", f.Name()) + } + + err := f.state.lock.Unlock() + if err == nil { + f.state.lock = nil + } + + return err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/wal.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/wal.go new file mode 100644 index 00000000..123b0c8a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/wal.go @@ -0,0 +1,240 @@ +package txfile + +import "unsafe" + +// waLog (write-ahead-log) mapping page ids to overwrite page ids in +// the write-ahead-log. +type waLog struct { + mapping walMapping + metaPages regionList +} + +type txWalState struct { + free pageSet // ids being freed + new walMapping // all wal pages used for overwrites in a transaction + walLimit uint // transaction wal page count -> execute checkpoint when reached +} + +// walCommitState keeps track of changes applied to the wal log during the +// commit. These changes must be recorded for now, as the new wal state must +// not be updated in memory until after the transaction has been commit to disk. 
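+// Once the on-disk commit succeeds, waLog.Commit applies the mapping and
+// meta pages prepared here to the in-memory log.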
+type walCommitState struct { + tx *txWalState + updated bool + checkpoint bool + mapping walMapping // new wal mapping + allocRegions regionList // pre-allocate meta pages for serializing new mapping +} + +type walMapping map[PageID]PageID + +const ( + walHeaderSize = uint(unsafe.Sizeof(walPage{})) + walEntrySize = 14 + + defaultWALLimit = 1000 +) + +func makeWALog() waLog { + return waLog{ + mapping: walMapping{}, + metaPages: nil, + } +} + +func (l *waLog) makeTxWALState(limit uint) txWalState { + if limit == 0 { + // TODO: init wal limit on init, based on max file size + limit = defaultWALLimit + } + + return txWalState{ + walLimit: limit, + } +} + +func (l *waLog) Get(id PageID) PageID { + return l.mapping[id] +} + +func (l *waLog) fileCommitPrepare(st *walCommitState, tx *txWalState) { + st.tx = tx + newWal := createMappingUpdate(l.mapping, tx) + st.checkpoint = tx.walLimit > 0 && uint(len(newWal)) >= tx.walLimit + st.updated = st.checkpoint || tx.Updated() + + if st.checkpoint { + newWal = tx.new + } + st.mapping = newWal +} + +func (l *waLog) fileCommitAlloc(tx *Tx, st *walCommitState) error { + if !st.updated { + return nil + } + + pages := predictWALMappingPages(st.mapping, uint(tx.PageSize())) + if pages > 0 { + st.allocRegions = tx.metaAllocator().AllocRegions(&tx.alloc, pages) + if st.allocRegions == nil { + return errOutOfMemory + } + } + return nil +} + +func (l *waLog) fileCommitSerialize( + st *walCommitState, + pageSize uint, + onPage func(id PageID, buf []byte) error, +) error { + if !st.updated { + return nil + } + return writeWAL(st.allocRegions, pageSize, st.mapping, onPage) +} + +func (l *waLog) fileCommitMeta(meta *metaPage, st *walCommitState) { + if st.updated { + var rootPage PageID + if len(st.allocRegions) > 0 { + rootPage = st.allocRegions[0].id + } + meta.wal.Set(rootPage) + } +} + +func (l *waLog) Commit(st *walCommitState) { + if st.updated { + l.mapping = st.mapping + l.metaPages = st.allocRegions + } +} + +func (l walMapping) empty() bool { + return len(l) == 0 +} + +func (s *txWalState) Release(id PageID) { + s.free.Add(id) + if s.new != nil { + delete(s.new, id) + } +} + +func (s *txWalState) Updated() bool { + return !s.free.Empty() || !s.new.empty() +} + +func (s *txWalState) Set(orig, overwrite PageID) { + if s.new == nil { + s.new = walMapping{} + } + s.new[orig] = overwrite +} + +func createMappingUpdate(old walMapping, tx *txWalState) walMapping { + if !tx.Updated() { + return nil + } + + new := walMapping{} + for id, walID := range old { + if tx.free.Has(id) { + continue + } + if _, exists := tx.new[id]; exists { + continue + } + + new[id] = walID + } + for id, walID := range tx.new { + new[id] = walID + } + + return new +} + +func predictWALMappingPages(m walMapping, pageSize uint) uint { + perPage := walEntriesPerPage(pageSize) + return (uint(len(m)) + perPage - 1) / perPage +} + +func walEntriesPerPage(pageSize uint) uint { + payload := pageSize - walHeaderSize + return payload / walEntrySize +} + +func readWALMapping( + wal *waLog, + access func(PageID) []byte, + root PageID, +) error { + mapping, ids, err := readWAL(access, root) + if err != nil { + return nil + } + + wal.mapping = mapping + wal.metaPages = ids.Regions() + return nil +} + +func readWAL( + access func(PageID) []byte, + root PageID, +) (walMapping, idList, error) { + if root == 0 { + return walMapping{}, nil, nil + } + + mapping := walMapping{} + var metaPages idList + for pageID := root; pageID != 0; { + metaPages.Add(pageID) + node, data := castWalPage(access(pageID)) + if 
node == nil { + return nil, nil, errOutOfBounds + } + + count := int(node.count.Get()) + pageID = node.next.Get() + + for i := 0; i < count; i++ { + // read node mapping. Only 7 bytes are used per pageID + var k, v pgID + copy(k[0:7], data[0:7]) + copy(v[0:7], data[7:14]) + data = data[14:] + + mapping[k.Get()] = v.Get() + } + } + + return mapping, metaPages, nil +} + +func writeWAL( + to regionList, + pageSize uint, + mapping walMapping, + onPage func(id PageID, buf []byte) error, +) error { + allocPages := to.PageIDs() + writer := newPagingWriter(allocPages, pageSize, 0, onPage) + for id, walID := range mapping { + var k, v pgID + k.Set(id) + v.Set(walID) + + var payload [walEntrySize]byte + copy(payload[0:7], k[0:7]) + copy(payload[7:14], v[0:7]) + if err := writer.Write(payload[:]); err != nil { + return err + } + } + return writer.Flush() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/write.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/write.go new file mode 100644 index 00000000..4a56a3b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-txfile/write.go @@ -0,0 +1,277 @@ +package txfile + +import ( + "io" + "sort" + "sync" +) + +type writer struct { + target writable + pageSize uint + + mux sync.Mutex + cond *sync.Cond + done bool + scheduled []writeMsg + scheduled0 [64]writeMsg + fsync []syncMsg + fsync0 [8]syncMsg + + pending int // number of scheduled writes since last sync + published int // number of writes executed since last sync +} + +type writeMsg struct { + sync *txWriteSync + id PageID + buf []byte + fsync bool +} + +type syncMsg struct { + sync *txWriteSync + count int // number of pages to process, before fsyncing +} + +type txWriteSync struct { + err error + wg sync.WaitGroup +} + +type writable interface { + io.WriterAt + Sync() error +} + +func (w *writer) Init(target writable, pageSize uint) { + w.target = target + w.pageSize = pageSize + w.cond = sync.NewCond(&w.mux) + w.scheduled = w.scheduled0[:0] + w.fsync = w.fsync[:0] +} + +func (w *writer) Stop() { + w.mux.Lock() + w.done = true + w.mux.Unlock() + w.cond.Signal() +} + +func (w *writer) Schedule(sync *txWriteSync, id PageID, buf []byte) { + sync.Retain() + traceln("schedule write") + + w.mux.Lock() + defer w.mux.Unlock() + w.scheduled = append(w.scheduled, writeMsg{ + sync: sync, + id: id, + buf: buf, + }) + w.pending++ + + w.cond.Signal() +} + +func (w *writer) Sync(sync *txWriteSync) { + sync.Retain() + traceln("schedule sync") + + w.mux.Lock() + defer w.mux.Unlock() + w.fsync = append(w.fsync, syncMsg{ + sync: sync, + count: w.pending, + }) + w.pending = 0 + + w.cond.Signal() +} + +func (w *writer) Run() error { + var ( + buf [1024]writeMsg + n int + err error + fsync *txWriteSync + done bool + ) + + traceln("start async writer") + defer traceln("stop async writer") + + for { + n, fsync, done = w.nextCommand(buf[:]) + if done { + break + } + + traceln("writer message: ", n, fsync != nil, done) + + // TODO: use vector IO if possible + msgs := buf[:n] + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].id < msgs[j].id + }) + + for _, msg := range msgs { + if err != nil { + traceln("done error") + + msg.sync.err = err + msg.sync.wg.Done() + continue + } + + off := uint64(msg.id) * uint64(w.pageSize) + tracef("write at(id=%v, off=%v, len=%v)\n", msg.id, off, len(msg.buf)) + + err = writeAt(w.target, msg.buf, int64(off)) + if err != nil { + msg.sync.err = err + } + + traceln("done send") + msg.sync.Release() + } + 
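+ // If this batch completed a pending fsync request, sync the target file
+ // before releasing the transaction waiting on it.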
+ if fsync != nil { + if err == nil { + if err = w.target.Sync(); err != nil { + fsync.err = err + } + } + + traceln("done fsync") + fsync.Release() + } + + if err != nil { + break + } + } + + if done { + return err + } + + // file still active, but we're facing errors -> stop writing and propagate + // last error to all transactions. + for { + n, fsync, done = w.nextCommand(buf[:]) + if done { + break + } + + traceln("ignoring writer message: ", n, fsync != nil, done) + + for _, msg := range buf[:n] { + msg.sync.err = err + msg.sync.Release() + } + if fsync != nil { + fsync.err = err + fsync.Release() + } + } + + return err +} + +func (w *writer) nextCommand(buf []writeMsg) (int, *txWriteSync, bool) { + w.mux.Lock() + defer w.mux.Unlock() + + traceln("async writer: wait next command") + defer traceln("async writer: received next command") + + for { + if w.done { + return 0, nil, true + } + + max := len(w.scheduled) + if max == 0 && len(w.fsync) == 0 { // no messages + w.cond.Wait() + continue + } + + if l := len(buf); l < max { + max = l + } + + // Check if we need to fsync and adjust `max` number of pages of required. + var sync *txWriteSync + traceln("check fsync: ", len(w.fsync)) + + if len(w.fsync) > 0 { + msg := w.fsync[0] + + // number of outstanding scheduled writes before fsync + outstanding := msg.count - w.published + traceln("outstanding:", outstanding) + + if outstanding <= max { // -> fsync + max, sync = outstanding, msg.sync + + // advance fsync state + w.fsync[0] = syncMsg{} // clear entry, so to potentially clean references from w.fsync0 + w.fsync = w.fsync[1:] + if len(w.fsync) == 0 { + w.fsync = w.fsync0[:0] + } + } + } + + // return buffers to be processed + var n int + scheduled := w.scheduled[:max] + if len(scheduled) > 0 { + n = copy(buf, scheduled) + w.scheduled = w.scheduled[n:] + if len(w.scheduled) == 0 { + w.scheduled = w.scheduled0[:0] + } + } + + if sync == nil { + w.published += n + } else { + w.published = 0 + } + + return n, sync, false + } +} + +func newTxWriteSync() *txWriteSync { + return &txWriteSync{} +} + +func (s *txWriteSync) Retain() { + s.wg.Add(1) +} + +func (s *txWriteSync) Release() { + s.wg.Done() +} + +func (s *txWriteSync) Wait() error { + s.wg.Wait() + return s.err +} + +func writeAt(out io.WriterAt, buf []byte, off int64) error { + for len(buf) > 0 { + n, err := out.WriteAt(buf, off) + if err != nil { + return err + } + + off += int64(n) + buf = buf[n:] + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/LICENSE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/NOTICE.txt new file mode 100644 index 00000000..d47caeb0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/NOTICE.txt @@ -0,0 +1,5 @@ +Elastic go-windows +Copyright 2017-2018 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/README.md new file mode 100644 index 00000000..1140052d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/README.md @@ -0,0 +1,18 @@ +# go-windows + +[![Build Status](http://img.shields.io/travis/elastic/go-windows.svg?style=flat-square)][travis] +[![Build status](https://ci.appveyor.com/api/projects/status/remqhuw0jjguygc3/branch/master?svg=true)][appveyor] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/elastic/go-windows +[appveyor]: https://ci.appveyor.com/project/elastic-beats/go-windows/branch/master +[godocs]: http://godoc.org/github.com/elastic/go-windows + +go-windows is a library for Go (golang) that provides wrappers to various +Windows APIs that are not covered by the stdlib or by +[golang.org/x/sys/windows](https://godoc.org/golang.org/x/sys/windows). + +Goals / Features + +- Does not use cgo. +- Provide abstractions to make using the APIs easier. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/doc.go new file mode 100644 index 00000000..8bee7b37 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/doc.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package windows contains various Windows system calls. +package windows + +// Use "GOOS=windows go generate -v -x" to generate the sources. +// Add -trace to enable debug prints around syscalls. +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -systemdll=false -output zsyscall_windows.go kernel32.go version.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/kernel32.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/kernel32.go new file mode 100644 index 00000000..28bb5d47 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/kernel32.go @@ -0,0 +1,222 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build windows + +package windows + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "github.com/pkg/errors" +) + +// Syscalls +//sys _GetNativeSystemInfo(systemInfo *SystemInfo) (err error) = kernel32.GetNativeSystemInfo +//sys _GetTickCount64() (millis uint64, err error) = kernel32.GetTickCount64 +//sys _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) = kernel32.GetSystemTimes +//sys _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) = kernel32.GlobalMemoryStatusEx + +var ( + sizeofMemoryStatusEx = uint32(unsafe.Sizeof(MemoryStatusEx{})) +) + +// SystemInfo is an equivalent representation of SYSTEM_INFO in the Windows API. +// https://msdn.microsoft.com/en-us/library/ms724958%28VS.85%29.aspx?f=255&MSPPError=-2147217396 +type SystemInfo struct { + ProcessorArchitecture ProcessorArchitecture + Reserved uint16 + PageSize uint32 + MinimumApplicationAddress uintptr + MaximumApplicationAddress uintptr + ActiveProcessorMask uint64 + NumberOfProcessors uint32 + ProcessorType ProcessorType + AllocationGranularity uint32 + ProcessorLevel uint16 + ProcessorRevision uint16 +} + +// ProcessorArchitecture specifies the processor architecture that the OS requires. +type ProcessorArchitecture uint16 + +// List of processor architectures associated with SystemInfo. +const ( + ProcessorArchitectureAMD64 ProcessorArchitecture = 9 + ProcessorArchitectureARM ProcessorArchitecture = 5 + ProcessorArchitectureARM64 ProcessorArchitecture = 12 + ProcessorArchitectureIA64 ProcessorArchitecture = 6 + ProcessorArchitectureIntel ProcessorArchitecture = 0 + ProcessorArchitectureUnknown ProcessorArchitecture = 0xFFFF +) + +func (a ProcessorArchitecture) String() string { + names := map[ProcessorArchitecture]string{ + ProcessorArchitectureAMD64: "x86_64", + ProcessorArchitectureARM: "arm", + ProcessorArchitectureARM64: "arm64", + ProcessorArchitectureIA64: "ia64", + ProcessorArchitectureIntel: "x86", + } + + name, found := names[a] + if !found { + return "unknown" + } + return name +} + +// ProcessorType specifies the type of processor. +type ProcessorType uint32 + +// List of processor types associated with SystemInfo. 
+const ( + ProcessorTypeIntel386 ProcessorType = 386 + ProcessorTypeIntel486 ProcessorType = 486 + ProcessorTypeIntelPentium ProcessorType = 586 + ProcessorTypeIntelIA64 ProcessorType = 2200 + ProcessorTypeAMDX8664 ProcessorType = 8664 +) + +func (t ProcessorType) String() string { + names := map[ProcessorType]string{ + ProcessorTypeIntel386: "386", + ProcessorTypeIntel486: "486", + ProcessorTypeIntelPentium: "586", + ProcessorTypeIntelIA64: "ia64", + ProcessorTypeAMDX8664: "x64_64", + } + + name, found := names[t] + if !found { + return "unknown" + } + return name +} + +// MemoryStatusEx is an equivalent representation of MEMORYSTATUSEX in the +// Windows API. It contains information about the current state of both physical +// and virtual memory, including extended memory. +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770 +type MemoryStatusEx struct { + length uint32 + MemoryLoad uint32 + TotalPhys uint64 + AvailPhys uint64 + TotalPageFile uint64 + AvailPageFile uint64 + TotalVirtual uint64 + AvailVirtual uint64 + AvailExtendedVirtual uint64 +} + +// GetNativeSystemInfo retrieves information about the current system to an +// application running under WOW64. If the function is called from a 64-bit +// application, it is equivalent to the GetSystemInfo function. +// https://msdn.microsoft.com/en-us/library/ms724340%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396 +func GetNativeSystemInfo() (SystemInfo, error) { + var systemInfo SystemInfo + if err := _GetNativeSystemInfo(&systemInfo); err != nil { + return SystemInfo{}, errors.Wrap(err, "GetNativeSystemInfo failed") + } + return systemInfo, nil +} + +// Version identifies a Windows version by major, minor, and build number. +type Version struct { + Major int + Minor int + Build int +} + +// GetWindowsVersion returns the Windows version information. Applications not +// manifested for Windows 8.1 or Windows 10 will return the Windows 8 OS version +// value (6.2). +// +// For a table of version numbers see: +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +func GetWindowsVersion() Version { + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx + ver, err := syscall.GetVersion() + if err != nil { + // GetVersion should never return an error. + panic(fmt.Errorf("GetVersion failed: %v", err)) + } + + return Version{ + Major: int(ver & 0xFF), + Minor: int(ver >> 8 & 0xFF), + Build: int(ver >> 16), + } +} + +// IsWindowsVistaOrGreater returns true if the Windows version is Vista or +// greater. +func (v Version) IsWindowsVistaOrGreater() bool { + // Vista is 6.0. + return v.Major >= 6 && v.Minor >= 0 +} + +// GetTickCount64 retrieves the number of milliseconds that have elapsed since +// the system was started. +// This function is available on Windows Vista and newer. +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724411(v=vs.85).aspx +func GetTickCount64() (uint64, error) { + return _GetTickCount64() +} + +// GetSystemTimes retrieves system timing information. On a multiprocessor +// system, the values returned are the sum of the designated times across all +// processors. The returned kernel time does not include the system idle time. 
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724400(v=vs.85).aspx +func GetSystemTimes() (idle, kernel, user time.Duration, err error) { + var idleTime, kernelTime, userTime syscall.Filetime + err = _GetSystemTimes(&idleTime, &kernelTime, &userTime) + if err != nil { + return 0, 0, 0, errors.Wrap(err, "GetSystemTimes failed") + } + + idle = FiletimeToDuration(&idleTime) + kernel = FiletimeToDuration(&kernelTime) // Kernel time includes idle time so we subtract it out. + user = FiletimeToDuration(&userTime) + + return idle, kernel - idle, user, nil +} + +// FiletimeToDuration converts a Filetime to a time.Duration. Do not use this +// method to convert a Filetime to an actual clock time, for that use +// Filetime.Nanosecond(). +func FiletimeToDuration(ft *syscall.Filetime) time.Duration { + n := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) // in 100-nanosecond intervals + return time.Duration(n * 100) +} + +// GlobalMemoryStatusEx retrieves information about the system's current usage +// of both physical and virtual memory. +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +func GlobalMemoryStatusEx() (MemoryStatusEx, error) { + memoryStatusEx := MemoryStatusEx{length: sizeofMemoryStatusEx} + err := _GlobalMemoryStatusEx(&memoryStatusEx) + if err != nil { + return MemoryStatusEx{}, errors.Wrap(err, "GlobalMemoryStatusEx failed") + } + + return memoryStatusEx, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/uft16.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/uft16.go new file mode 100644 index 00000000..cec1ad50 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/uft16.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "fmt" + "unicode/utf16" +) + +// UTF16BytesToString returns a string that is decoded from the UTF-16 bytes. +// The byte slice must be of even length otherwise an error will be returned. +// The integer returned is the offset to the start of the next string with +// buffer if it exists, otherwise -1 is returned. +func UTF16BytesToString(b []byte) (string, int, error) { + if len(b)%2 != 0 { + return "", 0, fmt.Errorf("slice must have an even length (length=%d)", len(b)) + } + + offset := -1 + + // Find the null terminator if it exists and re-slice the b. 
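+ // A UTF-16 null terminator occupies two zero bytes, hence the +2 offsets.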
+ if nullIndex := indexNullTerminator(b); nullIndex > -1 { + if len(b) > nullIndex+2 { + offset = nullIndex + 2 + } + + b = b[:nullIndex] + } + + s := make([]uint16, len(b)/2) + for i := range s { + s[i] = uint16(b[i*2]) + uint16(b[(i*2)+1])<<8 + } + + return string(utf16.Decode(s)), offset, nil +} + +// indexNullTerminator returns the index of a null terminator within a buffer +// containing UTF-16 encoded data. If the null terminator is not found -1 is +// returned. +func indexNullTerminator(b []byte) int { + if len(b) < 2 { + return -1 + } + + for i := 0; i < len(b); i += 2 { + if b[i] == 0 && b[i+1] == 0 { + return i + } + } + + return -1 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/version.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/version.go new file mode 100644 index 00000000..f614de2c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/version.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build windows + +package windows + +import ( + "fmt" + "unsafe" + + "github.com/pkg/errors" +) + +// Syscalls +//sys _GetFileVersionInfo(filename string, reserved uint32, dataLen uint32, data *byte) (success bool, err error) [!success] = version.GetFileVersionInfoW +//sys _GetFileVersionInfoSize(filename string, handle uintptr) (size uint32, err error) = version.GetFileVersionInfoSizeW +//sys _VerQueryValueW(data *byte, subBlock string, pBuffer *uintptr, len *uint32) (success bool, err error) [!success] = version.VerQueryValueW + +// FixedFileInfo contains version information for a file. This information is +// language and code page independent. This is an equivalent representation of +// VS_FIXEDFILEINFO. +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms646997(v=vs.85).aspx +type FixedFileInfo struct { + Signature uint32 + StrucVersion uint32 + FileVersionMS uint32 + FileVersionLS uint32 + ProductVersionMS uint32 + ProductVersionLS uint32 + FileFlagsMask uint32 + FileFlags uint32 + FileOS uint32 + FileType uint32 + FileSubtype uint32 + FileDateMS uint32 + FileDateLS uint32 +} + +// ProductVersion returns the ProductVersion value in string format. +func (info FixedFileInfo) ProductVersion() string { + return fmt.Sprintf("%d.%d.%d.%d", + (info.ProductVersionMS >> 16), + (info.ProductVersionMS & 0xFFFF), + (info.ProductVersionLS >> 16), + (info.ProductVersionLS & 0xFFFF)) +} + +// FileVersion returns the FileVersion value in string format. 
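+// The four dotted components come from the high and low words of
+// FileVersionMS and FileVersionLS (conventionally major.minor.build.revision).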
+func (info FixedFileInfo) FileVersion() string { + return fmt.Sprintf("%d.%d.%d.%d", + (info.FileVersionMS >> 16), + (info.FileVersionMS & 0xFFFF), + (info.FileVersionLS >> 16), + (info.FileVersionLS & 0xFFFF)) +} + +// VersionData is a buffer holding the data returned by GetFileVersionInfo. +type VersionData []byte + +// QueryValue uses VerQueryValue to query version information from the a +// version-information resource. It returns responses using the first language +// and code point found in the resource. The accepted keys are listed in +// the VerQueryValue documentation (e.g. ProductVersion, FileVersion, etc.). +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647464(v=vs.85).aspx +func (d VersionData) QueryValue(key string) (string, error) { + type LangAndCodePage struct { + Language uint16 + CodePage uint16 + } + + var dataPtr uintptr + var size uint32 + if _, err := _VerQueryValueW(&d[0], `\VarFileInfo\Translation`, &dataPtr, &size); err != nil || size == 0 { + return "", errors.Wrap(err, "failed to get list of languages") + } + + offset := int(dataPtr - (uintptr)(unsafe.Pointer(&d[0]))) + if offset <= 0 || offset > len(d)-1 { + return "", errors.New("invalid address") + } + + l := *(*LangAndCodePage)(unsafe.Pointer(&d[offset])) + + subBlock := fmt.Sprintf(`\StringFileInfo\%04x%04x\%v`, l.Language, l.CodePage, key) + if _, err := _VerQueryValueW(&d[0], subBlock, &dataPtr, &size); err != nil || size == 0 { + return "", errors.Wrapf(err, "failed to query %v", subBlock) + } + + offset = int(dataPtr - (uintptr)(unsafe.Pointer(&d[0]))) + if offset <= 0 || offset > len(d)-1 { + return "", errors.New("invalid address") + } + + str, _, err := UTF16BytesToString(d[offset : offset+int(size)*2]) + if err != nil { + return "", errors.Wrap(err, "failed to decode UTF16 data") + } + + return str, nil +} + +// FixedFileInfo returns the fixed version information from a +// version-information resource. It queries the root block to get the +// VS_FIXEDFILEINFO value. +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647464(v=vs.85).aspx +func (d VersionData) FixedFileInfo() (*FixedFileInfo, error) { + if len(d) == 0 { + return nil, errors.New("use GetFileVersionInfo to initialize VersionData") + } + + var dataPtr uintptr + var size uint32 + if _, err := _VerQueryValueW(&d[0], `\`, &dataPtr, &size); err != nil { + return nil, errors.Wrap(err, "VerQueryValue failed for \\") + } + + offset := int(dataPtr - (uintptr)(unsafe.Pointer(&d[0]))) + if offset <= 0 || offset > len(d)-1 { + return nil, errors.New("invalid address") + } + + // Make a copy of the struct. + ffi := *(*FixedFileInfo)(unsafe.Pointer(&d[offset])) + + return &ffi, nil +} + +// GetFileVersionInfo retrieves version information for the specified file. 
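+// Use the QueryValue and FixedFileInfo methods of the returned VersionData
+// to extract individual values.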
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647003(v=vs.85).aspx +func GetFileVersionInfo(filename string) (VersionData, error) { + size, err := _GetFileVersionInfoSize(filename, 0) + if err != nil { + return nil, errors.Wrap(err, "GetFileVersionInfoSize failed") + } + + data := make(VersionData, size) + _, err = _GetFileVersionInfo(filename, 0, uint32(len(data)), &data[0]) + if err != nil { + return nil, errors.Wrap(err, "GetFileVersionInfo failed") + } + + return data, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/zsyscall_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/zsyscall_windows.go new file mode 100644 index 00000000..cc7e3b14 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/zsyscall_windows.go @@ -0,0 +1,163 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package windows + +import ( + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + modversion = syscall.NewLazyDLL("version.dll") + + procGetNativeSystemInfo = modkernel32.NewProc("GetNativeSystemInfo") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procGetSystemTimes = modkernel32.NewProc("GetSystemTimes") + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") + procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") + procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") + procVerQueryValueW = modversion.NewProc("VerQueryValueW") +) + +func _GetNativeSystemInfo(systemInfo *SystemInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetNativeSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(systemInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func _GetTickCount64() (millis uint64, err error) { + r0, _, e1 := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + millis = uint64(r0) + if millis == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) { + r1, _, e1 := syscall.Syscall(procGetSystemTimes.Addr(), 3, uintptr(unsafe.Pointer(idleTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) { + r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func _GetFileVersionInfo(filename string, reserved uint32, dataLen uint32, data *byte) (success bool, err error) { + var _p0 *uint16 + _p0, err = 
syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return __GetFileVersionInfo(_p0, reserved, dataLen, data) +} + +func __GetFileVersionInfo(filename *uint16, reserved uint32, dataLen uint32, data *byte) (success bool, err error) { + r0, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(reserved), uintptr(dataLen), uintptr(unsafe.Pointer(data)), 0, 0) + success = r0 != 0 + if !success { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func _GetFileVersionInfoSize(filename string, handle uintptr) (size uint32, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return __GetFileVersionInfoSize(_p0, handle) +} + +func __GetFileVersionInfoSize(filename *uint16, handle uintptr) (size uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(handle), 0) + size = uint32(r0) + if size == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func _VerQueryValueW(data *byte, subBlock string, pBuffer *uintptr, len *uint32) (success bool, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(subBlock) + if err != nil { + return + } + return __VerQueryValueW(data, _p0, pBuffer, len) +} + +func __VerQueryValueW(data *byte, subBlock *uint16, pBuffer *uintptr, len *uint32) (success bool, err error) { + r0, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(subBlock)), uintptr(unsafe.Pointer(pBuffer)), uintptr(unsafe.Pointer(len)), 0, 0) + success = r0 != 0 + if !success { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md index 01d60378..27900707 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/CHANGELOG.md @@ -2,6 +2,15 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/). +## [0.9.0] + +### Added +- Added support for huge TLB pages on Linux #97 +- Added support for big endian platform #100 + +### Fixed +- Add missing method for OpenBSD #99 + ## [0.8.0] ### Added diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/README.md index 2482620a..ecdfc1c3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/README.md @@ -26,6 +26,7 @@ The features vary by operating system. 
| FDUsage | X | | | | X | | FileSystemList | X | X | X | X | X | | FileSystemUsage | X | X | X | X | X | +| HugeTLBPages | X | | | | | | LoadAverage | X | X | | X | X | | Mem | X | X | X | X | X | | ProcArgs | X | X | X | | X | diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/codecov.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/codecov.yml new file mode 100644 index 00000000..76ade0fd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/codecov.yml @@ -0,0 +1,21 @@ +# Enable coverage report message for diff on commit +coverage: + status: + project: off + patch: + default: + # basic + target: auto + threshold: null + base: auto + # advanced + branches: null + if_no_uploads: error + if_not_found: success + if_ci_failed: error + only_pulls: false + flags: null + paths: null + +# Disable comments on Pull Requests +comment: false diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/concrete_sigar.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/concrete_sigar.go index 685aa6de..e3ee80a9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/concrete_sigar.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/concrete_sigar.go @@ -62,6 +62,12 @@ func (c *ConcreteSigar) GetSwap() (Swap, error) { return s, err } +func (c *ConcreteSigar) GetHugeTLBPages() (HugeTLBPages, error) { + p := HugeTLBPages{} + err := p.Get() + return p, err +} + func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) { f := FileSystemUsage{} err := f.Get(path) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_darwin.go index f989f516..a90b998c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_darwin.go @@ -91,6 +91,10 @@ func (self *Swap) Get() error { return nil } +func (self *HugeTLBPages) Get() error { + return ErrNotImplemented{runtime.GOOS} +} + func (self *Cpu) Get() error { var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT var cpuload C.host_cpu_load_info_data_t diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_freebsd.go index 602b4a0a..ba706ff3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_freebsd.go @@ -97,6 +97,10 @@ func (self *ProcFDUsage) Get(pid int) error { return nil } +func (self *HugeTLBPages) Get() error { + return ErrNotImplemented{runtime.GOOS} +} + func parseCpuStat(self *Cpu, line string) error { fields := strings.Fields(line) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_interface.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_interface.go index a956af60..df79ae08 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_interface.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_interface.go @@ -26,6 +26,7 @@ type Sigar interface { GetLoadAverage() (LoadAverage, error) GetMem() (Mem, error) GetSwap() (Swap, error) + GetHugeTLBPages() (HugeTLBPages, error)
GetFileSystemUsage(string) (FileSystemUsage, error) GetFDUsage() (FDUsage, error) GetRusage(who int) (Rusage, error) @@ -82,6 +83,15 @@ type Swap struct { Free uint64 } +type HugeTLBPages struct { + Total uint64 + Free uint64 + Reserved uint64 + Surplus uint64 + DefaultSize uint64 + TotalAllocatedSize uint64 +} + type CpuList struct { List []Cpu } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux.go index cb1d3525..09f2e30b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux.go @@ -45,6 +45,30 @@ func (self *FDUsage) Get() error { }) } +func (self *HugeTLBPages) Get() error { + table, err := parseMeminfo() + if err != nil { + return err + } + + self.Total, _ = table["HugePages_Total"] + self.Free, _ = table["HugePages_Free"] + self.Reserved, _ = table["HugePages_Rsvd"] + self.Surplus, _ = table["HugePages_Surp"] + self.DefaultSize, _ = table["Hugepagesize"] + + if totalSize, found := table["Hugetlb"]; found { + self.TotalAllocatedSize = totalSize + } else { + // If Hugetlb is not present, or huge pages of different sizes + // are used, this figure can be unaccurate. + // TODO (jsoriano): Extract information from /sys/kernel/mm/hugepages too + self.TotalAllocatedSize = (self.Total - self.Free + self.Reserved) * self.DefaultSize + } + + return nil +} + func (self *ProcFDUsage) Get(pid int) error { err := readFile(procFileName(pid, "limits"), func(line string) bool { if strings.HasPrefix(line, "Max open files") { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux_common.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux_common.go index 8e5e7856..0f46c730 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux_common.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_linux_common.go @@ -379,12 +379,16 @@ func parseMeminfo() (map[string]uint64, error) { return true // skip on errors } - num := strings.TrimLeft(fields[1], " ") - val, err := strtoull(strings.Fields(num)[0]) + valueUnit := strings.Fields(fields[1]) + value, err := strtoull(valueUnit[0]) if err != nil { return true // skip on errors } - table[fields[0]] = val * 1024 //in bytes + + if len(valueUnit) > 1 && valueUnit[1] == "kB" { + value *= 1024 + } + table[fields[0]] = value return true }) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_openbsd.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_openbsd.go index 4f1383a6..e4371b8b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_openbsd.go @@ -294,6 +294,10 @@ func (self *Swap) Get() error { return nil } +func (self *HugeTLBPages) Get() error { + return ErrNotImplemented{runtime.GOOS} +} + func (self *Cpu) Get() error { load := [C.CPUSTATES]C.long{C.CP_USER, C.CP_NICE, C.CP_SYS, C.CP_INTR, C.CP_IDLE} @@ -381,6 +385,10 @@ func (self *ProcFDUsage) Get(pid int) error { return ErrNotImplemented{runtime.GOOS} } +func (self *Rusage) Get(pid int) error { + return ErrNotImplemented{runtime.GOOS} +} + func fillCpu(cpu *Cpu, load [C.CPUSTATES]C.long) { cpu.User = uint64(load[0]) cpu.Nice = uint64(load[1]) diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_stub.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_stub.go index 0b858f1c..de9565ae 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_stub.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_stub.go @@ -22,6 +22,10 @@ func (s *Swap) Get() error { return ErrNotImplemented{runtime.GOOS} } +func (s *HugeTLBPages) Get() error { + return ErrNotImplemented{runtime.GOOS} +} + func (f *FDUsage) Get() error { return ErrNotImplemented{runtime.GOOS} } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_windows.go index 0cdf928d..c2b54d8d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sigar_windows.go @@ -120,6 +120,10 @@ func (self *Swap) Get() error { return nil } +func (self *HugeTLBPages) Get() error { + return ErrNotImplemented{runtime.GOOS} +} + func (self *Cpu) Get() error { idle, kernel, user, err := windows.GetSystemTimes() if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/endian.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/endian.go new file mode 100644 index 00000000..d9f3e418 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/endian.go @@ -0,0 +1,16 @@ +package sys + +import ( + "encoding/binary" + "unsafe" +) + +func GetEndian() binary.ByteOrder { + var i int32 = 0x1 + v := (*[4]byte)(unsafe.Pointer(&i)) + if v[0] == 0 { + return binary.BigEndian + } else { + return binary.LittleEndian + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/inetdiag.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/inetdiag.go index a2851d23..cca3f2cf 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/inetdiag.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/inetdiag.go @@ -13,6 +13,7 @@ import ( "syscall" "unsafe" + "github.com/elastic/gosigar/sys" "github.com/pkg/errors" ) @@ -114,6 +115,10 @@ const ( INET_DIAG_MARK ) +var ( + byteOrder = sys.GetEndian() +) + // NetlinkInetDiag sends the given netlink request parses the responses with the // assumption that they are inet_diag_msgs. 
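The new sys/endian.go detects the host's native byte order once, so the netlink code that follows can stop hard-coding binary.LittleEndian, which only happens to be correct on little-endian CPUs. A short sketch of how the helper behaves, assuming the upstream import path github.com/elastic/gosigar/sys:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/elastic/gosigar/sys"
)

func main() {
	order := sys.GetEndian() // a binary.ByteOrder matching the host CPU

	// Round-trip a value through the detected order, just as the netlink
	// serialization below does instead of assuming little-endian.
	buf := make([]byte, 4)
	order.PutUint32(buf, 0xCAFEBABE)
	fmt.Printf("host order=%v bytes=% x decoded=%#x\n", order, buf, order.Uint32(buf))

	if order == binary.LittleEndian {
		fmt.Println("this machine is little-endian")
	}
}
```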
This will allocate a temporary // buffer for reading from the socket whose size will be the length of a page @@ -194,11 +199,11 @@ done: func serialize(msg syscall.NetlinkMessage) []byte { msg.Header.Len = uint32(syscall.SizeofNlMsghdr + len(msg.Data)) b := make([]byte, msg.Header.Len) - binary.LittleEndian.PutUint32(b[0:4], msg.Header.Len) - binary.LittleEndian.PutUint16(b[4:6], msg.Header.Type) - binary.LittleEndian.PutUint16(b[6:8], msg.Header.Flags) - binary.LittleEndian.PutUint32(b[8:12], msg.Header.Seq) - binary.LittleEndian.PutUint32(b[12:16], msg.Header.Pid) + byteOrder.PutUint32(b[0:4], msg.Header.Len) + byteOrder.PutUint16(b[4:6], msg.Header.Type) + byteOrder.PutUint16(b[6:8], msg.Header.Flags) + byteOrder.PutUint32(b[8:12], msg.Header.Seq) + byteOrder.PutUint32(b[12:16], msg.Header.Pid) copy(b[16:], msg.Data) return b } @@ -223,7 +228,7 @@ type InetDiagReq struct { func (r InetDiagReq) toWireFormat() []byte { buf := bytes.NewBuffer(make([]byte, sizeofInetDiagReq)) buf.Reset() - if err := binary.Write(buf, binary.LittleEndian, r); err != nil { + if err := binary.Write(buf, byteOrder, r); err != nil { // This never returns an error. panic(err) } @@ -264,7 +269,7 @@ type InetDiagReqV2 struct { func (r InetDiagReqV2) toWireFormat() []byte { buf := bytes.NewBuffer(make([]byte, sizeofInetDiagReqV2)) buf.Reset() - if err := binary.Write(buf, binary.LittleEndian, r); err != nil { + if err := binary.Write(buf, byteOrder, r); err != nil { // This never returns an error. panic(err) } @@ -315,7 +320,7 @@ type InetDiagMsg struct { func ParseInetDiagMsg(b []byte) (*InetDiagMsg, error) { r := bytes.NewReader(b) inetDiagMsg := &InetDiagMsg{} - err := binary.Read(r, binary.LittleEndian, inetDiagMsg) + err := binary.Read(r, byteOrder, inetDiagMsg) if err != nil { return nil, errors.Wrap(err, "failed to unmarshal inet_diag_msg") } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/netlink.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/netlink.go index f8747781..9db84299 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/netlink.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/sys/linux/netlink.go @@ -3,8 +3,9 @@ package linux import ( - "encoding/binary" "errors" + + "github.com/elastic/gosigar/sys" ) // Netlink Error Code Handling @@ -14,7 +15,7 @@ import ( // describing the problem will be returned. func ParseNetlinkError(netlinkData []byte) error { if len(netlinkData) >= 4 { - errno := -binary.LittleEndian.Uint32(netlinkData[:4]) + errno := -sys.GetEndian().Uint32(netlinkData[:4]) return NetlinkErrno(errno) } return errors.New("received netlink error (data too short to read errno)") diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/AUTHORS.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/AUTHORS.md deleted file mode 100644 index 0c802dd8..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/AUTHORS.md +++ /dev/null @@ -1,20 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. 
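The serialize, toWireFormat, and ParseInetDiagMsg changes above matter because the kernel reads and writes netlink structures in host byte order; the previous hard-coded little-endian encoding silently produced wrong headers on big-endian machines. A round-trip sketch of the error-parsing path on a Linux host, assuming the upstream import paths github.com/elastic/gosigar/sys and github.com/elastic/gosigar/sys/linux:

```go
package main

import (
	"fmt"

	"github.com/elastic/gosigar/sys"
	"github.com/elastic/gosigar/sys/linux"
)

func main() {
	// Netlink error messages carry a negative errno in the host's native
	// byte order. Encode -EPERM (errno 1) the way the kernel would.
	var kernelErrno int32 = -1
	payload := make([]byte, 4)
	sys.GetEndian().PutUint32(payload, uint32(kernelErrno))

	// ParseNetlinkError decodes it with the same detected byte order and
	// returns it as an error value (a linux.NetlinkErrno).
	err := linux.ParseNetlinkError(payload)
	fmt.Printf("%T: %v\n", err, err)
}
```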
- -Maintainers of this repository: - -* Tobias Schmidt - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Armen Baghumian -* Bjoern Rabenstein -* David Cournapeau -* Ji-Hoon, Seol -* Jonas Große Sundrup -* Julius Volz -* Matthias Rampke -* Nicky Gerritsen -* Rémi Audebert -* Tobias Schmidt diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/Makefile deleted file mode 100644 index c264a49d..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ci: - ! gofmt -l *.go | read nothing - go vet - go test -v ./... - go get github.com/golang/lint/golint - golint *.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/fs.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/fs.go deleted file mode 100644 index 49aaab05..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/fs.go +++ /dev/null @@ -1,33 +0,0 @@ -package procfs - -import ( - "fmt" - "os" - "path" -) - -// FS represents the pseudo-filesystem proc, which provides an interface to -// kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/stat.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/stat.go deleted file mode 100644 index 1ca217e8..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/stat.go +++ /dev/null @@ -1,56 +0,0 @@ -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime int64 -} - -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - -// NewStat returns an information about current kernel/system statistics. 
-func (fs FS) NewStat() (Stat, error) { - f, err := os.Open(fs.Path("stat")) - if err != nil { - return Stat{}, err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - line := s.Text() - if !strings.HasPrefix(line, "btime") { - continue - } - fields := strings.Fields(line) - if len(fields) != 2 { - return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) - } - i, err := strconv.ParseInt(fields[1], 10, 32) - if err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) - } - return Stat{BootTime: i}, nil - } - if err := s.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) - } - - return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/Makefile index cdade051..e259784c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/Makefile +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/Makefile @@ -1,3 +1,8 @@ +KUBE_VERSION=1.9.1 + +build: + go build -v ./... + test: go test -v ./... @@ -5,3 +10,43 @@ test-examples: @for example in $(shell find examples/ -name '*.go'); do \ go build -v $$example || exit 1; \ done + +.PHONY: generate +generate: _output/kubernetes _output/bin/protoc _output/bin/gomvpkg _output/bin/protoc-gen-gofast _output/src/github.com/golang/protobuf + ./scripts/generate.sh + go run scripts/register.go + cp scripts/time.go.partial apis/meta/v1/time.go + +.PHONY: verify-generate +verify-generate: generate + ./scripts/git-diff.sh + +_output/bin/protoc-gen-gofast: + ./scripts/go-install.sh \ + https://github.com/gogo/protobuf \ + github.com/gogo/protobuf \ + github.com/gogo/protobuf/protoc-gen-gofast \ + tags/v0.5 + +_output/bin/gomvpkg: + ./scripts/go-install.sh \ + https://github.com/golang/tools \ + golang.org/x/tools \ + golang.org/x/tools/cmd/gomvpkg \ + fbec762f837dc349b73d1eaa820552e2ad177942 + +_output/src/github.com/golang/protobuf: + git clone https://github.com/golang/protobuf _output/src/github.com/golang/protobuf + +_output/bin/protoc: + ./scripts/get-protoc.sh + +_output/kubernetes: + mkdir -p _output + curl -o _output/kubernetes.zip -L https://github.com/kubernetes/kubernetes/archive/v$(KUBE_VERSION).zip + unzip _output/kubernetes.zip -d _output > /dev/null + mv _output/kubernetes-$(KUBE_VERSION) _output/kubernetes + +.PHONY: clean +clean: + rm -rf _output diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/README.md b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/README.md index 8cbdaa37..437dc5b4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/README.md @@ -13,6 +13,7 @@ import ( "log" "github.com/ericchiang/k8s" + corev1 "github.com/ericchiang/k8s/apis/core/v1" ) func main() { @@ -21,8 +22,8 @@ func main() { log.Fatal(err) } - nodes, err := client.CoreV1().ListNodes(context.Background()) - if err != nil { + var nodes corev1.NodeList + if err := client.List(context.Background(), "", &nodes); err != nil { log.Fatal(err) } for _, node := range nodes.Items { @@ -33,9 +34,9 @@ func main() { ## Should I use this or client-go? -client-go is a framework for building production ready controllers, components that regularly watch API resources and push the system towards a desired state. 
If you're writing a program that watches several resources in a loop for long durations, client-go's informers framework is a battle tested solution which will scale with the size of the cluster. +client-go is a framework for building production ready controllers, components that regularly watch API resources and push the system towards a desired state. If you're writing a program that watches several resources in a loop for long durations, client-go's informers framework is a battle tested solution which will scale with the size of the cluster. -This client should be used by programs that just need to talk to the Kubernetes API without prescriptive solutions for caching, reconciliation on failures, or work queues. This often includes components are relatively Kubernetes agnostic, but use the Kubernetes API for small tasks when running in Kubernetes. For example, performing leader election or persisting small amounts of state in annotations or configmaps. +This client should be used by programs that just need to talk to the Kubernetes API without prescriptive solutions for caching, reconciliation on failures, or work queues. This often includes components are relatively Kubernetes agnostic, but use the Kubernetes API for small tasks when running in Kubernetes. For example, performing leader election or persisting small amounts of state in annotations or configmaps. TL;DR - Use client-go if you're writing a controller. @@ -46,78 +47,173 @@ TL;DR - Use client-go if you're writing a controller. * [github.com/golang/protobuf/proto][go-proto] (protobuf serialization) * [golang.org/x/net/http2][go-http2] (HTTP/2 support) -## Versioned supported +## Usage -This client supports every API group version present since 1.3. +### Create, update, delete -## Usage +The type of the object passed to `Create`, `Update`, and `Delete` determine the resource being acted on. + +```go +configMap := &corev1.ConfigMap{ + Metadata: &metav1.ObjectMeta{ + Name: k8s.String("my-configmap"), + Namespace: k8s.String("my-namespace"), + }, + Data: map[string]string{"hello": "world"}, +} + +if err := client.Create(ctx, configMap); err != nil { + // handle error +} + +configMap.Data["hello"] = "kubernetes" + +if err := client.Update(ctx, configMap); err != nil { + // handle error +} + +if err := client.Delete(ctx, configMap); err != nil { + // handle error +} +``` + +### Get, list, watch + +Getting a resource requires providing a namespace (for namespaced objects) and a name. -### Namespace +```go +// Get the "cluster-info" configmap from the "kube-public" namespace +var configMap corev1.ConfigMap +err := client.Get(ctx, "kube-public", "cluster-info", &configMap) +``` -When performing a list or watch operation, the namespace to list or watch in is provided as an argument. +When performing a list operation, the namespace to list or watch is also required. ```go -pods, err := core.ListPods(ctx, "custom-namespace") // Pods from the "custom-namespace" +// Pods from the "custom-namespace" +var pods corev1.PodList +err := client.List(ctx, "custom-namespace", &pods) ``` A special value `AllNamespaces` indicates that the list or watch should be performed on all cluster resources. ```go -pods, err := core.ListPods(ctx, k8s.AllNamespaces) // Pods in all namespaces. +// Pods in all namespaces +var pods corev1.PodList +err := client.List(ctx, k8s.AllNamespaces, &pods) ``` -Both in-cluster and out-of-cluster clients are initialized with a primary namespace. This is the recommended value to use when listing or watching. 
+Watches require a example type to determine what resource they're watching. `Watch` returns an type which can be used to receive a stream of events. These events include resources of the same kind and the kind of the event (added, modified, deleted). ```go -client, err := k8s.NewInClusterClient() +// Watch configmaps in the "kube-system" namespace +var configMap corev1.ConfigMap +watcher, err := client.Watch(ctx, "kube-system", &configMap) if err != nil { // handle error } +defer watcher.Close() -// List pods in the namespace the client is running in. -pods, err := client.CoreV1().ListPods(ctx, client.Namespace) +for { + cm := new(corev1.ConfigMap) + eventType, err := watcher.Next(cm) + if err != nil { + // watcher encountered and error, exit or create a new watcher + } + fmt.Println(eventType, *cm.Metadata.Name) +} ``` -### Label selectors - -Label selectors can be provided to any list operation. +Both in-cluster and out-of-cluster clients are initialized with a primary namespace. This is the recommended value to use when listing or watching. ```go -l := new(k8s.LabelSelector) -l.Eq("tier", "production") -l.In("app", "database", "frontend") +client, err := k8s.NewInClusterClient() +if err != nil { + // handle error +} -pods, err := client.CoreV1().ListPods(ctx, client.Namespace, l.Selector()) +// List pods in the namespace the client is running in. +var pods corev1.PodList +err := client.List(ctx, client.Namespace, &pods) ``` -### Working with resources +### Custom resources -Use the generated API types directly to create and modify resources. +Client operations support user defined resources, such as resources provided by [CustomResourceDefinitions][crds] and [aggregated API servers][custom-api-servers]. To use a custom resource, define an equivalent Go struct then register it with the `k8s` package. By default the client will use JSON serialization when encoding and decoding custom resources. ```go import ( - "context" - "github.com/ericchiang/k8s" - "github.com/ericchiang/k8s/api/v1" metav1 "github.com/ericchiang/k8s/apis/meta/v1" ) -func createConfigMap(client *k8s.Client, name string, values map[string]string) error { - cm := &v1.ConfigMap{ +type MyResource struct { + Metadata *metav1.ObjectMeta `json:"metadata"` + Foo string `json:"foo"` + Bar int `json:"bar"` +} + +// Required for MyResource to implement k8s.Resource +func (m *MyResource) GetMetadata() *metav1.ObjectMeta { + return m.Metadata +} + +type MyResourceList struct { + Metadata *metav1.ListMeta `json:"metadata"` + Items []MyResource `json:"items"` +} + +// Require for MyResourceList to implement k8s.ResourceList +func (m *MyResourceList) GetMetadata() *metav1.ListMeta { + return m.Metadata +} + +func init() { + // Register resources with the k8s package. + k8s.Register("resource.example.com", "v1", "myresources", true, &MyResource{}) + k8s.RegisterList("resource.example.com", "v1", "myresources", true, &MyResourceList{}) +} +``` + +Once registered, the library can use the custom resources like any other. 
+ +``` +func do(ctx context.Context, client *k8s.Client, namespace string) error { + r := &MyResource{ Metadata: &metav1.ObjectMeta{ - Name: &name, - Namespace: &client.Namespace, + Name: k8s.String("my-custom-resource"), + Namespace: &namespace, }, - Data: values, + Foo: "hello, world!", + Bar: 42, + } + if err := client.Create(ctx, r); err != nil { + return fmt.Errorf("create: %v", err) + } + r.Bar = -8 + if err := client.Update(ctx, r); err != nil { + return fmt.Errorf("update: %v", err) } - // Will return the created configmap as well. - _, err := client.CoreV1().CreateConfigMap(context.TODO(), cm) - return err + if err := client.Delete(ctx, r); err != nil { + return fmt.Errorf("delete: %v", err) + } + return nil } ``` -API structs use pointers to `int`, `bool`, and `string` types to differentiate between the zero value and an unsupplied one. This package provides [convenience methods][string] for creating pointers to literals of basic types. +If the custom type implements [`proto.Message`][proto-msg], the client will prefer protobuf when encoding and decoding the type. + +### Label selectors + +Label selectors can be provided to any list operation. + +```go +l := new(k8s.LabelSelector) +l.Eq("tier", "production") +l.In("app", "database", "frontend") + +pods, err := client.CoreV1().ListPods(ctx, client.Namespace, l.Selector()) +``` ### Creating out-of-cluster clients @@ -166,7 +262,7 @@ func createConfigMap(client *k8s.Client, name string, values map[string]string) Data: values, } - _, err := client.CoreV1().CreateConfigMap(context.TODO(), cm) + err := client.Create(context.TODO(), cm) // If an HTTP error was returned by the API server, it will be of type // *k8s.APIError. This can be used to inspect the status code. @@ -188,3 +284,6 @@ func createConfigMap(client *k8s.Client, name string, values map[string]string) [k8s-error]: https://godoc.org/github.com/ericchiang/k8s#APIError [config]: https://godoc.org/github.com/ericchiang/k8s#Config [string]: https://godoc.org/github.com/ericchiang/k8s#String +[crds]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ +[custom-api-servers]: https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/ +[proto-msg]: https://godoc.org/github.com/golang/protobuf/proto#Message diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/unversioned/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/unversioned/generated.pb.go deleted file mode 100644 index 098178a5..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/unversioned/generated.pb.go +++ /dev/null @@ -1,5614 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/unversioned/generated.proto -// DO NOT EDIT! - -/* - Package unversioned is a generated protocol buffer package. 
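The README's error-handling note above says API-server failures surface as *k8s.APIError so callers can inspect the HTTP status code. A small create-or-update helper built on that pattern, assuming APIError exposes the status via a Code field (the field itself is outside the hunk shown above) and reusing the corev1/metav1 imports from the earlier examples:

```go
import (
	"context"
	"fmt"
	"net/http"

	"github.com/ericchiang/k8s"
	corev1 "github.com/ericchiang/k8s/apis/core/v1"
	metav1 "github.com/ericchiang/k8s/apis/meta/v1"
)

// createOrUpdateConfigMap creates the configmap and falls back to an update
// when the API server reports 409 Conflict (the object already exists).
func createOrUpdateConfigMap(ctx context.Context, client *k8s.Client, namespace, name string, data map[string]string) error {
	cm := &corev1.ConfigMap{
		Metadata: &metav1.ObjectMeta{
			Name:      k8s.String(name),
			Namespace: k8s.String(namespace),
		},
		Data: data,
	}

	err := client.Create(ctx, cm)
	if apiErr, ok := err.(*k8s.APIError); ok && apiErr.Code == http.StatusConflict {
		return client.Update(ctx, cm)
	}
	if err != nil {
		return fmt.Errorf("create configmap: %v", err)
	}
	return nil
}
```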
- - It is generated from these files: - k8s.io/kubernetes/pkg/api/unversioned/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - Duration - ExportOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - LabelSelector - LabelSelectorRequirement - ListMeta - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta -*/ -package unversioned - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/ericchiang/k8s/runtime" -import _ "github.com/ericchiang/k8s/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// APIGroup contains the name, the supported versions, and the preferred version -// of a group. -type APIGroup struct { - // name is the name of the group. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // versions are the versions supported in this group. - Versions []*GroupVersionForDiscovery `protobuf:"bytes,2,rep,name=versions" json:"versions,omitempty"` - // preferredVersion is the version preferred by the API server, which - // probably is the storage version. - // +optional - PreferredVersion *GroupVersionForDiscovery `protobuf:"bytes,3,opt,name=preferredVersion" json:"preferredVersion,omitempty"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. 
- ServerAddressByClientCIDRs []*ServerAddressByClientCIDR `protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs" json:"serverAddressByClientCIDRs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (m *APIGroup) String() string { return proto.CompactTextString(m) } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *APIGroup) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *APIGroup) GetVersions() []*GroupVersionForDiscovery { - if m != nil { - return m.Versions - } - return nil -} - -func (m *APIGroup) GetPreferredVersion() *GroupVersionForDiscovery { - if m != nil { - return m.PreferredVersion - } - return nil -} - -func (m *APIGroup) GetServerAddressByClientCIDRs() []*ServerAddressByClientCIDR { - if m != nil { - return m.ServerAddressByClientCIDRs - } - return nil -} - -// APIGroupList is a list of APIGroup, to allow clients to discover the API at -// /apis. -type APIGroupList struct { - // groups is a list of APIGroup. - Groups []*APIGroup `protobuf:"bytes,1,rep,name=groups" json:"groups,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (m *APIGroupList) String() string { return proto.CompactTextString(m) } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *APIGroupList) GetGroups() []*APIGroup { - if m != nil { - return m.Groups - } - return nil -} - -// APIResource specifies the name of a resource and whether it is namespaced. -type APIResource struct { - // name is the name of the resource. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // namespaced indicates if a resource is namespaced or not. - Namespaced *bool `protobuf:"varint,2,opt,name=namespaced" json:"namespaced,omitempty"` - // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIResource) Reset() { *m = APIResource{} } -func (m *APIResource) String() string { return proto.CompactTextString(m) } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *APIResource) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *APIResource) GetNamespaced() bool { - if m != nil && m.Namespaced != nil { - return *m.Namespaced - } - return false -} - -func (m *APIResource) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -type APIResourceList struct { - // groupVersion is the group and version this APIResourceList is for. - GroupVersion *string `protobuf:"bytes,1,opt,name=groupVersion" json:"groupVersion,omitempty"` - // resources contains the name of the resources and if they are namespaced. 
- Resources []*APIResource `protobuf:"bytes,2,rep,name=resources" json:"resources,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (m *APIResourceList) String() string { return proto.CompactTextString(m) } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *APIResourceList) GetGroupVersion() string { - if m != nil && m.GroupVersion != nil { - return *m.GroupVersion - } - return "" -} - -func (m *APIResourceList) GetResources() []*APIResource { - if m != nil { - return m.Resources - } - return nil -} - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type APIVersions struct { - // versions are the api versions that are available. - Versions []string `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []*ServerAddressByClientCIDR `protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs" json:"serverAddressByClientCIDRs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (m *APIVersions) String() string { return proto.CompactTextString(m) } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *APIVersions) GetVersions() []string { - if m != nil { - return m.Versions - } - return nil -} - -func (m *APIVersions) GetServerAddressByClientCIDRs() []*ServerAddressByClientCIDR { - if m != nil { - return m.ServerAddressByClientCIDRs - } - return nil -} - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -type Duration struct { - Duration *int64 `protobuf:"varint,1,opt,name=duration" json:"duration,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *Duration) GetDuration() int64 { - if m != nil && m.Duration != nil { - return *m.Duration - } - return 0 -} - -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - // Should this value be exported. Export strips fields that a user can not specify.` - Export *bool `protobuf:"varint,1,opt,name=export" json:"export,omitempty"` - // Should the export be exact. 
Exact export maintains cluster-specific fields like 'Namespace' - Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (m *ExportOptions) String() string { return proto.CompactTextString(m) } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *ExportOptions) GetExport() bool { - if m != nil && m.Export != nil { - return *m.Export - } - return false -} - -func (m *ExportOptions) GetExact() bool { - if m != nil && m.Exact != nil { - return *m.Exact - } - return false -} - -// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupKind struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Kind *string `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (m *GroupKind) String() string { return proto.CompactTextString(m) } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *GroupKind) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupKind) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupResource struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Resource *string `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (m *GroupResource) String() string { return proto.CompactTextString(m) } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *GroupResource) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupResource) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. 
-// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersion struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (m *GroupVersion) String() string { return proto.CompactTextString(m) } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *GroupVersion) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupVersion) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensibility. -type GroupVersionForDiscovery struct { - // groupVersion specifies the API group and version in the form "group/version" - GroupVersion *string `protobuf:"bytes,1,opt,name=groupVersion" json:"groupVersion,omitempty"` - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } -func (m *GroupVersionForDiscovery) String() string { return proto.CompactTextString(m) } -func (*GroupVersionForDiscovery) ProtoMessage() {} -func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} -} - -func (m *GroupVersionForDiscovery) GetGroupVersion() string { - if m != nil && m.GroupVersion != nil { - return *m.GroupVersion - } - return "" -} - -func (m *GroupVersionForDiscovery) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionKind struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (m *GroupVersionKind) String() string { return proto.CompactTextString(m) } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *GroupVersionKind) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupVersionKind) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GroupVersionKind) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. 
It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionResource struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Resource *string `protobuf:"bytes,3,opt,name=resource" json:"resource,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (m *GroupVersionResource) String() string { return proto.CompactTextString(m) } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *GroupVersionResource) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupVersionResource) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GroupVersionResource) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -type LabelSelector struct { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - // +optional - MatchLabels map[string]string `protobuf:"bytes,1,rep,name=matchLabels" json:"matchLabels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - // +optional - MatchExpressions []*LabelSelectorRequirement `protobuf:"bytes,2,rep,name=matchExpressions" json:"matchExpressions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (m *LabelSelector) String() string { return proto.CompactTextString(m) } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *LabelSelector) GetMatchLabels() map[string]string { - if m != nil { - return m.MatchLabels - } - return nil -} - -func (m *LabelSelector) GetMatchExpressions() []*LabelSelectorRequirement { - if m != nil { - return m.MatchExpressions - } - return nil -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -type LabelSelectorRequirement struct { - // key is the label key that the selector applies to. - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - Operator *string `protobuf:"bytes,2,opt,name=operator" json:"operator,omitempty"` - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. 
- // +optional - Values []string `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } -func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } -func (*LabelSelectorRequirement) ProtoMessage() {} -func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} -} - -func (m *LabelSelectorRequirement) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *LabelSelectorRequirement) GetOperator() string { - if m != nil && m.Operator != nil { - return *m.Operator - } - return "" -} - -func (m *LabelSelectorRequirement) GetValues() []string { - if m != nil { - return m.Values - } - return nil -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -type ListMeta struct { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // +optional - SelfLink *string `protobuf:"bytes,1,opt,name=selfLink" json:"selfLink,omitempty"` - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - ResourceVersion *string `protobuf:"bytes,2,opt,name=resourceVersion" json:"resourceVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (m *ListMeta) String() string { return proto.CompactTextString(m) } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *ListMeta) GetSelfLink() string { - if m != nil && m.SelfLink != nil { - return *m.SelfLink - } - return "" -} - -func (m *ListMeta) GetResourceVersion() string { - if m != nil && m.ResourceVersion != nil { - return *m.ResourceVersion - } - return "" -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -type RootPaths struct { - // paths are the paths available at root. - Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (m *RootPaths) String() string { return proto.CompactTextString(m) } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *RootPaths) GetPaths() []string { - if m != nil { - return m.Paths - } - return nil -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR *string `protobuf:"bytes,1,opt,name=clientCIDR" json:"clientCIDR,omitempty"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. 
- ServerAddress *string `protobuf:"bytes,2,opt,name=serverAddress" json:"serverAddress,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } -func (*ServerAddressByClientCIDR) ProtoMessage() {} -func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{17} -} - -func (m *ServerAddressByClientCIDR) GetClientCIDR() string { - if m != nil && m.ClientCIDR != nil { - return *m.ClientCIDR - } - return "" -} - -func (m *ServerAddressByClientCIDR) GetServerAddress() string { - if m != nil && m.ServerAddress != nil { - return *m.ServerAddress - } - return "" -} - -// Status is a return value for calls that don't return other objects. -type Status struct { - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - Metadata *ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Status of the operation. - // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` - // A human-readable description of the status of this operation. - // +optional - Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - // +optional - Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - // +optional - Details *StatusDetails `protobuf:"bytes,5,opt,name=details" json:"details,omitempty"` - // Suggested HTTP return code for this status, 0 if not set. - // +optional - Code *int32 `protobuf:"varint,6,opt,name=code" json:"code,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *Status) GetMetadata() *ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *Status) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - return "" -} - -func (m *Status) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *Status) GetReason() string { - if m != nil && m.Reason != nil { - return *m.Reason - } - return "" -} - -func (m *Status) GetDetails() *StatusDetails { - if m != nil { - return m.Details - } - return nil -} - -func (m *Status) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. -type StatusCause struct { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. 
- // +optional - Reason *string `protobuf:"bytes,1,opt,name=reason" json:"reason,omitempty"` - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - // +optional - Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. - // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - // +optional - Field *string `protobuf:"bytes,3,opt,name=field" json:"field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (m *StatusCause) String() string { return proto.CompactTextString(m) } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *StatusCause) GetReason() string { - if m != nil && m.Reason != nil { - return *m.Reason - } - return "" -} - -func (m *StatusCause) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *StatusCause) GetField() string { - if m != nil && m.Field != nil { - return *m.Field - } - return "" -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -type StatusDetails struct { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - // +optional - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The group attribute of the resource associated with the status StatusReason. - // +optional - Group *string `protobuf:"bytes,2,opt,name=group" json:"group,omitempty"` - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - // The Causes array includes more details associated with the StatusReason - // failure. Not all StatusReasons may provide detailed causes. - // +optional - Causes []*StatusCause `protobuf:"bytes,4,rep,name=causes" json:"causes,omitempty"` - // If specified, the time in seconds before the operation should be retried. 
- // +optional - RetryAfterSeconds *int32 `protobuf:"varint,5,opt,name=retryAfterSeconds" json:"retryAfterSeconds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StatusDetails) Reset() { *m = StatusDetails{} } -func (m *StatusDetails) String() string { return proto.CompactTextString(m) } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } - -func (m *StatusDetails) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *StatusDetails) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *StatusDetails) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *StatusDetails) GetCauses() []*StatusCause { - if m != nil { - return m.Causes - } - return nil -} - -func (m *StatusDetails) GetRetryAfterSeconds() int32 { - if m != nil && m.RetryAfterSeconds != nil { - return *m.RetryAfterSeconds - } - return 0 -} - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -// +protobuf.options.(gogoproto.goproto_stringer)=false -type Time struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - Nanos *int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Time) Reset() { *m = Time{} } -func (m *Time) String() string { return proto.CompactTextString(m) } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } - -func (m *Time) GetSeconds() int64 { - if m != nil && m.Seconds != nil { - return *m.Seconds - } - return 0 -} - -func (m *Time) GetNanos() int32 { - if m != nil && m.Nanos != nil { - return *m.Nanos - } - return 0 -} - -// Timestamp is a struct that is equivalent to Time, but intended for -// protobuf marshalling/unmarshalling. It is generated into a serialization -// that matches Time. Do not use in Go structs. -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. 
- Nanos *int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *Timestamp) GetSeconds() int64 { - if m != nil && m.Seconds != nil { - return *m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil && m.Nanos != nil { - return *m.Nanos - } - return 0 -} - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -type TypeMeta struct { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources - // +optional - ApiVersion *string `protobuf:"bytes,2,opt,name=apiVersion" json:"apiVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (m *TypeMeta) String() string { return proto.CompactTextString(m) } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *TypeMeta) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *TypeMeta) GetApiVersion() string { - if m != nil && m.ApiVersion != nil { - return *m.ApiVersion - } - return "" -} - -func init() { - proto.RegisterType((*APIGroup)(nil), "github.com/ericchiang.k8s.api.unversioned.APIGroup") - proto.RegisterType((*APIGroupList)(nil), "github.com/ericchiang.k8s.api.unversioned.APIGroupList") - proto.RegisterType((*APIResource)(nil), "github.com/ericchiang.k8s.api.unversioned.APIResource") - proto.RegisterType((*APIResourceList)(nil), "github.com/ericchiang.k8s.api.unversioned.APIResourceList") - proto.RegisterType((*APIVersions)(nil), "github.com/ericchiang.k8s.api.unversioned.APIVersions") - proto.RegisterType((*Duration)(nil), "github.com/ericchiang.k8s.api.unversioned.Duration") - proto.RegisterType((*ExportOptions)(nil), "github.com/ericchiang.k8s.api.unversioned.ExportOptions") - proto.RegisterType((*GroupKind)(nil), "github.com/ericchiang.k8s.api.unversioned.GroupKind") - proto.RegisterType((*GroupResource)(nil), "github.com/ericchiang.k8s.api.unversioned.GroupResource") - proto.RegisterType((*GroupVersion)(nil), "github.com/ericchiang.k8s.api.unversioned.GroupVersion") - proto.RegisterType((*GroupVersionForDiscovery)(nil), "github.com/ericchiang.k8s.api.unversioned.GroupVersionForDiscovery") - proto.RegisterType((*GroupVersionKind)(nil), "github.com/ericchiang.k8s.api.unversioned.GroupVersionKind") - proto.RegisterType((*GroupVersionResource)(nil), "github.com/ericchiang.k8s.api.unversioned.GroupVersionResource") - 
proto.RegisterType((*LabelSelector)(nil), "github.com/ericchiang.k8s.api.unversioned.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "github.com/ericchiang.k8s.api.unversioned.LabelSelectorRequirement") - proto.RegisterType((*ListMeta)(nil), "github.com/ericchiang.k8s.api.unversioned.ListMeta") - proto.RegisterType((*RootPaths)(nil), "github.com/ericchiang.k8s.api.unversioned.RootPaths") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "github.com/ericchiang.k8s.api.unversioned.ServerAddressByClientCIDR") - proto.RegisterType((*Status)(nil), "github.com/ericchiang.k8s.api.unversioned.Status") - proto.RegisterType((*StatusCause)(nil), "github.com/ericchiang.k8s.api.unversioned.StatusCause") - proto.RegisterType((*StatusDetails)(nil), "github.com/ericchiang.k8s.api.unversioned.StatusDetails") - proto.RegisterType((*Time)(nil), "github.com/ericchiang.k8s.api.unversioned.Time") - proto.RegisterType((*Timestamp)(nil), "github.com/ericchiang.k8s.api.unversioned.Timestamp") - proto.RegisterType((*TypeMeta)(nil), "github.com/ericchiang.k8s.api.unversioned.TypeMeta") -} -func (m *APIGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.PreferredVersion != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIGroupList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Groups) > 0 { - for _, msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if m.Namespaced != nil { - dAtA[i] = 0x10 - i++ - if *m.Namespaced { - dAtA[i] = 1 - } else { 
- dAtA[i] = 0 - } - i++ - } - if m.Kind != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIResourceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GroupVersion != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupVersion))) - i += copy(dAtA[i:], *m.GroupVersion) - } - if len(m.Resources) > 0 { - for _, msg := range m.Resources { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIVersions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Duration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Duration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Duration)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ExportOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Export != nil { - dAtA[i] = 0x8 - i++ - if *m.Export { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Exact != nil { - dAtA[i] = 0x10 - i++ - if *m.Exact { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupKind) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += 
copy(dAtA[i:], *m.Group) - } - if m.Kind != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Resource != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Resource))) - i += copy(dAtA[i:], *m.Resource) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GroupVersion != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupVersion))) - i += copy(dAtA[i:], *m.GroupVersion) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.Kind != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l 
int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.Resource != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Resource))) - i += copy(dAtA[i:], *m.Resource) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *LabelSelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k, _ := range m.MatchLabels { - dAtA[i] = 0xa - i++ - v := m.MatchLabels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Key != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Key))) - i += copy(dAtA[i:], *m.Key) - } - if m.Operator != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Operator))) - i += copy(dAtA[i:], *m.Operator) - } - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ListMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.SelfLink != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelfLink))) - i += copy(dAtA[i:], *m.SelfLink) - } - if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *RootPaths) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
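The single-byte constants written before every field in these MarshalTo methods (0xa, 0x12, 0x1a, 0x22, 0x28, 0x30) are protobuf field keys: (field number << 3) | wire type, where wire type 2 marks length-delimited data (strings, nested messages, and the map entries used for LabelSelector.MatchLabels) and wire type 0 marks varints. A small sketch of that arithmetic, independent of the generated code (key is an illustrative helper, not part of this package):

package main

import "fmt"

// key computes a protobuf field key: field number shifted left by three,
// OR'd with the wire type (0 = varint, 1 = 64-bit, 2 = length-delimited,
// 5 = 32-bit). A single byte is enough only for field numbers <= 15,
// which covers every field in this file.
func key(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	fmt.Printf("field 1, length-delimited: %#x\n", key(1, 2)) // 0xa
	fmt.Printf("field 2, length-delimited: %#x\n", key(2, 2)) // 0x12
	fmt.Printf("field 3, length-delimited: %#x\n", key(3, 2)) // 0x1a
	fmt.Printf("field 4, length-delimited: %#x\n", key(4, 2)) // 0x22
	fmt.Printf("field 5, varint:           %#x\n", key(5, 0)) // 0x28
	fmt.Printf("field 6, varint:           %#x\n", key(6, 0)) // 0x30
}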
-func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Paths) > 0 { - for _, s := range m.Paths { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ClientCIDR != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClientCIDR))) - i += copy(dAtA[i:], *m.ClientCIDR) - } - if m.ServerAddress != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ServerAddress))) - i += copy(dAtA[i:], *m.ServerAddress) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Status) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Status) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n2, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Status != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) - i += copy(dAtA[i:], *m.Status) - } - if m.Message != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) - i += copy(dAtA[i:], *m.Message) - } - if m.Reason != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i += copy(dAtA[i:], *m.Reason) - } - if m.Details != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n3, err := m.Details.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.Code != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Code)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *StatusCause) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Reason != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i += copy(dAtA[i:], *m.Reason) - } - if m.Message != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) - i += copy(dAtA[i:], *m.Message) - } - if m.Field != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Field))) - i += copy(dAtA[i:], *m.Field) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *StatusDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if m.Group != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Kind != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if len(m.Causes) > 0 { - for _, msg := range m.Causes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.RetryAfterSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RetryAfterSeconds)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Time) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Time) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Seconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Seconds)) - } - if m.Nanos != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Nanos)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Timestamp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Seconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Seconds)) - } - if m.Nanos != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Nanos)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *TypeMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Kind != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.ApiVersion != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ApiVersion))) - i += copy(dAtA[i:], *m.ApiVersion) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for 
v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *APIGroup) Size() (n int) { - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.PreferredVersion != nil { - l = m.PreferredVersion.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *APIGroupList) Size() (n int) { - var l int - _ = l - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *APIResource) Size() (n int) { - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Namespaced != nil { - n += 2 - } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *APIResourceList) Size() (n int) { - var l int - _ = l - if m.GroupVersion != nil { - l = len(*m.GroupVersion) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Resources) > 0 { - for _, e := range m.Resources { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *APIVersions) Size() (n int) { - var l int - _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Duration) Size() (n int) { - var l int - _ = l - if m.Duration != nil { - n += 1 + sovGenerated(uint64(*m.Duration)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExportOptions) Size() (n int) { - var l int - _ = l - if m.Export != nil { - n += 2 - } - if m.Exact != nil { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GroupKind) Size() (n int) { - var l int - _ = l - if m.Group != nil { - l = len(*m.Group) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GroupResource) Size() (n int) { - var l int - _ = l - if m.Group != nil { - l = len(*m.Group) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = len(*m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GroupVersion) Size() (n int) { - var l int - _ = l - if m.Group != nil { - l = len(*m.Group) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Version != nil { - l = len(*m.Version) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m 
*GroupVersionForDiscovery) Size() (n int) { - var l int - _ = l - if m.GroupVersion != nil { - l = len(*m.GroupVersion) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Version != nil { - l = len(*m.Version) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GroupVersionKind) Size() (n int) { - var l int - _ = l - if m.Group != nil { - l = len(*m.Group) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Version != nil { - l = len(*m.Version) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GroupVersionResource) Size() (n int) { - var l int - _ = l - if m.Group != nil { - l = len(*m.Group) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Version != nil { - l = len(*m.Version) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = len(*m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LabelSelector) Size() (n int) { - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k, v := range m.MatchLabels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LabelSelectorRequirement) Size() (n int) { - var l int - _ = l - if m.Key != nil { - l = len(*m.Key) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Operator != nil { - l = len(*m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListMeta) Size() (n int) { - var l int - _ = l - if m.SelfLink != nil { - l = len(*m.SelfLink) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceVersion != nil { - l = len(*m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RootPaths) Size() (n int) { - var l int - _ = l - if len(m.Paths) > 0 { - for _, s := range m.Paths { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ServerAddressByClientCIDR) Size() (n int) { - var l int - _ = l - if m.ClientCIDR != nil { - l = len(*m.ClientCIDR) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ServerAddress != nil { - l = len(*m.ServerAddress) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Status) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Status != nil { - l = len(*m.Status) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Message != nil { - l = len(*m.Message) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Reason != nil { - l = len(*m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Details != nil { - l = m.Details.Size() - n += 
1 + l + sovGenerated(uint64(l)) - } - if m.Code != nil { - n += 1 + sovGenerated(uint64(*m.Code)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusCause) Size() (n int) { - var l int - _ = l - if m.Reason != nil { - l = len(*m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Message != nil { - l = len(*m.Message) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Field != nil { - l = len(*m.Field) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusDetails) Size() (n int) { - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Group != nil { - l = len(*m.Group) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Causes) > 0 { - for _, e := range m.Causes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.RetryAfterSeconds != nil { - n += 1 + sovGenerated(uint64(*m.RetryAfterSeconds)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Time) Size() (n int) { - var l int - _ = l - if m.Seconds != nil { - n += 1 + sovGenerated(uint64(*m.Seconds)) - } - if m.Nanos != nil { - n += 1 + sovGenerated(uint64(*m.Nanos)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Timestamp) Size() (n int) { - var l int - _ = l - if m.Seconds != nil { - n += 1 + sovGenerated(uint64(*m.Seconds)) - } - if m.Nanos != nil { - n += 1 + sovGenerated(uint64(*m.Nanos)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TypeMeta) Size() (n int) { - var l int - _ = l - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ApiVersion != nil { - l = len(*m.ApiVersion) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *APIGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := 
string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, &GroupVersionForDiscovery{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PreferredVersion == nil { - m.PreferredVersion = &GroupVersionForDiscovery{} - } - if err := m.PreferredVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, &ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
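The Unmarshal loop above reads one varint wire key per field, splits it into a field number (wire >> 3) and a wire type (wire & 0x7), and for wire type 2 reads a varint byte length followed by that many payload bytes. A minimal hand-rolled decode of a single string field looks like the sketch below; decodeStringField is an illustrative helper that assumes well-formed input whose key and length each fit in one varint byte.

package main

import (
	"errors"
	"fmt"
)

// decodeStringField decodes one length-delimited field from buf, assuming the
// key and the length each fit in a single varint byte (values < 128), which is
// true for the small field numbers and short strings used in this example.
func decodeStringField(buf []byte) (fieldNum int, value string, err error) {
	if len(buf) < 2 {
		return 0, "", errors.New("buffer too short")
	}
	wire := buf[0]
	fieldNum = int(wire >> 3)
	wireType := int(wire & 0x7)
	if wireType != 2 {
		return 0, "", fmt.Errorf("unexpected wire type %d", wireType)
	}
	length := int(buf[1])
	if len(buf) < 2+length {
		return 0, "", errors.New("truncated payload")
	}
	return fieldNum, string(buf[2 : 2+length]), nil
}

func main() {
	// 0x0a = field 1, wire type 2; 0x05 = payload length; then "hello".
	buf := []byte{0x0a, 0x05, 'h', 'e', 'l', 'l', 'o'}
	num, val, err := decodeStringField(buf)
	fmt.Println(num, val, err) // 1 hello <nil>
}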
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIGroupList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, &APIGroup{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := 
bool(v != 0) - m.Namespaced = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIResourceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.GroupVersion = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, &APIResource{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *APIVersions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, &ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
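encodeVarintGenerated and sovGenerated, defined a little earlier in this file, are a plain base-128 varint writer and its size calculator: seven payload bits per byte, least-significant group first, continuation bit set on every byte except the last. The sketch below mirrors that loop and cross-checks it against the standard library's binary.PutUvarint, which uses the same encoding; putUvarint and uvarintSize are illustrative names, not part of this package.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// putUvarint mirrors encodeVarintGenerated: emit 7 bits at a time, low
// groups first, setting the continuation bit on all but the final byte.
func putUvarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

// uvarintSize mirrors sovGenerated: how many bytes the varint occupies.
func uvarintSize(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	const v = uint64(300) // encodes as 0xac 0x02

	buf := make([]byte, uvarintSize(v))
	putUvarint(buf, 0, v)

	std := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(std, v)

	fmt.Printf("hand-rolled: %x, stdlib: %x, same: %v\n",
		buf, std[:n], bytes.Equal(buf, std[:n]))
}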
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Duration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Duration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Duration = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Export = &b - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Exact = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupKind) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Resource = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
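sozGenerated, also defined earlier in this file, sizes a zigzag-encoded signed value: the mapping (x << 1) ^ (x >> 63) folds negative numbers into small unsigned ones so they still encode as short varints. None of the messages in this file appear to use it directly, but the transform and its inverse are worth noting. A small sketch of the standard zigzag mapping, with illustrative function names rather than anything from this package:

package main

import "fmt"

// zigzag maps signed integers onto unsigned ones so that values close to
// zero, positive or negative, encode as short varints:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
func zigzag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63))
}

// unzigzag is the inverse mapping.
func unzigzag(z uint64) int64 {
	return int64(z>>1) ^ -int64(z&1)
}

func main() {
	for _, x := range []int64{0, -1, 1, -2, 150, -150} {
		z := zigzag(x)
		fmt.Printf("%6d -> %3d -> %6d\n", x, z, unzigzag(z))
	}
}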
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersion) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.GroupVersion = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Resource = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.MatchLabels[mapkey] = mapvalue - } else { - var mapvalue string - m.MatchLabels[mapkey] = mapvalue - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchExpressions = append(m.MatchExpressions, &LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Key = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Operator = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SelfLink = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceVersion = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RootPaths) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RootPaths: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ClientCIDR = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift 
- if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ServerAddress = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Status = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Message = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Reason = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Details == nil { - m.Details = &StatusDetails{} - } - if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Code = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusCause) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Reason = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Message = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Field = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Causes = append(m.Causes, &StatusCause{}) - if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RetryAfterSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Time) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Time: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Time: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Seconds = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nanos = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Timestamp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Seconds = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nanos = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypeMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ApiVersion = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/api/unversioned/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 999 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0xe3, 0x44, - 0x14, 0xc6, 0x4e, 0xd3, 0x75, 0x4e, 0x1b, 0x6d, 0xb1, 0x2a, 0x64, 0x22, 0x11, 0x05, 0x0b, 0x50, - 0x2e, 0x20, 0x11, 0x15, 0x8b, 0x56, 0x20, 0x0a, 0xd9, 0xb6, 0xac, 0xca, 0x76, 0x21, 0x9a, 0x2e, - 0x05, 0xc1, 0x0d, 0x53, 0xfb, 0x34, 0x6b, 0x25, 0xb1, 0xcd, 0xcc, 0xb8, 0x6a, 0xee, 0x90, 0xb8, - 0xe0, 0x15, 0x78, 0x01, 0xee, 0x78, 0x0f, 0xb8, 0xe4, 0x11, 0x50, 0xb9, 0xe4, 0x25, 0xd0, 0x8c, - 0x67, 0xd2, 0x71, 0x9a, 0xec, 0x66, 0x11, 0xe2, 0xaa, 0xf3, 0x9d, 0xce, 0xf9, 0xce, 0x99, 0xef, - 0xfc, 0x38, 0x70, 0x6f, 0x7c, 0x9f, 0xf7, 0x92, 0xac, 0x3f, 0x2e, 0xce, 0x91, 0xa5, 0x28, 0x90, - 0xf7, 0xf3, 0xf1, 0xa8, 0x4f, 0xf3, 0xa4, 0x5f, 0xa4, 0x97, 0xc8, 0x78, 0x92, 0xa5, 0x18, 0xf7, - 0x47, 0x98, 0x22, 0xa3, 0x02, 0xe3, 0x5e, 0xce, 0x32, 0x91, 0xf9, 0x6f, 0x96, 0x6e, 0xbd, 0x1b, - 0xb7, 0x5e, 0x3e, 0x1e, 0xf5, 0x68, 0x9e, 0xf4, 0x2c, 0xb7, 0xd6, 0x3b, 0xcb, 0xd9, 0x59, 0x91, - 0x8a, 0x64, 0x8a, 0x8b, 0xac, 0xad, 0x77, 0x97, 0x5f, 0x2f, 0x44, 0x32, 0xe9, 0x27, 0xa9, 0xe0, - 0x82, 0x2d, 0xba, 0x84, 0x7f, 0xbb, 0xe0, 0x0d, 0x86, 0xc7, 0x0f, 0x59, 0x56, 0xe4, 0xbe, 0x0f, - 0x1b, 0x29, 0x9d, 0x62, 0xe0, 0x74, 0x9c, 0x6e, 0x83, 0xa8, 0xb3, 0xff, 0x2d, 0x78, 0x3a, 0x1f, - 0x1e, 0xb8, 
0x9d, 0x5a, 0x77, 0x6b, 0xef, 0xe3, 0xde, 0x5a, 0xc9, 0xf7, 0x14, 0xe7, 0x59, 0x09, - 0x3f, 0xcd, 0xd8, 0x61, 0xc2, 0xa3, 0xec, 0x12, 0xd9, 0x8c, 0xcc, 0x09, 0xfd, 0x31, 0xec, 0xe4, - 0x0c, 0x2f, 0x90, 0x31, 0x8c, 0xf5, 0xcd, 0xa0, 0xd6, 0x71, 0xfe, 0x8b, 0x20, 0xb7, 0x88, 0xfd, - 0x1f, 0x1c, 0x68, 0x71, 0x64, 0x97, 0xc8, 0x06, 0x71, 0xcc, 0x90, 0xf3, 0x07, 0xb3, 0x83, 0x49, - 0x82, 0xa9, 0x38, 0x38, 0x3e, 0x24, 0x3c, 0xd8, 0x50, 0x8f, 0xfb, 0x64, 0xcd, 0xb8, 0xa7, 0xab, - 0x88, 0xc8, 0x33, 0x62, 0x84, 0x5f, 0xc1, 0xb6, 0x11, 0xfb, 0x24, 0xe1, 0xc2, 0x7f, 0x08, 0x9b, - 0x23, 0x09, 0x78, 0xe0, 0xa8, 0xe8, 0xfd, 0x35, 0xa3, 0x1b, 0x12, 0xa2, 0xdd, 0xc3, 0x2f, 0x61, - 0x6b, 0x30, 0x3c, 0x26, 0xc8, 0xb3, 0x82, 0x45, 0xb8, 0xb4, 0x90, 0x6d, 0x00, 0xf9, 0x97, 0xe7, - 0x34, 0xc2, 0x38, 0x70, 0x3b, 0x4e, 0xd7, 0x23, 0x96, 0x45, 0xfa, 0x8c, 0x93, 0x34, 0x56, 0xfa, - 0x37, 0x88, 0x3a, 0x87, 0x3f, 0x39, 0x70, 0xd7, 0xe2, 0x55, 0x39, 0x87, 0xb0, 0x3d, 0xb2, 0x44, - 0xd7, 0x31, 0x2a, 0x36, 0x7f, 0x08, 0x0d, 0xa6, 0x7d, 0x4c, 0xd7, 0xec, 0xad, 0xff, 0x34, 0x13, - 0x8e, 0xdc, 0x90, 0x84, 0xbf, 0x3a, 0xea, 0x85, 0x67, 0xa6, 0x73, 0x5a, 0x56, 0x5b, 0x4a, 0xed, - 0x1a, 0x56, 0x57, 0x3d, 0xa7, 0xd0, 0xee, 0xff, 0x50, 0xe8, 0xb7, 0xc0, 0x3b, 0x2c, 0x18, 0x15, - 0x52, 0x8c, 0x16, 0x78, 0xb1, 0x3e, 0x2b, 0xb1, 0x6a, 0x64, 0x8e, 0xc3, 0x8f, 0xa0, 0x79, 0x74, - 0x95, 0x67, 0x4c, 0x7c, 0x91, 0x0b, 0x95, 0xfb, 0x2b, 0xb0, 0x89, 0xca, 0xa0, 0xae, 0x7a, 0x44, - 0x23, 0x7f, 0x17, 0xea, 0x78, 0x45, 0x23, 0xa1, 0x0b, 0x57, 0x82, 0xf0, 0x1e, 0x34, 0x54, 0x1f, - 0x3c, 0x4a, 0xd2, 0x58, 0x5e, 0x51, 0x45, 0xd0, 0x15, 0x29, 0xc1, 0xbc, 0xac, 0xae, 0x55, 0xd6, - 0x01, 0x34, 0xcb, 0xf6, 0x31, 0xfd, 0xb2, 0xdc, 0xb5, 0x05, 0x9e, 0x29, 0x80, 0x76, 0x9f, 0xe3, - 0x70, 0x1f, 0xb6, 0xed, 0xd1, 0x5b, 0xc1, 0x10, 0xc0, 0x1d, 0xad, 0xa4, 0x26, 0x30, 0x30, 0xfc, - 0x1a, 0x82, 0x55, 0xa3, 0xbb, 0x56, 0x87, 0xad, 0x66, 0x3e, 0x83, 0x1d, 0x9b, 0xf9, 0x19, 0xd2, - 0xac, 0xe4, 0x58, 0x3a, 0x0b, 0xe7, 0xb0, 0x6b, 0xf3, 0x3e, 0x47, 0xbb, 0xd5, 0xdc, 0xb6, 0xaa, - 0xb5, 0x05, 0x55, 0x7f, 0x71, 0xa1, 0x79, 0x42, 0xcf, 0x71, 0x72, 0x8a, 0x13, 0x8c, 0x44, 0xc6, - 0xfc, 0x11, 0x6c, 0x4d, 0xa9, 0x88, 0x9e, 0x2a, 0xab, 0x59, 0x13, 0x47, 0x6b, 0xf6, 0x6e, 0x85, - 0xaa, 0xf7, 0xf8, 0x86, 0xe7, 0x28, 0x15, 0x6c, 0x46, 0x6c, 0x66, 0xb9, 0x8a, 0x15, 0x3c, 0xba, - 0xca, 0x65, 0x3b, 0xff, 0x8b, 0x7d, 0x5f, 0x89, 0x46, 0xf0, 0xfb, 0x22, 0x61, 0x38, 0xc5, 0x54, - 0x90, 0x5b, 0xc4, 0xad, 0x7d, 0xd8, 0x59, 0xcc, 0xc6, 0xdf, 0x81, 0xda, 0x18, 0x67, 0x5a, 0x45, - 0x79, 0x94, 0xca, 0x5e, 0xd2, 0x49, 0x61, 0x9a, 0xaf, 0x04, 0x1f, 0xb8, 0xf7, 0x9d, 0xf0, 0x3b, - 0x08, 0x56, 0x45, 0x5b, 0xc2, 0xd3, 0x02, 0x2f, 0xcb, 0xe5, 0x57, 0x2f, 0x63, 0xa6, 0x8f, 0x0d, - 0x96, 0xf3, 0xa6, 0x68, 0x79, 0x50, 0x53, 0x5b, 0x44, 0xa3, 0x70, 0x08, 0x9e, 0xdc, 0x76, 0x8f, - 0x51, 0x50, 0xe9, 0xcf, 0x71, 0x72, 0x71, 0x92, 0xa4, 0x63, 0x4d, 0x3b, 0xc7, 0x7e, 0x17, 0xee, - 0x9a, 0xea, 0x9d, 0x55, 0xea, 0xbd, 0x68, 0x0e, 0x5f, 0x87, 0x06, 0xc9, 0x32, 0x31, 0xa4, 0xe2, - 0x29, 0x97, 0x4f, 0xcb, 0xe5, 0x41, 0xef, 0xae, 0x12, 0x84, 0x14, 0x5e, 0x5d, 0xb9, 0x6e, 0xe4, - 0xfe, 0x8e, 0xe6, 0x48, 0xe7, 0x61, 0x59, 0xfc, 0x37, 0xa0, 0x59, 0x59, 0x48, 0x3a, 0x8f, 0xaa, - 0x31, 0xfc, 0xd1, 0x85, 0xcd, 0x53, 0x41, 0x45, 0xc1, 0xfd, 0x47, 0xe0, 0x4d, 0x51, 0xd0, 0x98, - 0x0a, 0xaa, 0xe8, 0xd6, 0xff, 0xfc, 0x18, 0x65, 0xc8, 0x9c, 0x40, 0xea, 0xc8, 0x15, 0xad, 0x0e, - 0xab, 0x91, 0x9c, 0x83, 0x29, 0x72, 0x4e, 0x47, 0xa6, 0xd9, 0x0d, 0x94, 0x1e, 0x0c, 0x29, 0xcf, - 0xd2, 0x60, 0xa3, 0xf4, 0x28, 0x91, 
0xff, 0x39, 0xdc, 0x89, 0x51, 0xd0, 0x64, 0xc2, 0x83, 0xba, - 0xca, 0xea, 0xbd, 0x75, 0x37, 0xb5, 0x8a, 0x78, 0x58, 0xfa, 0x12, 0x43, 0x22, 0x67, 0x39, 0xca, - 0x62, 0x0c, 0x36, 0x3b, 0x4e, 0xb7, 0x4e, 0xd4, 0x59, 0x7e, 0x2e, 0xcb, 0xdb, 0x07, 0xb4, 0xe0, - 0x76, 0x2a, 0x4e, 0x25, 0x15, 0x2b, 0x79, 0xb7, 0x9a, 0xfc, 0x2e, 0xd4, 0x2f, 0x12, 0x9c, 0x98, - 0x0d, 0x51, 0x82, 0xf0, 0x37, 0x07, 0x9a, 0x95, 0x2c, 0x96, 0x7e, 0x88, 0xe7, 0x0b, 0xc3, 0x5d, - 0xb6, 0xa7, 0xad, 0x95, 0xe3, 0x7f, 0x06, 0x9b, 0x91, 0x4c, 0xd0, 0xfc, 0x38, 0xd9, 0x7b, 0x21, - 0x25, 0xd4, 0xdb, 0x88, 0x66, 0xf0, 0xdf, 0x86, 0x97, 0x19, 0x0a, 0x36, 0x1b, 0x5c, 0x08, 0x64, - 0xa7, 0x18, 0x65, 0x69, 0x5c, 0x0a, 0x5c, 0x27, 0xb7, 0xff, 0x11, 0xbe, 0x0f, 0x1b, 0x4f, 0x92, - 0x29, 0x4a, 0x05, 0xb8, 0xbe, 0x5b, 0x7e, 0xba, 0x0c, 0x94, 0xaf, 0x48, 0x69, 0x9a, 0x95, 0xf5, - 0xae, 0x93, 0x12, 0x84, 0x1f, 0x42, 0x43, 0xfa, 0x71, 0x41, 0xa7, 0xf9, 0x0b, 0x3b, 0xef, 0x83, - 0xf7, 0x64, 0x96, 0xa3, 0x9a, 0x39, 0x23, 0x87, 0x63, 0xc9, 0xd1, 0x06, 0xa0, 0x79, 0x52, 0x1d, - 0x33, 0xcb, 0xf2, 0xe0, 0xb5, 0xdf, 0xaf, 0xdb, 0xce, 0x1f, 0xd7, 0x6d, 0xe7, 0xcf, 0xeb, 0xb6, - 0xf3, 0xf3, 0x5f, 0xed, 0x97, 0xbe, 0xd9, 0xb2, 0x54, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, - 0xc1, 0x23, 0x54, 0xab, 0x0b, 0x00, 0x00, -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/unversioned/time.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/unversioned/time.go deleted file mode 100644 index ed7d7162..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/unversioned/time.go +++ /dev/null @@ -1,32 +0,0 @@ -package unversioned - -import ( - "encoding/json" - "time" -) - -// JSON marshaling logic for the Time type. Need to make -// third party resources JSON work. - -func (t Time) MarshalJSON() ([]byte, error) { - var seconds, nanos int64 - if t.Seconds != nil { - seconds = *t.Seconds - } - if t.Nanos != nil { - nanos = int64(*t.Nanos) - } - return json.Marshal(time.Unix(seconds, nanos)) -} - -func (t *Time) UnmarshalJSON(p []byte) error { - var t1 time.Time - if err := json.Unmarshal(p, &t1); err != nil { - return err - } - seconds := t1.Unix() - nanos := int32(t1.UnixNano()) - t.Seconds = &seconds - t.Nanos = &nanos - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apiextensions/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apiextensions/v1beta1/generated.pb.go new file mode 100644 index 00000000..72bd093f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apiextensions/v1beta1/generated.pb.go @@ -0,0 +1,5394 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto + + It has these top-level messages: + CustomResourceDefinition + CustomResourceDefinitionCondition + CustomResourceDefinitionList + CustomResourceDefinitionNames + CustomResourceDefinitionSpec + CustomResourceDefinitionStatus + CustomResourceValidation + ExternalDocumentation + JSON + JSONSchemaProps + JSONSchemaPropsOrArray + JSONSchemaPropsOrBool + JSONSchemaPropsOrStringArray +*/ +package v1beta1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import _ "github.com/ericchiang/k8s/runtime" +import _ "github.com/ericchiang/k8s/runtime/schema" +import _ "github.com/ericchiang/k8s/util/intstr" + +import encoding_binary "encoding/binary" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +type CustomResourceDefinition struct { + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Spec describes how the user wants the resources to appear + Spec *CustomResourceDefinitionSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` + // Status indicates the actual state of the CustomResourceDefinition + Status *CustomResourceDefinitionStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } +func (m *CustomResourceDefinition) String() string { return proto.CompactTextString(m) } +func (*CustomResourceDefinition) ProtoMessage() {} +func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *CustomResourceDefinition) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *CustomResourceDefinition) GetSpec() *CustomResourceDefinitionSpec { + if m != nil { + return m.Spec + } + return nil +} + +func (m *CustomResourceDefinition) GetStatus() *CustomResourceDefinitionStatus { + if m != nil { + return m.Status + } + return nil +} + +// CustomResourceDefinitionCondition contains details for the current condition of this pod. +type CustomResourceDefinitionCondition struct { + // Type is the type of the condition. + Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // Status is the status of the condition. + // Can be True, False, Unknown. + Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + // Unique, one-word, CamelCase reason for the condition's last transition. 
+ // +optional + Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` + // Human-readable message indicating details about last transition. + // +optional + Message *string `protobuf:"bytes,5,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } +func (m *CustomResourceDefinitionCondition) String() string { return proto.CompactTextString(m) } +func (*CustomResourceDefinitionCondition) ProtoMessage() {} +func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *CustomResourceDefinitionCondition) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *CustomResourceDefinitionCondition) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +func (m *CustomResourceDefinitionCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { + if m != nil { + return m.LastTransitionTime + } + return nil +} + +func (m *CustomResourceDefinitionCondition) GetReason() string { + if m != nil && m.Reason != nil { + return *m.Reason + } + return "" +} + +func (m *CustomResourceDefinitionCondition) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +type CustomResourceDefinitionList struct { + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Items individual CustomResourceDefinitions + Items []*CustomResourceDefinition `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } +func (m *CustomResourceDefinitionList) String() string { return proto.CompactTextString(m) } +func (*CustomResourceDefinitionList) ProtoMessage() {} +func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *CustomResourceDefinitionList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *CustomResourceDefinitionList) GetItems() []*CustomResourceDefinition { + if m != nil { + return m.Items + } + return nil +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +type CustomResourceDefinitionNames struct { + // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration + // too: plural.group and it must be all lowercase. + Plural *string `protobuf:"bytes,1,opt,name=plural" json:"plural,omitempty"` + // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + Singular *string `protobuf:"bytes,2,opt,name=singular" json:"singular,omitempty"` + // ShortNames are short names for the resource. It must be all lowercase. + ShortNames []string `protobuf:"bytes,3,rep,name=shortNames" json:"shortNames,omitempty"` + // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + Kind *string `protobuf:"bytes,4,opt,name=kind" json:"kind,omitempty"` + // ListKind is the serialized kind of the list for this resource. Defaults to List. 
+	ListKind         *string `protobuf:"bytes,5,opt,name=listKind" json:"listKind,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *CustomResourceDefinitionNames) Reset()         { *m = CustomResourceDefinitionNames{} }
+func (m *CustomResourceDefinitionNames) String() string { return proto.CompactTextString(m) }
+func (*CustomResourceDefinitionNames) ProtoMessage()    {}
+func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) {
+	return fileDescriptorGenerated, []int{3}
+}
+
+func (m *CustomResourceDefinitionNames) GetPlural() string {
+	if m != nil && m.Plural != nil {
+		return *m.Plural
+	}
+	return ""
+}
+
+func (m *CustomResourceDefinitionNames) GetSingular() string {
+	if m != nil && m.Singular != nil {
+		return *m.Singular
+	}
+	return ""
+}
+
+func (m *CustomResourceDefinitionNames) GetShortNames() []string {
+	if m != nil {
+		return m.ShortNames
+	}
+	return nil
+}
+
+func (m *CustomResourceDefinitionNames) GetKind() string {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return ""
+}
+
+func (m *CustomResourceDefinitionNames) GetListKind() string {
+	if m != nil && m.ListKind != nil {
+		return *m.ListKind
+	}
+	return ""
+}
+
+// CustomResourceDefinitionSpec describes how a user wants their resource to appear
+type CustomResourceDefinitionSpec struct {
+	// Group is the group this resource belongs in
+	Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"`
+	// Version is the version this resource belongs in
+	Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	// Names are the names used to describe this custom resource
+	Names *CustomResourceDefinitionNames `protobuf:"bytes,3,opt,name=names" json:"names,omitempty"`
+	// Scope indicates whether this resource is cluster or namespace scoped.  Default is namespaced
+	Scope *string `protobuf:"bytes,4,opt,name=scope" json:"scope,omitempty"`
+	// Validation describes the validation methods for CustomResources
+	// +optional
+	Validation       *CustomResourceValidation `protobuf:"bytes,5,opt,name=validation" json:"validation,omitempty"`
+	XXX_unrecognized []byte                    `json:"-"`
+}
+
+func (m *CustomResourceDefinitionSpec) Reset()         { *m = CustomResourceDefinitionSpec{} }
+func (m *CustomResourceDefinitionSpec) String() string { return proto.CompactTextString(m) }
+func (*CustomResourceDefinitionSpec) ProtoMessage()    {}
+func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptorGenerated, []int{4}
+}
+
+func (m *CustomResourceDefinitionSpec) GetGroup() string {
+	if m != nil && m.Group != nil {
+		return *m.Group
+	}
+	return ""
+}
+
+func (m *CustomResourceDefinitionSpec) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+func (m *CustomResourceDefinitionSpec) GetNames() *CustomResourceDefinitionNames {
+	if m != nil {
+		return m.Names
+	}
+	return nil
+}
+
+func (m *CustomResourceDefinitionSpec) GetScope() string {
+	if m != nil && m.Scope != nil {
+		return *m.Scope
+	}
+	return ""
+}
+
+func (m *CustomResourceDefinitionSpec) GetValidation() *CustomResourceValidation {
+	if m != nil {
+		return m.Validation
+	}
+	return nil
+}
+
+// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition
+type CustomResourceDefinitionStatus struct {
+	// Conditions indicate state for particular aspects of a CustomResourceDefinition
+	Conditions []*CustomResourceDefinitionCondition `protobuf:"bytes,1,rep,name=conditions" json:"conditions,omitempty"`
+	// AcceptedNames are the names that are actually being used to serve discovery
+	// They may be different than the names in spec.
+	AcceptedNames    *CustomResourceDefinitionNames `protobuf:"bytes,2,opt,name=acceptedNames" json:"acceptedNames,omitempty"`
+	XXX_unrecognized []byte                         `json:"-"`
+}
+
+func (m *CustomResourceDefinitionStatus) Reset()         { *m = CustomResourceDefinitionStatus{} }
+func (m *CustomResourceDefinitionStatus) String() string { return proto.CompactTextString(m) }
+func (*CustomResourceDefinitionStatus) ProtoMessage()    {}
+func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptorGenerated, []int{5}
+}
+
+func (m *CustomResourceDefinitionStatus) GetConditions() []*CustomResourceDefinitionCondition {
+	if m != nil {
+		return m.Conditions
+	}
+	return nil
+}
+
+func (m *CustomResourceDefinitionStatus) GetAcceptedNames() *CustomResourceDefinitionNames {
+	if m != nil {
+		return m.AcceptedNames
+	}
+	return nil
+}
+
+// CustomResourceValidation is a list of validation methods for CustomResources.
+type CustomResourceValidation struct {
+	// OpenAPIV3Schema is the OpenAPI v3 schema to be validated against.
+	OpenAPIV3Schema  *JSONSchemaProps `protobuf:"bytes,1,opt,name=openAPIV3Schema" json:"openAPIV3Schema,omitempty"`
+	XXX_unrecognized []byte           `json:"-"`
+}
+
+func (m *CustomResourceValidation) Reset()         { *m = CustomResourceValidation{} }
+func (m *CustomResourceValidation) String() string { return proto.CompactTextString(m) }
+func (*CustomResourceValidation) ProtoMessage()    {}
+func (*CustomResourceValidation) Descriptor() ([]byte, []int) {
+	return fileDescriptorGenerated, []int{6}
+}
+
+func (m *CustomResourceValidation) GetOpenAPIV3Schema() *JSONSchemaProps {
+	if m != nil {
+		return m.OpenAPIV3Schema
+	}
+	return nil
+}
+
+// ExternalDocumentation allows referencing an external resource for extended documentation.
+type ExternalDocumentation struct {
+	Description      *string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
+	Url              *string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *ExternalDocumentation) Reset()         { *m = ExternalDocumentation{} }
+func (m *ExternalDocumentation) String() string { return proto.CompactTextString(m) }
+func (*ExternalDocumentation) ProtoMessage()    {}
+func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} }
+
+func (m *ExternalDocumentation) GetDescription() string {
+	if m != nil && m.Description != nil {
+		return *m.Description
+	}
+	return ""
+}
+
+func (m *ExternalDocumentation) GetUrl() string {
+	if m != nil && m.Url != nil {
+		return *m.Url
+	}
+	return ""
+}
+
+// JSON represents any valid JSON value.
+// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.
+type JSON struct {
+	Raw              []byte `protobuf:"bytes,1,opt,name=raw" json:"raw,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *JSON) Reset()                    { *m = JSON{} }
+func (m *JSON) String() string            { return proto.CompactTextString(m) }
+func (*JSON) ProtoMessage()               {}
+func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} }
+
+func (m *JSON) GetRaw() []byte {
+	if m != nil {
+		return m.Raw
+	}
+	return nil
+}
+
+// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).
+type JSONSchemaProps struct { + Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Schema *string `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Ref *string `protobuf:"bytes,3,opt,name=ref" json:"ref,omitempty"` + Description *string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Type *string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format *string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Title *string `protobuf:"bytes,7,opt,name=title" json:"title,omitempty"` + Default *JSON `protobuf:"bytes,8,opt,name=default" json:"default,omitempty"` + Maximum *float64 `protobuf:"fixed64,9,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum *bool `protobuf:"varint,10,opt,name=exclusiveMaximum" json:"exclusiveMaximum,omitempty"` + Minimum *float64 `protobuf:"fixed64,11,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum *bool `protobuf:"varint,12,opt,name=exclusiveMinimum" json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `protobuf:"varint,13,opt,name=maxLength" json:"maxLength,omitempty"` + MinLength *int64 `protobuf:"varint,14,opt,name=minLength" json:"minLength,omitempty"` + Pattern *string `protobuf:"bytes,15,opt,name=pattern" json:"pattern,omitempty"` + MaxItems *int64 `protobuf:"varint,16,opt,name=maxItems" json:"maxItems,omitempty"` + MinItems *int64 `protobuf:"varint,17,opt,name=minItems" json:"minItems,omitempty"` + UniqueItems *bool `protobuf:"varint,18,opt,name=uniqueItems" json:"uniqueItems,omitempty"` + MultipleOf *float64 `protobuf:"fixed64,19,opt,name=multipleOf" json:"multipleOf,omitempty"` + Enum []*JSON `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MaxProperties *int64 `protobuf:"varint,21,opt,name=maxProperties" json:"maxProperties,omitempty"` + MinProperties *int64 `protobuf:"varint,22,opt,name=minProperties" json:"minProperties,omitempty"` + Required []string `protobuf:"bytes,23,rep,name=required" json:"required,omitempty"` + Items *JSONSchemaPropsOrArray `protobuf:"bytes,24,opt,name=items" json:"items,omitempty"` + AllOf []*JSONSchemaProps `protobuf:"bytes,25,rep,name=allOf" json:"allOf,omitempty"` + OneOf []*JSONSchemaProps `protobuf:"bytes,26,rep,name=oneOf" json:"oneOf,omitempty"` + AnyOf []*JSONSchemaProps `protobuf:"bytes,27,rep,name=anyOf" json:"anyOf,omitempty"` + Not *JSONSchemaProps `protobuf:"bytes,28,opt,name=not" json:"not,omitempty"` + Properties map[string]*JSONSchemaProps `protobuf:"bytes,29,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + AdditionalProperties *JSONSchemaPropsOrBool `protobuf:"bytes,30,opt,name=additionalProperties" json:"additionalProperties,omitempty"` + PatternProperties map[string]*JSONSchemaProps `protobuf:"bytes,31,rep,name=patternProperties" json:"patternProperties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Dependencies map[string]*JSONSchemaPropsOrStringArray `protobuf:"bytes,32,rep,name=dependencies" json:"dependencies,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + AdditionalItems *JSONSchemaPropsOrBool `protobuf:"bytes,33,opt,name=additionalItems" json:"additionalItems,omitempty"` + Definitions map[string]*JSONSchemaProps `protobuf:"bytes,34,rep,name=definitions" json:"definitions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ExternalDocs *ExternalDocumentation `protobuf:"bytes,35,opt,name=externalDocs" 
json:"externalDocs,omitempty"` + Example *JSON `protobuf:"bytes,36,opt,name=example" json:"example,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (m *JSONSchemaProps) String() string { return proto.CompactTextString(m) } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *JSONSchemaProps) GetId() string { + if m != nil && m.Id != nil { + return *m.Id + } + return "" +} + +func (m *JSONSchemaProps) GetSchema() string { + if m != nil && m.Schema != nil { + return *m.Schema + } + return "" +} + +func (m *JSONSchemaProps) GetRef() string { + if m != nil && m.Ref != nil { + return *m.Ref + } + return "" +} + +func (m *JSONSchemaProps) GetDescription() string { + if m != nil && m.Description != nil { + return *m.Description + } + return "" +} + +func (m *JSONSchemaProps) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *JSONSchemaProps) GetFormat() string { + if m != nil && m.Format != nil { + return *m.Format + } + return "" +} + +func (m *JSONSchemaProps) GetTitle() string { + if m != nil && m.Title != nil { + return *m.Title + } + return "" +} + +func (m *JSONSchemaProps) GetDefault() *JSON { + if m != nil { + return m.Default + } + return nil +} + +func (m *JSONSchemaProps) GetMaximum() float64 { + if m != nil && m.Maximum != nil { + return *m.Maximum + } + return 0 +} + +func (m *JSONSchemaProps) GetExclusiveMaximum() bool { + if m != nil && m.ExclusiveMaximum != nil { + return *m.ExclusiveMaximum + } + return false +} + +func (m *JSONSchemaProps) GetMinimum() float64 { + if m != nil && m.Minimum != nil { + return *m.Minimum + } + return 0 +} + +func (m *JSONSchemaProps) GetExclusiveMinimum() bool { + if m != nil && m.ExclusiveMinimum != nil { + return *m.ExclusiveMinimum + } + return false +} + +func (m *JSONSchemaProps) GetMaxLength() int64 { + if m != nil && m.MaxLength != nil { + return *m.MaxLength + } + return 0 +} + +func (m *JSONSchemaProps) GetMinLength() int64 { + if m != nil && m.MinLength != nil { + return *m.MinLength + } + return 0 +} + +func (m *JSONSchemaProps) GetPattern() string { + if m != nil && m.Pattern != nil { + return *m.Pattern + } + return "" +} + +func (m *JSONSchemaProps) GetMaxItems() int64 { + if m != nil && m.MaxItems != nil { + return *m.MaxItems + } + return 0 +} + +func (m *JSONSchemaProps) GetMinItems() int64 { + if m != nil && m.MinItems != nil { + return *m.MinItems + } + return 0 +} + +func (m *JSONSchemaProps) GetUniqueItems() bool { + if m != nil && m.UniqueItems != nil { + return *m.UniqueItems + } + return false +} + +func (m *JSONSchemaProps) GetMultipleOf() float64 { + if m != nil && m.MultipleOf != nil { + return *m.MultipleOf + } + return 0 +} + +func (m *JSONSchemaProps) GetEnum() []*JSON { + if m != nil { + return m.Enum + } + return nil +} + +func (m *JSONSchemaProps) GetMaxProperties() int64 { + if m != nil && m.MaxProperties != nil { + return *m.MaxProperties + } + return 0 +} + +func (m *JSONSchemaProps) GetMinProperties() int64 { + if m != nil && m.MinProperties != nil { + return *m.MinProperties + } + return 0 +} + +func (m *JSONSchemaProps) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *JSONSchemaProps) GetItems() *JSONSchemaPropsOrArray { + if m != nil { + return m.Items + } + return nil +} + +func (m *JSONSchemaProps) GetAllOf() []*JSONSchemaProps { + if m != nil { 
+ return m.AllOf + } + return nil +} + +func (m *JSONSchemaProps) GetOneOf() []*JSONSchemaProps { + if m != nil { + return m.OneOf + } + return nil +} + +func (m *JSONSchemaProps) GetAnyOf() []*JSONSchemaProps { + if m != nil { + return m.AnyOf + } + return nil +} + +func (m *JSONSchemaProps) GetNot() *JSONSchemaProps { + if m != nil { + return m.Not + } + return nil +} + +func (m *JSONSchemaProps) GetProperties() map[string]*JSONSchemaProps { + if m != nil { + return m.Properties + } + return nil +} + +func (m *JSONSchemaProps) GetAdditionalProperties() *JSONSchemaPropsOrBool { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *JSONSchemaProps) GetPatternProperties() map[string]*JSONSchemaProps { + if m != nil { + return m.PatternProperties + } + return nil +} + +func (m *JSONSchemaProps) GetDependencies() map[string]*JSONSchemaPropsOrStringArray { + if m != nil { + return m.Dependencies + } + return nil +} + +func (m *JSONSchemaProps) GetAdditionalItems() *JSONSchemaPropsOrBool { + if m != nil { + return m.AdditionalItems + } + return nil +} + +func (m *JSONSchemaProps) GetDefinitions() map[string]*JSONSchemaProps { + if m != nil { + return m.Definitions + } + return nil +} + +func (m *JSONSchemaProps) GetExternalDocs() *ExternalDocumentation { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *JSONSchemaProps) GetExample() *JSON { + if m != nil { + return m.Example + } + return nil +} + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +type JSONSchemaPropsOrArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema" json:"schema,omitempty"` + JSONSchemas []*JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas" json:"jSONSchemas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (m *JSONSchemaPropsOrArray) String() string { return proto.CompactTextString(m) } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *JSONSchemaPropsOrArray) GetSchema() *JSONSchemaProps { + if m != nil { + return m.Schema + } + return nil +} + +func (m *JSONSchemaPropsOrArray) GetJSONSchemas() []*JSONSchemaProps { + if m != nil { + return m.JSONSchemas + } + return nil +} + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +type JSONSchemaPropsOrBool struct { + Allows *bool `protobuf:"varint,1,opt,name=allows" json:"allows,omitempty"` + Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } +func (m *JSONSchemaPropsOrBool) String() string { return proto.CompactTextString(m) } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *JSONSchemaPropsOrBool) GetAllows() bool { + if m != nil && m.Allows != nil { + return *m.Allows + } + return false +} + +func (m *JSONSchemaPropsOrBool) GetSchema() *JSONSchemaProps { + if m != nil { + return m.Schema + } + return nil +} + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. 
+type JSONSchemaPropsOrStringArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema" json:"schema,omitempty"` + Property []string `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } +func (m *JSONSchemaPropsOrStringArray) String() string { return proto.CompactTextString(m) } +func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} +func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} +} + +func (m *JSONSchemaPropsOrStringArray) GetSchema() *JSONSchemaProps { + if m != nil { + return m.Schema + } + return nil +} + +func (m *JSONSchemaPropsOrStringArray) GetProperty() []string { + if m != nil { + return m.Property + } + return nil +} + +func init() { + proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition") + proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition") + proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionList") + proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames") + proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionSpec") + proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionStatus") + proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceValidation") + proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation") + proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSON") + proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps") + proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") + proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") + proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") +} +func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Metadata != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n1, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Spec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Status != nil { + dAtA[i] = 0x1a + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) + i += copy(dAtA[i:], *m.Status) + } + if m.LastTransitionTime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Reason != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i += copy(dAtA[i:], *m.Reason) + } + if m.Message != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Metadata != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n5, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Plural != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Plural))) + i += copy(dAtA[i:], *m.Plural) + } + if m.Singular != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Singular))) + i += copy(dAtA[i:], *m.Singular) + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Kind != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i += copy(dAtA[i:], *m.Kind) + } + if m.ListKind != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ListKind))) + i += copy(dAtA[i:], *m.ListKind) + } + if 
m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Group != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) + i += copy(dAtA[i:], *m.Group) + } + if m.Version != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) + i += copy(dAtA[i:], *m.Version) + } + if m.Names != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Names.Size())) + n6, err := m.Names.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Scope != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i += copy(dAtA[i:], *m.Scope) + } + if m.Validation != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Validation.Size())) + n7, err := m.Validation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AcceptedNames != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AcceptedNames.Size())) + n8, err := m.AcceptedNames.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.OpenAPIV3Schema != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.OpenAPIV3Schema.Size())) + n9, err := m.OpenAPIV3Schema.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Description != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description))) + i += copy(dAtA[i:], *m.Description) + } + if m.Url != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Url))) + i += copy(dAtA[i:], *m.Url) 
+ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *JSON) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSON) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Raw != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Id))) + i += copy(dAtA[i:], *m.Id) + } + if m.Schema != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Schema))) + i += copy(dAtA[i:], *m.Schema) + } + if m.Ref != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i += copy(dAtA[i:], *m.Ref) + } + if m.Description != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description))) + i += copy(dAtA[i:], *m.Description) + } + if m.Type != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.Format != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Format))) + i += copy(dAtA[i:], *m.Format) + } + if m.Title != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Title))) + i += copy(dAtA[i:], *m.Title) + } + if m.Default != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Default.Size())) + n10, err := m.Default.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Maximum != nil { + dAtA[i] = 0x49 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i += 8 + } + if m.ExclusiveMaximum != nil { + dAtA[i] = 0x50 + i++ + if *m.ExclusiveMaximum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Minimum != nil { + dAtA[i] = 0x59 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i += 8 + } + if m.ExclusiveMinimum != nil { + dAtA[i] = 0x60 + i++ + if *m.ExclusiveMinimum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MaxLength != nil { + dAtA[i] = 0x68 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + } + if m.MinLength != nil { + dAtA[i] = 0x70 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + } + if m.Pattern != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pattern))) + i += copy(dAtA[i:], *m.Pattern) + } + if m.MaxItems != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + } + if m.MinItems != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + } + if m.UniqueItems != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + if *m.UniqueItems { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MultipleOf != nil { + dAtA[i] = 
0x99 + i++ + dAtA[i] = 0x1 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i += 8 + } + if len(m.Enum) > 0 { + for _, msg := range m.Enum { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.MaxProperties != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + } + if m.MinProperties != nil { + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + } + if len(m.Required) > 0 { + for _, s := range m.Required { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Items != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Items.Size())) + n11, err := m.Items.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if len(m.AllOf) > 0 { + for _, msg := range m.AllOf { + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.OneOf) > 0 { + for _, msg := range m.OneOf { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.AnyOf) > 0 { + for _, msg := range m.AnyOf { + dAtA[i] = 0xda + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Not != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Not.Size())) + n12, err := m.Not.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if len(m.Properties) > 0 { + for k, _ := range m.Properties { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x1 + i++ + v := m.Properties[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) + n13, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + } + } + if m.AdditionalProperties != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalProperties.Size())) + n14, err := m.AdditionalProperties.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if len(m.PatternProperties) > 0 { + for k, _ := range m.PatternProperties { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x1 + i++ + v := m.PatternProperties[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(v.Size())) + n15, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + } + } + if len(m.Dependencies) > 0 { + for k, _ := range m.Dependencies { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x2 + i++ + v := m.Dependencies[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) + n16, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + } + } + if m.AdditionalItems != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalItems.Size())) + n17, err := m.AdditionalItems.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if len(m.Definitions) > 0 { + for k, _ := range m.Definitions { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x2 + i++ + v := m.Definitions[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) + n18, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + } + } + if m.ExternalDocs != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExternalDocs.Size())) + n19, err := m.ExternalDocs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.Example != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Example.Size())) + n20, err := m.Example.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Schema != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) + n21, err := m.Schema.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if len(m.JSONSchemas) > 0 { + for _, msg := range m.JSONSchemas { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Allows != nil { + dAtA[i] = 0x8 + i++ + if *m.Allows { + dAtA[i] = 1 + } else { 
+ dAtA[i] = 0 + } + i++ + } + if m.Schema != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) + n22, err := m.Schema.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Schema != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) + n23, err := m.Schema.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if len(m.Property) > 0 { + for _, s := range m.Property { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CustomResourceDefinition) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CustomResourceDefinitionCondition) Size() (n int) { + var l int + _ = l + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = len(*m.Status) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CustomResourceDefinitionList) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CustomResourceDefinitionNames) Size() (n int) { + var l int + _ = l + if m.Plural != nil { + l = len(*m.Plural) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Singular != nil { + l = len(*m.Singular) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ListKind != nil { + l = len(*m.ListKind) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CustomResourceDefinitionSpec) Size() (n int) { + var l int + _ = l + if m.Group != 
nil { + l = len(*m.Group) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Version != nil { + l = len(*m.Version) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Names != nil { + l = m.Names.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Validation != nil { + l = m.Validation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CustomResourceDefinitionStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AcceptedNames != nil { + l = m.AcceptedNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CustomResourceValidation) Size() (n int) { + var l int + _ = l + if m.OpenAPIV3Schema != nil { + l = m.OpenAPIV3Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExternalDocumentation) Size() (n int) { + var l int + _ = l + if m.Description != nil { + l = len(*m.Description) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Url != nil { + l = len(*m.Url) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *JSON) Size() (n int) { + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *JSONSchemaProps) Size() (n int) { + var l int + _ = l + if m.Id != nil { + l = len(*m.Id) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Schema != nil { + l = len(*m.Schema) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Ref != nil { + l = len(*m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Description != nil { + l = len(*m.Description) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Format != nil { + l = len(*m.Format) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Title != nil { + l = len(*m.Title) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Default != nil { + l = m.Default.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Maximum != nil { + n += 9 + } + if m.ExclusiveMaximum != nil { + n += 2 + } + if m.Minimum != nil { + n += 9 + } + if m.ExclusiveMinimum != nil { + n += 2 + } + if m.MaxLength != nil { + n += 1 + sovGenerated(uint64(*m.MaxLength)) + } + if m.MinLength != nil { + n += 1 + sovGenerated(uint64(*m.MinLength)) + } + if m.Pattern != nil { + l = len(*m.Pattern) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxItems != nil { + n += 2 + sovGenerated(uint64(*m.MaxItems)) + } + if m.MinItems != nil { + n += 2 + sovGenerated(uint64(*m.MinItems)) + } + if m.UniqueItems != nil { + n += 3 + } + if m.MultipleOf != nil { + n += 10 + } + if len(m.Enum) > 0 { + for _, e := range m.Enum { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.MaxProperties != nil { + n += 2 + sovGenerated(uint64(*m.MaxProperties)) + } + if m.MinProperties != nil { + n += 2 + sovGenerated(uint64(*m.MinProperties)) + } + if len(m.Required) > 0 { + for _, s := range m.Required { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Items != nil { + l = m.Items.Size() + n += 
2 + l + sovGenerated(uint64(l)) + } + if len(m.AllOf) > 0 { + for _, e := range m.AllOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.OneOf) > 0 { + for _, e := range m.OneOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AnyOf) > 0 { + for _, e := range m.AnyOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Not != nil { + l = m.Not.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Properties) > 0 { + for k, v := range m.Properties { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGenerated(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalProperties != nil { + l = m.AdditionalProperties.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.PatternProperties) > 0 { + for k, v := range m.PatternProperties { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGenerated(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Dependencies) > 0 { + for k, v := range m.Dependencies { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGenerated(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalItems != nil { + l = m.AdditionalItems.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Definitions) > 0 { + for k, v := range m.Definitions { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGenerated(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ExternalDocs != nil { + l = m.ExternalDocs.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Example != nil { + l = m.Example.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *JSONSchemaPropsOrArray) Size() (n int) { + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.JSONSchemas) > 0 { + for _, e := range m.JSONSchemas { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *JSONSchemaPropsOrBool) Size() (n int) { + var l int + _ = l + if m.Allows != nil { + n += 2 + } + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Property) > 0 { + for _, s := range m.Property { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &CustomResourceDefinitionSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &CustomResourceDefinitionStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Status = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &CustomResourceDefinition{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plural", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Plural = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Singular = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ListKind = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Group = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Version = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Names == nil { + m.Names = &CustomResourceDefinitionNames{} + } + if err := 
m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validation == nil { + m.Validation = &CustomResourceValidation{} + } + if err := m.Validation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, &CustomResourceDefinitionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AcceptedNames == nil { + m.AcceptedNames = &CustomResourceDefinitionNames{} + } + if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OpenAPIV3Schema == nil { + m.OpenAPIV3Schema = &JSONSchemaProps{} + } + if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Description = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Url = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSON) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSON: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Id = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Schema = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Ref = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Description = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Format = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Title = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Default == nil { + m.Default = &JSON{} + } + if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Maximum = &v2 + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMaximum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ExclusiveMaximum = &b + case 11: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Minimum = &v2 + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ExclusiveMinimum = &b + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxLength = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinLength = &v + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Pattern = &s + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", 
wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxItems = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinItems = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.UniqueItems = &b + case 19: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.MultipleOf = &v2 + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enum = append(m.Enum, &JSON{}) + if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxProperties = &v + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinProperties = &v + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Required = append(m.Required, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 24: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Items == nil { + m.Items = &JSONSchemaPropsOrArray{} + } + if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllOf = append(m.AllOf, &JSONSchemaProps{}) + if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OneOf = append(m.OneOf, &JSONSchemaProps{}) + if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnyOf = append(m.AnyOf, &JSONSchemaProps{}) + if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Not == nil { + m.Not = &JSONSchemaProps{} + } + if err := m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Properties == nil { + m.Properties = make(map[string]*JSONSchemaProps) + } + var mapkey string + var mapvalue *JSONSchemaProps + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Properties[mapkey] = mapvalue + iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalProperties == nil { + m.AdditionalProperties = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PatternProperties == nil { + m.PatternProperties = make(map[string]*JSONSchemaProps) + } + var mapkey string + var mapvalue *JSONSchemaProps + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PatternProperties[mapkey] = mapvalue + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Dependencies == nil { + m.Dependencies = make(map[string]*JSONSchemaPropsOrStringArray) + } + var mapkey string + var mapvalue *JSONSchemaPropsOrStringArray + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaPropsOrStringArray{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Dependencies[mapkey] = mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalItems == nil { + m.AdditionalItems = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definitions == nil { + m.Definitions = make(map[string]*JSONSchemaProps) + } + var mapkey string + var mapvalue *JSONSchemaProps + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + 
return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Definitions[mapkey] = mapvalue + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExternalDocs == nil { + m.ExternalDocs = &ExternalDocumentation{} + } + if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Example == nil { + m.Example = &JSON{} + } + if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONSchemas = append(m.JSONSchemas, &JSONSchemaProps{}) + if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Allows = &b + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1410 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0x4f, 0x8f, 0x14, 0x45, + 0x14, 0xb7, 0x66, 0x77, 0xd9, 0xdd, 0xda, 0x85, 0x5d, 0x4a, 0x58, 0x9a, 0x75, 0x19, 0x87, 0x96, + 0xc3, 0x86, 0xc4, 0x19, 0x01, 0x63, 0x88, 0x89, 0x07, 0xfe, 0x1d, 0xf8, 0xbb, 0xa4, 0x17, 0x21, + 0x01, 0x11, 0x8a, 0xee, 0x37, 0xb3, 0xc5, 0x76, 0x57, 0x37, 0x55, 0xd5, 0xc3, 0xcc, 0x85, 0xa8, + 0xf1, 0xc0, 0x45, 0x2f, 0x92, 0xe8, 0xc1, 0x93, 0x5e, 0xfc, 0x28, 0x1e, 0xf1, 0xe8, 0xc1, 0xc4, + 0xe0, 0x37, 0xf0, 0x13, 0x98, 0xaa, 0xae, 0xe9, 0xe9, 0xf9, 0x87, 0x24, 0xf4, 0xc0, 0xad, 0x5f, + 0xd5, 0xab, 0xdf, 0xef, 0xd5, 0x7b, 0xaf, 0xde, 0x7b, 0x33, 0xf8, 0xea, 0xee, 0x69, 0x59, 0x67, + 0x71, 0x83, 0x26, 0x0c, 0x3a, 0x0a, 0xb8, 0x64, 0x31, 0x97, 0x1f, 0xd2, 0x84, 0x49, 0x10, 0x6d, + 0x10, 0x8d, 0x64, 0xb7, 0xa5, 0xf7, 0xe4, 0xa0, 0x42, 0xa3, 0x7d, 0xe2, 0x01, 0x28, 0x7a, 0xa2, + 0xd1, 0x02, 0x0e, 0x82, 0x2a, 0x08, 0xea, 0x89, 0x88, 0x55, 0x4c, 0x3e, 0xcb, 0xe0, 0xea, 0x03, + 0xda, 0xf7, 0x72, 0xb8, 0x7a, 0xb2, 0xdb, 0xd2, 0x7b, 0x72, 0x50, 0xa1, 0x6e, 0xe1, 0xd6, 0x3f, + 0xee, 0x5b, 0x13, 0x51, 0x7f, 0x87, 0x71, 0x10, 0xdd, 0xbe, 0x09, 0x11, 0x28, 0xda, 0x68, 0x8f, + 0x90, 0xae, 0x37, 0x26, 0x9d, 0x12, 0x29, 0x57, 0x2c, 0x82, 0x91, 0x03, 0x9f, 0xfc, 0xdf, 
0x01, + 0xe9, 0xef, 0x40, 0x44, 0x47, 0xce, 0x9d, 0x9a, 0x74, 0x2e, 0x55, 0x2c, 0x6c, 0x30, 0xae, 0xa4, + 0x12, 0xc3, 0x87, 0xdc, 0xe7, 0x15, 0xec, 0x9c, 0x4b, 0xa5, 0x8a, 0x23, 0x0f, 0x64, 0x9c, 0x0a, + 0x1f, 0xce, 0x43, 0x93, 0x71, 0xa6, 0x58, 0xcc, 0xc9, 0x15, 0xbc, 0xa0, 0x6f, 0x15, 0x50, 0x45, + 0x1d, 0x54, 0x43, 0x9b, 0x4b, 0x27, 0x3f, 0xaa, 0xf7, 0x5d, 0x98, 0x93, 0xf4, 0xfd, 0xa6, 0xb5, + 0xeb, 0xed, 0x13, 0xf5, 0xad, 0x07, 0x0f, 0xc1, 0x57, 0x57, 0x41, 0x51, 0x2f, 0x47, 0x20, 0x31, + 0x9e, 0x95, 0x09, 0xf8, 0x4e, 0xc5, 0x20, 0xdd, 0xa9, 0xbf, 0x56, 0x30, 0xea, 0x93, 0x8c, 0xde, + 0x4e, 0xc0, 0xf7, 0x0c, 0x11, 0x49, 0xf1, 0x1e, 0xa9, 0xa8, 0x4a, 0xa5, 0x33, 0x63, 0x28, 0xef, + 0x4e, 0x8b, 0xd2, 0x90, 0x78, 0x96, 0xcc, 0xfd, 0x0b, 0xe1, 0xa3, 0x93, 0x54, 0xcf, 0xc5, 0x3c, + 0xc8, 0x7c, 0x4b, 0xf0, 0xac, 0xea, 0x26, 0x60, 0xfc, 0xba, 0xe8, 0x99, 0x6f, 0xb2, 0x96, 0x1b, + 0x5c, 0x31, 0xab, 0x56, 0x22, 0xb7, 0x31, 0x09, 0xa9, 0x54, 0x37, 0x04, 0xe5, 0xd2, 0x9c, 0xbe, + 0xc1, 0x22, 0xb0, 0x97, 0x3a, 0xfe, 0x6a, 0x11, 0xd1, 0x27, 0xbc, 0x31, 0x28, 0x9a, 0x53, 0x00, + 0x95, 0x31, 0x77, 0x66, 0x33, 0xce, 0x4c, 0x22, 0x0e, 0x9e, 0x8f, 0x40, 0x4a, 0xda, 0x02, 0x67, + 0xce, 0x6c, 0xf4, 0x44, 0xf7, 0x4f, 0x84, 0x37, 0x26, 0xdd, 0xef, 0x0a, 0x93, 0x8a, 0x5c, 0x1a, + 0x49, 0x9b, 0xfa, 0xab, 0x19, 0xa9, 0x4f, 0x0f, 0x25, 0x4d, 0x84, 0xe7, 0x98, 0x82, 0x48, 0x7b, + 0x64, 0x66, 0x73, 0xe9, 0xe4, 0xad, 0x29, 0x85, 0xd0, 0xcb, 0x58, 0xdc, 0x5f, 0x11, 0x3e, 0x32, + 0x49, 0xe7, 0x1a, 0x8d, 0x40, 0x6a, 0x7f, 0x25, 0x61, 0x2a, 0x68, 0x68, 0x23, 0x67, 0x25, 0xb2, + 0x8e, 0x17, 0x24, 0xe3, 0xad, 0x34, 0xa4, 0xc2, 0x46, 0x2f, 0x97, 0x49, 0x15, 0x63, 0xb9, 0x13, + 0x0b, 0x65, 0x10, 0x9c, 0x99, 0xda, 0xcc, 0xe6, 0xa2, 0x57, 0x58, 0xd1, 0xb9, 0xb0, 0xcb, 0x78, + 0x60, 0x23, 0x60, 0xbe, 0x35, 0x5e, 0xc8, 0xa4, 0xba, 0xac, 0xd7, 0xb3, 0x00, 0xe4, 0xb2, 0xfb, + 0x47, 0x65, 0x72, 0x04, 0x74, 0xfe, 0x93, 0x03, 0x78, 0xae, 0x25, 0xe2, 0x34, 0xb1, 0x36, 0x66, + 0x82, 0x0e, 0x69, 0x1b, 0x84, 0x76, 0x88, 0xb5, 0xb0, 0x27, 0x12, 0x81, 0xe7, 0xb8, 0xb5, 0x4d, + 0x87, 0xeb, 0x8b, 0x29, 0x79, 0xd9, 0xdc, 0xd6, 0xcb, 0xa8, 0xb4, 0x8d, 0xd2, 0x8f, 0x13, 0xb0, + 0xb7, 0xce, 0x04, 0xf2, 0x18, 0xe3, 0x36, 0x0d, 0x59, 0x40, 0xb5, 0xbe, 0xb9, 0x78, 0xd9, 0x41, + 0xbf, 0x99, 0xc3, 0x7b, 0x05, 0x2a, 0xf7, 0xb7, 0x0a, 0xae, 0xbe, 0xfc, 0x81, 0x93, 0xaf, 0x10, + 0xc6, 0x7e, 0xef, 0x01, 0x4b, 0x07, 0x99, 0x8c, 0xbc, 0x3f, 0x25, 0x5f, 0xe5, 0x95, 0xc2, 0x2b, + 0x70, 0x92, 0x6f, 0x10, 0xde, 0x4b, 0x7d, 0x1f, 0x12, 0x05, 0x41, 0x96, 0x4d, 0x95, 0x37, 0x10, + 0xb1, 0x41, 0x4a, 0xf7, 0x19, 0x1a, 0xee, 0x19, 0x7d, 0x9f, 0x92, 0x0e, 0x5e, 0x89, 0x13, 0xe0, + 0x67, 0xae, 0x5f, 0xbc, 0x79, 0x6a, 0xdb, 0x34, 0x2a, 0x5b, 0x03, 0xae, 0xbd, 0xa6, 0x89, 0x97, + 0xb6, 0xb7, 0xae, 0x65, 0x80, 0xd7, 0x45, 0x9c, 0x48, 0x6f, 0x98, 0xc6, 0xbd, 0x8c, 0x0f, 0x5e, + 0xe8, 0x28, 0x10, 0x9c, 0x86, 0xe7, 0x63, 0x3f, 0x8d, 0x80, 0xab, 0xcc, 0xa4, 0x1a, 0x5e, 0x0a, + 0x40, 0xfa, 0x82, 0x25, 0x26, 0xa9, 0xb2, 0x37, 0x51, 0x5c, 0x22, 0xab, 0x78, 0x26, 0x15, 0xa1, + 0x7d, 0x15, 0xfa, 0xd3, 0x75, 0xf0, 0xac, 0x26, 0xd4, 0x3b, 0x82, 0x3e, 0x36, 0x67, 0x96, 0x3d, + 0xfd, 0xe9, 0x3e, 0x3d, 0x84, 0x57, 0x86, 0x6c, 0x21, 0xfb, 0x70, 0x85, 0x05, 0x16, 0xb8, 0xc2, + 0x02, 0x53, 0xc8, 0xb3, 0xbb, 0xf7, 0x0a, 0xb9, 0x91, 0x0c, 0x1a, 0x34, 0xcd, 0x2b, 0x5b, 0xf4, + 0xf4, 0xe7, 0xb0, 0x6d, 0xb3, 0xa3, 0xb6, 0xf5, 0x1a, 0xc5, 0xdc, 0x60, 0xa3, 0x68, 0xc6, 0x22, + 0xa2, 0xca, 0xd9, 0x93, 0xe1, 0x67, 0x92, 0x7e, 0x53, 0x8a, 0xa9, 0x10, 0x9c, 0xf9, 0xec, 0x4d, + 0x19, 0x81, 0xdc, 
0xc5, 0xf3, 0x01, 0x34, 0x69, 0x1a, 0x2a, 0x67, 0xc1, 0x84, 0xe2, 0x5c, 0x09, + 0xa1, 0xf0, 0x7a, 0x98, 0xa6, 0x53, 0xd0, 0x0e, 0x8b, 0xd2, 0xc8, 0x59, 0xac, 0xa1, 0x4d, 0xe4, + 0xf5, 0x44, 0x72, 0x1c, 0xaf, 0x42, 0xc7, 0x0f, 0x53, 0xc9, 0xda, 0x70, 0xd5, 0xaa, 0xe0, 0x1a, + 0xda, 0x5c, 0xf0, 0x46, 0xd6, 0x0d, 0x0a, 0xe3, 0x46, 0x65, 0xc9, 0xa2, 0x64, 0xe2, 0x20, 0x8a, + 0x55, 0x59, 0x1e, 0x46, 0xb1, 0xba, 0x1b, 0x78, 0x31, 0xa2, 0x9d, 0x2b, 0xc0, 0x5b, 0x6a, 0xc7, + 0xd9, 0x5b, 0x43, 0x9b, 0x33, 0x5e, 0x7f, 0xc1, 0xec, 0x32, 0x6e, 0x77, 0xf7, 0xd9, 0xdd, 0xde, + 0x82, 0xb6, 0x20, 0xa1, 0x4a, 0x27, 0x90, 0xb3, 0x92, 0x95, 0x47, 0x2b, 0xea, 0x5a, 0x1c, 0xd1, + 0xce, 0x45, 0xd3, 0x87, 0x56, 0xcd, 0xb1, 0x5c, 0x36, 0x7b, 0x8c, 0x67, 0x7b, 0xfb, 0xed, 0x9e, + 0x95, 0x75, 0x70, 0x53, 0xce, 0x1e, 0xa5, 0x90, 0x6d, 0x13, 0x63, 0x74, 0x71, 0x49, 0x77, 0x86, + 0x28, 0x0d, 0x15, 0x4b, 0x42, 0xd8, 0x6a, 0x3a, 0xef, 0x9a, 0x8b, 0x17, 0x56, 0xc8, 0x2d, 0x3c, + 0x0b, 0x3c, 0x8d, 0x9c, 0x03, 0xa6, 0xd6, 0x94, 0x12, 0x37, 0x03, 0x48, 0x8e, 0xe1, 0xbd, 0x11, + 0xed, 0xe8, 0xec, 0x05, 0xa1, 0x18, 0x48, 0xe7, 0xa0, 0xb1, 0x7d, 0x70, 0xd1, 0x68, 0x31, 0x5e, + 0xd0, 0x5a, 0xb3, 0x5a, 0xc5, 0x45, 0xed, 0x02, 0x01, 0x8f, 0x52, 0x26, 0x20, 0x70, 0x0e, 0x99, + 0xe6, 0x96, 0xcb, 0x64, 0xb7, 0xd7, 0xbf, 0x1d, 0x93, 0x79, 0x9f, 0x97, 0x5b, 0x04, 0xb6, 0xc4, + 0x19, 0x21, 0x68, 0xd7, 0x76, 0x6f, 0x12, 0xe0, 0x39, 0x1a, 0x86, 0x5b, 0x4d, 0xe7, 0xb0, 0x71, + 0x57, 0xd9, 0x15, 0x27, 0x03, 0xd7, 0x2c, 0x31, 0xd7, 0xe1, 0x5a, 0x9f, 0x0e, 0x8b, 0x01, 0x37, + 0x77, 0xe1, 0xdd, 0xad, 0xa6, 0xf3, 0xde, 0x94, 0xee, 0xa2, 0xc1, 0xc9, 0x7d, 0x3c, 0xc3, 0x63, + 0xe5, 0x6c, 0x4c, 0xa5, 0x42, 0x6b, 0x68, 0xf2, 0x04, 0xe3, 0xa4, 0x9f, 0x3f, 0x47, 0xcc, 0x65, + 0xbe, 0x2c, 0x97, 0xa8, 0xde, 0xcf, 0xc5, 0x0b, 0x5c, 0x89, 0xae, 0x57, 0x60, 0x24, 0x4f, 0x11, + 0x3e, 0x40, 0x83, 0xac, 0x7f, 0xd2, 0xb0, 0x90, 0xca, 0x55, 0x73, 0xe7, 0x1b, 0x65, 0x27, 0xe4, + 0xd9, 0x38, 0x0e, 0xbd, 0xb1, 0x8c, 0xe4, 0x07, 0x84, 0xf7, 0xdb, 0x92, 0x52, 0xb0, 0xe3, 0x7d, + 0xe3, 0x12, 0x28, 0xdb, 0x25, 0xc3, 0x3c, 0x99, 0x67, 0x46, 0xf9, 0xc9, 0xb7, 0x08, 0x2f, 0x07, + 0x90, 0x00, 0x0f, 0x80, 0xfb, 0xda, 0xa0, 0x5a, 0x29, 0x73, 0xcd, 0xb0, 0x41, 0xe7, 0x0b, 0x14, + 0x99, 0x2d, 0x03, 0xac, 0xe4, 0x09, 0x5e, 0xe9, 0x3b, 0x2d, 0xab, 0x97, 0x47, 0xa7, 0x18, 0xa1, + 0x61, 0x32, 0xf2, 0x35, 0xd2, 0x9d, 0xb8, 0x37, 0xf7, 0x48, 0xc7, 0x35, 0x5e, 0xb8, 0x57, 0xba, + 0x17, 0x72, 0x86, 0xcc, 0x09, 0x45, 0x4e, 0xd2, 0xc1, 0xcb, 0xd0, 0x9f, 0x60, 0xa4, 0xf3, 0x41, + 0x29, 0x0e, 0x18, 0x3b, 0x14, 0x79, 0x03, 0x4c, 0x7a, 0x44, 0x80, 0x0e, 0x8d, 0x92, 0x10, 0x9c, + 0x63, 0x25, 0x8e, 0x08, 0x16, 0x73, 0xfd, 0x3b, 0x84, 0x57, 0x86, 0x52, 0x51, 0xcf, 0x42, 0xbb, + 0xd0, 0xb5, 0x43, 0x93, 0xfe, 0xd4, 0x25, 0xaf, 0x4d, 0xc3, 0x14, 0xec, 0x4c, 0x5b, 0x7a, 0xc9, + 0x33, 0xe0, 0x9f, 0x56, 0x4e, 0xa3, 0xf5, 0x67, 0x08, 0xaf, 0x8d, 0x7f, 0x21, 0x6f, 0xd5, 0xac, + 0x9f, 0x11, 0xde, 0x3f, 0xf2, 0x4e, 0xc6, 0x58, 0xf4, 0x68, 0xd0, 0xa2, 0x3b, 0x65, 0xbf, 0x90, + 0x6d, 0x25, 0x18, 0x6f, 0xd9, 0xd6, 0xda, 0x37, 0xef, 0x7b, 0x84, 0x57, 0x87, 0x13, 0xf8, 0x6d, + 0xfa, 0xcb, 0xfd, 0x17, 0xe1, 0xb5, 0xf1, 0x13, 0x01, 0x69, 0xe6, 0x13, 0xf8, 0x74, 0x7e, 0x7d, + 0xf4, 0x26, 0xfa, 0x04, 0x2f, 0x3d, 0xcc, 0xb7, 0x7a, 0xff, 0x52, 0x94, 0x4d, 0x56, 0xa4, 0x70, + 0x7f, 0x44, 0xf8, 0xe0, 0xd8, 0x9a, 0xa6, 0x7f, 0x15, 0xd0, 0x30, 0x8c, 0x1f, 0x4b, 0x73, 0xe7, + 0x05, 0xcf, 0x4a, 0x05, 0x5f, 0x54, 0xa6, 0xe9, 0x0b, 0xf7, 0x17, 0x84, 0x37, 0x5e, 0x96, 0x4b, + 0x6f, 0x2c, 0x28, 0xeb, 0x78, 0xc1, 0x4e, 
0x00, 0x5d, 0x13, 0x91, 0x45, 0x2f, 0x97, 0xcf, 0x1e, + 0xfe, 0xfd, 0x45, 0x15, 0x3d, 0x7f, 0x51, 0x45, 0x7f, 0xbf, 0xa8, 0xa2, 0x9f, 0xfe, 0xa9, 0xbe, + 0x73, 0x7b, 0xde, 0xc2, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x01, 0xf6, 0x76, 0x6e, 0x16, + 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apiextensions/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apiextensions/v1beta1/register.go new file mode 100644 index 00000000..b1ba6e48 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apiextensions/v1beta1/register.go @@ -0,0 +1,9 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("apiextensions.k8s.io", "v1beta1", "customresourcedefinitions", false, &CustomResourceDefinition{}) + + k8s.RegisterList("apiextensions.k8s.io", "v1beta1", "customresourcedefinitions", false, &CustomResourceDefinitionList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1alpha1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1alpha1/generated.pb.go deleted file mode 100644 index fa4131a4..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1227 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto - - It has these top-level messages: - PetSet - PetSetList - PetSetSpec - PetSetStatus -*/ -package v1alpha1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/ericchiang/k8s/api/resource" -import k8s_io_kubernetes_pkg_api_unversioned "github.com/ericchiang/k8s/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" -import _ "github.com/ericchiang/k8s/runtime" -import _ "github.com/ericchiang/k8s/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// PetSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The PetSet guarantees that a given network identity will always -// map to the same storage identity. PetSet is currently in alpha -// and subject to change without notice. -type PetSet struct { - Metadata *k8s_io_kubernetes_pkg_api_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Spec defines the desired identities of pets in this set. - Spec *PetSetSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // Status is the current status of Pets in this PetSet. This data - // may be out of date by some window of time. 
- Status *PetSetStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PetSet) Reset() { *m = PetSet{} } -func (m *PetSet) String() string { return proto.CompactTextString(m) } -func (*PetSet) ProtoMessage() {} -func (*PetSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PetSet) GetMetadata() *k8s_io_kubernetes_pkg_api_v1.ObjectMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *PetSet) GetSpec() *PetSetSpec { - if m != nil { - return m.Spec - } - return nil -} - -func (m *PetSet) GetStatus() *PetSetStatus { - if m != nil { - return m.Status - } - return nil -} - -// PetSetList is a collection of PetSets. -type PetSetList struct { - Metadata *k8s_io_kubernetes_pkg_api_unversioned.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - Items []*PetSet `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PetSetList) Reset() { *m = PetSetList{} } -func (m *PetSetList) String() string { return proto.CompactTextString(m) } -func (*PetSetList) ProtoMessage() {} -func (*PetSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *PetSetList) GetMetadata() *k8s_io_kubernetes_pkg_api_unversioned.ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *PetSetList) GetItems() []*PetSet { - if m != nil { - return m.Items - } - return nil -} - -// A PetSetSpec is the specification of a PetSet. -type PetSetSpec struct { - // Replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - Replicas *int32 `protobuf:"varint,1,opt,name=replicas" json:"replicas,omitempty"` - // Selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - Selector *k8s_io_kubernetes_pkg_api_unversioned.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the PetSet - // will fulfill this Template, but have a unique identity from the rest - // of the PetSet. - Template *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` - // VolumeClaimTemplates is a list of claims that pets are allowed to reference. - // The PetSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pet. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - VolumeClaimTemplates []*k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim `protobuf:"bytes,4,rep,name=volumeClaimTemplates" json:"volumeClaimTemplates,omitempty"` - // ServiceName is the name of the service that governs this PetSet. 
- // This service must exist before the PetSet, and is responsible for - // the network identity of the set. Pets get DNS/hostnames that follow the - // pattern: pet-specific-string.serviceName.default.svc.cluster.local - // where "pet-specific-string" is managed by the PetSet controller. - ServiceName *string `protobuf:"bytes,5,opt,name=serviceName" json:"serviceName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PetSetSpec) Reset() { *m = PetSetSpec{} } -func (m *PetSetSpec) String() string { return proto.CompactTextString(m) } -func (*PetSetSpec) ProtoMessage() {} -func (*PetSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *PetSetSpec) GetReplicas() int32 { - if m != nil && m.Replicas != nil { - return *m.Replicas - } - return 0 -} - -func (m *PetSetSpec) GetSelector() *k8s_io_kubernetes_pkg_api_unversioned.LabelSelector { - if m != nil { - return m.Selector - } - return nil -} - -func (m *PetSetSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { - if m != nil { - return m.Template - } - return nil -} - -func (m *PetSetSpec) GetVolumeClaimTemplates() []*k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim { - if m != nil { - return m.VolumeClaimTemplates - } - return nil -} - -func (m *PetSetSpec) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -// PetSetStatus represents the current state of a PetSet. -type PetSetStatus struct { - // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `protobuf:"varint,1,opt,name=observedGeneration" json:"observedGeneration,omitempty"` - // Replicas is the number of actual replicas. - Replicas *int32 `protobuf:"varint,2,opt,name=replicas" json:"replicas,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PetSetStatus) Reset() { *m = PetSetStatus{} } -func (m *PetSetStatus) String() string { return proto.CompactTextString(m) } -func (*PetSetStatus) ProtoMessage() {} -func (*PetSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *PetSetStatus) GetObservedGeneration() int64 { - if m != nil && m.ObservedGeneration != nil { - return *m.ObservedGeneration - } - return 0 -} - -func (m *PetSetStatus) GetReplicas() int32 { - if m != nil && m.Replicas != nil { - return *m.Replicas - } - return 0 -} - -func init() { - proto.RegisterType((*PetSet)(nil), "github.com/ericchiang.k8s.apis.apps.v1alpha1.PetSet") - proto.RegisterType((*PetSetList)(nil), "github.com/ericchiang.k8s.apis.apps.v1alpha1.PetSetList") - proto.RegisterType((*PetSetSpec)(nil), "github.com/ericchiang.k8s.apis.apps.v1alpha1.PetSetSpec") - proto.RegisterType((*PetSetStatus)(nil), "github.com/ericchiang.k8s.apis.apps.v1alpha1.PetSetStatus") -} -func (m *PetSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PetSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n1, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Spec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Status != nil { - dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PetSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PetSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n4, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PetSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PetSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n5, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.Template != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n6, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.ServiceName != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ServiceName))) - i += copy(dAtA[i:], *m.ServiceName) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PetSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PetSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.Replicas != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = 
uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PetSet) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PetSetList) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PetSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.VolumeClaimTemplates) > 0 { - for _, e := range m.VolumeClaimTemplates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.ServiceName != nil { - l = len(*m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PetSetStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PetSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_api_v1.ObjectMeta{} - } - if err := 
m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &PetSetSpec{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &PetSetStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PetSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_api_unversioned.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) 
& 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &PetSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PetSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Template == nil { - m.Template = &k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec{} - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, &k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) - if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ServiceName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PetSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/apps/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 479 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x93, 0xcb, 0x8e, 0xd3, 0x30, - 0x14, 0x86, 0x49, 0x3b, 0x1d, 0x05, 0x97, 0x95, 0xc5, 0x22, 0xca, 0xa2, 0xaa, 0xba, 0xca, 0x82, - 0xb1, 0x69, 0xb9, 0x68, 0xd6, 0x80, 0xb8, 0x08, 0x18, 0x2a, 0x17, 0xb1, 0x98, 0x9d, 0x9b, 0x1c, - 0x15, 0xd3, 0x24, 0xb6, 0xec, 0x93, 0x3c, 0x0b, 0x5b, 0xde, 0x86, 0xe5, 0x3c, 0x02, 0x2a, 0x1b, - 0x1e, 0x03, 0x25, 0x69, 0x3b, 0xbd, 0xce, 0x65, 0x99, 0xf8, 0x7c, 0x7f, 0xfe, 0xf3, 0xff, 0x0e, - 0x39, 0x9f, 0x9f, 0x3b, 0xa6, 0x34, 0x9f, 0x17, 0x53, 0xb0, 0x39, 0x20, 0x38, 0x6e, 0xe6, 0x33, - 0x2e, 0x8d, 0x72, 0x5c, 0x1a, 0xe3, 0x78, 0x39, 0x94, 0xa9, 0xf9, 0x2e, 0x87, 0x7c, 0x06, 0x39, - 0x58, 0x89, 0x90, 0x30, 0x63, 0x35, 0x6a, 0x1a, 0x35, 0x24, 0xbb, 0x26, 0x99, 0x99, 0xcf, 0x58, - 0x45, 0xb2, 0x8a, 0x64, 0x2b, 0x32, 0x1c, 0x1d, 0xfd, 0x06, 0xb7, 0xe0, 0x74, 0x61, 0x63, 0xd8, - 0x55, 0x0f, 0x5f, 0x1c, 0x67, 0x8a, 0xbc, 0x04, 0xeb, 0x94, 0xce, 0x21, 0xd9, 0xc3, 0x9e, 0x1c, - 0xc7, 0xca, 0xbd, 0x15, 0xc2, 0xb3, 0xc3, 0xd3, 0xb6, 0xc8, 0x51, 0x65, 0xfb, 0x9e, 0x86, 0x87, - 0xc7, 0x0b, 0x54, 0x29, 0x57, 0x39, 0x3a, 0xb4, 0xbb, 0xc8, 0xe0, 0x9f, 0x47, 0x4e, 0xc7, 0x80, - 0x13, 0x40, 0xfa, 0x86, 0xf8, 0x19, 0xa0, 0x4c, 0x24, 0xca, 0xc0, 0xeb, 0x7b, 0x51, 0x77, 0x14, - 0xb1, 0xa3, 
0x11, 0xb2, 0x72, 0xc8, 0xbe, 0x4c, 0x7f, 0x40, 0x8c, 0x9f, 0x01, 0xa5, 0x58, 0x93, - 0xf4, 0x3d, 0x39, 0x71, 0x06, 0xe2, 0xa0, 0x55, 0x2b, 0x3c, 0x67, 0x77, 0x2d, 0x81, 0x35, 0x2e, - 0x26, 0x06, 0x62, 0x51, 0x2b, 0xd0, 0x0b, 0x72, 0xea, 0x50, 0x62, 0xe1, 0x82, 0x76, 0xad, 0xf5, - 0xf2, 0xde, 0x5a, 0x35, 0x2d, 0x96, 0x2a, 0x83, 0x5f, 0x1e, 0x21, 0xcd, 0xc1, 0x27, 0xe5, 0x90, - 0x7e, 0xdc, 0x5b, 0x97, 0xdf, 0xb0, 0xee, 0x46, 0xa7, 0xac, 0xc2, 0x77, 0xb6, 0x7e, 0x4b, 0x3a, - 0x0a, 0x21, 0x73, 0x41, 0xab, 0xdf, 0x8e, 0xba, 0xa3, 0xa7, 0xf7, 0xb5, 0x2a, 0x1a, 0x7c, 0x70, - 0xd5, 0x5a, 0x79, 0xac, 0x82, 0xa0, 0x21, 0xf1, 0x2d, 0x98, 0x54, 0xc5, 0xd2, 0xd5, 0x1e, 0x3b, - 0x62, 0xfd, 0x4c, 0xc7, 0xc4, 0x77, 0x90, 0x42, 0x8c, 0xda, 0xde, 0x1e, 0xf6, 0xb6, 0x7f, 0x39, - 0x85, 0x74, 0xb2, 0x64, 0xc5, 0x5a, 0x85, 0x7e, 0x20, 0x3e, 0x42, 0x66, 0x52, 0x89, 0xb0, 0x8c, - 0xfc, 0xec, 0xe6, 0x0b, 0x30, 0xd6, 0xc9, 0xd7, 0x25, 0x50, 0xf7, 0xb6, 0xc6, 0xe9, 0x8c, 0x3c, - 0x2e, 0x75, 0x5a, 0x64, 0xf0, 0x3a, 0x95, 0x2a, 0x5b, 0x0d, 0xb9, 0xe0, 0xa4, 0x8e, 0xe7, 0xd9, - 0x2d, 0xb2, 0x95, 0x53, 0x87, 0x90, 0xe3, 0xb7, 0x6b, 0x0d, 0x71, 0x50, 0x90, 0xf6, 0x49, 0xd7, - 0x81, 0x2d, 0x55, 0x0c, 0x17, 0x32, 0x83, 0xa0, 0xd3, 0xf7, 0xa2, 0x87, 0x62, 0xf3, 0xd5, 0xe0, - 0x92, 0x3c, 0xda, 0xbc, 0x0e, 0x94, 0x11, 0xaa, 0xa7, 0xd5, 0x00, 0x24, 0xef, 0x9a, 0x9f, 0x41, - 0xe9, 0xbc, 0x4e, 0xb7, 0x2d, 0x0e, 0x9c, 0x6c, 0x75, 0xd0, 0xda, 0xee, 0xe0, 0x55, 0xf8, 0x7b, - 0xd1, 0xf3, 0xae, 0x16, 0x3d, 0xef, 0xcf, 0xa2, 0xe7, 0xfd, 0xfc, 0xdb, 0x7b, 0x70, 0xe9, 0xaf, - 0x8a, 0xfd, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x91, 0x03, 0xfc, 0xb9, 0x04, 0x00, 0x00, -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1beta1/generated.pb.go index 650fbdc6..a9bbd1ea 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1beta1/generated.pb.go @@ -1,14 +1,15 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/apps/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto + k8s.io/api/apps/v1beta1/generated.proto It has these top-level messages: + ControllerRevision + ControllerRevisionList Deployment DeploymentCondition DeploymentList @@ -18,25 +19,28 @@ DeploymentStrategy RollbackConfig RollingUpdateDeployment + RollingUpdateStatefulSetStrategy Scale ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus + StatefulSetUpdateStrategy */ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" -import _ "github.com/ericchiang/k8s/runtime" +import k8s_io_api_core_v1 "github.com/ericchiang/k8s/apis/core/v1" +import _ "github.com/ericchiang/k8s/apis/policy/v1beta1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_runtime "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" -import k8s_io_kubernetes_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" -import _ "github.com/ericchiang/k8s/apis/extensions/v1beta1" +import k8s_io_apimachinery_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" import io "io" @@ -51,11 +55,91 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Data is the serialized representation of the state. + Data *k8s_io_apimachinery_pkg_runtime.RawExtension `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` + // Revision indicates the revision of the state represented by Data. 
+ Revision *int64 `protobuf:"varint,3,opt,name=revision" json:"revision,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (m *ControllerRevision) String() string { return proto.CompactTextString(m) } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ControllerRevision) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ControllerRevision) GetData() *k8s_io_apimachinery_pkg_runtime.RawExtension { + if m != nil { + return m.Data + } + return nil +} + +func (m *ControllerRevision) GetRevision() int64 { + if m != nil && m.Revision != nil { + return *m.Revision + } + return 0 +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Items is the list of ControllerRevisions + Items []*ControllerRevision `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (m *ControllerRevisionList) String() string { return proto.CompactTextString(m) } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ControllerRevisionList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ControllerRevisionList) GetItems() []*ControllerRevision { + if m != nil { + return m.Items + } + return nil +} + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { // Standard object metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Specification of the desired behavior of the Deployment. // +optional Spec *DeploymentSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -68,9 +152,9 @@ type Deployment struct { func (m *Deployment) Reset() { *m = Deployment{} } func (m *Deployment) String() string { return proto.CompactTextString(m) } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *Deployment) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Deployment) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -98,9 +182,9 @@ type DeploymentCondition struct { // Status of the condition, one of True, False, Unknown. Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // The last time this condition was updated. 
- LastUpdateTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=lastUpdateTime" json:"lastUpdateTime,omitempty"` + LastUpdateTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=lastUpdateTime" json:"lastUpdateTime,omitempty"` // Last time the condition transitioned from one status to another. - LastTransitionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` // The reason for the condition's last transition. Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` // A human readable message indicating details about the transition. @@ -111,7 +195,7 @@ type DeploymentCondition struct { func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (m *DeploymentCondition) String() string { return proto.CompactTextString(m) } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *DeploymentCondition) GetType() string { if m != nil && m.Type != nil { @@ -127,14 +211,14 @@ func (m *DeploymentCondition) GetStatus() string { return "" } -func (m *DeploymentCondition) GetLastUpdateTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *DeploymentCondition) GetLastUpdateTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastUpdateTime } return nil } -func (m *DeploymentCondition) GetLastTransitionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *DeploymentCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTransitionTime } @@ -159,7 +243,7 @@ func (m *DeploymentCondition) GetMessage() string { type DeploymentList struct { // Standard list metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is the list of Deployments. Items []*Deployment `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -168,9 +252,9 @@ type DeploymentList struct { func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (m *DeploymentList) String() string { return proto.CompactTextString(m) } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *DeploymentList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *DeploymentList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -184,6 +268,7 @@ func (m *DeploymentList) GetItems() []*Deployment { return nil } +// DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { // Required: This must match the Name of a deployment. 
@@ -199,7 +284,7 @@ type DeploymentRollback struct { func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (m *DeploymentRollback) String() string { return proto.CompactTextString(m) } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DeploymentRollback) GetName() string { if m != nil && m.Name != nil { @@ -231,9 +316,9 @@ type DeploymentSpec struct { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` // Template describes the pods that will be created. - Template *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` + Template *k8s_io_api_core_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` // The deployment strategy to use to replace existing pods with new ones. // +optional Strategy *DeploymentStrategy `protobuf:"bytes,4,opt,name=strategy" json:"strategy,omitempty"` @@ -250,16 +335,16 @@ type DeploymentSpec struct { // Indicates that the deployment is paused. // +optional Paused *bool `protobuf:"varint,7,opt,name=paused" json:"paused,omitempty"` + // DEPRECATED. // The config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `protobuf:"bytes,8,opt,name=rollbackTo" json:"rollbackTo,omitempty"` // The maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Once autoRollback is - // implemented, the deployment controller will automatically rollback failed - // deployments. Note that progress will not be estimated during the time a - // deployment is paused. Defaults to 600s. + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. 
+ // +optional ProgressDeadlineSeconds *int32 `protobuf:"varint,9,opt,name=progressDeadlineSeconds" json:"progressDeadlineSeconds,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -267,7 +352,7 @@ type DeploymentSpec struct { func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (m *DeploymentSpec) String() string { return proto.CompactTextString(m) } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DeploymentSpec) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -276,14 +361,14 @@ func (m *DeploymentSpec) GetReplicas() int32 { return 0 } -func (m *DeploymentSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *DeploymentSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } return nil } -func (m *DeploymentSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { +func (m *DeploymentSpec) GetTemplate() *k8s_io_api_core_v1.PodTemplateSpec { if m != nil { return m.Template } @@ -349,18 +434,27 @@ type DeploymentStatus struct { // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. // +optional AvailableReplicas *int32 `protobuf:"varint,4,opt,name=availableReplicas" json:"availableReplicas,omitempty"` - // Total number of unavailable pods targeted by this deployment. + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas *int32 `protobuf:"varint,5,opt,name=unavailableReplicas" json:"unavailableReplicas,omitempty"` // Represents the latest available observations of a deployment's current state. - Conditions []*DeploymentCondition `protobuf:"bytes,6,rep,name=conditions" json:"conditions,omitempty"` - XXX_unrecognized []byte `json:"-"` + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []*DeploymentCondition `protobuf:"bytes,6,rep,name=conditions" json:"conditions,omitempty"` + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `protobuf:"varint,8,opt,name=collisionCount" json:"collisionCount,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (m *DeploymentStatus) String() string { return proto.CompactTextString(m) } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *DeploymentStatus) GetObservedGeneration() int64 { if m != nil && m.ObservedGeneration != nil { @@ -411,6 +505,13 @@ func (m *DeploymentStatus) GetConditions() []*DeploymentCondition { return nil } +func (m *DeploymentStatus) GetCollisionCount() int32 { + if m != nil && m.CollisionCount != nil { + return *m.CollisionCount + } + return 0 +} + // DeploymentStrategy describes how to replace existing pods with new ones. 
type DeploymentStrategy struct { // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. @@ -429,7 +530,7 @@ type DeploymentStrategy struct { func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (m *DeploymentStrategy) String() string { return proto.CompactTextString(m) } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DeploymentStrategy) GetType() string { if m != nil && m.Type != nil { @@ -445,8 +546,9 @@ func (m *DeploymentStrategy) GetRollingUpdate() *RollingUpdateDeployment { return nil } +// DEPRECATED. type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollbck to the last revision. + // The revision to rollback to. If set to 0, rollback to the last revision. // +optional Revision *int64 `protobuf:"varint,1,opt,name=revision" json:"revision,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -455,7 +557,7 @@ type RollbackConfig struct { func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (m *RollbackConfig) String() string { return proto.CompactTextString(m) } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RollbackConfig) GetRevision() int64 { if m != nil && m.Revision != nil { @@ -477,7 +579,7 @@ type RollingUpdateDeployment struct { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - MaxUnavailable *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=maxUnavailable" json:"maxUnavailable,omitempty"` + MaxUnavailable *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=maxUnavailable" json:"maxUnavailable,omitempty"` // The maximum number of pods that can be scheduled above the desired number of // pods. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). @@ -490,38 +592,62 @@ type RollingUpdateDeployment struct { // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is atmost 130% of desired pods. 
// +optional - MaxSurge *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=maxSurge" json:"maxSurge,omitempty"` - XXX_unrecognized []byte `json:"-"` + MaxSurge *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=maxSurge" json:"maxSurge,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (m *RollingUpdateDeployment) String() string { return proto.CompactTextString(m) } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (m *RollingUpdateDeployment) String() string { return proto.CompactTextString(m) } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{10} +} -func (m *RollingUpdateDeployment) GetMaxUnavailable() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *RollingUpdateDeployment) GetMaxUnavailable() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.MaxUnavailable } return nil } -func (m *RollingUpdateDeployment) GetMaxSurge() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *RollingUpdateDeployment) GetMaxSurge() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.MaxSurge } return nil } +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + Partition *int32 `protobuf:"varint,1,opt,name=partition" json:"partition,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (m *RollingUpdateStatefulSetStrategy) String() string { return proto.CompactTextString(m) } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} + +func (m *RollingUpdateStatefulSetStrategy) GetPartition() int32 { + if m != nil && m.Partition != nil { + return *m.Partition + } + return 0 +} + // Scale represents a scaling request for a resource. type Scale struct { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec *ScaleSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status *ScaleStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -530,9 +656,9 @@ type Scale struct { func (m *Scale) Reset() { *m = Scale{} } func (m *Scale) String() string { return proto.CompactTextString(m) } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -func (m *Scale) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Scale) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -564,7 +690,7 @@ type ScaleSpec struct { func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ScaleSpec) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -585,7 +711,7 @@ type ScaleStatus struct { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional TargetSelector *string `protobuf:"bytes,3,opt,name=targetSelector" json:"targetSelector,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -594,7 +720,7 @@ type ScaleStatus struct { func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ScaleStatus) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -617,6 +743,8 @@ func (m *ScaleStatus) GetTargetSelector() string { return "" } +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for +// more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. @@ -625,7 +753,7 @@ func (m *ScaleStatus) GetTargetSelector() string { // map to the same storage identity. type StatefulSet struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the desired identities of pods in this set. 
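// All accessors generated in this file follow the same nil-tolerant pattern as GetReplicas
// above, so callers can read fields without guarding every pointer. A minimal sketch,
// assuming it lives in the same package as these generated types:
//
//	func scaleStatusReplicas(st *ScaleStatus) int32 {
//		// Safe even when st or st.Replicas is nil; the getter returns 0.
//		return st.GetReplicas()
//	}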
// +optional Spec *StatefulSetSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -639,9 +767,9 @@ type StatefulSet struct { func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (m *StatefulSet) String() string { return proto.CompactTextString(m) } func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -func (m *StatefulSet) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *StatefulSet) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -662,20 +790,78 @@ func (m *StatefulSet) GetStatus() *StatefulSetStatus { return nil } +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // Status of the condition, one of True, False, Unknown. + Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + // The reason for the condition's last transition. + // +optional + Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message *string `protobuf:"bytes,5,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (m *StatefulSetCondition) String() string { return proto.CompactTextString(m) } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *StatefulSetCondition) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *StatefulSetCondition) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +func (m *StatefulSetCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { + if m != nil { + return m.LastTransitionTime + } + return nil +} + +func (m *StatefulSetCondition) GetReason() string { + if m != nil && m.Reason != nil { + return *m.Reason + } + return "" +} + +func (m *StatefulSetCondition) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + // StatefulSetList is a collection of StatefulSets. 
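// StatefulSetCondition values are normally consumed by scanning a status' condition list
// for a particular type. A rough sketch using only the accessors generated above (the
// helper name is hypothetical; assumed to live in this package):
//
//	func findStatefulSetCondition(conds []*StatefulSetCondition, condType string) *StatefulSetCondition {
//		for _, c := range conds {
//			if c.GetType() == condType {
//				return c
//			}
//		}
//		return nil
//	}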
type StatefulSetList struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - Items []*StatefulSet `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Items []*StatefulSet `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (m *StatefulSetList) String() string { return proto.CompactTextString(m) } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -func (m *StatefulSetList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *StatefulSetList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -691,24 +877,24 @@ func (m *StatefulSetList) GetItems() []*StatefulSet { // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { - // Replicas is the desired number of replicas of the given Template. + // replicas is the desired number of replicas of the given Template. // These are replicas in the sense that they are instantiations of the // same Template, but individual replicas also have a consistent identity. // If unspecified, defaults to 1. // TODO: Consider a rename of this field. // +optional Replicas *int32 `protobuf:"varint,1,opt,name=replicas" json:"replicas,omitempty"` - // Selector is a label query over pods that should match the replica count. + // selector is a label query over pods that should match the replica count. // If empty, defaulted to labels on the pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` - // Template is the object that describes the pod that will be created if + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest // of the StatefulSet. - Template *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` - // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + Template *k8s_io_api_core_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` + // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to // claims in a way that maintains the identity of a pod. Every claim in // this list must have at least one matching (by name) volumeMount in one @@ -716,20 +902,39 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. 
// +optional - VolumeClaimTemplates []*k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim `protobuf:"bytes,4,rep,name=volumeClaimTemplates" json:"volumeClaimTemplates,omitempty"` - // ServiceName is the name of the service that governs this StatefulSet. + VolumeClaimTemplates []*k8s_io_api_core_v1.PersistentVolumeClaim `protobuf:"bytes,4,rep,name=volumeClaimTemplates" json:"volumeClaimTemplates,omitempty"` + // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for // the network identity of the set. Pods get DNS/hostnames that follow the // pattern: pod-specific-string.serviceName.default.svc.cluster.local // where "pod-specific-string" is managed by the StatefulSet controller. - ServiceName *string `protobuf:"bytes,5,opt,name=serviceName" json:"serviceName,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServiceName *string `protobuf:"bytes,5,opt,name=serviceName" json:"serviceName,omitempty"` + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy *string `protobuf:"bytes,6,opt,name=podManagementPolicy" json:"podManagementPolicy,omitempty"` + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy *StatefulSetUpdateStrategy `protobuf:"bytes,7,opt,name=updateStrategy" json:"updateStrategy,omitempty"` + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. 
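// Because every field is a pointer, a caller wires up the new spec fields with small
// helpers. An illustrative sketch, assumed to sit in this package; strPtr/int32Ptr are
// hypothetical helpers, and "Parallel"/"RollingUpdate" are the usual upstream value strings:
//
//	func strPtr(s string) *string { return &s }
//	func int32Ptr(i int32) *int32 { return &i }
//
//	func parallelCanarySpec() *StatefulSetSpec {
//		return &StatefulSetSpec{
//			Replicas:            int32Ptr(5),
//			ServiceName:         strPtr("web"),
//			PodManagementPolicy: strPtr("Parallel"),
//			UpdateStrategy: &StatefulSetUpdateStrategy{
//				Type: strPtr("RollingUpdate"),
//				RollingUpdate: &RollingUpdateStatefulSetStrategy{
//					// In upstream Kubernetes only pods with ordinal >= the
//					// partition are moved to the updated template revision.
//					Partition: int32Ptr(3),
//				},
//			},
//			RevisionHistoryLimit: int32Ptr(10),
//		}
//	}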
+ RevisionHistoryLimit *int32 `protobuf:"varint,8,opt,name=revisionHistoryLimit" json:"revisionHistoryLimit,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (m *StatefulSetSpec) String() string { return proto.CompactTextString(m) } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *StatefulSetSpec) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -738,21 +943,21 @@ func (m *StatefulSetSpec) GetReplicas() int32 { return 0 } -func (m *StatefulSetSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *StatefulSetSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } return nil } -func (m *StatefulSetSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { +func (m *StatefulSetSpec) GetTemplate() *k8s_io_api_core_v1.PodTemplateSpec { if m != nil { return m.Template } return nil } -func (m *StatefulSetSpec) GetVolumeClaimTemplates() []*k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim { +func (m *StatefulSetSpec) GetVolumeClaimTemplates() []*k8s_io_api_core_v1.PersistentVolumeClaim { if m != nil { return m.VolumeClaimTemplates } @@ -766,20 +971,66 @@ func (m *StatefulSetSpec) GetServiceName() string { return "" } +func (m *StatefulSetSpec) GetPodManagementPolicy() string { + if m != nil && m.PodManagementPolicy != nil { + return *m.PodManagementPolicy + } + return "" +} + +func (m *StatefulSetSpec) GetUpdateStrategy() *StatefulSetUpdateStrategy { + if m != nil { + return m.UpdateStrategy + } + return nil +} + +func (m *StatefulSetSpec) GetRevisionHistoryLimit() int32 { + if m != nil && m.RevisionHistoryLimit != nil { + return *m.RevisionHistoryLimit + } + return 0 +} + // StatefulSetStatus represents the current state of a StatefulSet. type StatefulSetStatus struct { - // most recent generation observed by this StatefulSet. + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. // +optional ObservedGeneration *int64 `protobuf:"varint,1,opt,name=observedGeneration" json:"observedGeneration,omitempty"` - // Replicas is the number of actual replicas. - Replicas *int32 `protobuf:"varint,2,opt,name=replicas" json:"replicas,omitempty"` - XXX_unrecognized []byte `json:"-"` + // replicas is the number of Pods created by the StatefulSet controller. + Replicas *int32 `protobuf:"varint,2,opt,name=replicas" json:"replicas,omitempty"` + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas *int32 `protobuf:"varint,3,opt,name=readyReplicas" json:"readyReplicas,omitempty"` + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas *int32 `protobuf:"varint,4,opt,name=currentReplicas" json:"currentReplicas,omitempty"` + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. 
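// Taken together, these counters and revision names let a client decide when a rolling
// update has finished. One common check, sketched with the accessors generated below
// (same package assumed):
//
//	func statefulSetRolloutComplete(spec *StatefulSetSpec, st *StatefulSetStatus) bool {
//		return st.GetUpdateRevision() == st.GetCurrentRevision() &&
//			st.GetUpdatedReplicas() == spec.GetReplicas() &&
//			st.GetReadyReplicas() == spec.GetReplicas()
//	}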
+ UpdatedReplicas *int32 `protobuf:"varint,5,opt,name=updatedReplicas" json:"updatedReplicas,omitempty"` + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + CurrentRevision *string `protobuf:"bytes,6,opt,name=currentRevision" json:"currentRevision,omitempty"` + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision *string `protobuf:"bytes,7,opt,name=updateRevision" json:"updateRevision,omitempty"` + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `protobuf:"varint,9,opt,name=collisionCount" json:"collisionCount,omitempty"` + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []*StatefulSetCondition `protobuf:"bytes,10,rep,name=conditions" json:"conditions,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (m *StatefulSetStatus) String() string { return proto.CompactTextString(m) } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *StatefulSetStatus) GetObservedGeneration() int64 { if m != nil && m.ObservedGeneration != nil { @@ -795,76 +1046,111 @@ func (m *StatefulSetStatus) GetReplicas() int32 { return 0 } -func init() { - proto.RegisterType((*Deployment)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.DeploymentStrategy") - proto.RegisterType((*RollbackConfig)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.RollbackConfig") - proto.RegisterType((*RollingUpdateDeployment)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*Scale)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.ScaleStatus") - proto.RegisterType((*StatefulSet)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.StatefulSet") - proto.RegisterType((*StatefulSetList)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.StatefulSetList") - proto.RegisterType((*StatefulSetSpec)(nil), "github.com/ericchiang.k8s.apis.apps.v1beta1.StatefulSetSpec") - proto.RegisterType((*StatefulSetStatus)(nil), 
"github.com/ericchiang.k8s.apis.apps.v1beta1.StatefulSetStatus") +func (m *StatefulSetStatus) GetReadyReplicas() int32 { + if m != nil && m.ReadyReplicas != nil { + return *m.ReadyReplicas + } + return 0 } -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + +func (m *StatefulSetStatus) GetCurrentReplicas() int32 { + if m != nil && m.CurrentReplicas != nil { + return *m.CurrentReplicas } - return dAtA[:n], nil + return 0 } -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n1, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 +func (m *StatefulSetStatus) GetUpdatedReplicas() int32 { + if m != nil && m.UpdatedReplicas != nil { + return *m.UpdatedReplicas } - if m.Spec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 + return 0 +} + +func (m *StatefulSetStatus) GetCurrentRevision() string { + if m != nil && m.CurrentRevision != nil { + return *m.CurrentRevision } - if m.Status != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 + return "" +} + +func (m *StatefulSetStatus) GetUpdateRevision() string { + if m != nil && m.UpdateRevision != nil { + return *m.UpdateRevision } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + return "" +} + +func (m *StatefulSetStatus) GetCollisionCount() int32 { + if m != nil && m.CollisionCount != nil { + return *m.CollisionCount } - return i, nil + return 0 } -func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { +func (m *StatefulSetStatus) GetConditions() []*StatefulSetCondition { + if m != nil { + return m.Conditions + } + return nil +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. 
+ RollingUpdate *RollingUpdateStatefulSetStrategy `protobuf:"bytes,2,opt,name=rollingUpdate" json:"rollingUpdate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (m *StatefulSetUpdateStrategy) String() string { return proto.CompactTextString(m) } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} + +func (m *StatefulSetUpdateStrategy) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *StatefulSetUpdateStrategy) GetRollingUpdate() *RollingUpdateStatefulSetStrategy { + if m != nil { + return m.RollingUpdate + } + return nil +} + +func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.apps.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") +} +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -874,54 +1160,35 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Type != nil { + if m.Metadata != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - if m.Status != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) - i += copy(dAtA[i:], *m.Status) - } - if m.Reason != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(*m.Reason))) - i += copy(dAtA[i:], *m.Reason) - } - if m.Message != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) - i += copy(dAtA[i:], *m.Message) - } - if m.LastUpdateTime != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n1, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n1 } - if m.LastTransitionTime != nil { - dAtA[i] = 0x3a + if m.Data != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n2 + } + if m.Revision != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Revision)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -929,7 +1196,7 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *DeploymentList) Marshal() (dAtA []byte, err error) { +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -939,7 +1206,7 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -948,11 +1215,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n6, err := m.Metadata.MarshalTo(dAtA[i:]) + n3, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n3 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -972,7 +1239,166 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Metadata != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n4, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Spec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Status != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 
nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) + i += copy(dAtA[i:], *m.Status) + } + if m.Reason != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i += copy(dAtA[i:], *m.Reason) + } + if m.Message != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + if m.LastUpdateTime != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.LastTransitionTime != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Metadata != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n9, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -1014,11 +1440,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n7, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n10 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1050,31 +1476,31 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) + n11, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n11 } if m.Template != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(dAtA[i:]) + n12, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n12 } if m.Strategy != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n10, err := m.Strategy.MarshalTo(dAtA[i:]) + n13, err := m.Strategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n13 } if m.MinReadySeconds != nil { dAtA[i] = 0x28 @@ -1100,11 +1526,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n11, err := 
m.RollbackTo.MarshalTo(dAtA[i:]) + n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n14 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -1174,6 +1600,11 @@ func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadyReplicas)) } + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -1205,11 +1636,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n15 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1262,21 +1693,47 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n13, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n16 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n14, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n17 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Partition != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1303,31 +1760,31 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n15, err := m.Metadata.MarshalTo(dAtA[i:]) + n18, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n18 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n16, err := m.Spec.MarshalTo(dAtA[i:]) + n19, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n19 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n17, err := m.Status.MarshalTo(dAtA[i:]) + n20, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n20 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1429,31 +1886,31 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n18, err := m.Metadata.MarshalTo(dAtA[i:]) + n21, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n21 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n19, err := m.Spec.MarshalTo(dAtA[i:]) + n22, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += 
n22 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n20, err := m.Status.MarshalTo(dAtA[i:]) + n23, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n23 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1461,7 +1918,7 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -1471,32 +1928,44 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Metadata != nil { + if m.Type != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n21, err := m.Metadata.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) + i += copy(dAtA[i:], *m.Status) + } + if m.LastTransitionTime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n24 } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.Reason != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i += copy(dAtA[i:], *m.Reason) + } + if m.Message != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1504,7 +1973,50 @@ func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Metadata != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n25, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -1528,21 +2040,21 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n22, err := m.Selector.MarshalTo(dAtA[i:]) + n26, err := m.Selector.MarshalTo(dAtA[i:]) if err 
!= nil { return 0, err } - i += n22 + i += n26 } if m.Template != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n23, err := m.Template.MarshalTo(dAtA[i:]) + n27, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n27 } if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { @@ -1562,6 +2074,27 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ServiceName))) i += copy(dAtA[i:], *m.ServiceName) } + if m.PodManagementPolicy != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PodManagementPolicy))) + i += copy(dAtA[i:], *m.PodManagementPolicy) + } + if m.UpdateStrategy != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -1593,30 +2126,93 @@ func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } + if m.ReadyReplicas != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadyReplicas)) + } + if m.CurrentReplicas != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentReplicas)) + } + if m.UpdatedReplicas != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatedReplicas)) + } + if m.CurrentRevision != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CurrentRevision))) + i += copy(dAtA[i:], *m.CurrentRevision) + } + if m.UpdateRevision != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UpdateRevision))) + i += copy(dAtA[i:], *m.UpdateRevision) + } + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if 
m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1626,6 +2222,45 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *ControllerRevision) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Revision != nil { + n += 1 + sovGenerated(uint64(*m.Revision)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Deployment) Size() (n int) { var l int _ = l @@ -1791,6 +2426,9 @@ func (m *DeploymentStatus) Size() (n int) { if m.ReadyReplicas != nil { n += 1 + sovGenerated(uint64(*m.ReadyReplicas)) } + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1843,6 +2481,18 @@ func (m *RollingUpdateDeployment) Size() (n int) { return n } +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Scale) Size() (n int) { var l int _ = l @@ -1921,6 +2571,35 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = len(*m.Status) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1964,6 +2643,17 @@ func (m *StatefulSetSpec) Size() (n int) { l = len(*m.ServiceName) n += 1 + l + sovGenerated(uint64(l)) } + if m.PodManagementPolicy != nil { + l = len(*m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.UpdateStrategy != nil { + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1979,6 +2669,49 @@ func (m *StatefulSetStatus) Size() (n int) { if m.Replicas != nil { n += 1 + sovGenerated(uint64(*m.Replicas)) } + if m.ReadyReplicas != nil { + n += 1 + sovGenerated(uint64(*m.ReadyReplicas)) + } + if m.CurrentReplicas != 
nil { + n += 1 + sovGenerated(uint64(*m.CurrentReplicas)) + } + if m.UpdatedReplicas != nil { + n += 1 + sovGenerated(uint64(*m.UpdatedReplicas)) + } + if m.CurrentRevision != nil { + l = len(*m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.UpdateRevision != nil { + l = len(*m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + var l int + _ = l + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1998,7 +2731,7 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *Deployment) Unmarshal(dAtA []byte) error { +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2021,10 +2754,10 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2054,7 +2787,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2062,7 +2795,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2086,18 +2819,18 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Spec == nil { - m.Spec = &DeploymentSpec{} + if m.Data == nil { + m.Data = &k8s_io_apimachinery_pkg_runtime.RawExtension{} } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2107,15 +2840,267 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen + m.Revision = &v + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
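// A note on the literal tag bytes used throughout the Marshal and Unmarshal code in this
// file (0x8, 0xa, 0x12, 0x18, 0x1a, ...): each is a protobuf field key,
// (field_number << 3) | wire_type, where wire type 0 marks a varint and wire type 2 a
// length-delimited field. For example, ControllerRevision.Revision is field 3 with wire
// type 0, hence the 0x18 written in MarshalTo ((3<<3)|0 = 0x18), while Metadata is field 1
// with wire type 2, hence 0x0a ((1<<3)|2). The Unmarshal loops reverse this with
// fieldNum := wire >> 3 and wireType := wire & 0x7 before dispatching on the field.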
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &DeploymentSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } @@ -2324,7 +3309,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastUpdateTime == nil { - m.LastUpdateTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastUpdateTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2357,7 +3342,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastTransitionTime == nil { - m.LastTransitionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2441,7 +3426,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = 
&k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2585,51 +3570,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2639,41 +3587,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - 
intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -2806,7 +3793,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2839,7 +3826,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Template == nil { - m.Template = &k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec{} + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} } if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3194,6 +4181,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } } m.ReadyReplicas = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3457,7 +4464,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3490,7 +4497,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3518,6 +4525,77 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Scale) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3574,7 +4652,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3814,51 +4892,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3868,41 +4909,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -4012,7 +5092,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4106,7 +5186,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatefulSetList) Unmarshal(dAtA []byte) error { +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4129,17 +5209,17 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4149,28 +5229,55 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.Status = &s iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4194,19 +5301,477 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &StatefulSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LastTransitionTime == nil { + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, &k8s_io_api_core_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ServiceName = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PodManagementPolicy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateStrategy == nil { + m.UpdateStrategy = &StatefulSetUpdateStrategy{} + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -4221,7 +5786,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4244,13 +5809,33 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } @@ -4270,11 +5855,11 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } } m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4284,30 +5869,57 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + m.ReadyReplicas = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.CurrentReplicas = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 3: + m.UpdatedReplicas = &v + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4317,30 +5929,27 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Template == nil { - m.Template = &k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec{} - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.CurrentRevision = &s iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4350,28 +5959,47 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, &k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) - if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.UpdateRevision = &s iNdEx = postIndex - case 5: + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4381,21 +6009,22 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.ServiceName = &s + m.Conditions = append(m.Conditions, &StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -4419,7 +6048,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4442,17 +6071,17 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var v int64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4462,17 +6091,27 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.ObservedGeneration = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4482,12 +6121,25 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4615,83 +6267,97 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/apps/v1beta1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 1158 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xff, 0xae, 0x13, 0x37, 0xce, 0x8b, 0x9a, 0xb4, 0xd3, 0xe8, 0x1b, 0x93, 0x43, 0x54, 0xad, - 0x10, 0xcd, 0x21, 0x5d, 0x13, 0x13, 0xa9, 0xa1, 0x05, 0x09, 0x9a, 0x16, 0x9a, 0x92, 0xd2, 0xb2, - 0x4e, 0x5a, 0x84, 0x10, 0x68, 0x6c, 0xbf, 0x2c, 0x83, 0x77, 0x67, 0x57, 0x33, 0x63, 0x2b, 0x3e, - 0xf3, 0x0f, 0x70, 0xe4, 0xc6, 0x09, 0xc1, 0x3f, 0xc1, 0x9d, 0x13, 0x42, 0xfc, 0x05, 0x28, 0x9c, - 0x90, 0x10, 0x67, 0x24, 0x2e, 0x68, 0x66, 0x7f, 0xd8, 0xbb, 0xfe, 0x51, 0xc7, 0xca, 0x85, 0xdb, - 0xce, 0xcc, 0x7b, 0x9f, 0x79, 0xf3, 0xde, 0xe7, 0xfd, 0x58, 0xb8, 0xd3, 0xd9, 0x97, 0x0e, 0x0b, - 0x6b, 0x9d, 0x6e, 0x13, 0x05, 0x47, 0x85, 0xb2, 0x16, 0x75, 0xbc, 0x1a, 0x8d, 0x98, 0xac, 0xd1, - 0x28, 0x92, 0xb5, 0xde, 0x6e, 0x13, 0x15, 0xdd, 0xad, 0x79, 0xc8, 0x51, 0x50, 0x85, 0x6d, 0x27, - 0x12, 0xa1, 0x0a, 0xc9, 0xad, 0x58, 0xd1, 0x19, 0x28, 0x3a, 0x51, 0xc7, 0x73, 0xb4, 0xa2, 0xa3, - 0x15, 0x9d, 0x44, 0x71, 0xb3, 0x3e, 0xe5, 0x86, 0x00, 0x15, 0xad, 0xf5, 0x46, 0xc0, 0x37, 0x6f, - 0x8f, 0xd7, 0x11, 0x5d, 0xae, 0x58, 0x80, 0x23, 0xe2, 0x7b, 0xd3, 0xc5, 0x65, 0xeb, 0x0b, 0x0c, - 0xe8, 0x88, 0xd6, 0xee, 0x78, 0xad, 0xae, 0x62, 0x7e, 0x8d, 0x71, 0x25, 0x95, 0x18, 0x51, 0xd9, - 0x99, 0xf8, 0x96, 0x71, 0xaf, 0x78, 0x7b, 0xca, 0xcb, 0xf1, 0x4c, 0x21, 0x97, 0x2c, 0xe4, 0x13, - 0x3d, 0x6c, 0xff, 0x6d, 0x01, 0x3c, 0xc0, 0xc8, 0x0f, 0xfb, 0x01, 0x72, 0x45, 0x1e, 0x43, 0x45, - 0xbb, 0xab, 0x4d, 0x15, 0xad, 0x5a, 0x37, 0xad, 0xed, 0x95, 0xba, 0xe3, 0x4c, 0x89, 0x81, 0x96, - 0x75, 0x7a, 0xbb, 0xce, 0xd3, 0xe6, 0x97, 0xd8, 0x52, 0x4f, 0x50, 0x51, 0x37, 0xd3, 0x27, 0x1f, - 0xc0, 0xa2, 0x8c, 0xb0, 0x55, 0x2d, 0x19, 0x9c, 0x3b, 0xce, 0x8c, 0xb1, 0x74, 0x06, 0xe6, 0x34, - 0x22, 0x6c, 0xb9, 0x06, 0x84, 0x7c, 0x04, 0x57, 0xa4, 0xa2, 0xaa, 0x2b, 0xab, 0x0b, 0x06, 0xee, - 0xcd, 0x79, 0xe0, 0x0c, 0x80, 0x9b, 0x00, 0xd9, 0xdf, 0x96, 0xe0, 0xc6, 0xe0, 0xf0, 0x20, 0xe4, - 0x6d, 0xa6, 0x58, 
0xc8, 0x09, 0x81, 0x45, 0xd5, 0x8f, 0xd0, 0xbc, 0x7f, 0xd9, 0x35, 0xdf, 0xe4, - 0xff, 0xd9, 0xf5, 0x25, 0xb3, 0x9b, 0xac, 0xf4, 0xbe, 0x40, 0x2a, 0x43, 0x5e, 0x5d, 0x8c, 0xf7, - 0xe3, 0x15, 0xa9, 0xc2, 0x52, 0x80, 0x52, 0x52, 0x0f, 0xab, 0x65, 0x73, 0x90, 0x2e, 0xc9, 0x33, - 0x58, 0xf5, 0xa9, 0x54, 0x27, 0x51, 0x9b, 0x2a, 0x3c, 0x66, 0x01, 0x56, 0xaf, 0x98, 0x07, 0x6d, - 0xcf, 0xe2, 0x67, 0x2d, 0xef, 0x16, 0xf4, 0xc9, 0xc7, 0x40, 0xf4, 0xce, 0xb1, 0xa0, 0x5c, 0x9a, - 0x17, 0x18, 0xd4, 0xa5, 0x0b, 0xa2, 0x8e, 0xc1, 0xb0, 0xbf, 0xb3, 0x60, 0x75, 0xe0, 0xa1, 0x23, - 0x26, 0x15, 0x79, 0x34, 0x42, 0x90, 0x9d, 0x59, 0xae, 0xd0, 0xba, 0x05, 0x7a, 0x1c, 0x42, 0x99, - 0x29, 0x0c, 0xb4, 0x47, 0x17, 0xb6, 0x57, 0xea, 0x6f, 0xcc, 0x11, 0x50, 0x37, 0x46, 0xb0, 0x7f, - 0x2e, 0x01, 0x19, 0xda, 0x0d, 0x7d, 0xbf, 0x49, 0x5b, 0x1d, 0x1d, 0x48, 0x4e, 0x83, 0x2c, 0x90, - 0xfa, 0x9b, 0x7c, 0x65, 0x01, 0xe9, 0x1a, 0xdf, 0xb5, 0xdf, 0xe5, 0x3c, 0x54, 0x54, 0x3f, 0x36, - 0xb5, 0xa1, 0x31, 0x8f, 0x0d, 0xc9, 0x6d, 0xce, 0xc9, 0x08, 0xea, 0x43, 0xae, 0x44, 0xdf, 0x1d, - 0x73, 0x1d, 0x79, 0x01, 0x20, 0x12, 0xbd, 0xe3, 0x30, 0x61, 0xf4, 0xec, 0x09, 0x92, 0x5e, 0x79, - 0x10, 0xf2, 0x53, 0xe6, 0xb9, 0x43, 0x50, 0x9b, 0x0f, 0x61, 0x63, 0x82, 0x1d, 0xe4, 0x1a, 0x2c, - 0x74, 0xb0, 0x9f, 0x38, 0x43, 0x7f, 0x92, 0x75, 0x28, 0xf7, 0xa8, 0xdf, 0xc5, 0x84, 0xd3, 0xf1, - 0xe2, 0x6e, 0x69, 0xdf, 0xb2, 0x7f, 0x58, 0x1c, 0x0e, 0xbc, 0x4e, 0x43, 0xb2, 0x09, 0x15, 0x81, - 0x91, 0xcf, 0x5a, 0x54, 0x1a, 0x8c, 0xb2, 0x9b, 0xad, 0xc9, 0x13, 0xa8, 0x48, 0xf4, 0xb1, 0xa5, - 0x42, 0x91, 0x64, 0xfb, 0xee, 0x4c, 0xa4, 0xa0, 0x4d, 0xf4, 0x1b, 0x89, 0xa2, 0x9b, 0x41, 0x90, - 0x43, 0xa8, 0x28, 0x0c, 0x22, 0x9f, 0x2a, 0x4c, 0x7c, 0x73, 0x7b, 0x32, 0x9c, 0x06, 0x7a, 0x16, - 0xb6, 0x8f, 0x13, 0x05, 0x53, 0x32, 0x32, 0x75, 0xf2, 0x02, 0x2a, 0x52, 0xe9, 0x7a, 0xe7, 0xf5, - 0x4d, 0x86, 0xae, 0xd4, 0xef, 0xcd, 0x55, 0x38, 0x62, 0x08, 0x37, 0x03, 0x23, 0xdb, 0xb0, 0x16, - 0x30, 0xee, 0x22, 0x6d, 0xf7, 0x1b, 0xd8, 0x0a, 0x79, 0x5b, 0x9a, 0x44, 0x2f, 0xbb, 0xc5, 0x6d, - 0x52, 0x87, 0x75, 0x81, 0x3d, 0xa6, 0xcb, 0xf0, 0x23, 0x26, 0x55, 0x28, 0xfa, 0x47, 0x2c, 0x60, - 0xca, 0xa4, 0x7d, 0xd9, 0x1d, 0x7b, 0xa6, 0xcb, 0x4a, 0x44, 0xbb, 0x12, 0xdb, 0x26, 0x8d, 0x2b, - 0x6e, 0xb2, 0x2a, 0xf0, 0xa6, 0x72, 0x69, 0xbc, 0x21, 0xfb, 0xb0, 0x11, 0x89, 0xd0, 0x13, 0x28, - 0xe5, 0x03, 0xa4, 0x6d, 0x9f, 0x71, 0x4c, 0x9f, 0xb5, 0x6c, 0xec, 0x9c, 0x74, 0x6c, 0xff, 0x59, - 0x82, 0x6b, 0xc5, 0x12, 0x4b, 0x1c, 0x20, 0x61, 0x53, 0xa2, 0xe8, 0x61, 0xfb, 0xfd, 0xb8, 0xe1, - 0xb0, 0x90, 0x1b, 0xda, 0x2c, 0xb8, 0x63, 0x4e, 0x72, 0xe4, 0x2a, 0x15, 0xc8, 0xb5, 0x0d, 0x6b, - 0x49, 0x06, 0xb9, 0xa9, 0xc8, 0x42, 0xec, 0xe9, 0xc2, 0x36, 0xd9, 0x81, 0xeb, 0xb4, 0x47, 0x99, - 0x4f, 0x9b, 0x3e, 0x66, 0xb2, 0x8b, 0x46, 0x76, 0xf4, 0x80, 0xbc, 0x0e, 0x37, 0xba, 0x7c, 0x54, - 0x3e, 0x8e, 0xe2, 0xb8, 0x23, 0xf2, 0x29, 0x40, 0x2b, 0xed, 0x12, 0xb2, 0x7a, 0xc5, 0x94, 0x8c, - 0xb7, 0xe6, 0xa0, 0x53, 0xd6, 0x6a, 0xdc, 0x21, 0x3c, 0xf2, 0x2a, 0x5c, 0x15, 0x9a, 0x37, 0x99, - 0x25, 0x4b, 0xc6, 0x92, 0xfc, 0xa6, 0xfd, 0xb5, 0x35, 0x5c, 0xea, 0x52, 0x62, 0x8e, 0xed, 0x59, - 0xa7, 0x70, 0x55, 0x47, 0x98, 0x71, 0x2f, 0x2e, 0x09, 0x49, 0x6a, 0xbe, 0x73, 0x21, 0xbe, 0x64, - 0xda, 0x43, 0x15, 0x2f, 0x0f, 0x6b, 0xef, 0xc0, 0x6a, 0x9e, 0x59, 0x71, 0x38, 0x63, 0x5a, 0x27, - 0x41, 0xcf, 0xd6, 0xf6, 0x8f, 0x16, 0x6c, 0x4c, 0x00, 0x26, 0xcf, 0x61, 0x35, 0xa0, 0x67, 0x27, - 0x03, 0xd7, 0xbf, 0x64, 0x06, 0xd1, 0x53, 0x94, 0x13, 0x4f, 0x51, 0xce, 0x21, 0x57, 0x4f, 0x45, - 0x43, 0x09, 0xc6, 0x3d, 0xb7, 0x80, 0x62, 
0xa6, 0x1a, 0x7a, 0xd6, 0xe8, 0x0a, 0x2f, 0x75, 0xc2, - 0x45, 0x11, 0x33, 0x7d, 0xfb, 0x0f, 0x0b, 0xca, 0x8d, 0x16, 0x4d, 0x50, 0x2f, 0x6b, 0x56, 0x7a, - 0x2f, 0x37, 0x2b, 0xd5, 0x67, 0x0e, 0x91, 0xb1, 0x64, 0x68, 0x4c, 0x3a, 0x2a, 0x8c, 0x49, 0x7b, - 0x17, 0x44, 0xca, 0x4f, 0x48, 0xb7, 0x60, 0x39, 0xbb, 0x60, 0x5a, 0x03, 0xb0, 0xff, 0xb2, 0x60, - 0x65, 0x08, 0x60, 0x6a, 0xb3, 0xf8, 0x2c, 0xd7, 0x2c, 0x74, 0x0e, 0xdd, 0x9f, 0xc7, 0x48, 0x27, - 0x6d, 0x1c, 0x71, 0x97, 0x1d, 0x74, 0x8f, 0xd7, 0x60, 0x55, 0x51, 0xe1, 0xa1, 0x4a, 0x05, 0x8c, - 0x2b, 0x96, 0xdd, 0xc2, 0xee, 0xe6, 0x3d, 0xb8, 0x9a, 0x83, 0xb8, 0x50, 0x83, 0xfc, 0x47, 0x3f, - 0x58, 0x51, 0x85, 0xa7, 0x5d, 0xbf, 0x81, 0x97, 0x3b, 0x37, 0x1f, 0xe5, 0xb8, 0xb0, 0x3f, 0xbb, - 0x73, 0x06, 0xf6, 0x0c, 0x31, 0xc2, 0x2d, 0x30, 0xe2, 0xee, 0x5c, 0x78, 0x79, 0x5e, 0x7c, 0x6f, - 0xc1, 0xda, 0xd0, 0xe9, 0x25, 0x0f, 0x86, 0x8f, 0xf3, 0x83, 0xe1, 0xde, 0x3c, 0x06, 0xa7, 0x93, - 0xe1, 0xaf, 0xa5, 0x9c, 0xa5, 0xff, 0xe1, 0x49, 0xc6, 0x83, 0xf5, 0x5e, 0xe8, 0x77, 0x03, 0x3c, - 0xf0, 0x29, 0x0b, 0x52, 0x21, 0xdd, 0xdf, 0x5e, 0x32, 0x3d, 0x1b, 0x58, 0x14, 0x92, 0x49, 0x85, - 0x5c, 0x3d, 0x1f, 0x60, 0xb8, 0x63, 0x01, 0xc9, 0x4d, 0x58, 0xd1, 0xfd, 0x99, 0xb5, 0xf0, 0x43, - 0x3d, 0x3c, 0xc7, 0xbf, 0x2f, 0xc3, 0x5b, 0xf6, 0xe7, 0x70, 0x7d, 0x84, 0x1b, 0x97, 0xd9, 0xf2, - 0xef, 0xbf, 0xf2, 0xd3, 0xf9, 0x96, 0xf5, 0xcb, 0xf9, 0x96, 0xf5, 0xdb, 0xf9, 0x96, 0xf5, 0xcd, - 0xef, 0x5b, 0xff, 0xfb, 0x64, 0x29, 0x89, 0xf1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x68, 0x83, - 0x57, 0x14, 0x4b, 0x10, 0x00, 0x00, + // 1419 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x4b, 0x6f, 0x14, 0xc7, + 0x16, 0xbe, 0x3d, 0x0f, 0x3c, 0x73, 0x2c, 0xc6, 0x50, 0x58, 0x78, 0xb0, 0xae, 0x2c, 0xab, 0x2f, + 0x82, 0x81, 0x0b, 0x3d, 0x60, 0x22, 0xc4, 0x4b, 0x4a, 0xc0, 0x20, 0x92, 0xc8, 0x3c, 0x54, 0x63, + 0xa3, 0x08, 0x29, 0x8a, 0xca, 0x3d, 0xc5, 0x50, 0x71, 0xbf, 0x54, 0x55, 0x33, 0x61, 0x76, 0x28, + 0xca, 0x0f, 0xc8, 0x2e, 0xc9, 0x36, 0x59, 0x64, 0x95, 0x1f, 0xc1, 0x2e, 0x52, 0x36, 0xac, 0x23, + 0x25, 0x8a, 0xc8, 0x1f, 0x89, 0xaa, 0xfa, 0x31, 0xdd, 0x3d, 0xdd, 0xa6, 0x41, 0xb3, 0xc9, 0x6e, + 0xfa, 0xd4, 0xf9, 0xbe, 0xaa, 0x53, 0xe7, 0x9c, 0x3a, 0x9f, 0x0d, 0x67, 0x0f, 0xae, 0x09, 0x8b, + 0xf9, 0x7d, 0x12, 0xb0, 0x3e, 0x09, 0x02, 0xd1, 0x9f, 0x5c, 0xde, 0xa7, 0x92, 0x5c, 0xee, 0x8f, + 0xa8, 0x47, 0x39, 0x91, 0x74, 0x68, 0x05, 0xdc, 0x97, 0x3e, 0x5a, 0x0b, 0x1d, 0x2d, 0x12, 0x30, + 0x4b, 0x39, 0x5a, 0x91, 0xe3, 0xba, 0x99, 0x62, 0xb0, 0x7d, 0x4e, 0xfb, 0x93, 0x39, 0xf0, 0xfa, + 0xb9, 0x94, 0x4f, 0xe0, 0x3b, 0xcc, 0x9e, 0x96, 0xed, 0xb3, 0xfe, 0xc1, 0xcc, 0xd5, 0x25, 0xf6, + 0x73, 0xe6, 0x51, 0x3e, 0xed, 0x07, 0x07, 0x23, 0x65, 0x10, 0x7d, 0x97, 0x4a, 0x52, 0xb4, 0x41, + 0xbf, 0x0c, 0xc5, 0xc7, 0x9e, 0x64, 0x2e, 0x9d, 0x03, 0x5c, 0x7d, 0x1b, 0x40, 0xd8, 0xcf, 0xa9, + 0x4b, 0xe6, 0x70, 0x57, 0xca, 0x70, 0x63, 0xc9, 0x9c, 0x3e, 0xf3, 0xa4, 0x90, 0x3c, 0x0f, 0x32, + 0x5f, 0x19, 0x80, 0xb6, 0x7d, 0x4f, 0x72, 0xdf, 0x71, 0x28, 0xc7, 0x74, 0xc2, 0x04, 0xf3, 0x3d, + 0xb4, 0x03, 0x2d, 0x15, 0xcf, 0x90, 0x48, 0xd2, 0x35, 0x36, 0x8d, 0xde, 0xf2, 0xd6, 0x25, 0x6b, + 0x76, 0xcb, 0x09, 0xbd, 0x15, 0x1c, 0x8c, 0x94, 0x41, 0x58, 0xca, 0xdb, 0x9a, 0x5c, 0xb6, 0x1e, + 0xed, 0x7f, 0x49, 0x6d, 0xf9, 0x80, 0x4a, 0x82, 0x13, 0x06, 0x74, 0x1b, 0x1a, 0x9a, 0xa9, 0xa6, + 0x99, 0x2e, 0x96, 0x32, 0x45, 0x01, 0x5a, 0x98, 0x7c, 0x75, 0xef, 0x85, 0xa4, 0x9e, 0x3a, 0x0a, + 0xd6, 0x50, 0xb4, 0x0e, 0x2d, 0x1e, 0x1d, 0xae, 0x5b, 0xdf, 0x34, 0x7a, 0x75, 
0x9c, 0x7c, 0x9b, + 0x3f, 0x1b, 0x70, 0x72, 0x3e, 0x86, 0x1d, 0x26, 0x24, 0xfa, 0x74, 0x2e, 0x0e, 0xab, 0x5a, 0x1c, + 0x0a, 0x3d, 0x17, 0x45, 0x93, 0x49, 0xea, 0x8a, 0x6e, 0x6d, 0xb3, 0xde, 0x5b, 0xde, 0xfa, 0xbf, + 0x55, 0x52, 0x76, 0xd6, 0xfc, 0x59, 0x70, 0x88, 0x34, 0x7f, 0x37, 0x00, 0xee, 0xd2, 0xc0, 0xf1, + 0xa7, 0x2e, 0xf5, 0xe4, 0x82, 0x6f, 0xf9, 0x26, 0x34, 0x44, 0x40, 0xed, 0xe8, 0x96, 0xcf, 0x96, + 0x1e, 0x6f, 0x76, 0x80, 0x41, 0x40, 0x6d, 0xac, 0x41, 0xe8, 0x36, 0x1c, 0x11, 0x92, 0xc8, 0xb1, + 0xd0, 0xb7, 0xbb, 0xbc, 0x75, 0xae, 0x0a, 0x5c, 0x03, 0x70, 0x04, 0x34, 0x7f, 0xaa, 0xc1, 0x89, + 0xd9, 0xe2, 0xb6, 0xef, 0x0d, 0x99, 0x54, 0xb5, 0x84, 0xa0, 0x21, 0xa7, 0x01, 0xd5, 0x11, 0xb6, + 0xb1, 0xfe, 0x8d, 0x4e, 0x26, 0xdb, 0xd5, 0xb4, 0x35, 0xfa, 0x52, 0x76, 0x4e, 0x89, 0xf0, 0xbd, + 0x6e, 0x23, 0xb4, 0x87, 0x5f, 0xa8, 0x0b, 0x4b, 0x2e, 0x15, 0x82, 0x8c, 0x68, 0xb7, 0xa9, 0x17, + 0xe2, 0x4f, 0x84, 0xa1, 0xe3, 0x10, 0x21, 0xf7, 0x82, 0x21, 0x91, 0x74, 0x97, 0xb9, 0xb4, 0x7b, + 0x44, 0x07, 0x70, 0xbe, 0xda, 0x4d, 0x2a, 0x04, 0xce, 0x31, 0xa0, 0xa7, 0x80, 0x94, 0x65, 0x97, + 0x13, 0x4f, 0xe8, 0x18, 0x34, 0xef, 0xd2, 0x3b, 0xf3, 0x16, 0xb0, 0x98, 0xdf, 0x19, 0xd0, 0x99, + 0xdd, 0xd2, 0xc2, 0x8b, 0xf4, 0x7a, 0xb6, 0x48, 0xff, 0x57, 0x21, 0x8d, 0x71, 0x71, 0xfe, 0x52, + 0x03, 0x94, 0xb2, 0xfa, 0x8e, 0xb3, 0x4f, 0xec, 0x03, 0x95, 0x3e, 0x8f, 0xb8, 0x49, 0xfa, 0xd4, + 0x6f, 0x24, 0x00, 0x8d, 0xf5, 0x75, 0x0d, 0x6f, 0x7b, 0x9e, 0x2f, 0x89, 0x8a, 0x2e, 0xde, 0x72, + 0xbb, 0xca, 0x96, 0x11, 0xb9, 0xb5, 0x37, 0xc7, 0x72, 0xcf, 0x93, 0x7c, 0x8a, 0x0b, 0xe8, 0xd1, + 0x7d, 0x00, 0x1e, 0xe1, 0x76, 0xfd, 0xa8, 0x4c, 0xcb, 0xab, 0x3c, 0xde, 0x62, 0xdb, 0xf7, 0x9e, + 0xb1, 0x11, 0x4e, 0x41, 0xd7, 0xef, 0xc1, 0x5a, 0xc9, 0xbe, 0xe8, 0x18, 0xd4, 0x0f, 0xe8, 0x34, + 0x8a, 0x55, 0xfd, 0x44, 0xab, 0xd0, 0x9c, 0x10, 0x67, 0x4c, 0xa3, 0x42, 0x0d, 0x3f, 0x6e, 0xd4, + 0xae, 0x19, 0xe6, 0xcb, 0x46, 0x3a, 0x93, 0xaa, 0x97, 0xc2, 0x57, 0x2a, 0x70, 0x98, 0x4d, 0x84, + 0xe6, 0x68, 0xe2, 0xe4, 0x1b, 0x3d, 0x82, 0x96, 0xa0, 0x0e, 0xb5, 0xa5, 0xcf, 0xa3, 0x16, 0xbd, + 0x52, 0x31, 0xcb, 0x64, 0x9f, 0x3a, 0x83, 0x08, 0x8a, 0x13, 0x12, 0xf4, 0x21, 0xb4, 0x24, 0x75, + 0x03, 0x87, 0x48, 0x1a, 0xdd, 0x46, 0x26, 0xdb, 0x6a, 0xe0, 0x29, 0xf8, 0x63, 0x7f, 0xb8, 0x1b, + 0xb9, 0xe9, 0x7e, 0x4f, 0x40, 0xe8, 0x3e, 0xb4, 0x84, 0x54, 0xc3, 0x60, 0x34, 0xd5, 0xed, 0x76, + 0xd8, 0x9b, 0x96, 0xee, 0xfa, 0x10, 0x82, 0x13, 0x30, 0xea, 0xc1, 0x8a, 0xcb, 0x3c, 0x4c, 0xc9, + 0x70, 0x3a, 0xa0, 0xb6, 0xef, 0x0d, 0x85, 0xee, 0xd2, 0x26, 0xce, 0x9b, 0xd1, 0x16, 0xac, 0xc6, + 0xcf, 0xf6, 0xc7, 0x4c, 0x48, 0x9f, 0x4f, 0x77, 0x98, 0xcb, 0xa4, 0xee, 0xd9, 0x26, 0x2e, 0x5c, + 0x53, 0x6f, 0x42, 0x40, 0xc6, 0x82, 0x0e, 0x75, 0x07, 0xb6, 0x70, 0xf4, 0x95, 0xab, 0x87, 0xd6, + 0x7b, 0xd7, 0x03, 0xba, 0x06, 0x6b, 0x01, 0xf7, 0x47, 0x9c, 0x0a, 0x71, 0x97, 0x92, 0xa1, 0xc3, + 0x3c, 0x1a, 0x87, 0xd1, 0xd6, 0xe7, 0x2a, 0x5b, 0x36, 0xbf, 0xa9, 0xc3, 0xb1, 0xfc, 0x7b, 0x88, + 0x2c, 0x40, 0xfe, 0xbe, 0xa0, 0x7c, 0x42, 0x87, 0xf7, 0xc3, 0x69, 0xab, 0x86, 0x96, 0xa1, 0x87, + 0x56, 0xc1, 0x4a, 0xa6, 0x68, 0x6a, 0xb9, 0xa2, 0xe9, 0xc1, 0x4a, 0xd4, 0x09, 0x38, 0x76, 0xa9, + 0x87, 0x37, 0x9b, 0x33, 0xa3, 0x0b, 0x70, 0x9c, 0x4c, 0x08, 0x73, 0xc8, 0xbe, 0x43, 0x13, 0xdf, + 0x86, 0xf6, 0x9d, 0x5f, 0x40, 0x97, 0xe0, 0xc4, 0xd8, 0x9b, 0xf7, 0x0f, 0xb3, 0x56, 0xb4, 0x84, + 0x76, 0x00, 0xec, 0xf8, 0x49, 0x17, 0xdd, 0x23, 0xba, 0xd5, 0x2f, 0x54, 0x28, 0x97, 0x64, 0x0e, + 0xe0, 0x14, 0x1e, 0x9d, 0x86, 0xa3, 0x5c, 0xd5, 0x45, 0xb2, 0xf3, 0x92, 0xde, 0x39, 0x6b, 0x44, + 0x67, 
0xa0, 0x63, 0xfb, 0x8e, 0xa3, 0x4b, 0x62, 0xdb, 0x1f, 0x7b, 0x52, 0x67, 0xb9, 0x89, 0x73, + 0x56, 0xf3, 0xa5, 0x91, 0x7e, 0xb9, 0xe2, 0x02, 0x2d, 0x1c, 0x3c, 0x4f, 0xe0, 0xa8, 0xca, 0x3c, + 0xf3, 0x46, 0xe1, 0x13, 0x10, 0xb5, 0xe2, 0xa5, 0x43, 0xeb, 0x26, 0xf1, 0x4e, 0xbd, 0x60, 0x59, + 0x1a, 0xf3, 0x02, 0x74, 0xb2, 0x15, 0x96, 0x51, 0x2c, 0x46, 0x4e, 0xb1, 0xbc, 0x32, 0x60, 0xad, + 0x84, 0x18, 0x7d, 0x06, 0x1d, 0x97, 0xbc, 0xd8, 0x9b, 0xa5, 0xe0, 0xad, 0xd2, 0x40, 0xe9, 0x3b, + 0x2b, 0xd4, 0x77, 0xd6, 0x27, 0x9e, 0x7c, 0xc4, 0x07, 0x92, 0x33, 0x6f, 0x84, 0x73, 0x3c, 0x5a, + 0x6e, 0x90, 0x17, 0x83, 0x31, 0x1f, 0x15, 0x85, 0x5d, 0x8d, 0x33, 0x61, 0x30, 0x3f, 0x82, 0xcd, + 0x4c, 0x08, 0xaa, 0xfa, 0xe9, 0xb3, 0xb1, 0x33, 0xa0, 0xb3, 0x0c, 0xfc, 0x17, 0xda, 0x01, 0xe1, + 0x92, 0x25, 0x1d, 0xd0, 0xc4, 0x33, 0x83, 0xf9, 0x9b, 0x01, 0xcd, 0x81, 0x4d, 0xa2, 0x93, 0x2d, + 0x4e, 0x08, 0x5d, 0xcd, 0x08, 0x21, 0xb3, 0x34, 0xb5, 0x7a, 0xef, 0x94, 0x06, 0xba, 0x95, 0xd3, + 0x40, 0xa7, 0xdf, 0x82, 0xcc, 0xca, 0x9f, 0xb3, 0xd0, 0x4e, 0x08, 0x0f, 0x1b, 0x04, 0xe6, 0x9f, + 0x06, 0x2c, 0xa7, 0x08, 0x0e, 0x1d, 0x1a, 0x0f, 0x33, 0x43, 0x43, 0xf5, 0xdc, 0x56, 0x95, 0x43, + 0x59, 0xf1, 0xb8, 0x08, 0xa7, 0xe9, 0x6c, 0x66, 0x9c, 0x81, 0x8e, 0x24, 0x7c, 0x44, 0x65, 0xec, + 0xa0, 0x43, 0x6d, 0xe3, 0x9c, 0x75, 0xfd, 0x26, 0x1c, 0xcd, 0x50, 0xbc, 0xd3, 0x60, 0xfc, 0x43, + 0x05, 0x38, 0xab, 0x86, 0x05, 0x67, 0xf7, 0x56, 0x26, 0xbb, 0xbd, 0xf2, 0xeb, 0x48, 0xd5, 0xe3, + 0x2c, 0xc7, 0x77, 0x72, 0x39, 0x3e, 0x5f, 0x09, 0x9f, 0xcd, 0xf4, 0x6b, 0x03, 0x56, 0x53, 0xab, + 0xef, 0xa7, 0x74, 0x8b, 0x35, 0x66, 0x7d, 0x11, 0x1a, 0xf3, 0xdd, 0x55, 0xb4, 0xf9, 0x83, 0x01, + 0x2b, 0xa9, 0x90, 0x16, 0x2e, 0x4b, 0x6f, 0x64, 0x65, 0xe9, 0xe9, 0x2a, 0xb7, 0x1e, 0xeb, 0xd2, + 0xaf, 0x1b, 0x99, 0xb3, 0xfd, 0x0b, 0x85, 0xd6, 0xe7, 0xb0, 0x3a, 0xf1, 0x9d, 0xb1, 0x4b, 0xb7, + 0x1d, 0xc2, 0xdc, 0xd8, 0x49, 0x8d, 0xe7, 0x7a, 0xfe, 0x4f, 0xad, 0x84, 0x8c, 0x72, 0xc1, 0x84, + 0xa4, 0x9e, 0x7c, 0x32, 0x43, 0xe2, 0x42, 0x1a, 0xb4, 0x09, 0xcb, 0x4a, 0x54, 0x30, 0x9b, 0x3e, + 0x54, 0x42, 0x3d, 0x4c, 0x6d, 0xda, 0xa4, 0xc6, 0x7d, 0xe0, 0x0f, 0x1f, 0x10, 0x8f, 0x8c, 0xa8, + 0x1a, 0x32, 0x8f, 0xf5, 0x3f, 0x3a, 0xb4, 0xea, 0x6a, 0xe3, 0xa2, 0x25, 0xf4, 0x14, 0x3a, 0xe3, + 0xe8, 0x59, 0x8f, 0x14, 0x62, 0xf8, 0xe7, 0xcf, 0x56, 0x95, 0xcc, 0xed, 0x65, 0x90, 0x38, 0xc7, + 0x54, 0x2a, 0x02, 0x5b, 0xe5, 0x22, 0xd0, 0xfc, 0xb1, 0x0e, 0xc7, 0xe7, 0x3a, 0x72, 0xa1, 0x52, + 0x6b, 0x4e, 0x92, 0xd4, 0x8b, 0x24, 0x49, 0x0f, 0x56, 0xec, 0x31, 0xe7, 0x4a, 0x01, 0x64, 0x45, + 0x56, 0xde, 0x5c, 0x24, 0xdd, 0x9a, 0xc5, 0xd2, 0x2d, 0xcd, 0x19, 0x09, 0x86, 0x30, 0x33, 0x79, + 0xb3, 0x7a, 0xbe, 0x43, 0x70, 0xe2, 0xb8, 0x14, 0x3e, 0xdf, 0x59, 0x6b, 0x81, 0x70, 0x6a, 0x17, + 0x09, 0x27, 0xf4, 0x20, 0x23, 0xea, 0x40, 0x97, 0xe3, 0xc5, 0x2a, 0x19, 0x2e, 0x54, 0x75, 0xe6, + 0xb7, 0x06, 0x9c, 0x2a, 0x2d, 0x83, 0xc2, 0xd7, 0xf1, 0x8b, 0x62, 0x39, 0x76, 0xbd, 0x9a, 0x1c, + 0x2b, 0x90, 0x1c, 0x39, 0x5d, 0x76, 0xe7, 0xd4, 0xaf, 0x6f, 0x36, 0x8c, 0xd7, 0x6f, 0x36, 0x8c, + 0xbf, 0xde, 0x6c, 0x18, 0xdf, 0xff, 0xbd, 0xf1, 0x9f, 0xa7, 0x4b, 0x11, 0xd5, 0x3f, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x8e, 0x93, 0xe2, 0x8c, 0x60, 0x14, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1beta1/register.go new file mode 100644 index 00000000..c1b04dc5 --- /dev/null +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/apps/v1beta1/register.go @@ -0,0 +1,13 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("apps", "v1beta1", "controllerrevisions", true, &ControllerRevision{}) + k8s.Register("apps", "v1beta1", "deployments", true, &Deployment{}) + k8s.Register("apps", "v1beta1", "statefulsets", true, &StatefulSet{}) + + k8s.RegisterList("apps", "v1beta1", "controllerrevisions", true, &ControllerRevisionList{}) + k8s.RegisterList("apps", "v1beta1", "deployments", true, &DeploymentList{}) + k8s.RegisterList("apps", "v1beta1", "statefulsets", true, &StatefulSetList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1/generated.pb.go index 27d8742d..c0f9cf35 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/authentication/v1/generated.proto /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto + k8s.io/api/authentication/v1/generated.proto It has these top-level messages: ExtraValue @@ -20,7 +19,7 @@ package v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" @@ -63,7 +62,7 @@ func (m *ExtraValue) GetItems() []string { // plugin in the kube-apiserver. type TokenReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated Spec *TokenReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is filled in by the server and indicates whether the request can be authenticated. 
@@ -77,7 +76,7 @@ func (m *TokenReview) String() string { return proto.CompactTextStrin func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *TokenReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *TokenReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -212,11 +211,11 @@ func (m *UserInfo) GetExtra() map[string]*ExtraValue { } func init() { - proto.RegisterType((*ExtraValue)(nil), "github.com/ericchiang.k8s.apis.authentication.v1.ExtraValue") - proto.RegisterType((*TokenReview)(nil), "github.com/ericchiang.k8s.apis.authentication.v1.TokenReview") - proto.RegisterType((*TokenReviewSpec)(nil), "github.com/ericchiang.k8s.apis.authentication.v1.TokenReviewSpec") - proto.RegisterType((*TokenReviewStatus)(nil), "github.com/ericchiang.k8s.apis.authentication.v1.TokenReviewStatus") - proto.RegisterType((*UserInfo)(nil), "github.com/ericchiang.k8s.apis.authentication.v1.UserInfo") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") } func (m *ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -455,24 +454,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -736,7 +717,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1190,51 +1171,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if 
postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]*ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *ExtraValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1244,46 +1188,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = mapvalue - } else { - var mapvalue *ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1413,40 +1396,40 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/authentication/v1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 481 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x52, 0xdd, 0x8a, 0xd3, 0x40, - 0x14, 0x36, 0xfd, 0x59, 0xda, 0x29, 0xa2, 0x0e, 0x8b, 0x84, 0x5e, 0x94, 0x12, 0x04, 0x7b, 0xa1, - 0x13, 0x52, 0x44, 0x8a, 0x28, 0xa2, 0xb0, 0x17, 0x2e, 
0xa8, 0x38, 0xfe, 0x2c, 0x78, 0x37, 0x9b, - 0x1e, 0xbb, 0x63, 0x36, 0x93, 0x30, 0x73, 0x26, 0xba, 0x0f, 0xe0, 0x3b, 0x08, 0xe2, 0xfb, 0x78, - 0xe9, 0x23, 0x48, 0x7d, 0x11, 0x99, 0x49, 0xdc, 0xbf, 0x96, 0x42, 0xf7, 0x2e, 0xe7, 0x70, 0xbe, - 0x9f, 0xf9, 0xf2, 0x91, 0xc7, 0xd9, 0xcc, 0x30, 0x59, 0xc4, 0x99, 0x3d, 0x04, 0xad, 0x00, 0xc1, - 0xc4, 0x65, 0xb6, 0x88, 0x45, 0x29, 0x4d, 0x2c, 0x2c, 0x1e, 0x81, 0x42, 0x99, 0x0a, 0x94, 0x85, - 0x8a, 0xab, 0x24, 0x5e, 0x80, 0x02, 0x2d, 0x10, 0xe6, 0xac, 0xd4, 0x05, 0x16, 0xf4, 0x5e, 0x8d, - 0x66, 0x67, 0x68, 0x56, 0x66, 0x0b, 0xe6, 0xd0, 0xec, 0x22, 0x9a, 0x55, 0xc9, 0x70, 0xba, 0x41, - 0x2b, 0x07, 0x14, 0x6b, 0x14, 0x86, 0xf7, 0xd7, 0x63, 0xb4, 0x55, 0x28, 0x73, 0x58, 0x39, 0x7f, - 0xb0, 0xf9, 0xdc, 0xa4, 0x47, 0x90, 0x8b, 0x15, 0x54, 0xb2, 0x1e, 0x65, 0x51, 0x1e, 0xc7, 0x52, - 0xa1, 0x41, 0x7d, 0x19, 0x12, 0x45, 0x84, 0xec, 0x7d, 0x45, 0x2d, 0x3e, 0x88, 0x63, 0x0b, 0x74, - 0x97, 0x74, 0x25, 0x42, 0x6e, 0xc2, 0x60, 0xdc, 0x9e, 0xf4, 0x79, 0x3d, 0x44, 0xdf, 0x5a, 0x64, - 0xf0, 0xae, 0xc8, 0x40, 0x71, 0xa8, 0x24, 0x7c, 0xa1, 0xfb, 0xa4, 0xe7, 0x9e, 0x39, 0x17, 0x28, - 0xc2, 0x60, 0x1c, 0x4c, 0x06, 0x53, 0xc6, 0x36, 0x04, 0xe8, 0x6e, 0x59, 0x95, 0xb0, 0xd7, 0x87, - 0x9f, 0x21, 0xc5, 0x97, 0x80, 0x82, 0x9f, 0xe2, 0xe9, 0x1b, 0xd2, 0x31, 0x25, 0xa4, 0x61, 0xcb, - 0xf3, 0x3c, 0x61, 0xdb, 0xfc, 0x08, 0x76, 0xce, 0xd4, 0xdb, 0x12, 0x52, 0xee, 0xa9, 0xe8, 0x01, - 0xd9, 0x31, 0x28, 0xd0, 0x9a, 0xb0, 0xed, 0x49, 0x9f, 0x5e, 0x9d, 0xd4, 0xd3, 0xf0, 0x86, 0x2e, - 0xba, 0x4b, 0x6e, 0x5c, 0x52, 0x74, 0x81, 0xa1, 0x5b, 0xf9, 0x1c, 0xfa, 0xbc, 0x1e, 0xa2, 0x1f, - 0x01, 0xb9, 0xb5, 0x42, 0x43, 0xef, 0x90, 0xeb, 0xe7, 0xd4, 0x60, 0xee, 0x31, 0x3d, 0x7e, 0x71, - 0x49, 0xf7, 0x49, 0xc7, 0x1a, 0xd0, 0x4d, 0x20, 0x0f, 0xb7, 0xf3, 0xfe, 0xde, 0x80, 0x7e, 0xa1, - 0x3e, 0x15, 0xdc, 0x73, 0x38, 0x77, 0xa0, 0x75, 0xa1, 0x7d, 0x10, 0x7d, 0x5e, 0x0f, 0xd1, 0xcf, - 0x16, 0xe9, 0xfd, 0x3f, 0xa4, 0x43, 0xd2, 0x73, 0xa7, 0x4a, 0xe4, 0xd0, 0xbc, 0xe1, 0x74, 0xa6, - 0x37, 0x49, 0xdb, 0xca, 0xb9, 0x77, 0xd2, 0xe7, 0xee, 0x93, 0xde, 0x26, 0x3b, 0x0b, 0x5d, 0xd8, - 0xd2, 0x45, 0xeb, 0x0a, 0xd2, 0x4c, 0xf4, 0x80, 0x74, 0xc1, 0xb5, 0x28, 0xec, 0x8c, 0xdb, 0x93, - 0xc1, 0xf4, 0xd9, 0xd5, 0x5c, 0x33, 0xdf, 0xc4, 0x3d, 0x85, 0xfa, 0x84, 0xd7, 0x7c, 0x43, 0xdd, - 0xd4, 0xd3, 0x2f, 0x9d, 0xa1, 0x0c, 0x4e, 0x1a, 0x9f, 0xee, 0x93, 0xbe, 0x22, 0xdd, 0xca, 0x35, - 0xb7, 0x89, 0x6b, 0xb6, 0x9d, 0xf0, 0x59, 0xf3, 0x79, 0x4d, 0xf3, 0xa8, 0x35, 0x0b, 0x9e, 0xef, - 0xfe, 0x5a, 0x8e, 0x82, 0xdf, 0xcb, 0x51, 0xf0, 0x67, 0x39, 0x0a, 0xbe, 0xff, 0x1d, 0x5d, 0xfb, - 0xd8, 0xaa, 0x92, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xcf, 0x29, 0xf1, 0x61, 0x04, 0x00, - 0x00, + // 486 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x8a, 0x13, 0x41, + 0x10, 0xc7, 0xed, 0x7c, 0x2c, 0x49, 0x07, 0x51, 0x9b, 0x45, 0x86, 0x20, 0x21, 0x0c, 0xa2, 0x39, + 0x68, 0x8f, 0xb3, 0x8a, 0x2c, 0x7b, 0x10, 0x14, 0x96, 0x45, 0x50, 0x84, 0xf6, 0xe3, 0xe0, 0xad, + 0x77, 0x52, 0x26, 0xed, 0xec, 0xf4, 0x0c, 0xdd, 0x35, 0xa3, 0x79, 0x06, 0x5f, 0xc0, 0x47, 0xf2, + 0xe8, 0x23, 0x48, 0x3c, 0xf8, 0x14, 0x82, 0x74, 0xcf, 0x98, 0xec, 0x6e, 0xdc, 0xb8, 0xb7, 0xae, + 0xa2, 0x7e, 0xff, 0xfa, 0x57, 0x75, 0xd1, 0x7b, 0xe9, 0xbe, 0xe5, 0x2a, 0x8f, 0x64, 0xa1, 0x22, + 0x59, 0xe2, 0x1c, 0x34, 0xaa, 0x44, 0xa2, 0xca, 0x75, 0x54, 0xc5, 0xd1, 0x0c, 0x34, 0x18, 0x89, + 0x30, 0xe5, 0x85, 0xc9, 0x31, 0x67, 0xb7, 0xea, 0x6a, 0x2e, 0x0b, 0xc5, 0xcf, 0x56, 0xf3, 0x2a, + 0x1e, 0x3e, 0x5a, 0x6b, 
0x65, 0x32, 0x99, 0x2b, 0x0d, 0x66, 0x11, 0x15, 0xe9, 0xcc, 0x25, 0x6c, + 0x94, 0x01, 0xca, 0x7f, 0x68, 0x0e, 0xa3, 0x8b, 0x28, 0x53, 0x6a, 0x54, 0x19, 0x6c, 0x00, 0x8f, + 0xff, 0x07, 0xd8, 0x64, 0x0e, 0x99, 0xdc, 0xe0, 0x1e, 0x5e, 0xc4, 0x95, 0xa8, 0x4e, 0x22, 0xa5, + 0xd1, 0xa2, 0x39, 0x0f, 0x85, 0x21, 0xa5, 0x87, 0x9f, 0xd1, 0xc8, 0x77, 0xf2, 0xa4, 0x04, 0xb6, + 0x4b, 0xbb, 0x0a, 0x21, 0xb3, 0x01, 0x19, 0xb7, 0x27, 0x7d, 0x51, 0x07, 0xe1, 0x2f, 0x42, 0x07, + 0x6f, 0xf2, 0x14, 0xb4, 0x80, 0x4a, 0xc1, 0x27, 0xf6, 0x82, 0xf6, 0xdc, 0xb0, 0x53, 0x89, 0x32, + 0x20, 0x63, 0x32, 0x19, 0xec, 0x3d, 0xe0, 0xeb, 0xc5, 0xad, 0x7a, 0xf3, 0x22, 0x9d, 0xb9, 0x84, + 0xe5, 0xae, 0x9a, 0x57, 0x31, 0x7f, 0x75, 0xfc, 0x11, 0x12, 0x7c, 0x09, 0x28, 0xc5, 0x4a, 0x81, + 0x3d, 0xa5, 0x1d, 0x5b, 0x40, 0x12, 0xb4, 0xbc, 0xd2, 0x7d, 0xbe, 0xed, 0x0b, 0xf8, 0x29, 0x1b, + 0xaf, 0x0b, 0x48, 0x84, 0x47, 0xd9, 0x11, 0xdd, 0xb1, 0x28, 0xb1, 0xb4, 0x41, 0xdb, 0x8b, 0x44, + 0x97, 0x17, 0xf1, 0x98, 0x68, 0xf0, 0xf0, 0x2e, 0xbd, 0x76, 0xae, 0x83, 0x5b, 0x09, 0xba, 0x94, + 0x9f, 0xb4, 0x2f, 0xea, 0x20, 0xfc, 0x42, 0xe8, 0x8d, 0x0d, 0x19, 0x76, 0x9b, 0x5e, 0x3d, 0xd5, + 0x0d, 0xa6, 0x9e, 0xe9, 0x89, 0xb3, 0x49, 0x76, 0x40, 0x3b, 0xa5, 0x05, 0xd3, 0x0c, 0x7c, 0x67, + 0xbb, 0xd7, 0xb7, 0x16, 0xcc, 0x73, 0xfd, 0x21, 0x17, 0x9e, 0x71, 0x6e, 0xc0, 0x98, 0xdc, 0xf8, + 0x41, 0xfb, 0xa2, 0x0e, 0xc2, 0xdf, 0x84, 0xf6, 0xfe, 0x16, 0xb2, 0x21, 0xed, 0xb9, 0x52, 0x2d, + 0x33, 0x68, 0x3c, 0xaf, 0x62, 0x76, 0x9d, 0xb6, 0x4b, 0x35, 0xf5, 0x9d, 0xfb, 0xc2, 0x3d, 0xd9, + 0x4d, 0xba, 0x33, 0x33, 0x79, 0x59, 0xb8, 0xd5, 0xb9, 0x2f, 0x6f, 0x22, 0x76, 0x44, 0xbb, 0xe0, + 0xee, 0x22, 0xe8, 0x8c, 0xdb, 0x93, 0xc1, 0x5e, 0x7c, 0x39, 0x97, 0xdc, 0xdf, 0xd2, 0xa1, 0x46, + 0xb3, 0x10, 0x35, 0x3f, 0x3c, 0x6e, 0x0e, 0xcc, 0x27, 0x9d, 0x81, 0x14, 0x16, 0x8d, 0x2f, 0xf7, + 0x64, 0x4f, 0x68, 0xb7, 0x72, 0xb7, 0xd7, 0xac, 0x63, 0xb2, 0xbd, 0xd1, 0xfa, 0x56, 0x45, 0x8d, + 0x1d, 0xb4, 0xf6, 0xc9, 0xb3, 0xdd, 0x6f, 0xcb, 0x11, 0xf9, 0xbe, 0x1c, 0x91, 0x1f, 0xcb, 0x11, + 0xf9, 0xfa, 0x73, 0x74, 0xe5, 0x7d, 0xab, 0x8a, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xd3, + 0x3d, 0xc1, 0xfb, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1/register.go new file mode 100644 index 00000000..7a407ae0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1/register.go @@ -0,0 +1,7 @@ +package v1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("authentication.k8s.io", "v1", "tokenreviews", false, &TokenReview{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1beta1/generated.pb.go index 65e8d9e2..79e067b9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1beta1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/authentication/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto + k8s.io/api/authentication/v1beta1/generated.proto It has these top-level messages: ExtraValue @@ -20,11 +19,10 @@ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -64,7 +62,7 @@ func (m *ExtraValue) GetItems() []string { // plugin in the kube-apiserver. type TokenReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated Spec *TokenReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is filled in by the server and indicates whether the request can be authenticated. @@ -78,7 +76,7 @@ func (m *TokenReview) String() string { return proto.CompactTextStrin func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *TokenReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *TokenReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -213,11 +211,11 @@ func (m *UserInfo) GetExtra() map[string]*ExtraValue { } func init() { - proto.RegisterType((*ExtraValue)(nil), "github.com/ericchiang.k8s.apis.authentication.v1beta1.ExtraValue") - proto.RegisterType((*TokenReview)(nil), "github.com/ericchiang.k8s.apis.authentication.v1beta1.TokenReview") - proto.RegisterType((*TokenReviewSpec)(nil), "github.com/ericchiang.k8s.apis.authentication.v1beta1.TokenReviewSpec") - proto.RegisterType((*TokenReviewStatus)(nil), "github.com/ericchiang.k8s.apis.authentication.v1beta1.TokenReviewStatus") - proto.RegisterType((*UserInfo)(nil), "github.com/ericchiang.k8s.apis.authentication.v1beta1.UserInfo") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") } func (m *ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -456,24 +454,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - 
dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -737,7 +717,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1191,51 +1171,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]*ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *ExtraValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1245,46 +1188,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx 
= entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = mapvalue - } else { - var mapvalue *ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1414,41 +1396,40 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/authentication/v1beta1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 498 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0x71, 0x3e, 0x4a, 0xb2, 0x11, 0x02, 0x56, 0x08, 0x99, 0x1c, 0xa2, 0xc8, 0x42, 0x22, - 0x07, 0x58, 0xcb, 0x11, 0x87, 0x0a, 0xc4, 0x81, 0x8a, 0x22, 0x81, 0x84, 0x2a, 0x6d, 0xa1, 0x07, - 0xd4, 0xcb, 0xc6, 0x19, 0xd2, 0xc5, 0xf5, 0xda, 0xda, 0x1d, 0xbb, 0xf4, 0x29, 0xb8, 0x72, 0xe4, - 0xc6, 0xab, 0x70, 0xe4, 0x11, 0x50, 0x78, 0x11, 0xb4, 0x6b, 0xd3, 0x0f, 0x1c, 0x22, 0xd1, 0xdc, - 0x76, 0x46, 0xf3, 0xff, 0xcd, 0xcc, 0xdf, 0x63, 0xf2, 0x3c, 0xd9, 0x36, 0x4c, 0x66, 0x61, 0x52, - 0xcc, 0x40, 0x2b, 0x40, 0x30, 0x61, 0x9e, 0x2c, 0x42, 0x91, 0x4b, 0x13, 0x8a, 0x02, 0x8f, 0x40, - 0xa1, 0x8c, 0x05, 0xca, 0x4c, 0x85, 0x65, 0x34, 0x03, 0x14, 0x51, 0xb8, 0x00, 0x05, 0x5a, 0x20, - 0xcc, 0x59, 0xae, 0x33, 0xcc, 0x68, 0x54, 0x21, 0xd8, 0x39, 0x82, 0xe5, 0xc9, 0x82, 0x59, 0x04, - 0xbb, 0x8c, 0x60, 0x35, 0x62, 0x38, 0x5d, 0xd3, 0x35, 0x05, 0x14, 0x61, 0xd9, 0x68, 0x33, 0x7c, - 0xb4, 0x5a, 0xa3, 0x0b, 0x85, 0x32, 0x85, 0x46, 0xf9, 0xe3, 0xf5, 0xe5, 0x26, 0x3e, 0x82, 0x54, - 0x34, 0x54, 0xd1, 0x6a, 0x55, 0x81, 0xf2, 0x38, 0x94, 0x0a, 0x0d, 0xea, 0x86, 0xe4, 0xe1, 0x3f, - 0x77, 0x59, 0xb1, 0x45, 0x10, 0x10, 0xb2, 0xfb, 0x09, 0xb5, 0x38, 0x10, 0xc7, 0x05, 0xd0, 0x3b, - 0xa4, 0x2b, 0x11, 0x52, 0xe3, 0x7b, 0xe3, 0xf6, 0xa4, 0xcf, 0xab, 0x20, 0xf8, 0xdc, 0x22, 0x83, - 0xb7, 0x59, 0x02, 0x8a, 0x43, 0x29, 0xe1, 0x84, 0xbe, 0x26, 0x3d, 0x6b, 0xca, 0x5c, 0xa0, 0xf0, - 0xbd, 0xb1, 0x37, 0x19, 0x4c, 0x19, 0x5b, 0xe3, 0xb9, 0xad, 0x65, 0x65, 0xc4, 0xf6, 0x66, 0x1f, - 0x21, 0xc6, 0x37, 0x80, 0x82, 0x9f, 0xe9, 0xe9, 0x01, 0xe9, 0x98, 0x1c, 0x62, 0xbf, 0xe5, 0x38, - 0x3b, 0xec, 0xbf, 0xbf, 0x1d, 0xbb, 0x30, 0xd9, 0x7e, 0x0e, 0x31, 0x77, 0x3c, 0x7a, 0x48, 0xb6, - 0x0c, 0x0a, 0x2c, 0x8c, 0xdf, 0x76, 0xe4, 0x17, 0x1b, 0x92, 0x1d, 0x8b, 0xd7, 0xcc, 0xe0, 0x01, - 0xb9, 0xf9, 0x57, 0x5b, 0x6b, 0x1d, 0xda, 0x94, 0x73, 0xa4, 0xcf, 0xab, 0x20, 0xf8, 0xea, 0x91, - 0xdb, 0x0d, 0x0c, 0xbd, 0x4f, 0x6e, 0x5c, 0x68, 0x09, 0x73, 0xa7, 0xe9, 0xf1, 0xcb, 0x49, 0xba, - 0x47, 0x3a, 0x85, 0x01, 0x5d, 0x5b, 0xf3, 0xf4, 0x0a, 0x0b, 0xbc, 0x33, 0xa0, 0x5f, 0xa9, 0x0f, - 0x19, 0x77, 0x20, 0x3b, 0x22, 0x68, 0x9d, 0x69, 0x67, 0x49, 0x9f, 0x57, 0x41, 0xf0, 0xad, 0x45, - 0x7a, 0x7f, 0x0a, 0xe9, 0x90, 0xf4, 0x6c, 0xa9, 0x12, 0x29, 0xd4, 0x8b, 0x9c, 0xc5, 0xf4, 0x16, - 0x69, 0x17, 0x72, 0xee, 0xc6, 0xe9, 0x73, 0xfb, 0xa4, 0x77, 0xc9, 0xd6, 0x42, 0x67, 
0x45, 0x6e, - 0x4d, 0xb6, 0xf7, 0x52, 0x47, 0xf4, 0x90, 0x74, 0xc1, 0x1e, 0x95, 0xdf, 0x19, 0xb7, 0x27, 0x83, - 0xe9, 0xcb, 0x0d, 0x46, 0x67, 0xee, 0x3a, 0x77, 0x15, 0xea, 0x53, 0x5e, 0x41, 0x87, 0x27, 0xf5, - 0xc9, 0xba, 0xa4, 0x9d, 0x2a, 0x81, 0xd3, 0x7a, 0x58, 0xfb, 0xa4, 0xfb, 0xa4, 0x5b, 0xda, 0x6b, - 0xae, 0x8d, 0x7b, 0x76, 0x85, 0xee, 0xe7, 0xbf, 0x04, 0xaf, 0x58, 0x4f, 0x5a, 0xdb, 0xde, 0xce, - 0xbd, 0xef, 0xcb, 0x91, 0xf7, 0x63, 0x39, 0xf2, 0x7e, 0x2e, 0x47, 0xde, 0x97, 0x5f, 0xa3, 0x6b, - 0xef, 0xaf, 0xd7, 0x82, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x43, 0x77, 0xd5, 0x8a, 0xb7, 0x04, - 0x00, 0x00, + // 492 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xdf, 0x6a, 0x13, 0x41, + 0x14, 0xc6, 0xdd, 0xfc, 0xa9, 0xc9, 0x04, 0x51, 0x07, 0x91, 0x98, 0x8b, 0x10, 0x17, 0xc1, 0x80, + 0x38, 0x6b, 0x62, 0x29, 0xc5, 0x1b, 0x41, 0xa9, 0x20, 0x44, 0x84, 0xf1, 0xcf, 0x85, 0x77, 0xd3, + 0xcd, 0x71, 0x33, 0x6e, 0x77, 0x76, 0x99, 0x39, 0xbb, 0x9a, 0x27, 0xf0, 0xd6, 0x4b, 0x1f, 0xc9, + 0x4b, 0x1f, 0x41, 0xe2, 0x13, 0xf8, 0x06, 0x32, 0xb3, 0xd3, 0xa4, 0x6d, 0x28, 0x6d, 0xef, 0xf6, + 0x1c, 0xe6, 0xf7, 0x9d, 0xef, 0x9c, 0xfd, 0xc8, 0x24, 0xdd, 0x37, 0x4c, 0xe6, 0x91, 0x28, 0x64, + 0x24, 0x4a, 0x5c, 0x80, 0x42, 0x19, 0x0b, 0x94, 0xb9, 0x8a, 0xaa, 0xc9, 0x21, 0xa0, 0x98, 0x44, + 0x09, 0x28, 0xd0, 0x02, 0x61, 0xce, 0x0a, 0x9d, 0x63, 0x4e, 0xef, 0xd7, 0x08, 0x13, 0x85, 0x64, + 0xa7, 0x11, 0xe6, 0x91, 0xc1, 0xee, 0x46, 0x35, 0x13, 0xf1, 0x42, 0x2a, 0xd0, 0xcb, 0xa8, 0x48, + 0x13, 0xdb, 0x30, 0x51, 0x06, 0x28, 0xa2, 0x6a, 0x4b, 0x78, 0x10, 0x9d, 0x47, 0xe9, 0x52, 0xa1, + 0xcc, 0x60, 0x0b, 0xd8, 0xbb, 0x08, 0x30, 0xf1, 0x02, 0x32, 0xb1, 0xc5, 0x3d, 0x3d, 0x8f, 0x2b, + 0x51, 0x1e, 0x45, 0x52, 0xa1, 0x41, 0x7d, 0x16, 0x0a, 0x43, 0x42, 0x0e, 0xbe, 0xa1, 0x16, 0x1f, + 0xc5, 0x51, 0x09, 0xf4, 0x0e, 0x69, 0x4b, 0x84, 0xcc, 0xf4, 0x83, 0x51, 0x73, 0xdc, 0xe5, 0x75, + 0x11, 0xfe, 0x0b, 0x48, 0xef, 0x7d, 0x9e, 0x82, 0xe2, 0x50, 0x49, 0xf8, 0x4a, 0x67, 0xa4, 0x63, + 0x97, 0x9d, 0x0b, 0x14, 0xfd, 0x60, 0x14, 0x8c, 0x7b, 0xd3, 0x27, 0x6c, 0x73, 0xbd, 0xf5, 0x6c, + 0x56, 0xa4, 0x89, 0x6d, 0x18, 0x66, 0x5f, 0xb3, 0x6a, 0xc2, 0xde, 0x1e, 0x7e, 0x81, 0x18, 0xdf, + 0x00, 0x0a, 0xbe, 0x56, 0xa0, 0xaf, 0x48, 0xcb, 0x14, 0x10, 0xf7, 0x1b, 0x4e, 0x69, 0xca, 0x2e, + 0xfc, 0x0f, 0xec, 0x84, 0x97, 0x77, 0x05, 0xc4, 0xdc, 0xf1, 0x74, 0x46, 0x76, 0x0c, 0x0a, 0x2c, + 0x4d, 0xbf, 0xe9, 0x94, 0x76, 0xaf, 0xa8, 0xe4, 0x58, 0xee, 0x35, 0xc2, 0x87, 0xe4, 0xe6, 0x99, + 0x31, 0xf6, 0x38, 0x68, 0x5b, 0x6e, 0xe7, 0x2e, 0xaf, 0x8b, 0xf0, 0x47, 0x40, 0x6e, 0x6f, 0xc9, + 0xd0, 0x07, 0xe4, 0xc6, 0x89, 0x91, 0x30, 0x77, 0x4c, 0x87, 0x9f, 0x6e, 0xd2, 0xe7, 0xa4, 0x55, + 0x1a, 0xd0, 0x7e, 0xf5, 0x47, 0x97, 0x30, 0xfc, 0xc1, 0x80, 0x7e, 0xad, 0x3e, 0xe7, 0xdc, 0x81, + 0xd6, 0x12, 0x68, 0x9d, 0x6b, 0xb7, 0x72, 0x97, 0xd7, 0x45, 0xf8, 0xbd, 0x41, 0x3a, 0xc7, 0x0f, + 0xe9, 0x80, 0x74, 0xec, 0x53, 0x25, 0x32, 0xf0, 0xc6, 0xd7, 0x35, 0xbd, 0x45, 0x9a, 0xa5, 0x9c, + 0xbb, 0xf1, 0x5d, 0x6e, 0x3f, 0xe9, 0x5d, 0xb2, 0x93, 0xe8, 0xbc, 0x2c, 0xec, 0x11, 0x6d, 0x02, + 0x7c, 0x45, 0x67, 0xa4, 0x0d, 0x36, 0x26, 0xfd, 0xd6, 0xa8, 0x39, 0xee, 0x4d, 0xf7, 0xae, 0x60, + 0x95, 0xb9, 0x7c, 0x1d, 0x28, 0xd4, 0x4b, 0x5e, 0x8b, 0x0c, 0x12, 0x1f, 0x3a, 0xd7, 0xb4, 0x2e, + 0x52, 0x58, 0x7a, 0x73, 0xf6, 0x93, 0xbe, 0x24, 0xed, 0xca, 0xe6, 0xd1, 0x1f, 0xe6, 0xf1, 0x25, + 0xa6, 0x6d, 0x42, 0xcc, 0x6b, 0xf6, 0x59, 0x63, 0x3f, 0x78, 0x71, 0xef, 0xd7, 0x6a, 0x18, 0xfc, + 0x5e, 0x0d, 0x83, 0x3f, 0xab, 0x61, 0xf0, 0xf3, 
0xef, 0xf0, 0xda, 0xa7, 0xeb, 0x1e, 0xf8, 0x1f, + 0x00, 0x00, 0xff, 0xff, 0xd8, 0x1f, 0x01, 0xf4, 0x23, 0x04, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1beta1/register.go new file mode 100644 index 00000000..85cf51bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authentication/v1beta1/register.go @@ -0,0 +1,7 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("authentication.k8s.io", "v1beta1", "tokenreviews", false, &TokenReview{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1/generated.pb.go index de347314..af936c4d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1/generated.pb.go @@ -1,34 +1,37 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/authorization/v1/generated.proto /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto + k8s.io/api/authorization/v1/generated.proto It has these top-level messages: ExtraValue LocalSubjectAccessReview NonResourceAttributes + NonResourceRule ResourceAttributes + ResourceRule SelfSubjectAccessReview SelfSubjectAccessReviewSpec + SelfSubjectRulesReview + SelfSubjectRulesReviewSpec SubjectAccessReview SubjectAccessReviewSpec SubjectAccessReviewStatus + SubjectRulesReviewStatus */ package v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -68,7 +71,7 @@ func (m *ExtraValue) GetItems() []string { // checking. type LocalSubjectAccessReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. 
Spec *SubjectAccessReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -85,7 +88,7 @@ func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *LocalSubjectAccessReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *LocalSubjectAccessReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -136,6 +139,36 @@ func (m *NonResourceAttributes) GetVerb() string { return "" } +// NonResourceRule holds information that describes a rule for the non-resource +type NonResourceRule struct { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + Verbs []string `protobuf:"bytes,1,rep,name=verbs" json:"verbs,omitempty"` + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + NonResourceURLs []string `protobuf:"bytes,2,rep,name=nonResourceURLs" json:"nonResourceURLs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (m *NonResourceRule) String() string { return proto.CompactTextString(m) } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *NonResourceRule) GetVerbs() []string { + if m != nil { + return m.Verbs + } + return nil +} + +func (m *NonResourceRule) GetNonResourceURLs() []string { + if m != nil { + return m.NonResourceURLs + } + return nil +} + // ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface type ResourceAttributes struct { // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces @@ -168,7 +201,7 @@ type ResourceAttributes struct { func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (m *ResourceAttributes) String() string { return proto.CompactTextString(m) } func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *ResourceAttributes) GetNamespace() string { if m != nil && m.Namespace != nil { @@ -219,12 +252,64 @@ func (m *ResourceAttributes) GetName() string { return "" } +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +type ResourceRule struct { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verbs []string `protobuf:"bytes,1,rep,name=verbs" json:"verbs,omitempty"` + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + ApiGroups []string `protobuf:"bytes,2,rep,name=apiGroups" json:"apiGroups,omitempty"` + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. 
+ // +optional + Resources []string `protobuf:"bytes,3,rep,name=resources" json:"resources,omitempty"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + // +optional + ResourceNames []string `protobuf:"bytes,4,rep,name=resourceNames" json:"resourceNames,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (m *ResourceRule) String() string { return proto.CompactTextString(m) } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *ResourceRule) GetVerbs() []string { + if m != nil { + return m.Verbs + } + return nil +} + +func (m *ResourceRule) GetApiGroups() []string { + if m != nil { + return m.ApiGroups + } + return nil +} + +func (m *ResourceRule) GetResources() []string { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ResourceRule) GetResourceNames() []string { + if m != nil { + return m.ResourceNames + } + return nil +} + // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". Self is a special case, because users should always be able // to check whether they can perform an action type SelfSubjectAccessReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated. user and groups must be empty Spec *SelfSubjectAccessReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is filled in by the server and indicates whether the request is allowed or not @@ -236,9 +321,9 @@ type SelfSubjectAccessReview struct { func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (m *SelfSubjectAccessReview) String() string { return proto.CompactTextString(m) } func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *SelfSubjectAccessReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *SelfSubjectAccessReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -275,7 +360,7 @@ func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessRe func (m *SelfSubjectAccessReviewSpec) String() string { return proto.CompactTextString(m) } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{7} } func (m *SelfSubjectAccessReviewSpec) GetResourceAttributes() *ResourceAttributes { @@ -292,10 +377,73 @@ func (m *SelfSubjectAccessReviewSpec) GetNonResourceAttributes() *NonResourceAtt return nil } +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. 
SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +type SelfSubjectRulesReview struct { + // +optional + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Spec holds information about the request being evaluated. + Spec *SelfSubjectRulesReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` + // Status is filled in by the server and indicates the set of actions a user can perform. + // +optional + Status *SubjectRulesReviewStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (m *SelfSubjectRulesReview) String() string { return proto.CompactTextString(m) } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *SelfSubjectRulesReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *SelfSubjectRulesReview) GetSpec() *SelfSubjectRulesReviewSpec { + if m != nil { + return m.Spec + } + return nil +} + +func (m *SelfSubjectRulesReview) GetStatus() *SubjectRulesReviewStatus { + if m != nil { + return m.Status + } + return nil +} + +type SelfSubjectRulesReviewSpec struct { + // Namespace to evaluate rules for. Required. + Namespace *string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (m *SelfSubjectRulesReviewSpec) String() string { return proto.CompactTextString(m) } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{9} +} + +func (m *SelfSubjectRulesReviewSpec) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + // SubjectAccessReview checks whether or not a user or group can perform an action. 
type SubjectAccessReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated Spec *SubjectAccessReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is filled in by the server and indicates whether the request is allowed or not @@ -307,9 +455,9 @@ type SubjectAccessReview struct { func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (m *SubjectAccessReview) String() string { return proto.CompactTextString(m) } func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *SubjectAccessReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *SubjectAccessReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -342,21 +490,26 @@ type SubjectAccessReviewSpec struct { // User is the user you're testing for. // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups // +optional - Verb *string `protobuf:"bytes,3,opt,name=verb" json:"verb,omitempty"` + User *string `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` // Groups is the groups you're testing for. // +optional Groups []string `protobuf:"bytes,4,rep,name=groups" json:"groups,omitempty"` // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer // it needs a reflection here. // +optional - Extra map[string]*ExtraValue `protobuf:"bytes,5,rep,name=extra" json:"extra,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Extra map[string]*ExtraValue `protobuf:"bytes,5,rep,name=extra" json:"extra,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // UID information about the requesting user. 
+ // +optional + Uid *string `protobuf:"bytes,6,opt,name=uid" json:"uid,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } -func (m *SubjectAccessReviewSpec) String() string { return proto.CompactTextString(m) } -func (*SubjectAccessReviewSpec) ProtoMessage() {} -func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (m *SubjectAccessReviewSpec) String() string { return proto.CompactTextString(m) } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} func (m *SubjectAccessReviewSpec) GetResourceAttributes() *ResourceAttributes { if m != nil { @@ -372,9 +525,9 @@ func (m *SubjectAccessReviewSpec) GetNonResourceAttributes() *NonResourceAttribu return nil } -func (m *SubjectAccessReviewSpec) GetVerb() string { - if m != nil && m.Verb != nil { - return *m.Verb +func (m *SubjectAccessReviewSpec) GetUser() string { + if m != nil && m.User != nil { + return *m.User } return "" } @@ -393,10 +546,23 @@ func (m *SubjectAccessReviewSpec) GetExtra() map[string]*ExtraValue { return nil } +func (m *SubjectAccessReviewSpec) GetUid() string { + if m != nil && m.Uid != nil { + return *m.Uid + } + return "" +} + // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed *bool `protobuf:"varint,1,opt,name=allowed" json:"allowed,omitempty"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied *bool `protobuf:"varint,4,opt,name=denied" json:"denied,omitempty"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason *string `protobuf:"bytes,2,opt,name=reason" json:"reason,omitempty"` @@ -412,7 +578,7 @@ func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewSt func (m *SubjectAccessReviewStatus) String() string { return proto.CompactTextString(m) } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptorGenerated, []int{12} } func (m *SubjectAccessReviewStatus) GetAllowed() bool { @@ -422,6 +588,13 @@ func (m *SubjectAccessReviewStatus) GetAllowed() bool { return false } +func (m *SubjectAccessReviewStatus) GetDenied() bool { + if m != nil && m.Denied != nil { + return *m.Denied + } + return false +} + func (m *SubjectAccessReviewStatus) GetReason() string { if m != nil && m.Reason != nil { return *m.Reason @@ -436,16 +609,78 @@ func (m *SubjectAccessReviewStatus) GetEvaluationError() string { return "" } +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. 
+type SubjectRulesReviewStatus struct { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + ResourceRules []*ResourceRule `protobuf:"bytes,1,rep,name=resourceRules" json:"resourceRules,omitempty"` + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + NonResourceRules []*NonResourceRule `protobuf:"bytes,2,rep,name=nonResourceRules" json:"nonResourceRules,omitempty"` + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + Incomplete *bool `protobuf:"varint,3,opt,name=incomplete" json:"incomplete,omitempty"` + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + EvaluationError *string `protobuf:"bytes,4,opt,name=evaluationError" json:"evaluationError,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (m *SubjectRulesReviewStatus) String() string { return proto.CompactTextString(m) } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + +func (m *SubjectRulesReviewStatus) GetResourceRules() []*ResourceRule { + if m != nil { + return m.ResourceRules + } + return nil +} + +func (m *SubjectRulesReviewStatus) GetNonResourceRules() []*NonResourceRule { + if m != nil { + return m.NonResourceRules + } + return nil +} + +func (m *SubjectRulesReviewStatus) GetIncomplete() bool { + if m != nil && m.Incomplete != nil { + return *m.Incomplete + } + return false +} + +func (m *SubjectRulesReviewStatus) GetEvaluationError() string { + if m != nil && m.EvaluationError != nil { + return *m.EvaluationError + } + return "" +} + func init() { - proto.RegisterType((*ExtraValue)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.ExtraValue") - proto.RegisterType((*LocalSubjectAccessReview)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.LocalSubjectAccessReview") - proto.RegisterType((*NonResourceAttributes)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.NonResourceAttributes") - proto.RegisterType((*ResourceAttributes)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.ResourceAttributes") - proto.RegisterType((*SelfSubjectAccessReview)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.SelfSubjectAccessReview") - proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.SelfSubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReview)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.SubjectAccessReview") - proto.RegisterType((*SubjectAccessReviewSpec)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.SubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReviewStatus)(nil), "github.com/ericchiang.k8s.apis.authorization.v1.SubjectAccessReviewStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), 
"k8s.io.api.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes") + proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1.ResourceAttributes") + proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1.ResourceRule") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") } func (m *ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -567,6 +802,57 @@ func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -630,6 +916,87 @@ func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ApiGroups) > 0 { + for _, s := range m.ApiGroups { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + 
i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -722,7 +1089,7 @@ func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { +func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -732,7 +1099,7 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { +func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -773,7 +1140,7 @@ func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { +func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -783,36 +1150,114 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.ResourceAttributes != nil { + if m.Namespace != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n12, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace))) + i += copy(dAtA[i:], *m.Namespace) } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n13, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - if m.Verb != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Verb))) - i += copy(dAtA[i:], *m.Verb) + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Metadata != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n12, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Spec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.Status != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.XXX_unrecognized != nil { + 
i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.User != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.User))) + i += copy(dAtA[i:], *m.User) } if len(m.Groups) > 0 { for _, s := range m.Groups { @@ -849,14 +1294,20 @@ func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n14, err := v.MarshalTo(dAtA[i:]) + n17, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n17 } } } + if m.Uid != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Uid))) + i += copy(dAtA[i:], *m.Uid) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -900,30 +1351,83 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EvaluationError))) i += copy(dAtA[i:], *m.EvaluationError) } + if m.Denied != nil { + dAtA[i] = 0x20 + i++ + if *m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 +func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, msg := range m.ResourceRules { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NonResourceRules) > 0 { + for _, msg := range m.NonResourceRules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Incomplete != nil { + dAtA[i] = 0x18 + i++ + if *m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.EvaluationError != nil { + dAtA[i] = 
0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EvaluationError))) + i += copy(dAtA[i:], *m.EvaluationError) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -986,6 +1490,27 @@ func (m *NonResourceAttributes) Size() (n int) { return n } +func (m *NonResourceRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ResourceAttributes) Size() (n int) { var l int _ = l @@ -1023,6 +1548,39 @@ func (m *ResourceAttributes) Size() (n int) { return n } +func (m *ResourceRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ApiGroups) > 0 { + for _, s := range m.ApiGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *SelfSubjectAccessReview) Size() (n int) { var l int _ = l @@ -1061,6 +1619,40 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { return n } +func (m *SelfSubjectRulesReview) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + var l int + _ = l + if m.Namespace != nil { + l = len(*m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *SubjectAccessReview) Size() (n int) { var l int _ = l @@ -1093,8 +1685,8 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { l = m.NonResourceAttributes.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Verb != nil { - l = len(*m.Verb) + if m.User != nil { + l = len(*m.User) n += 1 + l + sovGenerated(uint64(l)) } if len(m.Groups) > 0 { @@ -1116,6 +1708,10 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.Uid != nil { + l = len(*m.Uid) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1136,6 +1732,37 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { l = len(*m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) } + if m.Denied != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SubjectRulesReviewStatus) Size() (n int) { + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) 
> 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Incomplete != nil { + n += 2 + } + if m.EvaluationError != nil { + l = len(*m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1291,7 +1918,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1496,7 +2123,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { +func (m *NonResourceRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1519,15 +2146,15 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + return fmt.Errorf("proto: NonResourceRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NonResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1552,12 +2179,11 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1582,16 +2208,126 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Verb = &s + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Verb = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { @@ -1757,6 +2493,173 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ApiGroups = append(m.ApiGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1813,7 +2716,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1995,12 +2898,243 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SelfSubjectRulesReviewSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &SubjectRulesReviewStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2080,7 +3214,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2271,7 +3405,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2297,7 +3431,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Verb = &s + m.User = &s iNdEx = postIndex case 4: if wireType != 2 { @@ -2354,51 +3488,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]*ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *ExtraValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2408,46 +3505,115 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + } + m.Extra[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postmsgIndex - m.Extra[mapkey] = mapvalue - } else { - var mapvalue *ExtraValue - m.Extra[mapkey] = mapvalue } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Uid = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2581,6 +3747,191 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.EvaluationError = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Denied = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, &ResourceRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, &NonResourceRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incomplete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incomplete = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.EvaluationError = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2709,51 +4060,63 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/authorization/v1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 662 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x55, 0xdf, 0x6a, 0x13, 0x4f, - 0x14, 0xfe, 0xed, 0x26, 0x69, 0x9b, 0xd3, 0x8b, 0x9f, 0x8c, 0xad, 0x5d, 0xa3, 0x84, 0xb0, 0x57, - 0x01, 0x75, 0x96, 0x14, 0xc1, 0xa2, 0x17, 0xa5, 0xc5, 0x8a, 0x88, 0xb5, 0x30, 0x05, 0x11, 0x11, - 0x61, 0xb2, 0x3d, 0xa6, 0x6b, 0x92, 0x9d, 0x75, 0x66, 0x76, 0xdb, 0xfa, 0x04, 0xe2, 0x13, 0x08, - 0xde, 0x08, 0xbe, 0x4c, 0x2f, 0x7d, 0x04, 0xa9, 0x2f, 0x22, 0x33, 0xbb, 0x4d, 0x5b, 0xb3, 0x09, - 0x04, 0xff, 0x5c, 0xf5, 0x6e, 0xce, 0xd9, 0xf9, 0xbe, 0x73, 0xe6, 0x9c, 0x6f, 0xcf, 0x81, 0x07, - 0xfd, 0x35, 0x45, 0x23, 0x11, 0xf4, 0xd3, 0x2e, 0xca, 0x18, 0x35, 0xaa, 0x20, 0xe9, 0xf7, 0x02, - 0x9e, 0x44, 0x2a, 0xe0, 0xa9, 0xde, 0x17, 0x32, 0x7a, 0xcf, 0x75, 0x24, 0xe2, 0x20, 0xeb, 0x04, - 0x3d, 0x8c, 0x51, 0x72, 0x8d, 0x7b, 0x34, 0x91, 0x42, 0x0b, 0x72, 0x2b, 0x07, 0xd3, 0x33, 0x30, - 0x4d, 0xfa, 0x3d, 0x6a, 0xc0, 0xf4, 0x02, 0x98, 0x66, 0x9d, 0xc6, 0xea, 0x94, 0x48, 0x43, 0xd4, - 0xbc, 0x24, 0x40, 0xe3, 0x4e, 0x39, 0x46, 0xa6, 0xb1, 0x8e, 0x86, 0x38, 0x76, 0xfd, 0xee, 0xf4, - 0xeb, 0x2a, 0xdc, 0xc7, 0x21, 0x1f, 0x43, 0x75, 0xca, 0x51, 0xa9, 0x8e, 0x06, 0x41, 0x14, 0x6b, - 0xa5, 0xe5, 0x18, 0xe4, 0xf6, 0xc4, 0xb7, 0x94, 0xbc, 0xc2, 0xf7, 0x01, 0xb6, 0x0e, 0xb5, 0xe4, - 0xcf, 0xf9, 0x20, 0x45, 0xb2, 0x04, 0xb5, 0x48, 0xe3, 0x50, 0x79, 0x4e, 0xab, 0xd2, 0xae, 0xb3, - 0xdc, 0xf0, 0xbf, 0xb8, 0xe0, 0x3d, 0x15, 0x21, 0x1f, 0xec, 0xa6, 0xdd, 0xb7, 0x18, 0xea, 0x8d, - 0x30, 0x44, 0xa5, 0x18, 0x66, 0x11, 0x1e, 0x90, 0x27, 0xb0, 0x60, 0x2a, 0xb4, 0xc7, 0x35, 0xf7, - 0x9c, 0x96, 0xd3, 0x5e, 0x5c, 0xa5, 0x74, 0x4a, 0xe9, 0xcd, 0x5d, 0x9a, 0x75, 0xe8, 0x8e, 0x65, - 0xda, 0x46, 0xcd, 0xd9, 0x08, 0x4f, 0x5e, 0x40, 0x55, 
0x25, 0x18, 0x7a, 0xae, 0xe5, 0x79, 0x48, - 0x67, 0x68, 0x21, 0x2d, 0xc9, 0x6d, 0x37, 0xc1, 0x90, 0x59, 0x46, 0xf2, 0x1a, 0xe6, 0x94, 0xe6, - 0x3a, 0x55, 0x5e, 0xc5, 0x72, 0x3f, 0xfa, 0x6d, 0x6e, 0xcb, 0xc6, 0x0a, 0x56, 0x7f, 0x1d, 0x96, - 0x9f, 0x89, 0x98, 0xa1, 0x12, 0xa9, 0x0c, 0x71, 0x43, 0x6b, 0x19, 0x75, 0x53, 0x8d, 0x8a, 0x10, - 0xa8, 0x26, 0x5c, 0xef, 0xdb, 0xd2, 0xd4, 0x99, 0x3d, 0x1b, 0x5f, 0x86, 0xb2, 0x6b, 0x9f, 0x59, - 0x67, 0xf6, 0xec, 0x1f, 0x3b, 0x40, 0x4a, 0xe0, 0x37, 0xa1, 0x1e, 0xf3, 0x21, 0xaa, 0x84, 0x87, - 0x58, 0x70, 0x9c, 0x39, 0xca, 0x88, 0x4c, 0x0b, 0x7b, 0x52, 0xa4, 0x89, 0x7d, 0x68, 0x9d, 0xe5, - 0x06, 0xf1, 0x60, 0x3e, 0x43, 0xa9, 0x22, 0x11, 0x7b, 0x55, 0xeb, 0x3f, 0x35, 0x49, 0x03, 0x16, - 0x64, 0x11, 0xd7, 0xab, 0xd9, 0x4f, 0x23, 0x9b, 0xb4, 0x60, 0x51, 0xa5, 0xdd, 0xd1, 0xe7, 0x39, - 0xfb, 0xf9, 0xbc, 0xcb, 0x64, 0x60, 0xd2, 0xf1, 0xe6, 0xf3, 0x0c, 0xcc, 0xd9, 0xff, 0xea, 0xc2, - 0xca, 0x2e, 0x0e, 0xde, 0xfc, 0x6d, 0xb5, 0xbc, 0xba, 0xa0, 0x96, 0xc7, 0xb3, 0x75, 0xb4, 0x3c, - 0xbf, 0x7f, 0xa8, 0x98, 0x0f, 0x2e, 0xdc, 0x98, 0x92, 0x05, 0x11, 0x40, 0xe4, 0x98, 0x1e, 0x8a, - 0x9a, 0xad, 0xcf, 0x94, 0xcb, 0xb8, 0xac, 0x58, 0x09, 0x35, 0x39, 0x84, 0xe5, 0xb8, 0x4c, 0xc2, - 0x45, 0x7d, 0x37, 0x67, 0x8a, 0x59, 0xfa, 0x33, 0xb0, 0xf2, 0x00, 0xfe, 0x67, 0x17, 0xae, 0x5e, - 0x8e, 0x96, 0x49, 0x42, 0xf9, 0x58, 0x85, 0x95, 0x4b, 0x91, 0x8c, 0x66, 0x5d, 0xe5, 0xdc, 0xac, - 0xbb, 0x06, 0x73, 0x76, 0xbc, 0x29, 0xaf, 0x6a, 0xf7, 0x55, 0x61, 0x11, 0x84, 0x1a, 0x9a, 0xa5, - 0xe6, 0xd5, 0x5a, 0x95, 0xf6, 0xe2, 0xea, 0xce, 0x9f, 0xe8, 0x36, 0xb5, 0x6b, 0x72, 0x2b, 0xd6, - 0xf2, 0x88, 0xe5, 0xec, 0x8d, 0x77, 0xc5, 0xee, 0xb4, 0x4e, 0x72, 0x05, 0x2a, 0x7d, 0x3c, 0x2a, - 0x86, 0xb4, 0x39, 0x92, 0x6d, 0xa8, 0x65, 0x66, 0xad, 0x16, 0xc5, 0xb9, 0x37, 0x53, 0x1a, 0x67, - 0x5b, 0x99, 0xe5, 0x2c, 0xf7, 0xdd, 0x35, 0xc7, 0x3f, 0x80, 0xeb, 0x13, 0x15, 0x63, 0x86, 0x3c, - 0x1f, 0x0c, 0xc4, 0x01, 0xee, 0xd9, 0x2c, 0x16, 0xd8, 0xa9, 0x69, 0x0a, 0x25, 0x91, 0x2b, 0x11, - 0x17, 0xab, 0xa2, 0xb0, 0x48, 0x1b, 0xfe, 0x47, 0x43, 0x6e, 0xa3, 0x6e, 0x49, 0x29, 0x64, 0x51, - 0xdf, 0x5f, 0xdd, 0x9b, 0x4b, 0xc7, 0x27, 0x4d, 0xe7, 0xdb, 0x49, 0xd3, 0xf9, 0x7e, 0xd2, 0x74, - 0x3e, 0xfd, 0x68, 0xfe, 0xf7, 0xd2, 0xcd, 0x3a, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x06, 0x70, - 0x3c, 0x69, 0xa2, 0x09, 0x00, 0x00, + // 862 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x5f, 0x8b, 0x23, 0x45, + 0x10, 0x77, 0xf2, 0x67, 0x2f, 0xa9, 0x28, 0x77, 0xb4, 0xf7, 0x67, 0xcc, 0x1d, 0x21, 0x0c, 0x82, + 0x11, 0x65, 0xc6, 0xc4, 0xf3, 0x5c, 0x0e, 0xe4, 0x38, 0x61, 0x51, 0x30, 0xbb, 0x8b, 0xbd, 0xac, + 0x88, 0x2f, 0xd2, 0x99, 0x94, 0x49, 0x9b, 0xc9, 0xcc, 0xd0, 0xdd, 0x33, 0xeb, 0xfa, 0x28, 0xe8, + 0xeb, 0xbe, 0xfa, 0x91, 0xf6, 0xd1, 0x0f, 0xe0, 0x83, 0xac, 0x2f, 0x7e, 0x09, 0x41, 0xba, 0x67, + 0xf2, 0x7f, 0x12, 0xa3, 0xb0, 0x2f, 0xfa, 0xd6, 0x55, 0xd5, 0xf5, 0xab, 0xea, 0x5f, 0x55, 0x77, + 0x35, 0xbc, 0x33, 0x39, 0x94, 0x2e, 0x8f, 0x3c, 0x16, 0x73, 0x8f, 0x25, 0x6a, 0x1c, 0x09, 0xfe, + 0x3d, 0x53, 0x3c, 0x0a, 0xbd, 0xb4, 0xeb, 0x8d, 0x30, 0x44, 0xc1, 0x14, 0x0e, 0xdd, 0x58, 0x44, + 0x2a, 0x22, 0x8f, 0xb3, 0xcd, 0x2e, 0x8b, 0xb9, 0xbb, 0xb2, 0xd9, 0x4d, 0xbb, 0xcd, 0xa7, 0x0b, + 0xa4, 0x29, 0xf3, 0xc7, 0x3c, 0x44, 0x71, 0xe9, 0xc5, 0x93, 0x91, 0x56, 0x48, 0x6f, 0x8a, 0x8a, + 0x15, 0x40, 0x36, 0xbd, 0x6d, 0x5e, 0x22, 0x09, 0x15, 0x9f, 0xe2, 0x86, 0xc3, 0xb3, 0xbf, 0x73, + 0x90, 0xfe, 0x18, 0xa7, 0x6c, 0xc3, 0xef, 0xfd, 0x6d, 0x7e, 0x89, 0xe2, 0x81, 0xc7, 0x43, 
0x25, + 0x95, 0x58, 0x77, 0x72, 0x1c, 0x80, 0xa3, 0xef, 0x94, 0x60, 0x5f, 0xb0, 0x20, 0x41, 0x72, 0x1f, + 0xaa, 0x5c, 0xe1, 0x54, 0xda, 0x56, 0xbb, 0xdc, 0xa9, 0xd3, 0x4c, 0x70, 0x7e, 0x28, 0x81, 0xdd, + 0x8f, 0x7c, 0x16, 0x9c, 0x25, 0x83, 0x6f, 0xd1, 0x57, 0x2f, 0x7d, 0x1f, 0xa5, 0xa4, 0x98, 0x72, + 0xbc, 0x20, 0x7d, 0xa8, 0xe9, 0x93, 0x0f, 0x99, 0x62, 0xb6, 0xd5, 0xb6, 0x3a, 0x8d, 0xde, 0x7b, + 0xee, 0x82, 0xc4, 0x79, 0x22, 0x6e, 0x3c, 0x19, 0x69, 0x85, 0x74, 0xf5, 0x6e, 0x37, 0xed, 0xba, + 0xa7, 0x06, 0xeb, 0x18, 0x15, 0xa3, 0x73, 0x04, 0xf2, 0x29, 0x54, 0x64, 0x8c, 0xbe, 0x5d, 0x32, + 0x48, 0x4f, 0xdd, 0x1d, 0xe5, 0x70, 0x0b, 0xb2, 0x39, 0x8b, 0xd1, 0xa7, 0x06, 0x81, 0x9c, 0xc0, + 0x81, 0x54, 0x4c, 0x25, 0xd2, 0x2e, 0x1b, 0xac, 0x67, 0xff, 0x18, 0xcb, 0x78, 0xd3, 0x1c, 0xc5, + 0x79, 0x01, 0x0f, 0x4e, 0xa2, 0x90, 0xa2, 0x8c, 0x12, 0xe1, 0xe3, 0x4b, 0xa5, 0x04, 0x1f, 0x24, + 0x0a, 0x25, 0x21, 0x50, 0x89, 0x99, 0x1a, 0x9b, 0xc3, 0xd7, 0xa9, 0x59, 0x6b, 0x5d, 0x8a, 0x62, + 0x60, 0x8e, 0x51, 0xa7, 0x66, 0xed, 0x7c, 0x0e, 0x77, 0x97, 0x00, 0x68, 0x12, 0x18, 0xba, 0xb5, + 0x69, 0x4e, 0xb7, 0x11, 0x48, 0x07, 0xee, 0x86, 0x8b, 0x8d, 0xe7, 0xb4, 0x2f, 0xed, 0x92, 0xb1, + 0xaf, 0xab, 0x9d, 0x6b, 0x0b, 0x48, 0x41, 0x46, 0x4f, 0xa0, 0x1e, 0xb2, 0x29, 0xca, 0x98, 0xf9, + 0x98, 0xa7, 0xb5, 0x50, 0x14, 0xe5, 0xa6, 0x13, 0x19, 0x89, 0x28, 0x89, 0x0d, 0x57, 0x75, 0x9a, + 0x09, 0xc4, 0x86, 0x3b, 0x29, 0x0a, 0xc9, 0xa3, 0xd0, 0xae, 0x18, 0xfd, 0x4c, 0x24, 0x4d, 0xa8, + 0x89, 0x3c, 0xae, 0x5d, 0x35, 0xa6, 0xb9, 0x4c, 0xda, 0xd0, 0x90, 0xc9, 0x60, 0x6e, 0x3e, 0x30, + 0xe6, 0x65, 0x95, 0xce, 0x40, 0xa7, 0x63, 0xdf, 0xc9, 0x32, 0xd0, 0x6b, 0xe7, 0x27, 0x0b, 0x5e, + 0xdd, 0x83, 0x9b, 0x27, 0x50, 0x67, 0x31, 0xff, 0x44, 0xa7, 0x37, 0x63, 0x65, 0xa1, 0xd0, 0xd6, + 0x59, 0x10, 0x5d, 0x76, 0x63, 0x9d, 0x2b, 0xc8, 0x9b, 0xf0, 0xda, 0x4c, 0x38, 0xd1, 0x6c, 0xd8, + 0x15, 0xb3, 0x63, 0x55, 0xe9, 0xfc, 0x58, 0x82, 0x47, 0x67, 0x18, 0x7c, 0x73, 0xfb, 0xbd, 0xde, + 0x5f, 0xe9, 0xf5, 0xc3, 0xdd, 0xfd, 0x59, 0x9c, 0xd1, 0x2d, 0xf6, 0xfb, 0x1f, 0x16, 0x3c, 0xde, + 0x11, 0x95, 0x7c, 0x0d, 0x44, 0x6c, 0xb4, 0x5e, 0xce, 0x8a, 0xb7, 0x33, 0xf6, 0x66, 0xc7, 0xd2, + 0x02, 0x28, 0x32, 0x86, 0x07, 0x61, 0xd1, 0x85, 0xcb, 0xf9, 0xea, 0xed, 0x8c, 0x51, 0x78, 0x55, + 0x69, 0x31, 0xa0, 0x7e, 0xdf, 0x1e, 0x2e, 0x1d, 0x55, 0xb7, 0xdf, 0xed, 0x54, 0xfc, 0xb3, 0x95, + 0x8a, 0x7f, 0xb8, 0x6f, 0xc5, 0x97, 0x12, 0x5a, 0x2a, 0xf8, 0xf1, 0x5a, 0xc1, 0x3f, 0xd8, 0xa7, + 0xe0, 0xcb, 0x50, 0xab, 0xf5, 0x7e, 0x0e, 0xcd, 0xed, 0x21, 0x77, 0x3f, 0x29, 0xce, 0x9f, 0x16, + 0xbc, 0xfe, 0x7f, 0x9e, 0x0d, 0xbf, 0x96, 0xe1, 0xd1, 0x7f, 0xff, 0x9e, 0xe8, 0x77, 0x3b, 0x91, + 0x28, 0xf2, 0x21, 0x61, 0xd6, 0xe4, 0x21, 0x1c, 0x8c, 0xb2, 0xd7, 0x38, 0x7b, 0x4d, 0x73, 0x89, + 0x9c, 0x43, 0x15, 0xf5, 0xbf, 0xc2, 0xae, 0xb6, 0xcb, 0x9d, 0x46, 0xef, 0xc5, 0xbf, 0xa9, 0x96, + 0x6b, 0x7e, 0x26, 0x47, 0xa1, 0x12, 0x97, 0x34, 0x43, 0x23, 0xf7, 0xa0, 0x9c, 0xf0, 0x61, 0x3e, + 0x54, 0xf4, 0xb2, 0xc9, 0xf2, 0x0f, 0x8c, 0xd9, 0xa6, 0xed, 0x13, 0xbc, 0xcc, 0x3b, 0x54, 0x2f, + 0xc9, 0x47, 0x50, 0x4d, 0xf5, 0xdf, 0x26, 0xa7, 0xe3, 0xad, 0x9d, 0x89, 0x2c, 0xbe, 0x42, 0x34, + 0xf3, 0x7a, 0x5e, 0x3a, 0xb4, 0x9c, 0x2b, 0x0b, 0xde, 0xd8, 0xda, 0x04, 0x7a, 0x4a, 0xb2, 0x20, + 0x88, 0x2e, 0x70, 0x68, 0xc2, 0xd6, 0xe8, 0x4c, 0xd4, 0xdc, 0x08, 0x64, 0x32, 0x0a, 0xf3, 0x59, + 0x9b, 0x4b, 0x7a, 0xc0, 0xa3, 0x46, 0x37, 0x61, 0x8f, 0x84, 0x88, 0x66, 0x94, 0xae, 0xab, 0x35, + 0xc2, 0x10, 0x43, 0x8e, 0x43, 0x33, 0x80, 0x6b, 0x34, 0x97, 0x9c, 0xab, 0x12, 0xd8, 0xdb, 0x6e, + 0x34, 0x39, 0x5d, 
0xcc, 0x39, 0x63, 0x34, 0x13, 0xb4, 0xd1, 0x7b, 0x7b, 0xaf, 0x66, 0xd3, 0x1e, + 0x74, 0xd5, 0x9f, 0x7c, 0x09, 0xf7, 0xc2, 0xd5, 0x9f, 0x4b, 0x36, 0x7b, 0x1b, 0xbd, 0x77, 0xf7, + 0x6d, 0x2e, 0x03, 0xbb, 0x81, 0x42, 0x5a, 0x00, 0x3c, 0xf4, 0xa3, 0x69, 0x1c, 0xa0, 0x42, 0x43, + 0x42, 0x8d, 0x2e, 0x69, 0x8a, 0x98, 0xaa, 0x14, 0x32, 0xf5, 0xf1, 0xfd, 0xeb, 0x9b, 0x96, 0xf5, + 0xcb, 0x4d, 0xcb, 0xfa, 0xed, 0xa6, 0x65, 0xfd, 0xfc, 0x7b, 0xeb, 0x95, 0xaf, 0x4a, 0x69, 0xf7, + 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x0f, 0x8e, 0x08, 0xfc, 0x0b, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1/register.go new file mode 100644 index 00000000..936ba031 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1/register.go @@ -0,0 +1,10 @@ +package v1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("authorization.k8s.io", "v1", "localsubjectaccessreviews", true, &LocalSubjectAccessReview{}) + k8s.Register("authorization.k8s.io", "v1", "selfsubjectaccessreviews", false, &SelfSubjectAccessReview{}) + k8s.Register("authorization.k8s.io", "v1", "selfsubjectrulesreviews", false, &SelfSubjectRulesReview{}) + k8s.Register("authorization.k8s.io", "v1", "subjectaccessreviews", false, &SubjectAccessReview{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1beta1/generated.pb.go index ca84e5d3..e6e31a15 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1beta1/generated.pb.go @@ -1,34 +1,37 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/authorization/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto + k8s.io/api/authorization/v1beta1/generated.proto It has these top-level messages: ExtraValue LocalSubjectAccessReview NonResourceAttributes + NonResourceRule ResourceAttributes + ResourceRule SelfSubjectAccessReview SelfSubjectAccessReviewSpec + SelfSubjectRulesReview + SelfSubjectRulesReviewSpec SubjectAccessReview SubjectAccessReviewSpec SubjectAccessReviewStatus + SubjectRulesReviewStatus */ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -68,7 +71,7 @@ func (m *ExtraValue) GetItems() []string { // checking. 
type LocalSubjectAccessReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. Spec *SubjectAccessReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -85,7 +88,7 @@ func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *LocalSubjectAccessReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *LocalSubjectAccessReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -136,6 +139,36 @@ func (m *NonResourceAttributes) GetVerb() string { return "" } +// NonResourceRule holds information that describes a rule for the non-resource +type NonResourceRule struct { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + Verbs []string `protobuf:"bytes,1,rep,name=verbs" json:"verbs,omitempty"` + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + NonResourceURLs []string `protobuf:"bytes,2,rep,name=nonResourceURLs" json:"nonResourceURLs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (m *NonResourceRule) String() string { return proto.CompactTextString(m) } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *NonResourceRule) GetVerbs() []string { + if m != nil { + return m.Verbs + } + return nil +} + +func (m *NonResourceRule) GetNonResourceURLs() []string { + if m != nil { + return m.NonResourceURLs + } + return nil +} + // ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface type ResourceAttributes struct { // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces @@ -168,7 +201,7 @@ type ResourceAttributes struct { func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (m *ResourceAttributes) String() string { return proto.CompactTextString(m) } func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *ResourceAttributes) GetNamespace() string { if m != nil && m.Namespace != nil { @@ -219,12 +252,64 @@ func (m *ResourceAttributes) GetName() string { return "" } +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +type ResourceRule struct { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verbs []string `protobuf:"bytes,1,rep,name=verbs" json:"verbs,omitempty"` + // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + ApiGroups []string `protobuf:"bytes,2,rep,name=apiGroups" json:"apiGroups,omitempty"` + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + Resources []string `protobuf:"bytes,3,rep,name=resources" json:"resources,omitempty"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + // +optional + ResourceNames []string `protobuf:"bytes,4,rep,name=resourceNames" json:"resourceNames,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (m *ResourceRule) String() string { return proto.CompactTextString(m) } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *ResourceRule) GetVerbs() []string { + if m != nil { + return m.Verbs + } + return nil +} + +func (m *ResourceRule) GetApiGroups() []string { + if m != nil { + return m.ApiGroups + } + return nil +} + +func (m *ResourceRule) GetResources() []string { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ResourceRule) GetResourceNames() []string { + if m != nil { + return m.ResourceNames + } + return nil +} + // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". Self is a special case, because users should always be able // to check whether they can perform an action type SelfSubjectAccessReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated. 
user and groups must be empty Spec *SelfSubjectAccessReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is filled in by the server and indicates whether the request is allowed or not @@ -236,9 +321,9 @@ type SelfSubjectAccessReview struct { func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (m *SelfSubjectAccessReview) String() string { return proto.CompactTextString(m) } func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *SelfSubjectAccessReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *SelfSubjectAccessReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -275,7 +360,7 @@ func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessRe func (m *SelfSubjectAccessReviewSpec) String() string { return proto.CompactTextString(m) } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{7} } func (m *SelfSubjectAccessReviewSpec) GetResourceAttributes() *ResourceAttributes { @@ -292,10 +377,73 @@ func (m *SelfSubjectAccessReviewSpec) GetNonResourceAttributes() *NonResourceAtt return nil } +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +type SelfSubjectRulesReview struct { + // +optional + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Spec holds information about the request being evaluated. + Spec *SelfSubjectRulesReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` + // Status is filled in by the server and indicates the set of actions a user can perform. 
+ // +optional + Status *SubjectRulesReviewStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (m *SelfSubjectRulesReview) String() string { return proto.CompactTextString(m) } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *SelfSubjectRulesReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *SelfSubjectRulesReview) GetSpec() *SelfSubjectRulesReviewSpec { + if m != nil { + return m.Spec + } + return nil +} + +func (m *SelfSubjectRulesReview) GetStatus() *SubjectRulesReviewStatus { + if m != nil { + return m.Status + } + return nil +} + +type SelfSubjectRulesReviewSpec struct { + // Namespace to evaluate rules for. Required. + Namespace *string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (m *SelfSubjectRulesReviewSpec) String() string { return proto.CompactTextString(m) } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{9} +} + +func (m *SelfSubjectRulesReviewSpec) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + // SubjectAccessReview checks whether or not a user or group can perform an action. type SubjectAccessReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the request being evaluated Spec *SubjectAccessReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is filled in by the server and indicates whether the request is allowed or not @@ -307,9 +455,9 @@ type SubjectAccessReview struct { func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (m *SubjectAccessReview) String() string { return proto.CompactTextString(m) } func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *SubjectAccessReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *SubjectAccessReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -342,21 +490,26 @@ type SubjectAccessReviewSpec struct { // User is the user you're testing for. // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups // +optional - Verb *string `protobuf:"bytes,3,opt,name=verb" json:"verb,omitempty"` + User *string `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` // Groups is the groups you're testing for. // +optional Group []string `protobuf:"bytes,4,rep,name=group" json:"group,omitempty"` // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer // it needs a reflection here. 
// +optional - Extra map[string]*ExtraValue `protobuf:"bytes,5,rep,name=extra" json:"extra,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Extra map[string]*ExtraValue `protobuf:"bytes,5,rep,name=extra" json:"extra,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // UID information about the requesting user. + // +optional + Uid *string `protobuf:"bytes,6,opt,name=uid" json:"uid,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } -func (m *SubjectAccessReviewSpec) String() string { return proto.CompactTextString(m) } -func (*SubjectAccessReviewSpec) ProtoMessage() {} -func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (m *SubjectAccessReviewSpec) String() string { return proto.CompactTextString(m) } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} func (m *SubjectAccessReviewSpec) GetResourceAttributes() *ResourceAttributes { if m != nil { @@ -372,9 +525,9 @@ func (m *SubjectAccessReviewSpec) GetNonResourceAttributes() *NonResourceAttribu return nil } -func (m *SubjectAccessReviewSpec) GetVerb() string { - if m != nil && m.Verb != nil { - return *m.Verb +func (m *SubjectAccessReviewSpec) GetUser() string { + if m != nil && m.User != nil { + return *m.User } return "" } @@ -393,10 +546,23 @@ func (m *SubjectAccessReviewSpec) GetExtra() map[string]*ExtraValue { return nil } +func (m *SubjectAccessReviewSpec) GetUid() string { + if m != nil && m.Uid != nil { + return *m.Uid + } + return "" +} + // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed *bool `protobuf:"varint,1,opt,name=allowed" json:"allowed,omitempty"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied *bool `protobuf:"varint,4,opt,name=denied" json:"denied,omitempty"` // Reason is optional. It indicates why a request was allowed or denied. 
// +optional Reason *string `protobuf:"bytes,2,opt,name=reason" json:"reason,omitempty"` @@ -412,7 +578,7 @@ func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewSt func (m *SubjectAccessReviewStatus) String() string { return proto.CompactTextString(m) } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptorGenerated, []int{12} } func (m *SubjectAccessReviewStatus) GetAllowed() bool { @@ -422,6 +588,13 @@ func (m *SubjectAccessReviewStatus) GetAllowed() bool { return false } +func (m *SubjectAccessReviewStatus) GetDenied() bool { + if m != nil && m.Denied != nil { + return *m.Denied + } + return false +} + func (m *SubjectAccessReviewStatus) GetReason() string { if m != nil && m.Reason != nil { return *m.Reason @@ -436,16 +609,78 @@ func (m *SubjectAccessReviewStatus) GetEvaluationError() string { return "" } +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +type SubjectRulesReviewStatus struct { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + ResourceRules []*ResourceRule `protobuf:"bytes,1,rep,name=resourceRules" json:"resourceRules,omitempty"` + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + NonResourceRules []*NonResourceRule `protobuf:"bytes,2,rep,name=nonResourceRules" json:"nonResourceRules,omitempty"` + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + Incomplete *bool `protobuf:"varint,3,opt,name=incomplete" json:"incomplete,omitempty"` + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. 
+ // +optional + EvaluationError *string `protobuf:"bytes,4,opt,name=evaluationError" json:"evaluationError,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (m *SubjectRulesReviewStatus) String() string { return proto.CompactTextString(m) } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + +func (m *SubjectRulesReviewStatus) GetResourceRules() []*ResourceRule { + if m != nil { + return m.ResourceRules + } + return nil +} + +func (m *SubjectRulesReviewStatus) GetNonResourceRules() []*NonResourceRule { + if m != nil { + return m.NonResourceRules + } + return nil +} + +func (m *SubjectRulesReviewStatus) GetIncomplete() bool { + if m != nil && m.Incomplete != nil { + return *m.Incomplete + } + return false +} + +func (m *SubjectRulesReviewStatus) GetEvaluationError() string { + if m != nil && m.EvaluationError != nil { + return *m.EvaluationError + } + return "" +} + func init() { - proto.RegisterType((*ExtraValue)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.ExtraValue") - proto.RegisterType((*LocalSubjectAccessReview)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.LocalSubjectAccessReview") - proto.RegisterType((*NonResourceAttributes)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.NonResourceAttributes") - proto.RegisterType((*ResourceAttributes)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.ResourceAttributes") - proto.RegisterType((*SelfSubjectAccessReview)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.SelfSubjectAccessReview") - proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.SelfSubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReview)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.SubjectAccessReview") - proto.RegisterType((*SubjectAccessReviewSpec)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.SubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReviewStatus)(nil), "github.com/ericchiang.k8s.apis.authorization.v1beta1.SubjectAccessReviewStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.NonResourceAttributes") + proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1beta1.NonResourceRule") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.ResourceAttributes") + proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1beta1.ResourceRule") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), 
"k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") } func (m *ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -567,6 +802,57 @@ func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -630,6 +916,87 @@ func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ApiGroups) > 0 { + for _, s := range m.ApiGroups { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -722,7 +1089,7 @@ func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { +func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -732,7 +1099,7 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { 
+func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -773,7 +1140,7 @@ func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { +func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -783,36 +1150,114 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.ResourceAttributes != nil { + if m.Namespace != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n12, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace))) + i += copy(dAtA[i:], *m.Namespace) } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n13, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - if m.Verb != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Verb))) - i += copy(dAtA[i:], *m.Verb) + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Metadata != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n12, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Spec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.Status != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.User != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.User))) + i += copy(dAtA[i:], *m.User) } if 
len(m.Group) > 0 { for _, s := range m.Group { @@ -849,14 +1294,20 @@ func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n14, err := v.MarshalTo(dAtA[i:]) + n17, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n17 } } } + if m.Uid != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Uid))) + i += copy(dAtA[i:], *m.Uid) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -900,30 +1351,83 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EvaluationError))) i += copy(dAtA[i:], *m.EvaluationError) } + if m.Denied != nil { + dAtA[i] = 0x20 + i++ + if *m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 +func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, msg := range m.ResourceRules { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NonResourceRules) > 0 { + for _, msg := range m.NonResourceRules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Incomplete != nil { + dAtA[i] = 0x18 + i++ + if *m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.EvaluationError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EvaluationError))) + i += copy(dAtA[i:], *m.EvaluationError) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -986,6 +1490,27 @@ func (m *NonResourceAttributes) Size() (n int) { return n } +func (m *NonResourceRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ResourceAttributes) Size() (n int) { var l int _ = l @@ -1023,6 +1548,39 @@ func (m *ResourceAttributes) Size() (n int) { return n } +func (m *ResourceRule) Size() (n int) { + var l int + _ = 
l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ApiGroups) > 0 { + for _, s := range m.ApiGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *SelfSubjectAccessReview) Size() (n int) { var l int _ = l @@ -1061,6 +1619,40 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { return n } +func (m *SelfSubjectRulesReview) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + var l int + _ = l + if m.Namespace != nil { + l = len(*m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *SubjectAccessReview) Size() (n int) { var l int _ = l @@ -1093,8 +1685,8 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { l = m.NonResourceAttributes.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Verb != nil { - l = len(*m.Verb) + if m.User != nil { + l = len(*m.User) n += 1 + l + sovGenerated(uint64(l)) } if len(m.Group) > 0 { @@ -1116,6 +1708,10 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.Uid != nil { + l = len(*m.Uid) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1136,6 +1732,37 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { l = len(*m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) } + if m.Denied != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SubjectRulesReviewStatus) Size() (n int) { + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Incomplete != nil { + n += 2 + } + if m.EvaluationError != nil { + l = len(*m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1291,7 +1918,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1496,7 +2123,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { +func (m *NonResourceRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1519,15 +2146,15 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { fieldNum := 
int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + return fmt.Errorf("proto: NonResourceRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NonResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1552,12 +2179,11 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1582,16 +2208,126 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Verb = &s + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Verb = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { @@ -1757,6 +2493,173 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ApiGroups = append(m.ApiGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1813,7 +2716,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1995,12 +2898,243 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SelfSubjectRulesReviewSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &SubjectRulesReviewStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2080,7 +3214,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2271,7 +3405,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2297,7 +3431,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Verb = &s + m.User = &s iNdEx = postIndex case 4: if wireType != 2 { @@ -2354,51 +3488,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]*ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *ExtraValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2408,46 +3505,115 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + } + m.Extra[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postmsgIndex - m.Extra[mapkey] = mapvalue - } else { - var mapvalue *ExtraValue - m.Extra[mapkey] = mapvalue } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Uid = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2581,6 +3747,191 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.EvaluationError = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Denied = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, &ResourceRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, &NonResourceRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incomplete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incomplete = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.EvaluationError = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2709,51 +4060,64 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/authorization/v1beta1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x55, 0xcf, 0x6e, 0xd3, 0x4e, - 0x10, 0xfe, 0x39, 0x7f, 0xda, 0x66, 0x7a, 0xf8, 0xa1, 0x85, 0x52, 0x37, 0xa0, 0x28, 0xf2, 0x29, - 0x07, 0x58, 0x93, 0x8a, 0x43, 0x85, 0x90, 0xa0, 0x88, 0x0a, 0xf1, 0xa7, 0x20, 0x6d, 0x11, 0x07, - 0x24, 0x0e, 0x1b, 0x77, 0x48, 0xdd, 0x24, 0x5e, 0x6b, 0x77, 0xed, 0x52, 0x24, 0x5e, 0x82, 0x13, - 0x07, 0xae, 0x5c, 0x90, 0x78, 0x8f, 0x1e, 0x79, 0x04, 0x54, 0x5e, 0x04, 0xed, 0x7a, 0xdb, 0x12, - 0xe2, 0x06, 0x05, 0x15, 0x4e, 0xbd, 0xed, 0x8c, 0x77, 0xbe, 0xf9, 0x76, 0xe6, 0xf3, 0x0c, 0xdc, - 0x1d, 0xac, 0x29, 0x1a, 0x8b, 0x70, 0x90, 0xf5, 0x50, 0x26, 0xa8, 0x51, 0x85, 0xe9, 0xa0, 0x1f, - 0xf2, 0x34, 0x56, 0x21, 0xcf, 0xf4, 0x8e, 0x90, 0xf1, 0x5b, 0xae, 0x63, 0x91, 0x84, 0x79, 0xb7, - 0x87, 0x9a, 0x77, 0xc3, 0x3e, 0x26, 0x28, 0xb9, 0xc6, 0x6d, 0x9a, 0x4a, 0xa1, 0x05, 0xb9, 0x51, - 0x20, 0xd0, 0x13, 0x04, 0x9a, 0x0e, 0xfa, 0xd4, 0x20, 0xd0, 0x31, 0x04, 0xea, 0x10, 0x9a, 0xab, - 0x53, 0x72, 0x8e, 0x50, 0xf3, 0x30, 0x9f, 0xc8, 0xd2, 0xbc, 0x5e, 0x1e, 0x23, 0xb3, 0x44, 0xc7, - 0x23, 0x9c, 0xb8, 0x7e, 0x73, 0xfa, 0x75, 0x15, 0xed, 0xe0, 0x88, 0x4f, 0x44, 0x75, 0xcb, 0xa3, - 0x32, 0x1d, 0x0f, 0xc3, 0x38, 0xd1, 0x4a, 0xcb, 0x89, 0x90, 0x6b, 0xa7, 0xbe, 0xa5, 0xe4, 0x15, - 0x41, 0x00, 0xb0, 0xf1, 0x46, 0x4b, 0xfe, 0x82, 0x0f, 0x33, 0x24, 0x97, 0xa0, 0x1e, 0x6b, 0x1c, - 0x29, 0xdf, 0x6b, 0x57, 0x3b, 0x0d, 0x56, 0x18, 0xc1, 0xe7, 0x0a, 0xf8, 0x4f, 0x44, 0xc4, 0x87, - 0x5b, 0x59, 0x6f, 0x17, 0x23, 0xbd, 0x1e, 0x45, 0xa8, 0x14, 0xc3, 0x3c, 0xc6, 0x3d, 0xf2, 0x08, - 0x16, 0x4c, 0x85, 0xb6, 0xb9, 0xe6, 0xbe, 0xd7, 0xf6, 0x3a, 0x8b, 0xab, 0x94, 0x4e, 0xa9, 0xbf, - 0xb9, 0x4b, 0xf3, 0x2e, 0x7d, 0x66, 0x91, 
0x36, 0x51, 0x73, 0x76, 0x1c, 0x4f, 0x5e, 0x41, 0x4d, - 0xa5, 0x18, 0xf9, 0x15, 0x8b, 0xf3, 0x90, 0xce, 0xda, 0x47, 0x5a, 0x42, 0x70, 0x2b, 0xc5, 0x88, - 0x59, 0x58, 0x12, 0xc1, 0x9c, 0xd2, 0x5c, 0x67, 0xca, 0xaf, 0xda, 0x04, 0x8f, 0xcf, 0x26, 0x81, - 0x85, 0x64, 0x0e, 0x3a, 0xb8, 0x03, 0x4b, 0x4f, 0x45, 0xc2, 0x50, 0x89, 0x4c, 0x46, 0xb8, 0xae, - 0xb5, 0x8c, 0x7b, 0x99, 0x46, 0x45, 0x08, 0xd4, 0x52, 0xae, 0x77, 0x6c, 0x91, 0x1a, 0xcc, 0x9e, - 0x8d, 0x2f, 0x47, 0xd9, 0xb3, 0x0f, 0x6e, 0x30, 0x7b, 0x0e, 0x0e, 0x3c, 0x20, 0x25, 0xe1, 0x57, - 0xa1, 0x91, 0xf0, 0x11, 0xaa, 0x94, 0x47, 0xe8, 0x30, 0x4e, 0x1c, 0x65, 0x40, 0xa6, 0x99, 0x7d, - 0x29, 0xb2, 0xd4, 0xbe, 0xb6, 0xc1, 0x0a, 0x83, 0xf8, 0x30, 0x9f, 0xa3, 0x54, 0xb1, 0x48, 0xfc, - 0x9a, 0xf5, 0x1f, 0x99, 0xa4, 0x09, 0x0b, 0xd2, 0xe5, 0xf5, 0xeb, 0xf6, 0xd3, 0xb1, 0x4d, 0xda, - 0xb0, 0xa8, 0xb2, 0xde, 0xf1, 0xe7, 0x39, 0xfb, 0xf9, 0x67, 0x97, 0x61, 0x60, 0xe8, 0xf8, 0xf3, - 0x05, 0x03, 0x73, 0x0e, 0xbe, 0x54, 0x60, 0x79, 0x0b, 0x87, 0xaf, 0xff, 0xb6, 0x6e, 0xf8, 0x98, - 0x6e, 0x36, 0xff, 0xa0, 0xad, 0xe5, 0x24, 0xff, 0xb5, 0x76, 0xde, 0x57, 0xe0, 0xca, 0x14, 0x2a, - 0x44, 0x03, 0x91, 0x13, 0xca, 0x70, 0xd5, 0xbb, 0x3f, 0x3b, 0xa1, 0x49, 0x95, 0xb1, 0x12, 0x7c, - 0xf2, 0x0e, 0x96, 0x92, 0x32, 0x45, 0xbb, 0x72, 0x3f, 0x98, 0x3d, 0x71, 0xe9, 0x0f, 0xc2, 0xca, - 0xb3, 0x04, 0x9f, 0x2a, 0x70, 0xf1, 0x7c, 0xf0, 0xfc, 0x5e, 0x3c, 0x1f, 0x6b, 0xb0, 0x7c, 0x2e, - 0x9c, 0xf1, 0x81, 0x6b, 0x67, 0x62, 0xb5, 0x6c, 0x26, 0xd6, 0x8a, 0x05, 0x57, 0xcc, 0xc4, 0x5d, - 0xa8, 0xa3, 0x59, 0x82, 0x7e, 0xbd, 0x5d, 0xed, 0x2c, 0xae, 0x3e, 0x3f, 0xb3, 0xfe, 0x53, 0xbb, - 0x5b, 0x37, 0x12, 0x2d, 0xf7, 0x59, 0x91, 0xa2, 0x99, 0xbb, 0x85, 0x6b, 0x9d, 0xe4, 0x02, 0x54, - 0x07, 0xb8, 0xef, 0xe6, 0xb9, 0x39, 0x12, 0x06, 0xf5, 0xdc, 0xec, 0x62, 0x57, 0xa4, 0xdb, 0xb3, - 0x73, 0x39, 0xd9, 0xe7, 0xac, 0x80, 0xba, 0x55, 0x59, 0xf3, 0x82, 0x3d, 0x58, 0x39, 0x55, 0x43, - 0x66, 0x29, 0xf0, 0xe1, 0x50, 0xec, 0xe1, 0xb6, 0xa5, 0xb2, 0xc0, 0x8e, 0x4c, 0x72, 0x19, 0xe6, - 0x24, 0x72, 0x25, 0x12, 0xb7, 0x5a, 0x9c, 0x45, 0x3a, 0xf0, 0x3f, 0x1a, 0x70, 0x9b, 0x7a, 0x43, - 0x4a, 0x21, 0x5d, 0x9d, 0x7f, 0x75, 0xdf, 0x5b, 0x39, 0x38, 0x6c, 0x79, 0x5f, 0x0f, 0x5b, 0xde, - 0xb7, 0xc3, 0x96, 0xf7, 0xe1, 0x7b, 0xeb, 0xbf, 0x97, 0xf3, 0x8e, 0xe9, 0x8f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xb1, 0xb0, 0xe4, 0xe9, 0xeb, 0x09, 0x00, 0x00, + // 867 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcd, 0x8e, 0x1b, 0x45, + 0x10, 0x66, 0xfc, 0xb3, 0x6b, 0x97, 0x41, 0x89, 0x1a, 0x92, 0x4c, 0x4c, 0x64, 0x59, 0x23, 0x0e, + 0x7b, 0x40, 0x33, 0xd9, 0x25, 0x82, 0xb0, 0x80, 0x50, 0x22, 0x56, 0x5c, 0x36, 0x81, 0xf4, 0x02, + 0x87, 0x48, 0x1c, 0xda, 0xe3, 0x8a, 0xdd, 0x78, 0xfe, 0xd4, 0xdd, 0xe3, 0xb0, 0x3c, 0x00, 0x5c, + 0x90, 0x72, 0x85, 0x37, 0xca, 0x91, 0x47, 0x40, 0xcb, 0x9d, 0x03, 0x4f, 0x80, 0xba, 0xa7, 0xed, + 0xf1, 0xcf, 0x38, 0x76, 0x56, 0xda, 0x13, 0xdc, 0xba, 0xaa, 0xa6, 0xbe, 0xfe, 0xe6, 0x9b, 0x6f, + 0xba, 0x1a, 0xee, 0x4e, 0xee, 0x4b, 0x9f, 0xa7, 0x01, 0xcb, 0x78, 0xc0, 0x72, 0x35, 0x4e, 0x05, + 0xff, 0x89, 0x29, 0x9e, 0x26, 0xc1, 0xf4, 0x70, 0x80, 0x8a, 0x1d, 0x06, 0x23, 0x4c, 0x50, 0x30, + 0x85, 0x43, 0x3f, 0x13, 0xa9, 0x4a, 0x49, 0xbf, 0xe8, 0xf0, 0x59, 0xc6, 0xfd, 0xa5, 0x0e, 0xdf, + 0x76, 0x74, 0xef, 0x95, 0x98, 0x31, 0x0b, 0xc7, 0x3c, 0x41, 0x71, 0x1e, 0x64, 0x93, 0x91, 0x4e, + 0xc8, 0x20, 0x46, 0xc5, 0x82, 0xe9, 0x1a, 0x6e, 0x37, 0xd8, 0xd4, 0x25, 0xf2, 0x44, 0xf1, 0x18, + 0xd7, 0x1a, 0x3e, 0xdc, 0xd6, 0x20, 0xc3, 0x31, 0xc6, 
0x6c, 0xad, 0xef, 0x83, 0x4d, 0x7d, 0xb9, + 0xe2, 0x51, 0xc0, 0x13, 0x25, 0x95, 0x58, 0x6d, 0xf2, 0x3c, 0x80, 0x93, 0x1f, 0x95, 0x60, 0xdf, + 0xb1, 0x28, 0x47, 0xf2, 0x0e, 0x34, 0xb9, 0xc2, 0x58, 0xba, 0x4e, 0xbf, 0x7e, 0xd0, 0xa6, 0x45, + 0xe0, 0xfd, 0x5a, 0x03, 0xf7, 0x34, 0x0d, 0x59, 0x74, 0x96, 0x0f, 0x7e, 0xc0, 0x50, 0x3d, 0x08, + 0x43, 0x94, 0x92, 0xe2, 0x94, 0xe3, 0x73, 0x72, 0x0a, 0x2d, 0xfd, 0xe6, 0x43, 0xa6, 0x98, 0xeb, + 0xf4, 0x9d, 0x83, 0xce, 0xd1, 0x5d, 0xbf, 0x54, 0x72, 0x4e, 0xc4, 0xcf, 0x26, 0x23, 0x9d, 0x90, + 0xbe, 0x7e, 0xda, 0x9f, 0x1e, 0xfa, 0x5f, 0x19, 0xac, 0x47, 0xa8, 0x18, 0x9d, 0x23, 0x90, 0x47, + 0xd0, 0x90, 0x19, 0x86, 0x6e, 0xcd, 0x20, 0x7d, 0xec, 0x6f, 0xfb, 0x26, 0x7e, 0x05, 0xa5, 0xb3, + 0x0c, 0x43, 0x6a, 0x60, 0xc8, 0x19, 0xec, 0x49, 0xc5, 0x54, 0x2e, 0xdd, 0xba, 0x01, 0xfc, 0xe4, + 0x72, 0x80, 0x06, 0x82, 0x5a, 0x28, 0xef, 0x73, 0xb8, 0xf1, 0x38, 0x4d, 0x28, 0xca, 0x34, 0x17, + 0x21, 0x3e, 0x50, 0x4a, 0xf0, 0x41, 0xae, 0x50, 0x12, 0x02, 0x8d, 0x8c, 0xa9, 0xb1, 0x91, 0xa1, + 0x4d, 0xcd, 0x5a, 0xe7, 0xa6, 0x28, 0x06, 0xe6, 0x85, 0xda, 0xd4, 0xac, 0xbd, 0x27, 0x70, 0x6d, + 0x01, 0x80, 0xe6, 0x91, 0x11, 0x5e, 0x97, 0xe6, 0xc2, 0x9b, 0x80, 0x1c, 0xc0, 0xb5, 0xa4, 0x7c, + 0xf0, 0x5b, 0x7a, 0x2a, 0xdd, 0x9a, 0xa9, 0xaf, 0xa6, 0xbd, 0x97, 0x0e, 0x90, 0x0a, 0x46, 0x77, + 0xa0, 0x9d, 0xb0, 0x18, 0x65, 0xc6, 0x42, 0xb4, 0xb4, 0xca, 0x44, 0x15, 0x37, 0x4d, 0x64, 0x24, + 0xd2, 0x3c, 0x33, 0x82, 0xb5, 0x69, 0x11, 0x10, 0x17, 0xf6, 0xa7, 0x28, 0x24, 0x4f, 0x13, 0xb7, + 0x61, 0xf2, 0xb3, 0x90, 0x74, 0xa1, 0x25, 0xec, 0xbe, 0x6e, 0xd3, 0x94, 0xe6, 0x31, 0xe9, 0x43, + 0x47, 0xe6, 0x83, 0x79, 0x79, 0xcf, 0x94, 0x17, 0x53, 0x9a, 0x81, 0xa6, 0xe3, 0xee, 0x17, 0x0c, + 0xf4, 0xda, 0xfb, 0xd9, 0x81, 0x37, 0x77, 0xd0, 0xe6, 0x0e, 0xb4, 0x59, 0xc6, 0xbf, 0xd4, 0xf4, + 0x66, 0xaa, 0x94, 0x09, 0x5d, 0x9d, 0x6d, 0xa2, 0xbf, 0xbd, 0xa9, 0xce, 0x13, 0xe4, 0x3d, 0x78, + 0x6b, 0x16, 0x3c, 0xd6, 0x6a, 0xb8, 0x0d, 0xf3, 0xc4, 0x72, 0xd2, 0x7b, 0x51, 0x83, 0x5b, 0x67, + 0x18, 0x3d, 0xbb, 0x7a, 0xd7, 0x3f, 0x59, 0x72, 0xfd, 0x67, 0x3b, 0x98, 0xb4, 0x9a, 0xd6, 0x55, + 0x3b, 0xff, 0x1f, 0x07, 0xde, 0x7d, 0xc5, 0xd6, 0x64, 0x08, 0x44, 0xac, 0x99, 0xd0, 0xea, 0x73, + 0x6f, 0x3b, 0x81, 0x75, 0x03, 0xd3, 0x0a, 0x3c, 0x12, 0xc3, 0x8d, 0xa4, 0xea, 0xff, 0xb3, 0xf2, + 0x7d, 0xb4, 0x7d, 0xa3, 0xca, 0xdf, 0x97, 0x56, 0xa3, 0xea, 0xd3, 0xef, 0xe6, 0xc2, 0x4b, 0x6b, + 0x4b, 0x5e, 0x8d, 0x0b, 0xbe, 0x5e, 0x72, 0xc1, 0xa7, 0xaf, 0xe5, 0x82, 0x05, 0x56, 0x0b, 0x26, + 0xa0, 0x2b, 0x26, 0x38, 0xde, 0xd9, 0x04, 0x8b, 0x78, 0xcb, 0x1e, 0x38, 0x86, 0xee, 0xe6, 0x7d, + 0x5f, 0x7d, 0xe0, 0x78, 0xbf, 0xd4, 0xe0, 0xed, 0xff, 0x67, 0x88, 0x56, 0xf1, 0xef, 0x3a, 0xdc, + 0xfa, 0x2f, 0xfd, 0x45, 0xfa, 0xa4, 0xcf, 0x25, 0x0a, 0x3b, 0x56, 0xcc, 0xba, 0x9c, 0x35, 0xc5, + 0xf1, 0x6b, 0x67, 0xcd, 0x53, 0x68, 0xa2, 0xbe, 0x91, 0xb8, 0xcd, 0x7e, 0xfd, 0xa0, 0x73, 0xf4, + 0xc5, 0xa5, 0xbf, 0x9f, 0x6f, 0x2e, 0x36, 0x27, 0x89, 0x12, 0xe7, 0xb4, 0x80, 0x24, 0xd7, 0xa1, + 0x9e, 0xf3, 0xa1, 0x9d, 0x44, 0x7a, 0xd9, 0x7d, 0x66, 0xef, 0x3f, 0xe6, 0x31, 0x5d, 0x9f, 0xe0, + 0xb9, 0x35, 0xae, 0x5e, 0x92, 0x87, 0xd0, 0x9c, 0xea, 0xab, 0x91, 0x95, 0xe5, 0xfd, 0xed, 0x6c, + 0xca, 0xeb, 0x14, 0x2d, 0x5a, 0x8f, 0x6b, 0xf7, 0x1d, 0xef, 0x85, 0x03, 0xb7, 0x37, 0xda, 0x42, + 0xcf, 0x57, 0x16, 0x45, 0xe9, 0x73, 0x1c, 0x9a, 0xbd, 0x5b, 0x74, 0x16, 0x92, 0x9b, 0xb0, 0x27, + 0x90, 0xc9, 0x34, 0xb1, 0x53, 0xda, 0x46, 0xfa, 0x6a, 0x80, 0x1a, 0xdd, 0xec, 0x7d, 0x22, 0x44, + 0x3a, 0x93, 0x76, 0x35, 0xad, 0x11, 0x86, 0x98, 0x70, 0x1c, 0x9a, 0xd1, 0xdd, 
0xa2, 0x36, 0xf2, + 0x7e, 0xaf, 0x81, 0xbb, 0xe9, 0x6f, 0x27, 0xdf, 0x94, 0x13, 0xd2, 0x14, 0xcd, 0xec, 0xed, 0x1c, + 0xf9, 0xbb, 0xdb, 0x4f, 0xb7, 0xd1, 0x65, 0x10, 0xf2, 0x3d, 0x5c, 0x4f, 0x96, 0x2f, 0x3e, 0xc5, + 0xe8, 0xee, 0x1c, 0x1d, 0xbe, 0x96, 0xdd, 0x0c, 0xf6, 0x1a, 0x14, 0xe9, 0x01, 0xf0, 0x24, 0x4c, + 0xe3, 0x2c, 0x42, 0x85, 0x46, 0x8e, 0x16, 0x5d, 0xc8, 0x54, 0x69, 0xd6, 0xa8, 0xd4, 0xec, 0xe1, + 0xed, 0x97, 0x17, 0x3d, 0xe7, 0x8f, 0x8b, 0x9e, 0xf3, 0xe7, 0x45, 0xcf, 0xf9, 0xed, 0xaf, 0xde, + 0x1b, 0x4f, 0xf7, 0x2d, 0x8d, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x47, 0x6d, 0x06, 0x59, + 0x0c, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1beta1/register.go new file mode 100644 index 00000000..1545ceb1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/authorization/v1beta1/register.go @@ -0,0 +1,10 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("authorization.k8s.io", "v1beta1", "localsubjectaccessreviews", true, &LocalSubjectAccessReview{}) + k8s.Register("authorization.k8s.io", "v1beta1", "selfsubjectaccessreviews", false, &SelfSubjectAccessReview{}) + k8s.Register("authorization.k8s.io", "v1beta1", "selfsubjectrulesreviews", false, &SelfSubjectRulesReview{}) + k8s.Register("authorization.k8s.io", "v1beta1", "subjectaccessreviews", false, &SubjectAccessReview{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v1/generated.pb.go index bc707870..63db4bb7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v1/generated.pb.go @@ -1,16 +1,16 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/autoscaling/v1/generated.proto /* Package v1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto + k8s.io/api/autoscaling/v1/generated.proto It has these top-level messages: CrossVersionObjectReference HorizontalPodAutoscaler + HorizontalPodAutoscalerCondition HorizontalPodAutoscalerList HorizontalPodAutoscalerSpec HorizontalPodAutoscalerStatus @@ -31,12 +31,12 @@ package v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_resource "github.com/ericchiang/k8s/api/resource" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import _ "github.com/ericchiang/k8s/apis/core/v1" +import k8s_io_apimachinery_pkg_api_resource "github.com/ericchiang/k8s/apis/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -53,7 +53,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` @@ -93,10 +93,10 @@ func (m *CrossVersionObjectReference) GetApiVersion() string { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec *HorizontalPodAutoscalerSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // current information about the autoscaler. @@ -110,7 +110,7 @@ func (m *HorizontalPodAutoscaler) String() string { return proto.Comp func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *HorizontalPodAutoscaler) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *HorizontalPodAutoscaler) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -131,11 +131,74 @@ func (m *HorizontalPodAutoscaler) GetStatus() *HorizontalPodAutoscalerStatus { return nil } +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. 
+type HorizontalPodAutoscalerCondition struct { + // type describes the current condition + Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // status is the status of the condition (True, False, Unknown) + Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + // reason is the reason for the condition's last transition. + // +optional + Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message *string `protobuf:"bytes,5,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (m *HorizontalPodAutoscalerCondition) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *HorizontalPodAutoscalerCondition) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *HorizontalPodAutoscalerCondition) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +func (m *HorizontalPodAutoscalerCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { + if m != nil { + return m.LastTransitionTime + } + return nil +} + +func (m *HorizontalPodAutoscalerCondition) GetReason() string { + if m != nil && m.Reason != nil { + return *m.Reason + } + return "" +} + +func (m *HorizontalPodAutoscalerCondition) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { // Standard list metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // list of horizontal pod autoscaler objects. 
Items []*HorizontalPodAutoscaler `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -145,10 +208,10 @@ func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutosc func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{3} } -func (m *HorizontalPodAutoscalerList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *HorizontalPodAutoscalerList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -183,7 +246,7 @@ func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutosc func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptorGenerated, []int{4} } func (m *HorizontalPodAutoscalerSpec) GetScaleTargetRef() *CrossVersionObjectReference { @@ -222,7 +285,7 @@ type HorizontalPodAutoscalerStatus struct { // last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional - LastScaleTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,2,opt,name=lastScaleTime" json:"lastScaleTime,omitempty"` + LastScaleTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,2,opt,name=lastScaleTime" json:"lastScaleTime,omitempty"` // current number of replicas of pods managed by this autoscaler. CurrentReplicas *int32 `protobuf:"varint,3,opt,name=currentReplicas" json:"currentReplicas,omitempty"` // desired number of replicas of pods managed by this autoscaler. 
@@ -238,7 +301,7 @@ func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAuto func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptorGenerated, []int{5} } func (m *HorizontalPodAutoscalerStatus) GetObservedGeneration() int64 { @@ -248,7 +311,7 @@ func (m *HorizontalPodAutoscalerStatus) GetObservedGeneration() int64 { return 0 } -func (m *HorizontalPodAutoscalerStatus) GetLastScaleTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *HorizontalPodAutoscalerStatus) GetLastScaleTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastScaleTime } @@ -303,7 +366,7 @@ type MetricSpec struct { func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (m *MetricSpec) String() string { return proto.CompactTextString(m) } func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *MetricSpec) GetType() string { if m != nil && m.Type != nil { @@ -359,7 +422,7 @@ type MetricStatus struct { func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (m *MetricStatus) String() string { return proto.CompactTextString(m) } func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *MetricStatus) GetType() string { if m != nil && m.Type != nil { @@ -397,14 +460,14 @@ type ObjectMetricSource struct { // metricName is the name of the metric in question. MetricName *string `protobuf:"bytes,2,opt,name=metricName" json:"metricName,omitempty"` // targetValue is the target value of the metric (as a quantity). - TargetValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=targetValue" json:"targetValue,omitempty"` - XXX_unrecognized []byte `json:"-"` + TargetValue *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=targetValue" json:"targetValue,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (m *ObjectMetricSource) String() string { return proto.CompactTextString(m) } func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *ObjectMetricSource) GetTarget() *CrossVersionObjectReference { if m != nil { @@ -420,7 +483,7 @@ func (m *ObjectMetricSource) GetMetricName() string { return "" } -func (m *ObjectMetricSource) GetTargetValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ObjectMetricSource) GetTargetValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.TargetValue } @@ -435,14 +498,14 @@ type ObjectMetricStatus struct { // metricName is the name of the metric in question. MetricName *string `protobuf:"bytes,2,opt,name=metricName" json:"metricName,omitempty"` // currentValue is the current value of the metric (as a quantity). 
- CurrentValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=currentValue" json:"currentValue,omitempty"` - XXX_unrecognized []byte `json:"-"` + CurrentValue *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=currentValue" json:"currentValue,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (m *ObjectMetricStatus) String() string { return proto.CompactTextString(m) } func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *ObjectMetricStatus) GetTarget() *CrossVersionObjectReference { if m != nil { @@ -458,7 +521,7 @@ func (m *ObjectMetricStatus) GetMetricName() string { return "" } -func (m *ObjectMetricStatus) GetCurrentValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ObjectMetricStatus) GetCurrentValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.CurrentValue } @@ -474,14 +537,14 @@ type PodsMetricSource struct { MetricName *string `protobuf:"bytes,1,opt,name=metricName" json:"metricName,omitempty"` // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - TargetAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=targetAverageValue" json:"targetAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` + TargetAverageValue *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=targetAverageValue" json:"targetAverageValue,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (m *PodsMetricSource) String() string { return proto.CompactTextString(m) } func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *PodsMetricSource) GetMetricName() string { if m != nil && m.MetricName != nil { @@ -490,7 +553,7 @@ func (m *PodsMetricSource) GetMetricName() string { return "" } -func (m *PodsMetricSource) GetTargetAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *PodsMetricSource) GetTargetAverageValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.TargetAverageValue } @@ -504,14 +567,14 @@ type PodsMetricStatus struct { MetricName *string `protobuf:"bytes,1,opt,name=metricName" json:"metricName,omitempty"` // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - CurrentAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=currentAverageValue" json:"currentAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` + CurrentAverageValue *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=currentAverageValue" json:"currentAverageValue,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (m *PodsMetricStatus) String() string { return proto.CompactTextString(m) } func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*PodsMetricStatus) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *PodsMetricStatus) GetMetricName() string { if m != nil && m.MetricName != nil { @@ -520,7 +583,7 @@ func (m *PodsMetricStatus) GetMetricName() string { return "" } -func (m *PodsMetricStatus) GetCurrentAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *PodsMetricStatus) GetCurrentAverageValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.CurrentAverageValue } @@ -542,18 +605,18 @@ type ResourceMetricSource struct { // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `protobuf:"varint,2,opt,name=targetAverageUtilization" json:"targetAverageUtilization,omitempty"` - // targetAverageValue is the the target value of the average of the + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - TargetAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=targetAverageValue" json:"targetAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` + TargetAverageValue *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=targetAverageValue" json:"targetAverageValue,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (m *ResourceMetricSource) String() string { return proto.CompactTextString(m) } func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *ResourceMetricSource) GetName() string { if m != nil && m.Name != nil { @@ -569,7 +632,7 @@ func (m *ResourceMetricSource) GetTargetAverageUtilization() int32 { return 0 } -func (m *ResourceMetricSource) GetTargetAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceMetricSource) GetTargetAverageValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.TargetAverageValue } @@ -591,18 +654,18 @@ type ResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `protobuf:"varint,2,opt,name=currentAverageUtilization" json:"currentAverageUtilization,omitempty"` - // currentAverageValue is the the current value of the average of the + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. 
- CurrentAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=currentAverageValue" json:"currentAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` + CurrentAverageValue *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=currentAverageValue" json:"currentAverageValue,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (m *ResourceMetricStatus) String() string { return proto.CompactTextString(m) } func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ResourceMetricStatus) GetName() string { if m != nil && m.Name != nil { @@ -618,7 +681,7 @@ func (m *ResourceMetricStatus) GetCurrentAverageUtilization() int32 { return 0 } -func (m *ResourceMetricStatus) GetCurrentAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceMetricStatus) GetCurrentAverageValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.CurrentAverageValue } @@ -627,13 +690,13 @@ func (m *ResourceMetricStatus) GetCurrentAverageValue() *k8s_io_kubernetes_pkg_a // Scale represents a scaling request for a resource. type Scale struct { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec *ScaleSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. 
// +optional Status *ScaleStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -642,9 +705,9 @@ type Scale struct { func (m *Scale) Reset() { *m = Scale{} } func (m *Scale) String() string { return proto.CompactTextString(m) } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -func (m *Scale) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Scale) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -676,7 +739,7 @@ type ScaleSpec struct { func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ScaleSpec) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -701,7 +764,7 @@ type ScaleStatus struct { func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ScaleStatus) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -718,22 +781,23 @@ func (m *ScaleStatus) GetSelector() string { } func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), 
"github.com/ericchiang.k8s.apis.autoscaling.v1.ScaleStatus") + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -825,6 +889,61 @@ func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) + i += copy(dAtA[i:], *m.Status) + } + if m.LastTransitionTime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Reason != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i += copy(dAtA[i:], *m.Reason) + } + if m.Message != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -844,11 +963,11 @@ func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n4, err := m.Metadata.MarshalTo(dAtA[i:]) + n5, err := 
m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -887,11 +1006,11 @@ func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n5, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) + n6, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 } if m.MinReplicas != nil { dAtA[i] = 0x10 @@ -938,11 +1057,11 @@ func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n6, err := m.LastScaleTime.MarshalTo(dAtA[i:]) + n7, err := m.LastScaleTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } if m.CurrentReplicas != nil { dAtA[i] = 0x18 @@ -990,31 +1109,31 @@ func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n7, err := m.Object.MarshalTo(dAtA[i:]) + n8, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } if m.Pods != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n8, err := m.Pods.MarshalTo(dAtA[i:]) + n9, err := m.Pods.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 } if m.Resource != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n9, err := m.Resource.MarshalTo(dAtA[i:]) + n10, err := m.Resource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1047,31 +1166,31 @@ func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n10, err := m.Object.MarshalTo(dAtA[i:]) + n11, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } if m.Pods != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n11, err := m.Pods.MarshalTo(dAtA[i:]) + n12, err := m.Pods.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } if m.Resource != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n12, err := m.Resource.MarshalTo(dAtA[i:]) + n13, err := m.Resource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1098,11 +1217,11 @@ func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n13, err := m.Target.MarshalTo(dAtA[i:]) + n14, err := m.Target.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 } if m.MetricName != nil { dAtA[i] = 0x12 @@ -1114,11 +1233,11 @@ func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n14, err := m.TargetValue.MarshalTo(dAtA[i:]) + n15, err := m.TargetValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n15 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1145,11 +1264,11 @@ func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n15, err := 
m.Target.MarshalTo(dAtA[i:]) + n16, err := m.Target.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n16 } if m.MetricName != nil { dAtA[i] = 0x12 @@ -1161,11 +1280,11 @@ func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n16, err := m.CurrentValue.MarshalTo(dAtA[i:]) + n17, err := m.CurrentValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1198,11 +1317,11 @@ func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n17, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + n18, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1235,11 +1354,11 @@ func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n18, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + n19, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1277,11 +1396,11 @@ func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n19, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + n20, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n20 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1319,11 +1438,11 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n20, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + n21, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n21 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1350,31 +1469,31 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n21, err := m.Metadata.MarshalTo(dAtA[i:]) + n22, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n22, err := m.Spec.MarshalTo(dAtA[i:]) + n23, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n23, err := m.Status.MarshalTo(dAtA[i:]) + n24, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1440,24 +1559,6 @@ func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 
-} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1509,6 +1610,35 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { return n } +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + var l int + _ = l + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = len(*m.Status) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *HorizontalPodAutoscalerList) Size() (n int) { var l int _ = l @@ -2000,7 +2130,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2094,6 +2224,210 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } return nil } +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
string(dAtA[iNdEx:postIndex]) + m.Status = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2150,7 +2484,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2429,7 +2763,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2996,7 +3330,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3143,7 +3477,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.CurrentValue == nil { - m.CurrentValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.CurrentValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3257,7 +3591,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3371,7 +3705,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3505,7 +3839,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3639,7 +3973,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3723,7 +4057,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4094,69 +4428,71 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: 
integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/autoscaling/v1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 942 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x14, 0xc6, 0x4e, 0x76, 0xd5, 0x1e, 0x97, 0x1f, 0x0d, 0x95, 0x08, 0x5b, 0x11, 0x56, 0xbe, 0x61, - 0x91, 0x8a, 0xad, 0x44, 0x15, 0xe2, 0x57, 0xa8, 0xad, 0x28, 0x11, 0xa2, 0xdb, 0xe0, 0xb2, 0x15, - 0x2a, 0xa8, 0x62, 0xd6, 0x3e, 0x84, 0x21, 0x89, 0x6d, 0xcd, 0x8c, 0x23, 0xda, 0xa7, 0x40, 0x5c, - 0xc1, 0x35, 0x42, 0x3c, 0x05, 0x17, 0x48, 0x5c, 0xf4, 0x92, 0x47, 0x40, 0x0b, 0xe2, 0x8e, 0x77, - 0x40, 0x33, 0x9e, 0xf5, 0xfa, 0x27, 0xce, 0x6e, 0xda, 0x54, 0xbd, 0xb3, 0x67, 0xce, 0xf7, 0xcd, - 0xf9, 0xbe, 0x39, 0x73, 0x66, 0xe0, 0xed, 0xe9, 0x5b, 0xc2, 0x63, 0x89, 0x3f, 0xcd, 0x0e, 0x91, - 0xc7, 0x28, 0x51, 0xf8, 0xe9, 0x74, 0xe2, 0xd3, 0x94, 0x09, 0x9f, 0x66, 0x32, 0x11, 0x21, 0x9d, - 0xb1, 0x78, 0xe2, 0x2f, 0x06, 0xfe, 0x04, 0x63, 0xe4, 0x54, 0x62, 0xe4, 0xa5, 0x3c, 0x91, 0x09, - 0x79, 0x3d, 0x87, 0x7a, 0x27, 0x50, 0x2f, 0x9d, 0x4e, 0x3c, 0x05, 0xf5, 0x4a, 0x50, 0x6f, 0x31, - 0xd8, 0x19, 0xb6, 0xae, 0xe2, 0x73, 0x14, 0x49, 0xc6, 0x43, 0xac, 0xd3, 0xaf, 0xc0, 0x08, 0x7f, - 0x8e, 0x92, 0x2e, 0x49, 0x69, 0xe7, 0x8d, 0xe5, 0x18, 0x9e, 0xc5, 0x92, 0xcd, 0x9b, 0x4b, 0x5c, - 0x59, 0x1d, 0x2e, 0xc2, 0x6f, 0x70, 0x4e, 0x1b, 0xa8, 0xc1, 0x72, 0x54, 0x26, 0xd9, 0xcc, 0x67, - 0xb1, 0x14, 0x92, 0x37, 0x20, 0x97, 0xdb, 0xf5, 0x37, 0x55, 0xb8, 0x08, 0x97, 0xae, 0xf3, 0x44, - 0x88, 0x3b, 0xc8, 0x05, 0x4b, 0xe2, 0x5b, 0x87, 0xdf, 0x62, 0x28, 0x03, 0xfc, 0x1a, 0x39, 0xc6, - 0x21, 0x12, 0x02, 0xdd, 0x29, 0x8b, 0xa3, 0x9e, 0xb5, 0x6b, 0xed, 0x9d, 0x0f, 0xf4, 0xb7, 0x1a, - 0x8b, 0xe9, 0x1c, 0x7b, 0x76, 0x3e, 0xa6, 0xbe, 0x49, 0x1f, 0x80, 0xa6, 0xcc, 0x90, 0xf4, 0x3a, - 0x7a, 0xa6, 0x34, 0xe2, 0xfe, 0x6c, 0xc3, 0x4b, 0xa3, 0x84, 0xb3, 0x07, 0x49, 0x2c, 0xe9, 0x6c, - 0x9c, 0x44, 0x57, 0xcd, 0xa6, 0x21, 0x27, 0x1f, 0xc3, 0x39, 0xe5, 0x71, 0x44, 0x25, 0xd5, 0xeb, - 0x38, 0x43, 0xcf, 0x5b, 0xb1, 0xdd, 0x2a, 0xd6, 0x5b, 0x0c, 0xbc, 0x3c, 0xd5, 0x9b, 0x28, 0x69, - 0x50, 0xe0, 0xc9, 0x5d, 0xe8, 0x8a, 0x14, 0x43, 0x9d, 0x9b, 0x33, 0xbc, 0xe1, 0x9d, 0xb9, 0x6c, - 0xbc, 0x96, 0xec, 0x6e, 0xa7, 0x18, 0x06, 0x9a, 0x93, 0x7c, 0x05, 0xdb, 0x42, 0x52, 0x99, 0x09, - 0xad, 0xcf, 0x19, 0x8e, 0x36, 0xc0, 0xae, 0xf9, 0x02, 0xc3, 0xeb, 0xfe, 0x6e, 0xc1, 0xa5, 0x96, - 0xc8, 0x4f, 0x98, 0x90, 0x64, 0xd4, 0x70, 0xea, 0xf2, 0x59, 0x9c, 0x52, 0xd8, 0x9a, 0x4f, 0x9f, - 0xc3, 0x16, 0x93, 0x38, 0x17, 0x3d, 0x7b, 0xb7, 0xb3, 0xe7, 0x0c, 0xaf, 0x3d, 0xbe, 0x94, 0x20, - 0x27, 0x74, 0x7f, 0xb2, 0x5b, 0x35, 0x28, 0x2f, 0x49, 0x0c, 0xcf, 0xe9, 0xbf, 0xcf, 0x28, 0x9f, - 0xa0, 0xaa, 0x34, 0xa3, 0x64, 0x9d, 0xbd, 0x5a, 0x51, 0xb1, 0x41, 0x8d, 0x9d, 0xec, 0x82, 0x33, - 0x67, 0x71, 0x80, 0xe9, 0x8c, 0x85, 0x54, 0xe8, 0xc2, 0xd8, 0x0a, 0xca, 0x43, 0x3a, 0x82, 0x7e, - 0x57, 0x44, 0x74, 0x4c, 0xc4, 0xc9, 0x10, 0xb9, 0x01, 0x7d, 0xa9, 0x09, 0xaf, 0x8f, 0x0f, 0x0e, - 0x24, 0x9b, 0xb1, 0x07, 0x54, 0xb2, 0x24, 0x1e, 0x23, 0x0f, 0x31, 0x96, 0x74, 0x82, 0xbd, 0xae, - 0x06, 0x9d, 0x12, 0xe5, 0xfe, 0x66, 0xc3, 0x2b, 0x2b, 0x2b, 0x81, 0x78, 0x40, 0x92, 0x43, 0x81, - 0x7c, 0x81, 0xd1, 0x47, 0xf9, 0x49, 0x55, 0xe7, 0x49, 0x39, 0xd4, 0x09, 0x96, 0xcc, 0x90, 0x7d, - 0x78, 0x76, 0x46, 0x85, 0xbc, 0xad, 0x35, 0x33, 0x73, 0x28, 0x9d, 0xe1, 
0xde, 0x59, 0xca, 0x42, - 0xc5, 0x07, 0x55, 0x38, 0xd9, 0x83, 0xe7, 0xc3, 0x8c, 0x73, 0x8c, 0x65, 0xcd, 0x8f, 0xfa, 0xb0, - 0x8a, 0x8c, 0x50, 0x30, 0x8e, 0x51, 0x11, 0x99, 0x9b, 0x50, 0x1f, 0x26, 0x23, 0x78, 0xd5, 0x80, - 0x5b, 0xed, 0xdb, 0xd2, 0xc8, 0xd3, 0xc2, 0xdc, 0x5f, 0x6c, 0x80, 0x9b, 0x28, 0x39, 0x0b, 0x75, - 0x29, 0x11, 0xe8, 0xca, 0xfb, 0x29, 0x1e, 0x37, 0x27, 0xf5, 0x4d, 0x0e, 0x60, 0x3b, 0xd1, 0x15, - 0x61, 0x9c, 0x78, 0x7f, 0x8d, 0xb2, 0x2a, 0x3a, 0x8a, 0x5a, 0x40, 0x5f, 0x14, 0x81, 0x21, 0x23, - 0xb7, 0xa0, 0x9b, 0x26, 0xd1, 0xf1, 0xc9, 0x7f, 0x77, 0x0d, 0xd2, 0x71, 0x12, 0x89, 0x0a, 0xa5, - 0x26, 0x22, 0x5f, 0xc0, 0xb9, 0xe3, 0xdb, 0x48, 0xfb, 0xe6, 0x0c, 0x3f, 0x58, 0x83, 0x34, 0x30, - 0xd0, 0x0a, 0x71, 0x41, 0xe8, 0xfe, 0x6a, 0xc3, 0x05, 0x33, 0x95, 0x97, 0xd5, 0x13, 0x75, 0xca, - 0xf4, 0xb0, 0x4d, 0x3a, 0x95, 0x53, 0x3e, 0x01, 0xa7, 0x72, 0xe2, 0x13, 0xa7, 0xfe, 0xb1, 0x80, - 0x34, 0xb7, 0x9d, 0xdc, 0x83, 0xed, 0xfc, 0x28, 0x6f, 0xb8, 0x39, 0x19, 0x56, 0x75, 0x5d, 0xce, - 0xf5, 0x7a, 0xfb, 0x27, 0x17, 0x69, 0x69, 0x84, 0xec, 0x83, 0x93, 0x47, 0xde, 0xa1, 0xb3, 0x0c, - 0x8d, 0x97, 0x2b, 0x7a, 0xbd, 0x77, 0x2c, 0xc8, 0xfb, 0x34, 0xa3, 0xb1, 0x64, 0xf2, 0x7e, 0x50, - 0x26, 0x70, 0xff, 0xad, 0xcb, 0xcc, 0xcb, 0xe2, 0x69, 0xcb, 0x1c, 0xc3, 0x05, 0x73, 0xe4, 0x1f, - 0x5d, 0x67, 0x85, 0xc1, 0xfd, 0xde, 0x82, 0x17, 0xea, 0x27, 0xae, 0x96, 0x86, 0xd5, 0x48, 0xe3, - 0x4b, 0x20, 0x79, 0xc2, 0x57, 0x17, 0xc8, 0xe9, 0x04, 0xf3, 0x64, 0xec, 0x47, 0x48, 0x66, 0x09, - 0x8f, 0xfb, 0x43, 0x35, 0xa5, 0xdc, 0xf9, 0xd3, 0x52, 0xba, 0x07, 0x2f, 0x1a, 0x5d, 0x8f, 0x9d, - 0xd3, 0x32, 0x22, 0xf7, 0x0f, 0x0b, 0x2e, 0x2e, 0x6b, 0x22, 0xc5, 0xe3, 0xce, 0x2a, 0x3d, 0xee, - 0xde, 0x81, 0x5e, 0x45, 0x57, 0xa9, 0x39, 0x9b, 0xfb, 0xb4, 0x75, 0xbe, 0xc5, 0xdb, 0xce, 0x86, - 0xbc, 0x7d, 0xd8, 0x94, 0x51, 0x34, 0xbc, 0x86, 0x8c, 0xf7, 0xe0, 0xe5, 0xaa, 0x15, 0x4d, 0x1d, - 0xed, 0x01, 0x6d, 0x3b, 0xd2, 0xd9, 0xd4, 0x8e, 0xfc, 0x67, 0xc1, 0x96, 0xbe, 0x87, 0x37, 0xfa, - 0x1e, 0x1e, 0x55, 0xde, 0xc3, 0x57, 0xd6, 0x38, 0xdf, 0x3a, 0x97, 0xd2, 0xeb, 0x77, 0xbf, 0xf6, - 0xfa, 0x7d, 0x73, 0x6d, 0xae, 0xea, 0x5b, 0xf7, 0x35, 0x38, 0x5f, 0x2c, 0x41, 0x76, 0x54, 0x8f, - 0x37, 0xaf, 0x08, 0x4b, 0xef, 0x44, 0xf1, 0xef, 0x7e, 0x08, 0x4e, 0x09, 0xbf, 0x2a, 0x54, 0xcd, - 0x09, 0x9c, 0x61, 0x28, 0x13, 0x6e, 0xba, 0x4d, 0xf1, 0x7f, 0xed, 0xe2, 0xc3, 0xa3, 0xbe, 0xf5, - 0xe7, 0x51, 0xdf, 0xfa, 0xeb, 0xa8, 0x6f, 0xfd, 0xf8, 0x77, 0xff, 0x99, 0xbb, 0xf6, 0x62, 0xf0, - 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0xd7, 0xea, 0x49, 0x93, 0x0e, 0x00, 0x00, + // 1001 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0x67, 0xfd, 0x11, 0xda, 0xe7, 0xf2, 0xa1, 0xa1, 0x02, 0x37, 0x15, 0x26, 0x5a, 0x21, 0x08, + 0x20, 0xd6, 0xc4, 0xa0, 0x2a, 0x42, 0x08, 0x54, 0xa2, 0x42, 0x54, 0xda, 0x62, 0xb6, 0x69, 0x0f, + 0x3d, 0x54, 0x4c, 0x77, 0x1f, 0xee, 0x10, 0xef, 0xce, 0x6a, 0x66, 0x6c, 0x91, 0xde, 0x39, 0x70, + 0x45, 0x1c, 0xf8, 0x3f, 0xf8, 0x03, 0xb8, 0x21, 0xc4, 0x05, 0x4e, 0x88, 0x03, 0x07, 0x14, 0xfe, + 0x06, 0xc4, 0x15, 0xcd, 0x87, 0xd7, 0xeb, 0xb5, 0xd7, 0x6e, 0x12, 0x8b, 0xdb, 0xcc, 0x9b, 0xf7, + 0xfb, 0xcd, 0xfb, 0x9e, 0x81, 0xd7, 0x0e, 0x77, 0x65, 0xc0, 0x78, 0x97, 0x66, 0xac, 0x4b, 0x47, + 0x8a, 0xcb, 0x88, 0x0e, 0x59, 0x3a, 0xe8, 0x8e, 0x77, 0xba, 0x03, 0x4c, 0x51, 0x50, 0x85, 0x71, + 0x90, 0x09, 0xae, 0x38, 0xb9, 0x64, 0x55, 0x03, 0x9a, 0xb1, 0xa0, 0xa0, 0x1a, 0x8c, 0x77, 0x36, + 0xfd, 0x02, 0x4b, 0xc4, 0x05, 0x2e, 0x80, 0x6f, 0xbe, 0x33, 
0xd5, 0x49, 0x68, 0xf4, 0x90, 0xa5, + 0x28, 0x8e, 0xba, 0xd9, 0xe1, 0xc0, 0x80, 0x04, 0x4a, 0x3e, 0x12, 0x11, 0x9e, 0x08, 0x25, 0xbb, + 0x09, 0x2a, 0xba, 0xe8, 0xae, 0x6e, 0x15, 0x4a, 0x8c, 0x52, 0xc5, 0x92, 0xf9, 0x6b, 0xae, 0xac, + 0x02, 0xc8, 0xe8, 0x21, 0x26, 0x74, 0x0e, 0xf7, 0x76, 0x15, 0x6e, 0xa4, 0xd8, 0xb0, 0xcb, 0x52, + 0x25, 0x95, 0x28, 0x83, 0x7c, 0x84, 0xcb, 0x7b, 0x82, 0x4b, 0x79, 0x17, 0x85, 0x64, 0x3c, 0xfd, + 0xf4, 0xc1, 0x97, 0x18, 0xa9, 0x10, 0xbf, 0x40, 0x81, 0x69, 0x84, 0x84, 0x40, 0xe3, 0x90, 0xa5, + 0x71, 0xdb, 0xdb, 0xf2, 0xb6, 0xcf, 0x87, 0x66, 0xad, 0x65, 0x29, 0x4d, 0xb0, 0x5d, 0xb3, 0x32, + 0xbd, 0x26, 0x1d, 0x00, 0x9a, 0x31, 0x47, 0xd2, 0xae, 0x9b, 0x93, 0x82, 0xc4, 0xff, 0xba, 0x06, + 0x2f, 0xec, 0x73, 0xc1, 0x1e, 0xf1, 0x54, 0xd1, 0x61, 0x9f, 0xc7, 0x57, 0x5d, 0xd2, 0x50, 0x90, + 0x1b, 0x70, 0x4e, 0xc7, 0x2e, 0xa6, 0x8a, 0x9a, 0x7b, 0x5a, 0xbd, 0xb7, 0x82, 0x69, 0x7a, 0x73, + 0x57, 0x82, 0xec, 0x70, 0xa0, 0x05, 0x32, 0xd0, 0xda, 0xc1, 0x78, 0x27, 0xb0, 0xc6, 0xde, 0x44, + 0x45, 0xc3, 0x9c, 0x81, 0x5c, 0x87, 0x86, 0xcc, 0x30, 0x32, 0xd6, 0xb5, 0x7a, 0x57, 0x82, 0xca, + 0x42, 0x09, 0x2a, 0xec, 0xb9, 0x9d, 0x61, 0x14, 0x1a, 0x0e, 0xd2, 0x87, 0x0d, 0xa9, 0xa8, 0x1a, + 0x49, 0xe3, 0x51, 0xab, 0xb7, 0x7b, 0x0a, 0x36, 0x83, 0x0f, 0x1d, 0x8f, 0xff, 0xa7, 0x07, 0x5b, + 0x15, 0x9a, 0x7b, 0x3c, 0x8d, 0x99, 0x62, 0x3c, 0xd5, 0x01, 0x56, 0x47, 0x19, 0x4e, 0x82, 0xae, + 0xd7, 0xe4, 0xf9, 0xdc, 0x14, 0x1b, 0x76, 0xb7, 0x23, 0xf7, 0x80, 0x0c, 0xa9, 0x54, 0x07, 0x82, + 0xa6, 0xd2, 0xa0, 0x0f, 0x58, 0x82, 0xce, 0xdc, 0xd7, 0x1f, 0x2f, 0x8c, 0x1a, 0x11, 0x2e, 0x60, + 0xd1, 0x77, 0x0a, 0xa4, 0x92, 0xa7, 0xed, 0x86, 0xbd, 0xd3, 0xee, 0x48, 0x1b, 0x9e, 0x4c, 0x50, + 0x4a, 0x3a, 0xc0, 0x76, 0xd3, 0x1c, 0x4c, 0xb6, 0xfe, 0x0f, 0x1e, 0x5c, 0xae, 0x70, 0xef, 0x06, + 0x93, 0x8a, 0x5c, 0x9f, 0x4b, 0x75, 0xf0, 0x78, 0x36, 0x6a, 0x74, 0x29, 0xd1, 0xfb, 0xd0, 0x64, + 0x0a, 0x13, 0x1d, 0x90, 0xfa, 0x76, 0xab, 0xd7, 0x3b, 0x79, 0x6e, 0x42, 0x4b, 0xe0, 0x7f, 0x53, + 0xab, 0xb4, 0x5a, 0x17, 0x03, 0xb9, 0x0f, 0x4f, 0x9b, 0xdd, 0x01, 0x15, 0x03, 0xd4, 0xcd, 0xe1, + 0x6c, 0x5f, 0x56, 0x5c, 0x4b, 0x9a, 0x2a, 0x2c, 0xb1, 0x91, 0x2d, 0x68, 0x25, 0x2c, 0x0d, 0x31, + 0x1b, 0xb2, 0x88, 0xda, 0x04, 0x37, 0xc3, 0xa2, 0xc8, 0x68, 0xd0, 0xaf, 0x72, 0x8d, 0xba, 0xd3, + 0x98, 0x8a, 0xc8, 0x47, 0xd0, 0x51, 0x86, 0x70, 0xaf, 0x7f, 0xe7, 0x8e, 0x62, 0x43, 0xf6, 0x88, + 0xea, 0x3c, 0xf6, 0x51, 0x44, 0x98, 0x2a, 0x9d, 0xaa, 0x86, 0x01, 0xad, 0xd0, 0xf2, 0x7f, 0xac, + 0xc1, 0x8b, 0x4b, 0x4b, 0x99, 0x04, 0x40, 0xf8, 0x03, 0x89, 0x62, 0x8c, 0xf1, 0xc7, 0x76, 0x98, + 0xe8, 0x96, 0xd7, 0x11, 0xa9, 0x87, 0x0b, 0x4e, 0x48, 0x1f, 0x9e, 0xd2, 0xb5, 0x75, 0xdb, 0xf8, + 0xcc, 0xdc, 0xdc, 0x38, 0x59, 0x71, 0xce, 0x12, 0x90, 0x6d, 0x78, 0x26, 0x1a, 0x09, 0x81, 0xa9, + 0x2a, 0x45, 0xa4, 0x2c, 0xd6, 0x9a, 0x31, 0x4a, 0x26, 0x30, 0xce, 0x35, 0x6d, 0x18, 0xca, 0x62, + 0xb2, 0x0f, 0x2f, 0x39, 0x70, 0x65, 0x00, 0x9b, 0x06, 0xb9, 0x4a, 0xcd, 0xff, 0xc7, 0x03, 0xb8, + 0x89, 0x4a, 0xb0, 0xc8, 0x14, 0xcf, 0xa2, 0x66, 0xbe, 0x06, 0x1b, 0xdc, 0xd4, 0x84, 0x8b, 0xc5, + 0x9b, 0x4b, 0x0a, 0x29, 0x1f, 0x72, 0x9a, 0xd0, 0xbc, 0x4b, 0xa1, 0x03, 0x93, 0x0f, 0xa0, 0x91, + 0xf1, 0x78, 0x32, 0x9c, 0xde, 0x58, 0x42, 0xd2, 0xe7, 0xb1, 0x9c, 0xa1, 0x30, 0x40, 0xf2, 0x09, + 0x9c, 0x9b, 0x3c, 0x76, 0x26, 0x2e, 0xad, 0x5e, 0x77, 0x09, 0x49, 0xe8, 0x54, 0x67, 0x88, 0x72, + 0x02, 0xff, 0x5f, 0x0f, 0x2e, 0xb8, 0x23, 0x5b, 0x28, 0x6b, 0xf5, 0xdc, 0x8d, 0xd1, 0xb3, 0x78, + 0x6e, 0x29, 0xd6, 0xe0, 0xb9, 0x25, 0x9a, 0x7a, 0xfe, 0xbb, 0x07, 0x64, 0x3e, 0x4d, 
0xe4, 0x16, + 0x6c, 0xd8, 0x66, 0x3b, 0xe3, 0xb8, 0x70, 0x2c, 0xfa, 0x8d, 0x4d, 0x0c, 0xff, 0xad, 0xe9, 0xeb, + 0x5b, 0x90, 0x90, 0x3e, 0xb4, 0xac, 0xe6, 0x5d, 0x3a, 0x1c, 0x4d, 0xde, 0x80, 0xa5, 0xf3, 0x35, + 0x98, 0xb8, 0x10, 0x7c, 0x36, 0xa2, 0xa9, 0x62, 0xea, 0x28, 0x2c, 0x52, 0xf8, 0x7f, 0x94, 0x1d, + 0xb3, 0x89, 0xfd, 0xbf, 0x1d, 0x0b, 0xe1, 0x82, 0x6b, 0xba, 0xb3, 0x78, 0x36, 0xc3, 0xe1, 0x7f, + 0xeb, 0xc1, 0xb3, 0xe5, 0xae, 0x28, 0x19, 0xe2, 0xcd, 0x19, 0x72, 0x1f, 0x88, 0x35, 0xf9, 0xea, + 0x18, 0x05, 0x1d, 0xa0, 0x35, 0xa7, 0x76, 0x2a, 0x73, 0x16, 0x30, 0xf9, 0xdf, 0xcd, 0x1a, 0x65, + 0xa3, 0xbd, 0xca, 0xa8, 0xcf, 0xe1, 0x39, 0xe7, 0xd9, 0x1a, 0xac, 0x5a, 0x44, 0xe5, 0xff, 0xe4, + 0xc1, 0xc5, 0x45, 0xcd, 0x9f, 0xff, 0x04, 0xbd, 0xc2, 0x4f, 0xf0, 0x5d, 0x68, 0xcf, 0x78, 0x56, + 0x18, 0x92, 0xee, 0x65, 0xab, 0x3c, 0xaf, 0x88, 0x6f, 0x7d, 0x6d, 0xf1, 0xfd, 0x65, 0xde, 0x91, + 0x7c, 0x54, 0xcd, 0x39, 0xf2, 0x1e, 0x5c, 0x9a, 0x0d, 0xc6, 0xbc, 0x27, 0xd5, 0x0a, 0x55, 0x59, + 0xa9, 0xaf, 0x2f, 0x2b, 0xbf, 0x7a, 0xd0, 0x34, 0x6f, 0xe2, 0x9a, 0x3f, 0xd0, 0xbb, 0x33, 0x1f, + 0xe8, 0x97, 0x97, 0xf4, 0xb6, 0xb9, 0xbd, 0xf0, 0x5d, 0x7e, 0xbf, 0xf4, 0x5d, 0x7e, 0x65, 0x25, + 0x76, 0xf6, 0x73, 0xfc, 0x2a, 0x9c, 0xcf, 0x29, 0xc9, 0xa6, 0x9e, 0xd0, 0xee, 0xcd, 0xf6, 0x4c, + 0xb4, 0xf3, 0xbd, 0x7f, 0x0d, 0x5a, 0x05, 0xfc, 0x32, 0x55, 0x7d, 0x26, 0x71, 0x88, 0x91, 0xe2, + 0xc2, 0x4d, 0x96, 0x7c, 0xff, 0xe1, 0xc5, 0x9f, 0x8f, 0x3b, 0xde, 0x6f, 0xc7, 0x1d, 0xef, 0xaf, + 0xe3, 0x8e, 0xf7, 0xfd, 0xdf, 0x9d, 0x27, 0xee, 0xd5, 0xc6, 0x3b, 0xff, 0x05, 0x00, 0x00, 0xff, + 0xff, 0x10, 0x9f, 0x01, 0x5b, 0x86, 0x0e, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v1/register.go new file mode 100644 index 00000000..84f41f1e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v1/register.go @@ -0,0 +1,9 @@ +package v1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("autoscaling", "v1", "horizontalpodautoscalers", true, &HorizontalPodAutoscaler{}) + + k8s.RegisterList("autoscaling", "v1", "horizontalpodautoscalers", true, &HorizontalPodAutoscalerList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v2alpha1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v2alpha1/generated.pb.go deleted file mode 100644 index 4d366f3b..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/autoscaling/v2alpha1/generated.pb.go +++ /dev/null @@ -1,3635 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v2alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - HorizontalPodAutoscaler - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ -package v2alpha1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import k8s_io_kubernetes_pkg_api_resource "github.com/ericchiang/k8s/api/resource" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" -import _ "github.com/ericchiang/k8s/runtime" -import _ "github.com/ericchiang/k8s/runtime/schema" -import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" -import _ "github.com/ericchiang/k8s/apis/autoscaling/v1" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" - Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // API version of the referent - // +optional - ApiVersion *string `protobuf:"bytes,3,opt,name=apiVersion" json:"apiVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } -func (m *CrossVersionObjectReference) String() string { return proto.CompactTextString(m) } -func (*CrossVersionObjectReference) ProtoMessage() {} -func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *CrossVersionObjectReference) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CrossVersionObjectReference) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *CrossVersionObjectReference) GetApiVersion() string { - if m != nil && m.ApiVersion != nil { - return *m.ApiVersion - } - return "" -} - -// HorizontalPodAutoscaler is the configuration for a horizontal pod -// autoscaler, which automatically manages the replica count of any resource -// implementing the scale subresource based on the metrics specified. -type HorizontalPodAutoscaler struct { - // metadata is the standard object metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // spec is the specification for the behaviour of the autoscaler. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. - // +optional - Spec *HorizontalPodAutoscalerSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // status is the current information about the autoscaler. - // +optional - Status *HorizontalPodAutoscalerStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *HorizontalPodAutoscaler) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *HorizontalPodAutoscaler) GetSpec() *HorizontalPodAutoscalerSpec { - if m != nil { - return m.Spec - } - return nil -} - -func (m *HorizontalPodAutoscaler) GetStatus() *HorizontalPodAutoscalerStatus { - if m != nil { - return m.Status - } - return nil -} - -// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - // metadata is the standard list metadata. - // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // items is the list of horizontal pod autoscaler objects. - Items []*HorizontalPodAutoscaler `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} -func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *HorizontalPodAutoscalerList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *HorizontalPodAutoscalerList) GetItems() []*HorizontalPodAutoscaler { - if m != nil { - return m.Items - } - return nil -} - -// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. -type HorizontalPodAutoscalerSpec struct { - // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics - // should be collected, as well as to actually change the replica count. - ScaleTargetRef *CrossVersionObjectReference `protobuf:"bytes,1,opt,name=scaleTargetRef" json:"scaleTargetRef,omitempty"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. - // +optional - MinReplicas *int32 `protobuf:"varint,2,opt,name=minReplicas" json:"minReplicas,omitempty"` - // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. - // It cannot be less that minReplicas. - MaxReplicas *int32 `protobuf:"varint,3,opt,name=maxReplicas" json:"maxReplicas,omitempty"` - // metrics contains the specifications for which to use to calculate the - // desired replica count (the maximum replica count across all metrics will - // be used). The desired replica count is calculated multiplying the - // ratio between the target value and the current value by the current - // number of pods. 
Ergo, metrics used must decrease as the pod count is - // increased, and vice-versa. See the individual metric source types for - // more information about how each type of metric must respond. - // +optional - Metrics []*MetricSpec `protobuf:"bytes,4,rep,name=metrics" json:"metrics,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} -func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *HorizontalPodAutoscalerSpec) GetScaleTargetRef() *CrossVersionObjectReference { - if m != nil { - return m.ScaleTargetRef - } - return nil -} - -func (m *HorizontalPodAutoscalerSpec) GetMinReplicas() int32 { - if m != nil && m.MinReplicas != nil { - return *m.MinReplicas - } - return 0 -} - -func (m *HorizontalPodAutoscalerSpec) GetMaxReplicas() int32 { - if m != nil && m.MaxReplicas != nil { - return *m.MaxReplicas - } - return 0 -} - -func (m *HorizontalPodAutoscalerSpec) GetMetrics() []*MetricSpec { - if m != nil { - return m.Metrics - } - return nil -} - -// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. -type HorizontalPodAutoscalerStatus struct { - // observedGeneration is the most recent generation observed by this autoscaler. - // +optional - ObservedGeneration *int64 `protobuf:"varint,1,opt,name=observedGeneration" json:"observedGeneration,omitempty"` - // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, - // used by the autoscaler to control how often the number of pods is changed. - // +optional - LastScaleTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,2,opt,name=lastScaleTime" json:"lastScaleTime,omitempty"` - // currentReplicas is current number of replicas of pods managed by this autoscaler, - // as last seen by the autoscaler. - CurrentReplicas *int32 `protobuf:"varint,3,opt,name=currentReplicas" json:"currentReplicas,omitempty"` - // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, - // as last calculated by the autoscaler. - DesiredReplicas *int32 `protobuf:"varint,4,opt,name=desiredReplicas" json:"desiredReplicas,omitempty"` - // currentMetrics is the last read state of the metrics used by this autoscaler. 
- CurrentMetrics []*MetricStatus `protobuf:"bytes,5,rep,name=currentMetrics" json:"currentMetrics,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} -func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *HorizontalPodAutoscalerStatus) GetObservedGeneration() int64 { - if m != nil && m.ObservedGeneration != nil { - return *m.ObservedGeneration - } - return 0 -} - -func (m *HorizontalPodAutoscalerStatus) GetLastScaleTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { - if m != nil { - return m.LastScaleTime - } - return nil -} - -func (m *HorizontalPodAutoscalerStatus) GetCurrentReplicas() int32 { - if m != nil && m.CurrentReplicas != nil { - return *m.CurrentReplicas - } - return 0 -} - -func (m *HorizontalPodAutoscalerStatus) GetDesiredReplicas() int32 { - if m != nil && m.DesiredReplicas != nil { - return *m.DesiredReplicas - } - return 0 -} - -func (m *HorizontalPodAutoscalerStatus) GetCurrentMetrics() []*MetricStatus { - if m != nil { - return m.CurrentMetrics - } - return nil -} - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -type MetricSpec struct { - // type is the type of metric source. It should match one of the fields below. - Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricSource `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricSource `protobuf:"bytes,3,opt,name=pods" json:"pods,omitempty"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricSource `protobuf:"bytes,4,opt,name=resource" json:"resource,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (m *MetricSpec) String() string { return proto.CompactTextString(m) } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *MetricSpec) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *MetricSpec) GetObject() *ObjectMetricSource { - if m != nil { - return m.Object - } - return nil -} - -func (m *MetricSpec) GetPods() *PodsMetricSource { - if m != nil { - return m.Pods - } - return nil -} - -func (m *MetricSpec) GetResource() *ResourceMetricSource { - if m != nil { - return m.Resource - } - return nil -} - -// MetricStatus describes the last-read state of a single metric. -type MetricStatus struct { - // type is the type of metric source. 
It will match one of the fields below. - Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricStatus `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricStatus `protobuf:"bytes,3,opt,name=pods" json:"pods,omitempty"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricStatus `protobuf:"bytes,4,opt,name=resource" json:"resource,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (m *MetricStatus) String() string { return proto.CompactTextString(m) } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *MetricStatus) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *MetricStatus) GetObject() *ObjectMetricStatus { - if m != nil { - return m.Object - } - return nil -} - -func (m *MetricStatus) GetPods() *PodsMetricStatus { - if m != nil { - return m.Pods - } - return nil -} - -func (m *MetricStatus) GetResource() *ResourceMetricStatus { - if m != nil { - return m.Resource - } - return nil -} - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricSource struct { - // target is the described Kubernetes object. - Target *CrossVersionObjectReference `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` - // metricName is the name of the metric in question. - MetricName *string `protobuf:"bytes,2,opt,name=metricName" json:"metricName,omitempty"` - // targetValue is the target value of the metric (as a quantity). - TargetValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=targetValue" json:"targetValue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (m *ObjectMetricSource) String() string { return proto.CompactTextString(m) } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *ObjectMetricSource) GetTarget() *CrossVersionObjectReference { - if m != nil { - return m.Target - } - return nil -} - -func (m *ObjectMetricSource) GetMetricName() string { - if m != nil && m.MetricName != nil { - return *m.MetricName - } - return "" -} - -func (m *ObjectMetricSource) GetTargetValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { - if m != nil { - return m.TargetValue - } - return nil -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). 
-type ObjectMetricStatus struct { - // target is the described Kubernetes object. - Target *CrossVersionObjectReference `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` - // metricName is the name of the metric in question. - MetricName *string `protobuf:"bytes,2,opt,name=metricName" json:"metricName,omitempty"` - // currentValue is the current value of the metric (as a quantity). - CurrentValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=currentValue" json:"currentValue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (m *ObjectMetricStatus) String() string { return proto.CompactTextString(m) } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *ObjectMetricStatus) GetTarget() *CrossVersionObjectReference { - if m != nil { - return m.Target - } - return nil -} - -func (m *ObjectMetricStatus) GetMetricName() string { - if m != nil && m.MetricName != nil { - return *m.MetricName - } - return "" -} - -func (m *ObjectMetricStatus) GetCurrentValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { - if m != nil { - return m.CurrentValue - } - return nil -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. -type PodsMetricSource struct { - // metricName is the name of the metric in question - MetricName *string `protobuf:"bytes,1,opt,name=metricName" json:"metricName,omitempty"` - // targetAverageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - TargetAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=targetAverageValue" json:"targetAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (m *PodsMetricSource) String() string { return proto.CompactTextString(m) } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *PodsMetricSource) GetMetricName() string { - if m != nil && m.MetricName != nil { - return *m.MetricName - } - return "" -} - -func (m *PodsMetricSource) GetTargetAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { - if m != nil { - return m.TargetAverageValue - } - return nil -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). 
-type PodsMetricStatus struct { - // metricName is the name of the metric in question - MetricName *string `protobuf:"bytes,1,opt,name=metricName" json:"metricName,omitempty"` - // currentAverageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - CurrentAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=currentAverageValue" json:"currentAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (m *PodsMetricStatus) String() string { return proto.CompactTextString(m) } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *PodsMetricStatus) GetMetricName() string { - if m != nil && m.MetricName != nil { - return *m.MetricName - } - return "" -} - -func (m *PodsMetricStatus) GetCurrentAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { - if m != nil { - return m.CurrentAverageValue - } - return nil -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -type ResourceMetricSource struct { - // name is the name of the resource in question. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // targetAverageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. - // +optional - TargetAverageUtilization *int32 `protobuf:"varint,2,opt,name=targetAverageUtilization" json:"targetAverageUtilization,omitempty"` - // targetAverageValue is the the target value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // +optional - TargetAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=targetAverageValue" json:"targetAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (m *ResourceMetricSource) String() string { return proto.CompactTextString(m) } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *ResourceMetricSource) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResourceMetricSource) GetTargetAverageUtilization() int32 { - if m != nil && m.TargetAverageUtilization != nil { - return *m.TargetAverageUtilization - } - return 0 -} - -func (m *ResourceMetricSource) GetTargetAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { - if m != nil { - return m.TargetAverageValue - } - return nil -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). 
Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. -type ResourceMetricStatus struct { - // name is the name of the resource in question. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // currentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. It will only be - // present if `targetAverageValue` was set in the corresponding metric - // specification. - // +optional - CurrentAverageUtilization *int32 `protobuf:"varint,2,opt,name=currentAverageUtilization" json:"currentAverageUtilization,omitempty"` - // currentAverageValue is the the current value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // It will always be set, regardless of the corresponding metric specification. - CurrentAverageValue *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=currentAverageValue" json:"currentAverageValue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (m *ResourceMetricStatus) String() string { return proto.CompactTextString(m) } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *ResourceMetricStatus) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResourceMetricStatus) GetCurrentAverageUtilization() int32 { - if m != nil && m.CurrentAverageUtilization != nil { - return *m.CurrentAverageUtilization - } - return 0 -} - -func (m *ResourceMetricStatus) GetCurrentAverageValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { - if m != nil { - return m.CurrentAverageValue - } - return nil -} - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.CrossVersionObjectReference") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), 
"github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "github.com/ericchiang.k8s.apis.autoscaling.v2alpha1.ResourceMetricStatus") -} -func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Kind != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.Name != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if m.ApiVersion != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ApiVersion))) - i += copy(dAtA[i:], *m.ApiVersion) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n1, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Spec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Status != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n4, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ScaleTargetRef != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n5, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if 
m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) - } - if m.MaxReplicas != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxReplicas)) - } - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n6, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.CurrentReplicas != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentReplicas)) - } - if m.DesiredReplicas != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DesiredReplicas)) - } - if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *MetricSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n7, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n8, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n9, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *MetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n10, 
err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n11, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n12, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Target != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n13, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.MetricName != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MetricName))) - i += copy(dAtA[i:], *m.MetricName) - } - if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n14, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Target != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n15, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.MetricName != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MetricName))) - i += copy(dAtA[i:], *m.MetricName) - } - if m.CurrentValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n16, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MetricName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MetricName))) - i += copy(dAtA[i:], *m.MetricName) - } - if m.TargetAverageValue != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n17, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if 
err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MetricName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MetricName))) - i += copy(dAtA[i:], *m.MetricName) - } - if m.CurrentAverageValue != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n18, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n19, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) - } - if m.CurrentAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n20, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CrossVersionObjectReference) Size() (n int) { - var l int - _ = l - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Name != nil { - l = len(*m.Name) - n += 1 + l + 
sovGenerated(uint64(l)) - } - if m.ApiVersion != nil { - l = len(*m.ApiVersion) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HorizontalPodAutoscaler) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HorizontalPodAutoscalerList) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - var l int - _ = l - if m.ScaleTargetRef != nil { - l = m.ScaleTargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - if m.MaxReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MaxReplicas)) - } - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CurrentReplicas != nil { - n += 1 + sovGenerated(uint64(*m.CurrentReplicas)) - } - if m.DesiredReplicas != nil { - n += 1 + sovGenerated(uint64(*m.DesiredReplicas)) - } - if len(m.CurrentMetrics) > 0 { - for _, e := range m.CurrentMetrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricSpec) Size() (n int) { - var l int - _ = l - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricStatus) Size() (n int) { - var l int - _ = l - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ObjectMetricSource) Size() (n int) { - var l int - _ = l - if m.Target != nil { - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MetricName != nil { - l = len(*m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TargetValue != nil { - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ObjectMetricStatus) Size() (n int) { - var l int - _ = l - if m.Target != nil { - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MetricName != nil { - l = len(*m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CurrentValue != nil { - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PodsMetricSource) Size() (n int) { - var l int - _ = l - if m.MetricName != nil { - l = len(*m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TargetAverageValue != nil { - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PodsMetricStatus) Size() (n int) { - var l int - _ = l - if m.MetricName != nil { - l = len(*m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CurrentAverageValue != nil { - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResourceMetricSource) Size() (n int) { - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TargetAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResourceMetricStatus) Size() (n int) { - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CurrentAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) - } - if m.CurrentAverageValue != nil { - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ApiVersion = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &HorizontalPodAutoscalerSpec{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &HorizontalPodAutoscalerStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ScaleTargetRef == nil { - m.ScaleTargetRef = &CrossVersionObjectReference{} - } - if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxReplicas = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, &MetricSpec{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} - } - if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentReplicas = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DesiredReplicas = &v - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CurrentMetrics = append(m.CurrentMetrics, &MetricStatus{}) - if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricSource{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricSource{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.Resource == nil { - m.Resource = &ResourceMetricSource{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricStatus{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricStatus{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricStatus{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Target == nil { - m.Target = &CrossVersionObjectReference{} - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.MetricName = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetValue == nil { - m.TargetValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex 
- default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Target == nil { - m.Target = &CrossVersionObjectReference{} - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.MetricName = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CurrentValue == nil { - m.CurrentValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} - } - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.MetricName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.MetricName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} - } - if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_kubernetes_pkg_api_resource.Quantity{} - } - if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/autoscaling/v2alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 884 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x56, 0x4d, 0x6f, 0x23, 0x45, - 0x10, 0x65, 0x6c, 0x27, 0x84, 0xf2, 0xb2, 0xa0, 0x06, 0x09, 0x93, 0x15, 0x56, 0x34, 0xa7, 0x1c, - 0x96, 0x1e, 0x65, 0xc4, 0x81, 0x4f, 0xa1, 0xb0, 0x42, 0x44, 0x68, 0x13, 0xc2, 0x2c, 0xd9, 0x03, - 0xa0, 0x15, 0x9d, 0x99, 0x5a, 0x6f, 0xe3, 0xf1, 0xcc, 0xa8, 0xbb, 0xc7, 0x62, 0xf7, 0x57, 0x20, - 0x4e, 0xfc, 0x0a, 0x38, 0x22, 0xee, 0x48, 0xec, 0x91, 0x33, 0x27, 0x14, 0x6e, 0x88, 0x1f, 0x81, - 0xfa, 0xc3, 0x5f, 0x33, 0x1e, 0x13, 0x07, 0x2f, 0xda, 0xdb, 0xb8, 0xba, 0xea, 0xf5, 0x7b, 0xaf, - 0xab, 0xab, 0x0d, 0xef, 0x0f, 0xdf, 0x94, 0x94, 0xe7, 0xc1, 0xb0, 0x3c, 0x47, 0x91, 0xa1, 0x42, - 0x19, 0x14, 0xc3, 0x41, 0xc0, 0x0a, 0x2e, 0x03, 0x56, 0xaa, 0x5c, 0xc6, 0x2c, 0xe5, 0xd9, 0x20, - 0x18, 0x87, 0x2c, 0x2d, 0x1e, 0xb0, 0x83, 0x60, 0x80, 0x19, 0x0a, 0xa6, 0x30, 0xa1, 0x85, 0xc8, - 0x55, 0x4e, 0x02, 0x0b, 0x40, 0x67, 0x00, 0xb4, 0x18, 0x0e, 0xa8, 0x06, 0xa0, 0x73, 0x00, 0x74, - 0x02, 0xb0, 0x1b, 0x36, 0xee, 0x18, 0x08, 0x94, 0x79, 0x29, 0x62, 0xac, 0x6e, 0xb2, 0xa2, 0x46, - 0x06, 0x23, 0x54, 0x2c, 0x18, 0xd7, 0x88, 0xed, 0xbe, 0xbe, 0xbc, 0x46, 0x94, 0x99, 0xe2, 0xa3, - 0xfa, 0x16, 0x6f, 0xac, 0x4e, 0x97, 0xf1, 0x03, 0x1c, 0xb1, 0x5a, 0xd5, 0xc1, 0xf2, 0xaa, 0x52, - 
0xf1, 0x34, 0xe0, 0x99, 0x92, 0x4a, 0xd4, 0x4a, 0x6e, 0x36, 0xeb, 0x5f, 0xa2, 0xe2, 0xad, 0xcb, - 0x9e, 0x4f, 0xad, 0xd4, 0x47, 0xb8, 0x71, 0x4b, 0xe4, 0x52, 0xde, 0x45, 0x21, 0x79, 0x9e, 0x7d, - 0x72, 0xfe, 0x35, 0xc6, 0x2a, 0xc2, 0xfb, 0x28, 0x30, 0x8b, 0x91, 0x10, 0xe8, 0x0c, 0x79, 0x96, - 0xf4, 0xbc, 0x3d, 0x6f, 0xff, 0xb9, 0xc8, 0x7c, 0xeb, 0x58, 0xc6, 0x46, 0xd8, 0x6b, 0xd9, 0x98, - 0xfe, 0x26, 0x7d, 0x00, 0x56, 0x70, 0x07, 0xd2, 0x6b, 0x9b, 0x95, 0xb9, 0x88, 0xff, 0x63, 0x0b, - 0x5e, 0x39, 0xca, 0x05, 0x7f, 0x94, 0x67, 0x8a, 0xa5, 0xa7, 0x79, 0x72, 0xe8, 0x68, 0xa1, 0x20, - 0x1f, 0xc3, 0x8e, 0x3e, 0x9e, 0x84, 0x29, 0x66, 0xf6, 0xe9, 0x86, 0x94, 0xae, 0xe8, 0x17, 0x9d, - 0x4b, 0xc7, 0x07, 0xd4, 0x52, 0x3d, 0x46, 0xc5, 0xa2, 0x69, 0x3d, 0xf9, 0x0a, 0x3a, 0xb2, 0xc0, - 0xd8, 0x70, 0xeb, 0x86, 0xb7, 0xe9, 0x9a, 0x7d, 0x47, 0x1b, 0x38, 0xde, 0x29, 0x30, 0x8e, 0x0c, - 0x32, 0xb9, 0x0f, 0xdb, 0x52, 0x31, 0x55, 0x4a, 0xa3, 0xb2, 0x1b, 0x9e, 0x6c, 0x6c, 0x0f, 0x83, - 0x1a, 0x39, 0x74, 0xff, 0x57, 0x0f, 0x6e, 0x34, 0x64, 0xde, 0xe6, 0x52, 0x91, 0xa3, 0x9a, 0x6b, - 0x37, 0x2f, 0xe3, 0x9a, 0xae, 0xad, 0x78, 0x76, 0x0f, 0xb6, 0xb8, 0xc2, 0x91, 0xec, 0xb5, 0xf6, - 0xda, 0xfb, 0xdd, 0xf0, 0x68, 0x53, 0x82, 0x22, 0x0b, 0xeb, 0xff, 0xd0, 0x6a, 0x54, 0xa2, 0x7d, - 0x25, 0x0a, 0xae, 0x9b, 0x5f, 0x9f, 0x31, 0x31, 0x40, 0xdd, 0x7b, 0x4e, 0xcf, 0xfa, 0xa7, 0xb7, - 0xa2, 0x93, 0xa3, 0xca, 0x1e, 0x64, 0x0f, 0xba, 0x23, 0x9e, 0x45, 0x58, 0xa4, 0x3c, 0x66, 0xd2, - 0x34, 0xcc, 0x56, 0x34, 0x1f, 0x32, 0x19, 0xec, 0x9b, 0x69, 0x46, 0xdb, 0x65, 0xcc, 0x42, 0xe4, - 0x0c, 0x9e, 0x1d, 0xa1, 0x12, 0x3c, 0x96, 0xbd, 0x8e, 0xf1, 0xee, 0x9d, 0xb5, 0x29, 0x1f, 0x9b, - 0x7a, 0xd3, 0x5f, 0x13, 0x2c, 0xff, 0xf7, 0x16, 0xbc, 0xb6, 0xb2, 0x49, 0x08, 0x05, 0x92, 0x9f, - 0x4b, 0x14, 0x63, 0x4c, 0x3e, 0xb2, 0x17, 0x5a, 0x5f, 0x3b, 0x6d, 0x5b, 0x3b, 0x5a, 0xb2, 0x42, - 0x4e, 0xe0, 0xf9, 0x94, 0x49, 0x75, 0xc7, 0x58, 0xc0, 0xdd, 0xdd, 0xed, 0x86, 0xfb, 0x97, 0xe9, - 0x18, 0x9d, 0x1f, 0x2d, 0x96, 0x93, 0x7d, 0x78, 0x21, 0x2e, 0x85, 0xc0, 0x4c, 0x55, 0xec, 0xa9, - 0x86, 0x75, 0x66, 0x82, 0x92, 0x0b, 0x4c, 0xa6, 0x99, 0x1d, 0x9b, 0x59, 0x09, 0x13, 0x84, 0xeb, - 0xae, 0xf8, 0xd8, 0x79, 0xba, 0x65, 0x3c, 0x7d, 0xef, 0xaa, 0x9e, 0xda, 0xfb, 0x54, 0x01, 0xf5, - 0x7f, 0x6a, 0x01, 0xcc, 0x4c, 0xd7, 0xc3, 0x4c, 0x3d, 0x2c, 0x70, 0x32, 0xe0, 0xf4, 0x37, 0xf9, - 0x02, 0xb6, 0x73, 0xd3, 0x3d, 0xce, 0xa6, 0x5b, 0x6b, 0x33, 0x98, 0xce, 0x26, 0xbd, 0x8d, 0x79, - 0xad, 0x22, 0x07, 0x49, 0xce, 0xa0, 0x53, 0xe4, 0xc9, 0x64, 0x7a, 0x1c, 0xae, 0x0d, 0x7d, 0x9a, - 0x27, 0x72, 0x01, 0xd8, 0xc0, 0x11, 0x06, 0x3b, 0x93, 0x87, 0xd1, 0x18, 0xdc, 0x0d, 0x3f, 0x5c, - 0x1b, 0x3a, 0x72, 0x00, 0x0b, 0xf0, 0x53, 0x58, 0xff, 0xe7, 0x16, 0x5c, 0x9b, 0xb7, 0xf6, 0xff, - 0xf0, 0xce, 0xcd, 0xc4, 0xcd, 0x7b, 0x67, 0x81, 0x9f, 0x98, 0x77, 0x16, 0x7e, 0xe6, 0xdd, 0x5f, - 0x1e, 0x90, 0x7a, 0x53, 0x90, 0x04, 0xb6, 0x95, 0x99, 0x48, 0x4f, 0x64, 0xe4, 0x39, 0x6c, 0xfd, - 0x38, 0xdb, 0xd1, 0x72, 0x32, 0x7b, 0xb6, 0xe7, 0x22, 0xe4, 0x04, 0xba, 0x36, 0xf3, 0x2e, 0x4b, - 0x4b, 0x74, 0xee, 0xae, 0x78, 0x4d, 0xe8, 0x44, 0x16, 0xfd, 0xb4, 0x64, 0x99, 0xe2, 0xea, 0x61, - 0x34, 0x0f, 0xe0, 0xff, 0x5d, 0x15, 0x6b, 0xdb, 0xe5, 0xe9, 0x10, 0x7b, 0x0a, 0xd7, 0xdc, 0x44, - 0xb8, 0xba, 0xda, 0x05, 0x04, 0xff, 0x5b, 0x0f, 0x5e, 0xac, 0xde, 0xca, 0x0a, 0x0d, 0xaf, 0x46, - 0xe3, 0x4b, 0x20, 0x96, 0xf0, 0xe1, 0x18, 0x05, 0x1b, 0xa0, 0x25, 0xd3, 0xba, 0x02, 0x99, 0x25, - 0x38, 0xfe, 0x77, 0x8b, 0x94, 0xac, 0xff, 0xff, 0x46, 0xe9, 0x1e, 0xbc, 0xe4, 0x74, 0xfd, 0x67, - 0x4e, 0xcb, 0x80, 0xfc, 
0x5f, 0x3c, 0x78, 0x79, 0xd9, 0x88, 0x99, 0xfe, 0xa1, 0xf4, 0xe6, 0xfe, - 0x50, 0xbe, 0x0d, 0xbd, 0x05, 0x5d, 0x67, 0x8a, 0xa7, 0xfc, 0x91, 0x7d, 0xe7, 0xec, 0x5b, 0xdd, - 0xb8, 0xde, 0xe0, 0x6d, 0x7b, 0x43, 0xde, 0x3e, 0xae, 0xcb, 0x98, 0x8e, 0xc3, 0x9a, 0x8c, 0x77, - 0xe1, 0xd5, 0x45, 0x2b, 0xea, 0x3a, 0x9a, 0x13, 0x9a, 0x4e, 0xa4, 0xbd, 0xa1, 0x13, 0xf9, 0x60, - 0xf7, 0xf1, 0x45, 0xdf, 0xfb, 0xed, 0xa2, 0xef, 0xfd, 0x71, 0xd1, 0xf7, 0xbe, 0xff, 0xb3, 0xff, - 0xcc, 0xe7, 0x3b, 0x93, 0xbb, 0xf6, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x6b, 0x54, 0x39, - 0xf4, 0x0d, 0x00, 0x00, -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v1/generated.pb.go index 6c5f663f..5264a93b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/batch/v1/generated.proto /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto + k8s.io/api/batch/v1/generated.proto It has these top-level messages: Job @@ -20,11 +19,11 @@ package v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_api_core_v1 "github.com/ericchiang/k8s/apis/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" import io "io" @@ -42,15 +41,15 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Job represents the configuration of a single job. type Job struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Specification of the desired behavior of a job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *JobSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Current status of a job. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *JobStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -61,7 +60,7 @@ func (m *Job) String() string { return proto.CompactTextString(m) } func (*Job) ProtoMessage() {} func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *Job) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Job) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -90,10 +89,10 @@ type JobCondition struct { Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // Last time the condition was checked. // +optional - LastProbeTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastProbeTime" json:"lastProbeTime,omitempty"` + LastProbeTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastProbeTime" json:"lastProbeTime,omitempty"` // Last time the condition transit from one status to another. // +optional - LastTransitionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` // (brief) reason for the condition's last transition. // +optional Reason *string `protobuf:"bytes,5,opt,name=reason" json:"reason,omitempty"` @@ -122,14 +121,14 @@ func (m *JobCondition) GetStatus() string { return "" } -func (m *JobCondition) GetLastProbeTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *JobCondition) GetLastProbeTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastProbeTime } return nil } -func (m *JobCondition) GetLastTransitionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *JobCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTransitionTime } @@ -152,11 +151,11 @@ func (m *JobCondition) GetMessage() string { // JobList is a collection of jobs. type JobList struct { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Items is the list of Job. + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // items is the list of Jobs. Items []*Job `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -166,7 +165,7 @@ func (m *JobList) String() string { return proto.CompactTextString(m) func (*JobList) ProtoMessage() {} func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *JobList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *JobList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -182,31 +181,35 @@ func (m *JobList) GetItems() []*Job { // JobSpec describes how the job execution will look like. 
type JobSpec struct { - // Parallelism specifies the maximum desired number of pods the job should + // Specifies the maximum desired number of pods the job should // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://kubernetes.io/docs/user-guide/jobs + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional Parallelism *int32 `protobuf:"varint,1,opt,name=parallelism" json:"parallelism,omitempty"` - // Completions specifies the desired number of successfully finished pods the + // Specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://kubernetes.io/docs/user-guide/jobs + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional Completions *int32 `protobuf:"varint,2,opt,name=completions" json:"completions,omitempty"` - // Optional duration in seconds relative to the startTime that the job may be active + // Specifies the duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer // +optional ActiveDeadlineSeconds *int64 `protobuf:"varint,3,opt,name=activeDeadlineSeconds" json:"activeDeadlineSeconds,omitempty"` - // Selector is a label query over pods that should match the pod count. + // Specifies the number of retries before marking this job failed. + // Defaults to 6 + // +optional + BackoffLimit *int32 `protobuf:"varint,7,opt,name=backoffLimit" json:"backoffLimit,omitempty"` + // A label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,4,opt,name=selector" json:"selector,omitempty"` - // ManualSelector controls generation of pod labels and pod selectors. + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,4,opt,name=selector" json:"selector,omitempty"` + // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. // When false or unset, the system pick labels unique to this job // and appends those labels to the pod template. When true, @@ -215,14 +218,13 @@ type JobSpec struct { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector // +optional ManualSelector *bool `protobuf:"varint,5,opt,name=manualSelector" json:"manualSelector,omitempty"` - // Template is the object that describes the pod that will be created when - // executing a job. 
- // More info: http://kubernetes.io/docs/user-guide/jobs - Template *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec `protobuf:"bytes,6,opt,name=template" json:"template,omitempty"` - XXX_unrecognized []byte `json:"-"` + // Describes the pod that will be created when executing a job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + Template *k8s_io_api_core_v1.PodTemplateSpec `protobuf:"bytes,6,opt,name=template" json:"template,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *JobSpec) Reset() { *m = JobSpec{} } @@ -251,7 +253,14 @@ func (m *JobSpec) GetActiveDeadlineSeconds() int64 { return 0 } -func (m *JobSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *JobSpec) GetBackoffLimit() int32 { + if m != nil && m.BackoffLimit != nil { + return *m.BackoffLimit + } + return 0 +} + +func (m *JobSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } @@ -265,7 +274,7 @@ func (m *JobSpec) GetManualSelector() bool { return false } -func (m *JobSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { +func (m *JobSpec) GetTemplate() *k8s_io_api_core_v1.PodTemplateSpec { if m != nil { return m.Template } @@ -274,27 +283,29 @@ func (m *JobSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { // JobStatus represents the current state of a Job. type JobStatus struct { - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs + // The latest available observations of an object's current state. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []*JobCondition `protobuf:"bytes,1,rep,name=conditions" json:"conditions,omitempty"` - // StartTime represents time when the job was acknowledged by the Job Manager. + // Represents time when the job was acknowledged by the job controller. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - StartTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,2,opt,name=startTime" json:"startTime,omitempty"` - // CompletionTime represents time when the job was completed. It is not guaranteed to + StartTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,2,opt,name=startTime" json:"startTime,omitempty"` + // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional - CompletionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=completionTime" json:"completionTime,omitempty"` - // Active is the number of actively running pods. + CompletionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=completionTime" json:"completionTime,omitempty"` + // The number of actively running pods. // +optional Active *int32 `protobuf:"varint,4,opt,name=active" json:"active,omitempty"` - // Succeeded is the number of pods which reached Phase Succeeded. + // The number of pods which reached phase Succeeded. // +optional Succeeded *int32 `protobuf:"varint,5,opt,name=succeeded" json:"succeeded,omitempty"` - // Failed is the number of pods which reached Phase Failed. + // The number of pods which reached phase Failed. 
// +optional Failed *int32 `protobuf:"varint,6,opt,name=failed" json:"failed,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -312,14 +323,14 @@ func (m *JobStatus) GetConditions() []*JobCondition { return nil } -func (m *JobStatus) GetStartTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *JobStatus) GetStartTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.StartTime } return nil } -func (m *JobStatus) GetCompletionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *JobStatus) GetCompletionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.CompletionTime } @@ -348,11 +359,11 @@ func (m *JobStatus) GetFailed() int32 { } func init() { - proto.RegisterType((*Job)(nil), "github.com/ericchiang.k8s.apis.batch.v1.Job") - proto.RegisterType((*JobCondition)(nil), "github.com/ericchiang.k8s.apis.batch.v1.JobCondition") - proto.RegisterType((*JobList)(nil), "github.com/ericchiang.k8s.apis.batch.v1.JobList") - proto.RegisterType((*JobSpec)(nil), "github.com/ericchiang.k8s.apis.batch.v1.JobSpec") - proto.RegisterType((*JobStatus)(nil), "github.com/ericchiang.k8s.apis.batch.v1.JobStatus") + proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.api.batch.v1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.api.batch.v1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") } func (m *Job) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -573,6 +584,11 @@ func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + if m.BackoffLimit != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -647,24 +663,6 @@ func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -770,6 +768,9 @@ func (m *JobSpec) Size() (n int) { l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.BackoffLimit != nil { + n += 1 + sovGenerated(uint64(*m.BackoffLimit)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -877,7 +878,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1087,7 +1088,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastProbeTime == nil { - m.LastProbeTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastProbeTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := 
m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1120,7 +1121,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastTransitionTime == nil { - m.LastTransitionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1264,7 +1265,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1439,7 +1440,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1493,12 +1494,32 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Template == nil { - m.Template = &k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec{} + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} } if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.BackoffLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1608,7 +1629,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1641,7 +1662,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1834,50 +1855,49 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/batch/v1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 630 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x93, 0xc1, 0x6e, 0xd3, 0x4c, - 0x10, 0xc7, 0x3f, 0x3b, 0x75, 0x9b, 0x6c, 0x3f, 0x7a, 0x58, 0x01, 0xb2, 0x2a, 0x14, 0x55, 0x41, - 0x42, 0x3d, 0xb4, 0x6b, 0xb9, 0xf4, 0xc0, 0x09, 0x21, 0x40, 0x15, 0x44, 0x14, 0xca, 0xb6, 0x07, - 0xc4, 0x6d, 0x6d, 0x0f, 0xe9, 0x12, 0xdb, 0x6b, 0x79, 0x27, 0x91, 0x78, 0x0e, 0x24, 0xc4, 0x0b, - 0x81, 0x38, 0x72, 0xe7, 0x82, 0xca, 0x8b, 0xa0, 0x5d, 0xbb, 0x4e, 0xda, 0xa4, 0x69, 0xca, 0xcd, - 0x3b, 0x9e, 0xdf, 0x7f, 0x67, 0xe6, 0xbf, 0x43, 0x1e, 0x0e, 
0x1f, 0x69, 0x26, 0x55, 0x30, 0x1c, - 0x45, 0x50, 0xe6, 0x80, 0xa0, 0x83, 0x62, 0x38, 0x08, 0x44, 0x21, 0x75, 0x10, 0x09, 0x8c, 0x4f, - 0x83, 0x71, 0x18, 0x0c, 0x20, 0x87, 0x52, 0x20, 0x24, 0xac, 0x28, 0x15, 0x2a, 0x7a, 0xbf, 0x82, - 0xd8, 0x04, 0x62, 0xc5, 0x70, 0xc0, 0x0c, 0xc4, 0x2c, 0xc4, 0xc6, 0xe1, 0xe6, 0xde, 0x02, 0xe5, - 0x0c, 0x50, 0xcc, 0x11, 0xde, 0xdc, 0x9d, 0xcf, 0x94, 0xa3, 0x1c, 0x65, 0x06, 0x33, 0xe9, 0xfb, - 0x8b, 0xd3, 0x75, 0x7c, 0x0a, 0x99, 0x98, 0xa1, 0xc2, 0xf9, 0xd4, 0x08, 0x65, 0x1a, 0xc8, 0x1c, - 0x35, 0x96, 0x33, 0xc8, 0xce, 0x95, 0xbd, 0xcc, 0xe9, 0xa2, 0xf7, 0xcb, 0x21, 0xad, 0xbe, 0x8a, - 0x68, 0x9f, 0xb4, 0x4d, 0xa3, 0x89, 0x40, 0xe1, 0x3b, 0x5b, 0xce, 0xf6, 0xfa, 0x1e, 0x63, 0x0b, - 0x26, 0x67, 0x72, 0xd9, 0x38, 0x64, 0x6f, 0xa2, 0x8f, 0x10, 0xe3, 0x21, 0xa0, 0xe0, 0x0d, 0x4f, - 0x9f, 0x90, 0x15, 0x5d, 0x40, 0xec, 0xbb, 0x56, 0x67, 0x87, 0x2d, 0xe1, 0x00, 0xeb, 0xab, 0xe8, - 0xb8, 0x80, 0x98, 0x5b, 0x92, 0x1e, 0x90, 0x55, 0x8d, 0x02, 0x47, 0xda, 0x6f, 0x5d, 0x5f, 0xcb, - 0x05, 0x0d, 0x4b, 0xf1, 0x9a, 0xee, 0x7d, 0x71, 0xc9, 0xff, 0x7d, 0x15, 0x3d, 0x53, 0x79, 0x22, - 0x51, 0xaa, 0x9c, 0x52, 0xb2, 0x82, 0x9f, 0x0a, 0xb0, 0x2d, 0x76, 0xb8, 0xfd, 0xa6, 0x77, 0x9b, - 0xcb, 0x5c, 0x1b, 0xad, 0x4f, 0xf4, 0x35, 0xb9, 0x95, 0x0a, 0x8d, 0x47, 0xa5, 0x8a, 0xe0, 0x44, - 0x66, 0x50, 0xd7, 0xb2, 0xbd, 0xcc, 0x5c, 0x4c, 0x3e, 0xbf, 0x88, 0xd3, 0x77, 0x84, 0x9a, 0xc0, - 0x49, 0x29, 0x72, 0x6d, 0xab, 0xb1, 0xa2, 0x2b, 0x37, 0x14, 0x9d, 0xa3, 0x61, 0x3a, 0x28, 0x41, - 0x68, 0x95, 0xfb, 0x5e, 0xd5, 0x41, 0x75, 0xa2, 0x3e, 0x59, 0xcb, 0x40, 0x6b, 0x31, 0x00, 0x7f, - 0xd5, 0xfe, 0x38, 0x3f, 0xf6, 0x3e, 0x3b, 0x64, 0xad, 0xaf, 0xa2, 0x57, 0x52, 0x23, 0x7d, 0x31, - 0x63, 0xfd, 0xce, 0x32, 0xd5, 0x18, 0xf6, 0x92, 0xf1, 0x8f, 0x89, 0x27, 0x11, 0x32, 0x33, 0xc8, - 0xd6, 0x75, 0x4d, 0x4d, 0xbb, 0xc6, 0x2b, 0xac, 0xf7, 0xcd, 0xb5, 0x55, 0x99, 0x87, 0x40, 0xb7, - 0xc8, 0x7a, 0x21, 0x4a, 0x91, 0xa6, 0x90, 0x4a, 0x9d, 0xd9, 0xc2, 0x3c, 0x3e, 0x1d, 0x32, 0x19, - 0xb1, 0xca, 0x8a, 0x14, 0xcc, 0x1c, 0x2a, 0xf3, 0x3c, 0x3e, 0x1d, 0xa2, 0xfb, 0xe4, 0x8e, 0x88, - 0x51, 0x8e, 0xe1, 0x39, 0x88, 0x24, 0x95, 0x39, 0x1c, 0x43, 0xac, 0xf2, 0xa4, 0x7a, 0x55, 0x2d, - 0x3e, 0xff, 0x27, 0x3d, 0x24, 0x6d, 0x0d, 0x29, 0xc4, 0xa8, 0xca, 0xda, 0x9d, 0x70, 0xa9, 0x79, - 0x88, 0x08, 0xd2, 0xe3, 0x1a, 0xe4, 0x8d, 0x04, 0x7d, 0x40, 0x36, 0x32, 0x91, 0x8f, 0x44, 0xf3, - 0xcf, 0x9a, 0xd4, 0xe6, 0x97, 0xa2, 0xf4, 0x25, 0x69, 0x23, 0x64, 0x45, 0x2a, 0xb0, 0x72, 0x6b, - 0x7d, 0x6f, 0xf7, 0xea, 0x6b, 0xcd, 0x85, 0x47, 0x2a, 0x39, 0xa9, 0x01, 0xbb, 0x3a, 0x0d, 0xde, - 0xfb, 0xee, 0x92, 0x4e, 0xb3, 0x0c, 0xf4, 0x2d, 0x21, 0xf1, 0xf9, 0x02, 0x68, 0xdf, 0xb1, 0xd6, - 0x84, 0xcb, 0x5a, 0xd3, 0xac, 0x0e, 0x9f, 0x12, 0xa1, 0x07, 0xa4, 0xa3, 0x51, 0x94, 0x68, 0x5f, - 0xb0, 0x7b, 0xc3, 0x17, 0x3c, 0x41, 0xe9, 0x11, 0xd9, 0x98, 0xf8, 0xf5, 0x4f, 0x3b, 0x76, 0x89, - 0x37, 0xab, 0x50, 0xb9, 0x6a, 0xad, 0xf3, 0x78, 0x7d, 0xa2, 0xf7, 0x48, 0x47, 0x8f, 0xe2, 0x18, - 0x20, 0x81, 0xc4, 0x1a, 0xe0, 0xf1, 0x49, 0xc0, 0x50, 0x1f, 0x84, 0x4c, 0x21, 0xb1, 0x93, 0xf7, - 0x78, 0x7d, 0x7a, 0x7a, 0xfb, 0xc7, 0x59, 0xd7, 0xf9, 0x79, 0xd6, 0x75, 0x7e, 0x9f, 0x75, 0x9d, - 0xaf, 0x7f, 0xba, 0xff, 0xbd, 0x77, 0xc7, 0xe1, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0x55, - 0xb9, 0x02, 0x88, 0x06, 0x00, 0x00, + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0xfd, 0xec, 0x60, 0x48, 0x26, 0x7c, 0x2c, 0xa6, 0x3f, 0xb2, 0x10, 0x8a, 0xa8, 0x91, 0x2a, + 
0xd4, 0xc5, 0x98, 0x00, 0x42, 0xdd, 0x55, 0xfd, 0x59, 0x54, 0x11, 0x15, 0x68, 0x60, 0xc5, 0x6e, + 0x3c, 0xbe, 0x84, 0x69, 0x6c, 0x8f, 0xe5, 0x99, 0x44, 0xe2, 0x01, 0xfa, 0x0e, 0x55, 0x1f, 0xa5, + 0x52, 0xf7, 0x5d, 0xf6, 0x11, 0xda, 0xf4, 0x45, 0xaa, 0x19, 0x1b, 0x27, 0xe4, 0x47, 0x0d, 0xdd, + 0xe5, 0x9e, 0x39, 0xe7, 0x78, 0xee, 0x3d, 0x73, 0x83, 0xf6, 0x06, 0x2f, 0x15, 0x11, 0x32, 0x64, + 0xb9, 0x08, 0x23, 0xa6, 0xf9, 0x4d, 0x38, 0xea, 0x86, 0x7d, 0xc8, 0xa0, 0x60, 0x1a, 0x62, 0x92, + 0x17, 0x52, 0x4b, 0xfc, 0xa8, 0x24, 0x11, 0x96, 0x0b, 0x62, 0x49, 0x64, 0xd4, 0xdd, 0x0e, 0xa6, + 0x94, 0x5c, 0x16, 0xb0, 0x40, 0xb8, 0x7d, 0x3c, 0xe1, 0xa4, 0x8c, 0xdf, 0x88, 0x0c, 0x8a, 0xdb, + 0x30, 0x1f, 0xf4, 0x0d, 0xa0, 0xc2, 0x14, 0x34, 0x5b, 0xa4, 0x0a, 0x97, 0xa9, 0x8a, 0x61, 0xa6, + 0x45, 0x0a, 0x73, 0x82, 0x93, 0xbf, 0x09, 0x14, 0xbf, 0x81, 0x94, 0xcd, 0xe9, 0x8e, 0x96, 0xe9, + 0x86, 0x5a, 0x24, 0xa1, 0xc8, 0xb4, 0xd2, 0xc5, 0xac, 0x28, 0xf8, 0xe6, 0xa0, 0x46, 0x4f, 0x46, + 0xf8, 0x14, 0x35, 0x4d, 0x03, 0x31, 0xd3, 0xcc, 0x77, 0x76, 0x9d, 0xfd, 0xf6, 0xe1, 0x01, 0x99, + 0xcc, 0xa9, 0xf6, 0x23, 0xf9, 0xa0, 0x6f, 0x00, 0x45, 0x0c, 0x9b, 0x8c, 0xba, 0xe4, 0x2c, 0xfa, + 0x08, 0x5c, 0x7f, 0x00, 0xcd, 0x68, 0xed, 0x80, 0x0f, 0xd0, 0x9a, 0xca, 0x81, 0xfb, 0xae, 0x75, + 0xda, 0x21, 0x0b, 0x26, 0x4e, 0x7a, 0x32, 0xba, 0xc8, 0x81, 0x53, 0xcb, 0xc4, 0x27, 0x68, 0x5d, + 0x69, 0xa6, 0x87, 0xca, 0x6f, 0x58, 0x4d, 0x67, 0xa9, 0xc6, 0xb2, 0x68, 0xc5, 0x0e, 0xbe, 0xb8, + 0x68, 0xb3, 0x27, 0xa3, 0xb7, 0x32, 0x8b, 0x85, 0x16, 0x32, 0xc3, 0x18, 0xad, 0xe9, 0xdb, 0x1c, + 0x6c, 0x13, 0x2d, 0x6a, 0x7f, 0xe3, 0xa7, 0xb5, 0xb9, 0x6b, 0xd1, 0xaa, 0xc2, 0xe7, 0xe8, 0xff, + 0x84, 0x29, 0x7d, 0x5e, 0xc8, 0x08, 0x2e, 0x45, 0x0a, 0xd5, 0xb7, 0x5f, 0xac, 0xd6, 0xb9, 0x51, + 0xd0, 0xfb, 0x06, 0xf8, 0x0a, 0x61, 0x03, 0x5c, 0x16, 0x2c, 0x53, 0xf6, 0x3e, 0xd6, 0x76, 0xed, + 0xc1, 0xb6, 0x0b, 0x5c, 0x4c, 0x17, 0x05, 0x30, 0x25, 0x33, 0xdf, 0x2b, 0xbb, 0x28, 0x2b, 0xec, + 0xa3, 0x8d, 0x14, 0x94, 0x62, 0x7d, 0xf0, 0xd7, 0xed, 0xc1, 0x5d, 0x19, 0x7c, 0x72, 0xd0, 0x46, + 0x4f, 0x46, 0xa7, 0x42, 0x69, 0xdc, 0x9b, 0x0b, 0x98, 0xac, 0x76, 0x1f, 0xa3, 0x9e, 0x89, 0x97, + 0x20, 0x4f, 0x68, 0x48, 0xcd, 0x38, 0x1b, 0xfb, 0xed, 0x43, 0x7f, 0x59, 0x56, 0xb4, 0xa4, 0x05, + 0xbf, 0x5c, 0x7b, 0x0f, 0x13, 0x37, 0xde, 0x45, 0xed, 0x9c, 0x15, 0x2c, 0x49, 0x20, 0x11, 0x2a, + 0xb5, 0x57, 0xf1, 0xe8, 0x34, 0x64, 0x18, 0x5c, 0xa6, 0x79, 0x02, 0xa6, 0xf3, 0x32, 0x32, 0x8f, + 0x4e, 0x43, 0xf8, 0x18, 0x3d, 0x61, 0x5c, 0x8b, 0x11, 0xbc, 0x03, 0x16, 0x27, 0x22, 0x83, 0x0b, + 0xe0, 0x32, 0x8b, 0xcb, 0xb7, 0xd3, 0xa0, 0x8b, 0x0f, 0xf1, 0x19, 0x6a, 0x2a, 0x48, 0x80, 0x6b, + 0x59, 0x54, 0x89, 0x1c, 0xad, 0x38, 0x01, 0x16, 0x41, 0x72, 0x51, 0x49, 0x69, 0x6d, 0x82, 0x9f, + 0xa3, 0xad, 0x94, 0x65, 0x43, 0x56, 0x9f, 0xd9, 0x60, 0x9a, 0x74, 0x06, 0xc5, 0xaf, 0x50, 0x53, + 0x43, 0x9a, 0x27, 0x4c, 0x97, 0x09, 0xb5, 0x0f, 0xf7, 0xa6, 0x27, 0x66, 0xfe, 0x6e, 0xcc, 0x67, + 0xce, 0x65, 0x7c, 0x59, 0xd1, 0xec, 0x62, 0xd4, 0x22, 0x1c, 0xa0, 0xcd, 0x88, 0xf1, 0x81, 0xbc, + 0xbe, 0x3e, 0x15, 0xa9, 0xd0, 0xfe, 0x86, 0x1d, 0xc9, 0x3d, 0x2c, 0xf8, 0xea, 0xa2, 0x56, 0xbd, + 0x1e, 0xf8, 0x35, 0x42, 0xfc, 0x6e, 0x25, 0x94, 0xef, 0xd8, 0x98, 0x9e, 0x2d, 0x8b, 0xa9, 0x5e, + 0x1e, 0x3a, 0x25, 0xc2, 0xef, 0x51, 0x4b, 0x69, 0x56, 0x68, 0xfb, 0x82, 0xdd, 0x07, 0xbf, 0xe0, + 0x89, 0x18, 0x53, 0xb4, 0x35, 0x49, 0xef, 0x1f, 0xf7, 0x6c, 0xc6, 0xc1, 0x2c, 0x43, 0x99, 0xb2, + 0x8d, 0xd2, 0xa3, 0x55, 0x85, 0x77, 0x50, 0x4b, 0x0d, 0x39, 0x07, 0x88, 0x21, 0xb6, 0x71, 0x78, + 0x74, 0x02, 0x18, 0xd5, 
0x35, 0x13, 0x09, 0xc4, 0x36, 0x07, 0x8f, 0x56, 0xd5, 0x9b, 0xc7, 0xdf, + 0xc7, 0x1d, 0xe7, 0xc7, 0xb8, 0xe3, 0xfc, 0x1c, 0x77, 0x9c, 0xcf, 0xbf, 0x3b, 0xff, 0x5d, 0xb9, + 0xa3, 0xee, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xce, 0x55, 0x56, 0xf1, 0x4e, 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v1/register.go new file mode 100644 index 00000000..a7530a6e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v1/register.go @@ -0,0 +1,9 @@ +package v1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("batch", "v1", "jobs", true, &Job{}) + + k8s.RegisterList("batch", "v1", "jobs", true, &JobList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v2alpha1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v2alpha1/generated.pb.go index e0ae7855..8195fa6d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v2alpha1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v2alpha1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/batch/v2alpha1/generated.proto /* Package v2alpha1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto + k8s.io/api/batch/v2alpha1/generated.proto It has these top-level messages: CronJob @@ -21,12 +20,12 @@ package v2alpha1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_api_batch_v1 "github.com/ericchiang/k8s/apis/batch/v1" +import k8s_io_api_core_v1 "github.com/ericchiang/k8s/apis/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" -import k8s_io_kubernetes_pkg_apis_batch_v1 "github.com/ericchiang/k8s/apis/batch/v1" import io "io" @@ -44,15 +43,15 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // CronJob represents the configuration of a single cron job. type CronJob struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Specification of the desired behavior of a cron job, including the schedule. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *CronJobSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *CronJobStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -63,7 +62,7 @@ func (m *CronJob) String() string { return proto.CompactTextString(m) func (*CronJob) ProtoMessage() {} func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *CronJob) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *CronJob) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -86,11 +85,11 @@ func (m *CronJob) GetStatus() *CronJobStatus { // CronJobList is a collection of cron jobs. type CronJobList struct { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Items is the list of CronJob. + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // items is the list of CronJobs. Items []*CronJob `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -100,7 +99,7 @@ func (m *CronJobList) String() string { return proto.CompactTextStrin func (*CronJobList) ProtoMessage() {} func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *CronJobList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *CronJobList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -116,21 +115,24 @@ func (m *CronJobList) GetItems() []*CronJob { // CronJobSpec describes how the job execution will look like and when it will actually run. type CronJobSpec struct { - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Schedule *string `protobuf:"bytes,1,opt,name=schedule" json:"schedule,omitempty"` // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional StartingDeadlineSeconds *int64 `protobuf:"varint,2,opt,name=startingDeadlineSeconds" json:"startingDeadlineSeconds,omitempty"` - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Specifies how to treat concurrent executions of a Job. 
+ // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy *string `protobuf:"bytes,3,opt,name=concurrencyPolicy" json:"concurrencyPolicy,omitempty"` - // Suspend flag tells the controller to suspend subsequent executions, it does + // This flag tells the controller to suspend subsequent executions, it does // not apply to already started executions. Defaults to false. // +optional Suspend *bool `protobuf:"varint,4,opt,name=suspend" json:"suspend,omitempty"` - // JobTemplate is the object that describes the job that will be created when - // executing a CronJob. + // Specifies the job that will be created when executing a CronJob. JobTemplate *JobTemplateSpec `protobuf:"bytes,5,opt,name=jobTemplate" json:"jobTemplate,omitempty"` // The number of successful finished jobs to retain. // This is a pointer to distinguish between explicit zero and not specified. @@ -199,13 +201,13 @@ func (m *CronJobSpec) GetFailedJobsHistoryLimit() int32 { // CronJobStatus represents the current state of a cron job. type CronJobStatus struct { - // Active holds pointers to currently running jobs. + // A list of pointers to currently running jobs. // +optional - Active []*k8s_io_kubernetes_pkg_api_v1.ObjectReference `protobuf:"bytes,1,rep,name=active" json:"active,omitempty"` - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + Active []*k8s_io_api_core_v1.ObjectReference `protobuf:"bytes,1,rep,name=active" json:"active,omitempty"` + // Information when was the last time the job was successfully scheduled. // +optional - LastScheduleTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastScheduleTime" json:"lastScheduleTime,omitempty"` - XXX_unrecognized []byte `json:"-"` + LastScheduleTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastScheduleTime" json:"lastScheduleTime,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } @@ -213,14 +215,14 @@ func (m *CronJobStatus) String() string { return proto.CompactTextStr func (*CronJobStatus) ProtoMessage() {} func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *CronJobStatus) GetActive() []*k8s_io_kubernetes_pkg_api_v1.ObjectReference { +func (m *CronJobStatus) GetActive() []*k8s_io_api_core_v1.ObjectReference { if m != nil { return m.Active } return nil } -func (m *CronJobStatus) GetLastScheduleTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *CronJobStatus) GetLastScheduleTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastScheduleTime } @@ -230,11 +232,11 @@ func (m *CronJobStatus) GetLastScheduleTime() *k8s_io_kubernetes_pkg_apis_meta_v // JobTemplate describes a template for creating copies of a predefined pod. type JobTemplate struct { // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Template defines jobs that will be created from this template - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template *JobTemplateSpec `protobuf:"bytes,2,opt,name=template" json:"template,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -245,7 +247,7 @@ func (m *JobTemplate) String() string { return proto.CompactTextStrin func (*JobTemplate) ProtoMessage() {} func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *JobTemplate) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *JobTemplate) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -262,14 +264,14 @@ func (m *JobTemplate) GetTemplate() *JobTemplateSpec { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Specification of the desired behavior of the job. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional - Spec *k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - XXX_unrecognized []byte `json:"-"` + Spec *k8s_io_api_batch_v1.JobSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } @@ -277,14 +279,14 @@ func (m *JobTemplateSpec) String() string { return proto.CompactTextS func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *JobTemplateSpec) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *JobTemplateSpec) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } return nil } -func (m *JobTemplateSpec) GetSpec() *k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec { +func (m *JobTemplateSpec) GetSpec() *k8s_io_api_batch_v1.JobSpec { if m != nil { return m.Spec } @@ -292,12 +294,12 @@ func (m *JobTemplateSpec) GetSpec() *k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec } func init() { - proto.RegisterType((*CronJob)(nil), "github.com/ericchiang.k8s.apis.batch.v2alpha1.CronJob") - proto.RegisterType((*CronJobList)(nil), "github.com/ericchiang.k8s.apis.batch.v2alpha1.CronJobList") - proto.RegisterType((*CronJobSpec)(nil), "github.com/ericchiang.k8s.apis.batch.v2alpha1.CronJobSpec") - proto.RegisterType((*CronJobStatus)(nil), "github.com/ericchiang.k8s.apis.batch.v2alpha1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "github.com/ericchiang.k8s.apis.batch.v2alpha1.JobTemplate") - proto.RegisterType((*JobTemplateSpec)(nil), "github.com/ericchiang.k8s.apis.batch.v2alpha1.JobTemplateSpec") + proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") + proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v2alpha1.CronJobList") + proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v2alpha1.CronJobSpec") + proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v2alpha1.CronJobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") } func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -586,24 +588,6 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -808,7 +792,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if 
err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -958,7 +942,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1297,7 +1281,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, &k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) + m.Active = append(m.Active, &k8s_io_api_core_v1.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1329,7 +1313,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1413,7 +1397,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1530,7 +1514,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1563,7 +1547,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Spec == nil { - m.Spec = &k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec{} + m.Spec = &k8s_io_api_batch_v1.JobSpec{} } if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1696,48 +1680,46 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/batch/v2alpha1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 598 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x94, 0xcb, 0x6e, 0xd3, 0x40, - 0x14, 0x86, 0x99, 0xa6, 0x97, 0x74, 0x22, 0x04, 0xcc, 0x02, 0xac, 0x2c, 0xa2, 0xc8, 0xab, 0x20, - 0xa5, 0x63, 0xd5, 0xa0, 0xaa, 0xb0, 0x40, 0x88, 0x8b, 0x54, 0x45, 0x45, 0x54, 0xd3, 0x8a, 0x05, - 0x62, 0x33, 0x1e, 0x9f, 0xa6, 0xd3, 0xf8, 0x26, 0xcf, 0x71, 0xa4, 0x2e, 0x79, 0x0b, 0x76, 0x88, - 0x3d, 0x1b, 0xde, 0x82, 0x25, 0x8f, 0x80, 0xca, 0x23, 0xf0, 0x02, 0xc8, 0xd3, 0x5c, 0xda, 0xba, - 0x09, 0x29, 0x74, 0x69, 0x9d, 0xf3, 0xfd, 0x3e, 0xe7, 0xff, 0x67, 0x86, 0x3e, 0x19, 0x6c, 0x1b, - 0xae, 0x53, 0x6f, 0x50, 0x04, 0x90, 0x27, 0x80, 0x60, 0xbc, 0x6c, 0xd0, 0xf7, 0x64, 0xa6, 0x8d, - 0x17, 0x48, 0x54, 0x47, 0xde, 0xd0, 0x97, 0x51, 0x76, 0x24, 0x37, 0xbd, 0x3e, 0x24, 0x90, 0x4b, - 0x84, 0x90, 0x67, 0x79, 0x8a, 0x29, 0x7b, 0x78, 0x86, 0xf2, 0x29, 0xca, 0xb3, 0x41, 0x9f, 0x97, - 0x28, 0xb7, 0x28, 0x1f, 0xa3, 0x4d, 0x7f, 0xce, 0x5f, 0x62, 0x40, 0xe9, 0x0d, 0x2b, 0xf2, 0xcd, - 0x8d, 0xab, 0x99, 0xbc, 0x48, 
0x50, 0xc7, 0x50, 0x69, 0x7f, 0x3c, 0xbf, 0xdd, 0xa8, 0x23, 0x88, - 0x65, 0x85, 0xda, 0xbc, 0x9a, 0x2a, 0x50, 0x47, 0x9e, 0x4e, 0xd0, 0x60, 0x5e, 0x41, 0xba, 0x33, - 0x77, 0xb9, 0x6a, 0x8b, 0x47, 0x7f, 0xf7, 0xb7, 0x02, 0xb9, 0xbf, 0x09, 0x5d, 0x7b, 0x99, 0xa7, - 0x49, 0x2f, 0x0d, 0x58, 0x8f, 0xd6, 0x4b, 0x87, 0x42, 0x89, 0xd2, 0x21, 0x6d, 0xd2, 0x69, 0xf8, - 0x9c, 0xcf, 0x31, 0xbe, 0xec, 0xe5, 0xc3, 0x4d, 0xfe, 0x36, 0x38, 0x06, 0x85, 0x6f, 0x00, 0xa5, - 0x98, 0xf0, 0xac, 0x47, 0x97, 0x4d, 0x06, 0xca, 0x59, 0xb2, 0x3a, 0x5b, 0x7c, 0xe1, 0x00, 0xf9, - 0x68, 0x9a, 0xfd, 0x0c, 0x94, 0xb0, 0x1a, 0x6c, 0x8f, 0xae, 0x1a, 0x94, 0x58, 0x18, 0xa7, 0x66, - 0xd5, 0xb6, 0xff, 0x41, 0xcd, 0xf2, 0x62, 0xa4, 0xe3, 0x7e, 0x21, 0xb4, 0x31, 0xaa, 0xec, 0x6a, - 0x83, 0x6c, 0xa7, 0xb2, 0x79, 0x77, 0x91, 0xcd, 0x4b, 0xf6, 0xd2, 0xde, 0x3b, 0x74, 0x45, 0x23, - 0xc4, 0xc6, 0x59, 0x6a, 0xd7, 0x3a, 0x0d, 0xdf, 0xbf, 0xfe, 0xa8, 0xe2, 0x4c, 0xc0, 0xfd, 0x58, - 0x9b, 0xcc, 0x58, 0x7a, 0xc1, 0x9a, 0xb4, 0x5e, 0x9e, 0xac, 0xb0, 0x88, 0xc0, 0xce, 0xb8, 0x2e, - 0x26, 0xdf, 0x6c, 0x9b, 0x3e, 0x30, 0x28, 0x73, 0xd4, 0x49, 0xff, 0x15, 0xc8, 0x30, 0xd2, 0x09, - 0xec, 0x83, 0x4a, 0x93, 0xd0, 0xd8, 0x00, 0x6a, 0x62, 0x56, 0x99, 0x75, 0xe9, 0x3d, 0x95, 0x26, - 0xaa, 0xc8, 0x73, 0x48, 0xd4, 0xc9, 0x5e, 0x1a, 0x69, 0x75, 0x62, 0x6d, 0x5e, 0x17, 0xd5, 0x02, - 0x73, 0xe8, 0x9a, 0x29, 0x4c, 0x06, 0x49, 0xe8, 0x2c, 0xb7, 0x49, 0xa7, 0x2e, 0xc6, 0x9f, 0xec, - 0x03, 0x6d, 0x1c, 0xa7, 0xc1, 0x01, 0xc4, 0x59, 0x24, 0x11, 0x9c, 0x15, 0x6b, 0xe2, 0xd3, 0x6b, - 0x6c, 0xdf, 0x9b, 0xd2, 0x36, 0xfa, 0xf3, 0x72, 0xec, 0x19, 0x6d, 0x9a, 0x42, 0x29, 0x30, 0xe6, - 0xb0, 0x88, 0x7a, 0x69, 0x60, 0x76, 0xb4, 0xc1, 0x34, 0x3f, 0xd9, 0xd5, 0xb1, 0x46, 0x67, 0xb5, - 0x4d, 0x3a, 0x2b, 0x62, 0x4e, 0x07, 0xdb, 0xa2, 0xf7, 0x0f, 0xa5, 0x8e, 0x20, 0xac, 0xb0, 0x6b, - 0x96, 0x9d, 0x51, 0x75, 0xbf, 0x12, 0x7a, 0xfb, 0xc2, 0x09, 0x62, 0xaf, 0xe9, 0xaa, 0x54, 0xa8, - 0x87, 0x65, 0x06, 0x65, 0xc0, 0x1b, 0xb3, 0x57, 0x9c, 0xde, 0x0d, 0x01, 0x87, 0x50, 0xda, 0x08, - 0x62, 0x04, 0xb3, 0x03, 0x7a, 0x37, 0x92, 0x06, 0xf7, 0x47, 0x01, 0x1e, 0xe8, 0x18, 0xac, 0xa3, - 0x0d, 0xbf, 0xb3, 0xc8, 0xc1, 0x2b, 0xfb, 0x45, 0x45, 0xc1, 0xfd, 0x46, 0x68, 0xe3, 0x9c, 0x8f, - 0x37, 0x7a, 0xa1, 0xdf, 0xd1, 0x3a, 0x8e, 0xd3, 0x5d, 0xfa, 0xef, 0x74, 0x27, 0x5a, 0xee, 0x67, - 0x42, 0xef, 0x5c, 0xaa, 0xde, 0xe8, 0xdc, 0xcf, 0x2f, 0x3c, 0x44, 0xdd, 0x05, 0x66, 0xb6, 0xd3, - 0x4e, 0x9f, 0x9f, 0x17, 0xcd, 0xef, 0xa7, 0x2d, 0xf2, 0xe3, 0xb4, 0x45, 0x7e, 0x9e, 0xb6, 0xc8, - 0xa7, 0x5f, 0xad, 0x5b, 0xef, 0xeb, 0xe3, 0xbd, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xce, 0x67, - 0x63, 0xbf, 0xd4, 0x06, 0x00, 0x00, + // 599 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4f, 0x6b, 0x13, 0x41, + 0x18, 0xc6, 0x9d, 0xf4, 0x5f, 0x3a, 0x41, 0xd4, 0x39, 0xe8, 0x1a, 0x24, 0x84, 0x2d, 0x48, 0x14, + 0x99, 0x6d, 0xab, 0x94, 0xa2, 0x20, 0xa2, 0x22, 0x12, 0x22, 0xca, 0xb4, 0x78, 0xf0, 0x36, 0x99, + 0x7d, 0x9b, 0x4c, 0xbb, 0xbb, 0xb3, 0xec, 0xcc, 0x06, 0xf2, 0x31, 0xc4, 0x8b, 0x77, 0xfd, 0x30, + 0x1e, 0xbd, 0x78, 0x97, 0xf8, 0x09, 0xfc, 0x06, 0x32, 0xd3, 0xfc, 0xdf, 0x24, 0xad, 0xd0, 0xe3, + 0xee, 0xfb, 0xfc, 0xde, 0x7d, 0x9f, 0xe7, 0x9d, 0x1d, 0xfc, 0xe0, 0xec, 0x50, 0x53, 0xa9, 0x02, + 0x9e, 0xca, 0xa0, 0xcd, 0x8d, 0xe8, 0x06, 0xbd, 0x7d, 0x1e, 0xa5, 0x5d, 0xbe, 0x17, 0x74, 0x20, + 0x81, 0x8c, 0x1b, 0x08, 0x69, 0x9a, 0x29, 0xa3, 0xc8, 0xdd, 0x73, 0x29, 0xe5, 0xa9, 0xa4, 0x4e, + 0x4a, 0x47, 0xd2, 0xea, 0x4e, 0xb1, 0x4b, 0x81, 0xaf, 0xfa, 0x53, 
0x22, 0xa1, 0x32, 0x58, 0xa4, + 0x79, 0x32, 0xd1, 0xc4, 0x5c, 0x74, 0x65, 0x02, 0x59, 0x3f, 0x48, 0xcf, 0x3a, 0xf6, 0x85, 0x0e, + 0x62, 0x30, 0x7c, 0x11, 0x15, 0x2c, 0xa3, 0xb2, 0x3c, 0x31, 0x32, 0x86, 0x02, 0x70, 0x70, 0x11, + 0xa0, 0x45, 0x17, 0x62, 0x5e, 0xe0, 0x1e, 0x2f, 0xe3, 0x72, 0x23, 0xa3, 0x40, 0x26, 0x46, 0x9b, + 0x6c, 0x1e, 0xf2, 0x7f, 0x21, 0xbc, 0xf5, 0x2a, 0x53, 0x49, 0x53, 0xb5, 0x49, 0x0b, 0x97, 0xad, + 0x89, 0x90, 0x1b, 0xee, 0xa1, 0x3a, 0x6a, 0x54, 0xf6, 0x77, 0xe9, 0x24, 0xd6, 0x71, 0x4f, 0x9a, + 0x9e, 0x75, 0xec, 0x0b, 0x4d, 0xad, 0x9a, 0xf6, 0xf6, 0xe8, 0xfb, 0xf6, 0x29, 0x08, 0xf3, 0x0e, + 0x0c, 0x67, 0xe3, 0x0e, 0xe4, 0x29, 0x5e, 0xd7, 0x29, 0x08, 0xaf, 0xe4, 0x3a, 0xdd, 0xa7, 0x4b, + 0x17, 0x44, 0x87, 0xdf, 0x3f, 0x4a, 0x41, 0x30, 0xc7, 0x90, 0x17, 0x78, 0x53, 0x1b, 0x6e, 0x72, + 0xed, 0xad, 0x39, 0xba, 0x71, 0x09, 0xda, 0xe9, 0xd9, 0x90, 0xf3, 0xbf, 0x20, 0x5c, 0x19, 0x56, + 0x5a, 0x52, 0x1b, 0xd2, 0x2c, 0x78, 0xa3, 0x97, 0xf3, 0x66, 0xe9, 0x39, 0x67, 0x87, 0x78, 0x43, + 0x1a, 0x88, 0xb5, 0x57, 0xaa, 0xaf, 0x35, 0x2a, 0xfb, 0xfe, 0xc5, 0xc3, 0xb1, 0x73, 0xc0, 0xff, + 0x5b, 0x1a, 0x4f, 0x65, 0xdd, 0x92, 0x2a, 0x2e, 0xdb, 0x65, 0x86, 0x79, 0x04, 0x6e, 0xaa, 0x6d, + 0x36, 0x7e, 0x26, 0x87, 0xf8, 0x8e, 0x36, 0x3c, 0x33, 0x32, 0xe9, 0xbc, 0x06, 0x1e, 0x46, 0x32, + 0x81, 0x23, 0x10, 0x2a, 0x09, 0xb5, 0x8b, 0x74, 0x8d, 0x2d, 0x2b, 0x93, 0x47, 0xf8, 0x96, 0x50, + 0x89, 0xc8, 0xb3, 0x0c, 0x12, 0xd1, 0xff, 0xa0, 0x22, 0x29, 0xfa, 0x2e, 0xc8, 0x6d, 0x56, 0x2c, + 0x10, 0x0f, 0x6f, 0xe9, 0x5c, 0xa7, 0x90, 0x84, 0xde, 0x7a, 0x1d, 0x35, 0xca, 0x6c, 0xf4, 0x48, + 0x5a, 0xb8, 0x72, 0xaa, 0xda, 0xc7, 0x10, 0xa7, 0x11, 0x37, 0xe0, 0x6d, 0xb8, 0xd8, 0x1e, 0xae, + 0x70, 0xdb, 0x9c, 0xa8, 0xdd, 0x32, 0xa7, 0x71, 0xf2, 0x1c, 0x57, 0x75, 0x2e, 0x04, 0x68, 0x7d, + 0x92, 0x47, 0x4d, 0xd5, 0xd6, 0x6f, 0xa5, 0x36, 0x2a, 0xeb, 0xb7, 0x64, 0x2c, 0x8d, 0xb7, 0x59, + 0x47, 0x8d, 0x0d, 0xb6, 0x42, 0x41, 0x0e, 0xf0, 0xed, 0x13, 0x2e, 0x23, 0x08, 0x0b, 0xec, 0x96, + 0x63, 0x97, 0x54, 0xfd, 0xef, 0x08, 0x5f, 0x9f, 0x39, 0x23, 0xe4, 0x19, 0xde, 0xe4, 0xc2, 0xc8, + 0x9e, 0xcd, 0xdc, 0x2e, 0x70, 0x67, 0xda, 0x92, 0xfd, 0xf9, 0x27, 0x67, 0x9a, 0xc1, 0x09, 0xd8, + 0xb0, 0x80, 0x0d, 0x11, 0xf2, 0x11, 0xdf, 0x8c, 0xb8, 0x36, 0x47, 0xc3, 0x35, 0x1d, 0xcb, 0x18, + 0x5c, 0x6e, 0xb3, 0xc9, 0xac, 0x38, 0x50, 0x96, 0x60, 0x85, 0x1e, 0xfe, 0x37, 0x84, 0x2b, 0x53, + 0xf9, 0x5d, 0xf1, 0xcf, 0xf8, 0x06, 0x97, 0xcd, 0x68, 0x8f, 0xa5, 0xff, 0xde, 0xe3, 0x98, 0xf5, + 0x3f, 0x23, 0x7c, 0x63, 0xae, 0x7a, 0xc5, 0x93, 0xee, 0xce, 0x5c, 0x1b, 0xf7, 0x16, 0x4c, 0xe9, + 0xe6, 0x9b, 0x5c, 0x16, 0x2f, 0xab, 0x3f, 0x06, 0x35, 0xf4, 0x73, 0x50, 0x43, 0xbf, 0x07, 0x35, + 0xf4, 0xf5, 0x4f, 0xed, 0xda, 0xa7, 0xf2, 0xc8, 0xc9, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb8, + 0x52, 0xc1, 0xcb, 0x42, 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v2alpha1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v2alpha1/register.go new file mode 100644 index 00000000..896aabdd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/batch/v2alpha1/register.go @@ -0,0 +1,9 @@ +package v2alpha1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("batch", "v2alpha1", "cronjobs", true, &CronJob{}) + + k8s.RegisterList("batch", "v2alpha1", "cronjobs", true, &CronJobList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1alpha1/generated.pb.go 
b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1alpha1/generated.pb.go deleted file mode 100644 index c2e30cd7..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1505 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto - - It has these top-level messages: - CertificateSigningRequest - CertificateSigningRequestCondition - CertificateSigningRequestList - CertificateSigningRequestSpec - CertificateSigningRequestStatus -*/ -package v1alpha1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/ericchiang/k8s/api/resource" -import k8s_io_kubernetes_pkg_api_unversioned "github.com/ericchiang/k8s/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" -import _ "github.com/ericchiang/k8s/runtime" -import _ "github.com/ericchiang/k8s/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Describes a certificate signing request -type CertificateSigningRequest struct { - // +optional - Metadata *k8s_io_kubernetes_pkg_api_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // The certificate request itself and any additional information. - // +optional - Spec *CertificateSigningRequestSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // Derived information about the request. - // +optional - Status *CertificateSigningRequestStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } -func (m *CertificateSigningRequest) String() string { return proto.CompactTextString(m) } -func (*CertificateSigningRequest) ProtoMessage() {} -func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *CertificateSigningRequest) GetMetadata() *k8s_io_kubernetes_pkg_api_v1.ObjectMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *CertificateSigningRequest) GetSpec() *CertificateSigningRequestSpec { - if m != nil { - return m.Spec - } - return nil -} - -func (m *CertificateSigningRequest) GetStatus() *CertificateSigningRequestStatus { - if m != nil { - return m.Status - } - return nil -} - -type CertificateSigningRequestCondition struct { - // request approval state, currently Approved or Denied. 
- Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - // brief reason for the request state - // +optional - Reason *string `protobuf:"bytes,2,opt,name=reason" json:"reason,omitempty"` - // human readable message with details about the request state - // +optional - Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` - // timestamp for the last update to this condition - // +optional - LastUpdateTime *k8s_io_kubernetes_pkg_api_unversioned.Time `protobuf:"bytes,4,opt,name=lastUpdateTime" json:"lastUpdateTime,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } -func (m *CertificateSigningRequestCondition) String() string { return proto.CompactTextString(m) } -func (*CertificateSigningRequestCondition) ProtoMessage() {} -func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *CertificateSigningRequestCondition) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *CertificateSigningRequestCondition) GetReason() string { - if m != nil && m.Reason != nil { - return *m.Reason - } - return "" -} - -func (m *CertificateSigningRequestCondition) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *CertificateSigningRequestCondition) GetLastUpdateTime() *k8s_io_kubernetes_pkg_api_unversioned.Time { - if m != nil { - return m.LastUpdateTime - } - return nil -} - -type CertificateSigningRequestList struct { - // +optional - Metadata *k8s_io_kubernetes_pkg_api_unversioned.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - Items []*CertificateSigningRequest `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } -func (m *CertificateSigningRequestList) String() string { return proto.CompactTextString(m) } -func (*CertificateSigningRequestList) ProtoMessage() {} -func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *CertificateSigningRequestList) GetMetadata() *k8s_io_kubernetes_pkg_api_unversioned.ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *CertificateSigningRequestList) GetItems() []*CertificateSigningRequest { - if m != nil { - return m.Items - } - return nil -} - -// This information is immutable after the request is created. Only the Request -// and ExtraInfo fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. 
-type CertificateSigningRequestSpec struct { - // Base64-encoded PKCS#10 CSR data - Request []byte `protobuf:"bytes,1,opt,name=request" json:"request,omitempty"` - // Information about the requesting user (if relevant) - // See user.Info interface for details - // +optional - Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` - // +optional - Uid *string `protobuf:"bytes,3,opt,name=uid" json:"uid,omitempty"` - // +optional - Groups []string `protobuf:"bytes,4,rep,name=groups" json:"groups,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } -func (m *CertificateSigningRequestSpec) String() string { return proto.CompactTextString(m) } -func (*CertificateSigningRequestSpec) ProtoMessage() {} -func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *CertificateSigningRequestSpec) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *CertificateSigningRequestSpec) GetUsername() string { - if m != nil && m.Username != nil { - return *m.Username - } - return "" -} - -func (m *CertificateSigningRequestSpec) GetUid() string { - if m != nil && m.Uid != nil { - return *m.Uid - } - return "" -} - -func (m *CertificateSigningRequestSpec) GetGroups() []string { - if m != nil { - return m.Groups - } - return nil -} - -type CertificateSigningRequestStatus struct { - // Conditions applied to the request, such as approval or denial. - // +optional - Conditions []*CertificateSigningRequestCondition `protobuf:"bytes,1,rep,name=conditions" json:"conditions,omitempty"` - // If request was approved, the controller will place the issued certificate here. 
- // +optional - Certificate []byte `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } -func (m *CertificateSigningRequestStatus) String() string { return proto.CompactTextString(m) } -func (*CertificateSigningRequestStatus) ProtoMessage() {} -func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *CertificateSigningRequestStatus) GetConditions() []*CertificateSigningRequestCondition { - if m != nil { - return m.Conditions - } - return nil -} - -func (m *CertificateSigningRequestStatus) GetCertificate() []byte { - if m != nil { - return m.Certificate - } - return nil -} - -func init() { - proto.RegisterType((*CertificateSigningRequest)(nil), "github.com/ericchiang.k8s.apis.certificates.v1alpha1.CertificateSigningRequest") - proto.RegisterType((*CertificateSigningRequestCondition)(nil), "github.com/ericchiang.k8s.apis.certificates.v1alpha1.CertificateSigningRequestCondition") - proto.RegisterType((*CertificateSigningRequestList)(nil), "github.com/ericchiang.k8s.apis.certificates.v1alpha1.CertificateSigningRequestList") - proto.RegisterType((*CertificateSigningRequestSpec)(nil), "github.com/ericchiang.k8s.apis.certificates.v1alpha1.CertificateSigningRequestSpec") - proto.RegisterType((*CertificateSigningRequestStatus)(nil), "github.com/ericchiang.k8s.apis.certificates.v1alpha1.CertificateSigningRequestStatus") -} -func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n1, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Spec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Status != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - if m.Reason != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i += copy(dAtA[i:], *m.Reason) - } - if m.Message != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) - i += copy(dAtA[i:], *m.Message) - } - if m.LastUpdateTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) 
- if err != nil { - return 0, err - } - i += n4 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n5, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) - i += copy(dAtA[i:], m.Request) - } - if m.Username != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Username))) - i += copy(dAtA[i:], *m.Username) - } - if m.Uid != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Uid))) - i += copy(dAtA[i:], *m.Uid) - } - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Certificate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) - i += copy(dAtA[i:], m.Certificate) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func 
encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CertificateSigningRequest) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CertificateSigningRequestCondition) Size() (n int) { - var l int - _ = l - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Reason != nil { - l = len(*m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Message != nil { - l = len(*m.Message) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.LastUpdateTime != nil { - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CertificateSigningRequestList) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CertificateSigningRequestSpec) Size() (n int) { - var l int - _ = l - if m.Request != nil { - l = len(m.Request) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Username != nil { - l = len(*m.Username) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Uid != nil { - l = len(*m.Uid) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CertificateSigningRequestStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Certificate != nil { - l = len(m.Certificate) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_api_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &CertificateSigningRequestSpec{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &CertificateSigningRequestStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Reason = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Message = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastUpdateTime == nil { - m.LastUpdateTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err 
:= skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_api_unversioned.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &CertificateSigningRequest{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Request = append(m.Request[:0], dAtA[iNdEx:postIndex]...) - if m.Request == nil { - m.Request = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Username = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Uid = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, &CertificateSigningRequestCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) - if m.Certificate == nil { - m.Certificate = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/certificates/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x52, 0xc1, 0x8e, 0xd3, 0x30, - 0x14, 0x24, 0xdb, 0xb2, 0xb4, 0xee, 0x0a, 0x21, 0x1f, 0x50, 0xa8, 0x44, 0xa9, 0x72, 0xaa, 0x04, - 0x38, 0xb4, 0x12, 0x12, 0x47, 0xc4, 0x72, 0x5b, 0xd0, 0x0a, 0x77, 0xb9, 0x70, 0xf3, 0x26, 0x8f, - 0x60, 0xda, 0x38, 0xc6, 0x7e, 0xae, 0xc4, 0x89, 0xdf, 0xe0, 0x37, 0x38, 0x72, 0xe2, 0xca, 0x0d, - 0x3e, 0x01, 0x95, 0x1f, 0x41, 0x71, 0x9b, 0x6e, 0xd4, 0x6e, 0x16, 0x90, 0x7a, 0xf3, 0xb3, 0x3c, - 0x33, 0x7e, 0x33, 0x43, 0x9e, 0xce, 0x9e, 0x58, 0x26, 0x8b, 0x78, 0xe6, 0xce, 0xc1, 0x28, 0x40, - 0xb0, 0xb1, 0x9e, 0x65, 0xb1, 0xd0, 0xd2, 0xc6, 0x09, 0x18, 0x94, 0x6f, 0x65, 0x22, 0xca, 0xdb, - 0xc5, 0x58, 0xcc, 0xf5, 0x3b, 0x31, 0x8e, 0x33, 0x50, 0x60, 0x04, 0x42, 0xca, 0xb4, 0x29, 0xb0, - 0xa0, 0x8f, 0x56, 0x0c, 0xec, 0x82, 0x81, 0xe9, 0x59, 0xc6, 0x4a, 0x06, 0x56, 0x67, 0x60, 0x15, - 0x43, 0x7f, 0xd2, 0xa8, 0x19, 0x1b, 0xb0, 0x85, 0x33, 0x09, 0x6c, 0xab, 0xf4, 0x1f, 0x37, 0x63, - 0x9c, 0x5a, 0x80, 0xb1, 0xb2, 0x50, 0x90, 0xee, 0xc0, 0x1e, 0x34, 0xc3, 0x16, 0x3b, 0xab, 0xf4, - 0x1f, 0x5e, 0xfe, 0xda, 0x38, 0x85, 0x32, 0xdf, 0xfd, 0xd3, 0xf8, 0xf2, 0xe7, 0x0e, 0xe5, 0x3c, - 0x96, 0x0a, 0x2d, 0x9a, 0x6d, 0x48, 0xf4, 0xe5, 0x80, 0xdc, 0x39, 0xbe, 0x30, 0x65, 0x2a, 0x33, - 
0x25, 0x55, 0xc6, 0xe1, 0x83, 0x03, 0x8b, 0xf4, 0x39, 0xe9, 0xe4, 0x80, 0x22, 0x15, 0x28, 0xc2, - 0x60, 0x18, 0x8c, 0x7a, 0x93, 0x11, 0x6b, 0x74, 0x97, 0x2d, 0xc6, 0xec, 0xf4, 0xfc, 0x3d, 0x24, - 0xf8, 0x12, 0x50, 0xf0, 0x0d, 0x92, 0x26, 0xa4, 0x6d, 0x35, 0x24, 0xe1, 0x81, 0x67, 0x38, 0x65, - 0xff, 0x9b, 0x0f, 0x6b, 0xfc, 0xe0, 0x54, 0x43, 0xc2, 0x3d, 0x39, 0x95, 0xe4, 0xd0, 0xa2, 0x40, - 0x67, 0xc3, 0x96, 0x97, 0x79, 0xb5, 0x4f, 0x19, 0x4f, 0xcc, 0xd7, 0x02, 0xd1, 0xb7, 0x80, 0x44, - 0x8d, 0x6f, 0x8f, 0x0b, 0x95, 0x4a, 0x94, 0x85, 0xa2, 0x94, 0xb4, 0xf1, 0xa3, 0x06, 0x6f, 0x5c, - 0x97, 0xfb, 0x33, 0xbd, 0x4d, 0x0e, 0x0d, 0x08, 0x5b, 0x28, 0x6f, 0x46, 0x97, 0xaf, 0x27, 0x1a, - 0x92, 0x1b, 0x39, 0x58, 0x2b, 0x32, 0xf0, 0xdf, 0xef, 0xf2, 0x6a, 0xa4, 0x53, 0x72, 0x73, 0x2e, - 0x2c, 0xbe, 0xd6, 0xa9, 0x40, 0x38, 0x93, 0x39, 0x84, 0x6d, 0xbf, 0xdf, 0xfd, 0x2b, 0x82, 0xa8, - 0x15, 0x90, 0x95, 0x10, 0xbe, 0x45, 0x11, 0xfd, 0x08, 0xc8, 0xdd, 0xc6, 0x0d, 0x5e, 0x48, 0x8b, - 0xf4, 0x64, 0x27, 0xf9, 0xf8, 0x1f, 0x05, 0x4b, 0xf8, 0x56, 0x01, 0x04, 0xb9, 0x2e, 0x11, 0x72, - 0x1b, 0x1e, 0x0c, 0x5b, 0xa3, 0xde, 0xe4, 0x64, 0x8f, 0xd1, 0xf0, 0x15, 0x73, 0xf4, 0xe9, 0x8a, - 0x85, 0xca, 0x96, 0x94, 0x0e, 0x9b, 0xd5, 0xe8, 0xf7, 0x39, 0xe2, 0xd5, 0x48, 0xfb, 0xa4, 0xe3, - 0x2c, 0x18, 0x25, 0x72, 0x58, 0xa7, 0xb2, 0x99, 0xe9, 0x2d, 0xd2, 0x72, 0x32, 0x5d, 0x67, 0x52, - 0x1e, 0xcb, 0x04, 0x33, 0x53, 0x38, 0x6d, 0xc3, 0xf6, 0xb0, 0x55, 0x26, 0xb8, 0x9a, 0xa2, 0xaf, - 0x01, 0xb9, 0xf7, 0x97, 0x02, 0x51, 0x24, 0x24, 0xa9, 0xea, 0x61, 0xc3, 0xc0, 0x9b, 0x71, 0xb6, - 0x47, 0x33, 0x36, 0xdd, 0xe3, 0x35, 0x1d, 0x3a, 0x24, 0xbd, 0x1a, 0x8f, 0x5f, 0xf1, 0x88, 0xd7, - 0xaf, 0x9e, 0xf5, 0xbf, 0x2f, 0x07, 0xc1, 0xcf, 0xe5, 0x20, 0xf8, 0xb5, 0x1c, 0x04, 0x9f, 0x7f, - 0x0f, 0xae, 0xbd, 0xe9, 0x54, 0x62, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x2f, 0x56, 0x87, - 0x90, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1beta1/generated.pb.go index 1791bf58..209967d8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1beta1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/certificates/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto + k8s.io/api/certificates/v1beta1/generated.proto It has these top-level messages: CertificateSigningRequest @@ -21,11 +20,10 @@ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -43,7 +41,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Describes a certificate signing request type CertificateSigningRequest struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // The certificate request itself and any additional information. // +optional Spec *CertificateSigningRequestSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -60,7 +58,7 @@ func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *CertificateSigningRequest) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *CertificateSigningRequest) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -92,8 +90,8 @@ type CertificateSigningRequestCondition struct { Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` // timestamp for the last update to this condition // +optional - LastUpdateTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastUpdateTime" json:"lastUpdateTime,omitempty"` - XXX_unrecognized []byte `json:"-"` + LastUpdateTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastUpdateTime" json:"lastUpdateTime,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } @@ -124,7 +122,7 @@ func (m *CertificateSigningRequestCondition) GetMessage() string { return "" } -func (m *CertificateSigningRequestCondition) GetLastUpdateTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *CertificateSigningRequestCondition) GetLastUpdateTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastUpdateTime } @@ -133,9 +131,9 @@ func (m *CertificateSigningRequestCondition) GetLastUpdateTime() *k8s_io_kuberne type CertificateSigningRequestList struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - Items []*CertificateSigningRequest `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Items []*CertificateSigningRequest `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } @@ -145,7 +143,7 @@ func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{2} } -func (m *CertificateSigningRequestList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *CertificateSigningRequestList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -169,7 +167,7 @@ type CertificateSigningRequestSpec struct { // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - KeyUsage []string `protobuf:"bytes,5,rep,name=keyUsage" json:"keyUsage,omitempty"` + Usages []string `protobuf:"bytes,5,rep,name=usages" json:"usages,omitempty"` // Information about the requesting user. // See user.Info interface for details. // +optional @@ -203,9 +201,9 @@ func (m *CertificateSigningRequestSpec) GetRequest() []byte { return nil } -func (m *CertificateSigningRequestSpec) GetKeyUsage() []string { +func (m *CertificateSigningRequestSpec) GetUsages() []string { if m != nil { - return m.KeyUsage + return m.Usages } return nil } @@ -290,12 +288,12 @@ func (m *ExtraValue) GetItems() []string { } func init() { - proto.RegisterType((*CertificateSigningRequest)(nil), "github.com/ericchiang.k8s.apis.certificates.v1beta1.CertificateSigningRequest") - proto.RegisterType((*CertificateSigningRequestCondition)(nil), "github.com/ericchiang.k8s.apis.certificates.v1beta1.CertificateSigningRequestCondition") - proto.RegisterType((*CertificateSigningRequestList)(nil), "github.com/ericchiang.k8s.apis.certificates.v1beta1.CertificateSigningRequestList") - proto.RegisterType((*CertificateSigningRequestSpec)(nil), "github.com/ericchiang.k8s.apis.certificates.v1beta1.CertificateSigningRequestSpec") - proto.RegisterType((*CertificateSigningRequestStatus)(nil), "github.com/ericchiang.k8s.apis.certificates.v1beta1.CertificateSigningRequestStatus") - proto.RegisterType((*ExtraValue)(nil), "github.com/ericchiang.k8s.apis.certificates.v1beta1.ExtraValue") + proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") + proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") + proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") + proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -488,8 +486,8 @@ func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } - if len(m.KeyUsage) > 0 { - for _, s := range m.KeyUsage { + if len(m.Usages) > 0 { + for _, s := range m.Usages { dAtA[i] = 0x2a i++ l = len(s) @@ -612,24 +610,6 @@ func (m *ExtraValue) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - 
dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -725,8 +705,8 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.KeyUsage) > 0 { - for _, s := range m.KeyUsage { + if len(m.Usages) > 0 { + for _, s := range m.Usages { l = len(s) n += 1 + l + sovGenerated(uint64(l)) } @@ -853,7 +833,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1093,7 +1073,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastUpdateTime == nil { - m.LastUpdateTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastUpdateTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1177,7 +1157,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1387,7 +1367,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyUsage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1412,7 +1392,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KeyUsage = append(m.KeyUsage, string(dAtA[iNdEx:postIndex])) + m.Usages = append(m.Usages, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 6: if wireType != 2 { @@ -1440,51 +1420,14 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]*ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *ExtraValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); 
; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1494,46 +1437,85 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = mapvalue - } else { - var mapvalue *ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1856,47 +1838,47 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/certificates/v1beta1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 602 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x93, 0x4b, 0x6b, 0x14, 0x41, - 0x10, 0xc7, 0x9d, 0x7d, 0xe4, 0xd1, 0x09, 0x22, 0x8d, 0xc8, 0x64, 0xc1, 0x75, 0x99, 0x53, 0x0e, - 0xb1, 0x87, 0x0d, 0x1e, 0x82, 0x1e, 0x04, 0x43, 0x40, 0x82, 0x8f, 0xd8, 0x6b, 0x04, 0x3d, 0xd9, - 0x3b, 0x5b, 0x4e, 0xda, 0xd9, 0x79, 0xd8, 0x5d, 0xb3, 0xb8, 0x67, 0xbf, 0x84, 0x5f, 0x44, 0xf0, - 0xe0, 0x07, 0x10, 0xbc, 0xf8, 0x11, 0x64, 0xfd, 0x22, 0xd2, 0x3d, 0xb3, 0x0f, 0xb2, 0x0f, 0x13, - 0xd8, 0xdb, 0x54, 0x4f, 0xd5, 0xaf, 0xea, 0x5f, 0xff, 0x6e, 0xf2, 0x38, 0x3a, 0xd2, 0x4c, 0xa6, - 0x7e, 0x94, 0x77, 0x41, 
0x25, 0x80, 0xa0, 0xfd, 0x2c, 0x0a, 0x7d, 0x91, 0x49, 0xed, 0x07, 0xa0, - 0x50, 0x7e, 0x90, 0x81, 0x30, 0xa7, 0x83, 0x76, 0x17, 0x50, 0xb4, 0xfd, 0x10, 0x12, 0x50, 0x02, - 0xa1, 0xc7, 0x32, 0x95, 0x62, 0x4a, 0xfd, 0x02, 0xc0, 0xa6, 0x00, 0x96, 0x45, 0x21, 0x33, 0x00, - 0x36, 0x0b, 0x60, 0x25, 0xa0, 0x71, 0xb8, 0xa2, 0x63, 0x0c, 0x28, 0xfc, 0xc1, 0x5c, 0x93, 0xc6, - 0xfd, 0xc5, 0x35, 0x2a, 0x4f, 0x50, 0xc6, 0x30, 0x97, 0xfe, 0x60, 0x75, 0xba, 0x0e, 0x2e, 0x20, - 0x16, 0x73, 0x55, 0xed, 0xc5, 0x55, 0x39, 0xca, 0xbe, 0x2f, 0x13, 0xd4, 0xa8, 0xe6, 0x4a, 0x0e, - 0x96, 0x6a, 0x59, 0xa0, 0xc2, 0xfb, 0x56, 0x21, 0x7b, 0xc7, 0xd3, 0x95, 0x74, 0x64, 0x98, 0xc8, - 0x24, 0xe4, 0xf0, 0x29, 0x07, 0x8d, 0xf4, 0x94, 0x6c, 0x19, 0xf9, 0x3d, 0x81, 0xc2, 0x75, 0x5a, - 0xce, 0xfe, 0xce, 0x21, 0x63, 0x2b, 0x76, 0x6b, 0x72, 0xd9, 0xa0, 0xcd, 0x5e, 0x76, 0x3f, 0x42, - 0x80, 0xcf, 0x01, 0x05, 0x9f, 0xd4, 0xd3, 0x2e, 0xa9, 0xe9, 0x0c, 0x02, 0xb7, 0x62, 0x39, 0x2f, - 0xd8, 0x35, 0x3d, 0x62, 0x4b, 0xa7, 0xec, 0x64, 0x10, 0x70, 0xcb, 0xa6, 0x17, 0x64, 0x43, 0xa3, - 0xc0, 0x5c, 0xbb, 0x55, 0xdb, 0xe5, 0x6c, 0x8d, 0x5d, 0x2c, 0x97, 0x97, 0x7c, 0xef, 0x87, 0x43, - 0xbc, 0xa5, 0xb9, 0xc7, 0x69, 0xd2, 0x93, 0x28, 0xd3, 0x84, 0x52, 0x52, 0xc3, 0x61, 0x06, 0x76, - 0x79, 0xdb, 0xdc, 0x7e, 0xd3, 0x3b, 0x64, 0x43, 0x81, 0xd0, 0x69, 0x62, 0x57, 0xb1, 0xcd, 0xcb, - 0x88, 0xba, 0x64, 0x33, 0x06, 0xad, 0x45, 0x08, 0x76, 0xfa, 0x6d, 0x3e, 0x0e, 0xe9, 0x19, 0xb9, - 0xd9, 0x17, 0x1a, 0xcf, 0xb3, 0x9e, 0x40, 0x78, 0x2d, 0x63, 0x70, 0x6b, 0x56, 0xde, 0xfe, 0x55, - 0xcc, 0x30, 0xf9, 0xfc, 0x52, 0xbd, 0xf7, 0xcb, 0x21, 0x77, 0x97, 0x8e, 0xff, 0x4c, 0x6a, 0xa4, - 0x4f, 0xe7, 0xac, 0x3f, 0xb8, 0x4a, 0x37, 0x53, 0x7b, 0xc9, 0xf8, 0xf7, 0xa4, 0x2e, 0x11, 0x62, - 0xed, 0x56, 0x5a, 0xd5, 0xfd, 0x9d, 0xc3, 0xd3, 0xf5, 0x79, 0xc2, 0x0b, 0xb0, 0xf7, 0xa5, 0xba, - 0x42, 0x8d, 0xb9, 0x1e, 0x66, 0xb7, 0xaa, 0x08, 0xad, 0x98, 0x5d, 0x3e, 0x0e, 0x69, 0x83, 0x6c, - 0xe5, 0x1a, 0x54, 0x22, 0x62, 0x28, 0xfd, 0x98, 0xc4, 0xf4, 0x16, 0xa9, 0xe6, 0xb2, 0x57, 0xba, - 0x61, 0x3e, 0x8d, 0x77, 0xa1, 0x4a, 0xf3, 0x4c, 0xbb, 0xb5, 0x56, 0xd5, 0x78, 0x57, 0x44, 0x86, - 0x12, 0xc1, 0xf0, 0xdc, 0x9a, 0x57, 0xb7, 0x7f, 0x26, 0x31, 0x4d, 0x49, 0x1d, 0x3e, 0xa3, 0x12, - 0xee, 0x86, 0xd5, 0xff, 0x76, 0xbd, 0x37, 0x9f, 0x9d, 0x18, 0xf6, 0x49, 0x82, 0x6a, 0xc8, 0x8b, - 0x3e, 0x8d, 0x9c, 0x90, 0xe9, 0xa1, 0x11, 0x11, 0xc1, 0xb0, 0xbc, 0x81, 0xe6, 0x93, 0xbe, 0x22, - 0xf5, 0x81, 0xe8, 0xe7, 0x50, 0x3e, 0xc5, 0x47, 0xd7, 0x1e, 0xc8, 0xd2, 0xdf, 0x18, 0x04, 0x2f, - 0x48, 0x0f, 0x2b, 0x47, 0x8e, 0xf7, 0xdd, 0x21, 0xf7, 0xfe, 0xf3, 0x7c, 0xa8, 0x26, 0x24, 0x18, - 0x3f, 0x0e, 0xed, 0x3a, 0x76, 0x21, 0x9d, 0xf5, 0x2d, 0x64, 0xf2, 0xf0, 0xf8, 0x4c, 0x1b, 0xda, - 0x22, 0x3b, 0x33, 0x18, 0xab, 0x7a, 0x97, 0xcf, 0x1e, 0x79, 0x5e, 0xb9, 0x31, 0xab, 0x89, 0xde, - 0x1e, 0x5f, 0x58, 0xc7, 0x3a, 0x59, 0x04, 0x4f, 0xf6, 0x7e, 0x8e, 0x9a, 0xce, 0xef, 0x51, 0xd3, - 0xf9, 0x33, 0x6a, 0x3a, 0x5f, 0xff, 0x36, 0x6f, 0xbc, 0xdb, 0x2c, 0xe7, 0xf9, 0x17, 0x00, 0x00, - 0xff, 0xff, 0x22, 0x79, 0xcf, 0xba, 0xb1, 0x06, 0x00, 0x00, + // 594 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xcd, 0x6e, 0x13, 0x3d, + 0x14, 0xfd, 0x26, 0x3f, 0xfd, 0x71, 0xaa, 0x4f, 0xc8, 0x42, 0x68, 0x1a, 0x89, 0x34, 0x9a, 0x55, + 0x05, 0x92, 0x87, 0x14, 0x84, 0xaa, 0x2e, 0x10, 0x10, 0x75, 0x01, 0x2a, 0x02, 0xb9, 0x80, 0x10, + 0x1b, 0xe4, 0x4e, 0x2e, 0x53, 0x93, 0x8c, 0x67, 0xb0, 0xef, 0x44, 0xe4, 0x49, 0xe0, 0x11, 0x78, + 0x09, 0x58, 0xb3, 0xe4, 0x0d, 0x40, 
0xe1, 0x45, 0x90, 0x3d, 0x93, 0x1f, 0x25, 0x0a, 0xa9, 0xda, + 0x9d, 0xcf, 0x95, 0xcf, 0xf1, 0x3d, 0xf7, 0x5c, 0x93, 0xb0, 0x7f, 0x68, 0x98, 0x4c, 0x43, 0x91, + 0xc9, 0x30, 0x02, 0x8d, 0xf2, 0xbd, 0x8c, 0x04, 0x82, 0x09, 0x87, 0x9d, 0x33, 0x40, 0xd1, 0x09, + 0x63, 0x50, 0xa0, 0x05, 0x42, 0x8f, 0x65, 0x3a, 0xc5, 0x94, 0xee, 0x15, 0x04, 0x26, 0x32, 0xc9, + 0xe6, 0x09, 0xac, 0x24, 0x34, 0xef, 0xcd, 0x14, 0x13, 0x11, 0x9d, 0x4b, 0x05, 0x7a, 0x14, 0x66, + 0xfd, 0xd8, 0x16, 0x4c, 0x98, 0x00, 0x8a, 0x70, 0xb8, 0x24, 0xdb, 0x0c, 0x57, 0xb1, 0x74, 0xae, + 0x50, 0x26, 0xb0, 0x44, 0xb8, 0xbf, 0x8e, 0x60, 0xa2, 0x73, 0x48, 0xc4, 0x12, 0xef, 0xee, 0x2a, + 0x5e, 0x8e, 0x72, 0x10, 0x4a, 0x85, 0x06, 0xf5, 0x22, 0x29, 0xf8, 0x5c, 0x21, 0xbb, 0xdd, 0x99, + 0xd9, 0x53, 0x19, 0x2b, 0xa9, 0x62, 0x0e, 0x1f, 0x73, 0x30, 0x48, 0x4f, 0xc8, 0x96, 0xb5, 0xd5, + 0x13, 0x28, 0x7c, 0xaf, 0xed, 0xed, 0x37, 0x0e, 0xee, 0xb0, 0xd9, 0x94, 0xa6, 0xaf, 0xb0, 0xac, + 0x1f, 0xdb, 0x82, 0x61, 0xf6, 0x36, 0x1b, 0x76, 0xd8, 0xf3, 0xb3, 0x0f, 0x10, 0xe1, 0x33, 0x40, + 0xc1, 0xa7, 0x0a, 0x94, 0x93, 0x9a, 0xc9, 0x20, 0xf2, 0x2b, 0x4e, 0xe9, 0x01, 0x5b, 0x33, 0x6f, + 0xb6, 0xb2, 0xaf, 0xd3, 0x0c, 0x22, 0xee, 0xb4, 0xe8, 0x1b, 0xb2, 0x61, 0x50, 0x60, 0x6e, 0xfc, + 0xaa, 0x53, 0x7d, 0x78, 0x05, 0x55, 0xa7, 0xc3, 0x4b, 0xbd, 0xe0, 0xbb, 0x47, 0x82, 0x95, 0x77, + 0xbb, 0xa9, 0xea, 0x49, 0x94, 0xa9, 0xa2, 0x94, 0xd4, 0x70, 0x94, 0x81, 0x1b, 0xcf, 0x36, 0x77, + 0x67, 0x7a, 0x83, 0x6c, 0x68, 0x10, 0x26, 0x55, 0xce, 0xea, 0x36, 0x2f, 0x11, 0xf5, 0xc9, 0x66, + 0x02, 0xc6, 0x88, 0x18, 0x5c, 0xb7, 0xdb, 0x7c, 0x02, 0x29, 0x27, 0xff, 0x0f, 0x84, 0xc1, 0x57, + 0x59, 0x4f, 0x20, 0xbc, 0x94, 0x09, 0xf8, 0x35, 0x67, 0xe7, 0xd6, 0xc5, 0xc6, 0x6d, 0x19, 0x7c, + 0x41, 0x21, 0xf8, 0xe6, 0x91, 0x9b, 0x2b, 0x0d, 0x9c, 0x48, 0x83, 0xf4, 0xe9, 0x52, 0xbc, 0xec, + 0x62, 0xef, 0x59, 0xf6, 0x42, 0xb8, 0x2f, 0x48, 0x5d, 0x22, 0x24, 0xc6, 0xaf, 0xb4, 0xab, 0xfb, + 0x8d, 0x83, 0xa3, 0xcb, 0xe7, 0xc0, 0x0b, 0xa1, 0xe0, 0x57, 0xe5, 0x1f, 0xfd, 0xdb, 0x15, 0xb0, + 0xf3, 0xd4, 0x05, 0x74, 0xed, 0xef, 0xf0, 0x09, 0xa4, 0x4d, 0xb2, 0x95, 0x1b, 0xd0, 0x4a, 0x24, + 0x50, 0x66, 0x30, 0xc5, 0xf4, 0x1a, 0xa9, 0xe6, 0xb2, 0x57, 0x26, 0x60, 0x8f, 0x36, 0xaf, 0x58, + 0xa7, 0x79, 0x66, 0xfc, 0x5a, 0xbb, 0x6a, 0xf3, 0x2a, 0x90, 0xad, 0xe7, 0x36, 0x1e, 0xe3, 0xd7, + 0x8b, 0x7a, 0x81, 0xe8, 0x3b, 0x52, 0x87, 0x4f, 0xa8, 0x85, 0xbf, 0xe1, 0xbc, 0x3e, 0xb9, 0xda, + 0x26, 0xb3, 0x63, 0xab, 0x75, 0xac, 0x50, 0x8f, 0x78, 0xa1, 0xdb, 0x04, 0x42, 0x66, 0x45, 0xdb, + 0x70, 0x1f, 0x46, 0xe5, 0x86, 0xd9, 0x23, 0x7d, 0x44, 0xea, 0x43, 0x31, 0xc8, 0xa1, 0xfc, 0x4a, + 0xb7, 0xd7, 0x36, 0xe0, 0xd4, 0x5e, 0x5b, 0x0a, 0x2f, 0x98, 0x47, 0x95, 0x43, 0x2f, 0xf8, 0xea, + 0x91, 0xbd, 0x35, 0xdf, 0x81, 0x46, 0x84, 0x44, 0x93, 0x65, 0x37, 0xbe, 0xe7, 0x0c, 0x77, 0x2f, + 0x6f, 0x78, 0xfa, 0x71, 0xf8, 0x9c, 0x2c, 0x6d, 0x93, 0xc6, 0x9c, 0x8c, 0x73, 0xb5, 0xc3, 0xe7, + 0x4b, 0x41, 0x50, 0x4e, 0xc4, 0x79, 0xa0, 0xd7, 0x27, 0xcb, 0xe6, 0xb9, 0x5c, 0x0a, 0xf0, 0x78, + 0xf7, 0xc7, 0xb8, 0xe5, 0xfd, 0x1c, 0xb7, 0xbc, 0xdf, 0xe3, 0x96, 0xf7, 0xe5, 0x4f, 0xeb, 0xbf, + 0xb7, 0x9b, 0x65, 0x3f, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x51, 0x94, 0xce, 0xdf, 0x0d, 0x06, + 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1beta1/register.go new file mode 100644 index 00000000..3dede963 --- /dev/null +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/certificates/v1beta1/register.go @@ -0,0 +1,9 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("certificates.k8s.io", "v1beta1", "certificatesigningrequests", false, &CertificateSigningRequest{}) + + k8s.RegisterList("certificates.k8s.io", "v1beta1", "certificatesigningrequests", false, &CertificateSigningRequestList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/v1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/core/v1/generated.pb.go similarity index 76% rename from vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/v1/generated.pb.go rename to vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/core/v1/generated.pb.go index 27632880..5ada6a62 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/v1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/core/v1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/v1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/core/v1/generated.proto /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/api/v1/generated.proto + k8s.io/api/core/v1/generated.proto It has these top-level messages: AWSElasticBlockStoreVolumeSource @@ -14,11 +13,15 @@ AttachedVolume AvoidPods AzureDiskVolumeSource + AzureFilePersistentVolumeSource AzureFileVolumeSource Binding + CSIPersistentVolumeSource Capabilities + CephFSPersistentVolumeSource CephFSVolumeSource CinderVolumeSource + ClientIPConfig ComponentCondition ComponentStatus ComponentStatusList @@ -52,6 +55,7 @@ EnvVarSource Event EventList + EventSeries EventSource ExecAction FCVolumeSource @@ -63,7 +67,9 @@ HTTPGetAction HTTPHeader Handler + HostAlias HostPathVolumeSource + ISCSIPersistentVolumeSource ISCSIVolumeSource KeyToPath Lifecycle @@ -76,6 +82,7 @@ LoadBalancerIngress LoadBalancerStatus LocalObjectReference + LocalVolumeSource NFSVolumeSource Namespace NamespaceList @@ -85,6 +92,7 @@ NodeAddress NodeAffinity NodeCondition + NodeConfigSource NodeDaemonEndpoints NodeList NodeProxyOptions @@ -100,6 +108,7 @@ ObjectReference PersistentVolume PersistentVolumeClaim + PersistentVolumeClaimCondition PersistentVolumeClaimList PersistentVolumeClaimSpec PersistentVolumeClaimStatus @@ -115,6 +124,8 @@ PodAntiAffinity PodAttachOptions PodCondition + PodDNSConfig + PodDNSConfigOption PodExecOptions PodList PodLogOptions @@ -135,6 +146,7 @@ Probe ProjectedVolumeSource QuobyteVolumeSource + RBDPersistentVolumeSource RBDVolumeSource RangeAllocation ReplicationController @@ -149,12 +161,14 @@ ResourceQuotaStatus ResourceRequirements SELinuxOptions + ScaleIOPersistentVolumeSource ScaleIOVolumeSource Secret SecretEnvSource SecretKeySelector SecretList SecretProjection + SecretReference SecretVolumeSource SecurityContext SerializedReference @@ -166,11 +180,15 @@ ServiceProxyOptions ServiceSpec ServiceStatus + SessionAffinityConfig + StorageOSPersistentVolumeSource + StorageOSVolumeSource Sysctl TCPSocketAction Taint Toleration Volume + VolumeDevice VolumeMount VolumeProjection VolumeSource @@ -182,11 +200,12 @@ package v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_resource 
"github.com/ericchiang/k8s/api/resource" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" -import k8s_io_kubernetes_pkg_runtime "github.com/ericchiang/k8s/runtime" +import _ "github.com/ericchiang/k8s/apis/apiextensions/v1beta1" +import k8s_io_apimachinery_pkg_api_resource "github.com/ericchiang/k8s/apis/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_runtime "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" -import k8s_io_kubernetes_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" +import k8s_io_apimachinery_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" import io "io" @@ -209,12 +228,12 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // ownership management and SELinux relabeling. type AWSElasticBlockStoreVolumeSource struct { // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore VolumeID *string `protobuf:"bytes,1,opt,name=volumeID" json:"volumeID,omitempty"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FsType *string `protobuf:"bytes,2,opt,name=fsType" json:"fsType,omitempty"` @@ -226,7 +245,7 @@ type AWSElasticBlockStoreVolumeSource struct { Partition *int32 `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"` // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". // If omitted, the default is "false". - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional ReadOnly *bool `protobuf:"varint,4,opt,name=readOnly" json:"readOnly,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -375,8 +394,10 @@ type AzureDiskVolumeSource struct { // Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly *bool `protobuf:"varint,5,opt,name=readOnly" json:"readOnly,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReadOnly *bool `protobuf:"varint,5,opt,name=readOnly" json:"readOnly,omitempty"` + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + Kind *string `protobuf:"bytes,6,opt,name=kind" json:"kind,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } @@ -419,6 +440,65 @@ func (m *AzureDiskVolumeSource) GetReadOnly() bool { return false } +func (m *AzureDiskVolumeSource) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
+type AzureFilePersistentVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName *string `protobuf:"bytes,1,opt,name=secretName" json:"secretName,omitempty"` + // Share Name + ShareName *string `protobuf:"bytes,2,opt,name=shareName" json:"shareName,omitempty"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool `protobuf:"varint,3,opt,name=readOnly" json:"readOnly,omitempty"` + // the namespace of the secret that contains Azure Storage Account Name and Key + // default is the same as the Pod + // +optional + SecretNamespace *string `protobuf:"bytes,4,opt,name=secretNamespace" json:"secretNamespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } +func (m *AzureFilePersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*AzureFilePersistentVolumeSource) ProtoMessage() {} +func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *AzureFilePersistentVolumeSource) GetSecretName() string { + if m != nil && m.SecretName != nil { + return *m.SecretName + } + return "" +} + +func (m *AzureFilePersistentVolumeSource) GetShareName() string { + if m != nil && m.ShareName != nil { + return *m.ShareName + } + return "" +} + +func (m *AzureFilePersistentVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + +func (m *AzureFilePersistentVolumeSource) GetSecretNamespace() string { + if m != nil && m.SecretNamespace != nil { + return *m.SecretNamespace + } + return "" +} + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. type AzureFileVolumeSource struct { // the name of secret that contains Azure Storage Account Name and Key @@ -435,7 +515,7 @@ type AzureFileVolumeSource struct { func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } func (m *AzureFileVolumeSource) String() string { return proto.CompactTextString(m) } func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *AzureFileVolumeSource) GetSecretName() string { if m != nil && m.SecretName != nil { @@ -458,13 +538,13 @@ func (m *AzureFileVolumeSource) GetReadOnly() bool { return false } -// Binding ties one object to another. -// For example, a pod is bound to a node by a scheduler. +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. type Binding struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // The target object that you want to bind to the standard object. 
Target *ObjectReference `protobuf:"bytes,2,opt,name=target" json:"target,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -473,9 +553,9 @@ type Binding struct { func (m *Binding) Reset() { *m = Binding{} } func (m *Binding) String() string { return proto.CompactTextString(m) } func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *Binding) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Binding) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -489,6 +569,50 @@ func (m *Binding) GetTarget() *ObjectReference { return nil } +// Represents storage that is managed by an external CSI volume driver +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver *string `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"` + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle *string `protobuf:"bytes,2,opt,name=volumeHandle" json:"volumeHandle,omitempty"` + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly *bool `protobuf:"varint,3,opt,name=readOnly" json:"readOnly,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (m *CSIPersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + +func (m *CSIPersistentVolumeSource) GetDriver() string { + if m != nil && m.Driver != nil { + return *m.Driver + } + return "" +} + +func (m *CSIPersistentVolumeSource) GetVolumeHandle() string { + if m != nil && m.VolumeHandle != nil { + return *m.VolumeHandle + } + return "" +} + +func (m *CSIPersistentVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + // Adds and removes POSIX capabilities from running containers. type Capabilities struct { // Added capabilities @@ -503,7 +627,7 @@ type Capabilities struct { func (m *Capabilities) Reset() { *m = Capabilities{} } func (m *Capabilities) String() string { return proto.CompactTextString(m) } func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *Capabilities) GetAdd() []string { if m != nil { @@ -519,30 +643,108 @@ func (m *Capabilities) GetDrop() []string { return nil } +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. 
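Illustrative aside, not part of the vendored diff: the newly added CSIPersistentVolumeSource is an ordinary generated message, so it can be built from its three optional fields (Driver, VolumeHandle, ReadOnly) and round-tripped through the golang/protobuf package that this file already imports. A minimal sketch under that assumption; the driver name and volume handle are invented:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	corev1 "github.com/ericchiang/k8s/apis/core/v1"
)

func main() {
	driver := "csi.example.com" // hypothetical CSI driver name
	handle := "vol-0123"        // hypothetical volume handle on that driver
	readOnly := true

	src := &corev1.CSIPersistentVolumeSource{
		Driver:       &driver,
		VolumeHandle: &handle,
		ReadOnly:     &readOnly,
	}

	// Encode and decode with the proto package imported by the generated code.
	raw, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}
	var decoded corev1.CSIPersistentVolumeSource
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetDriver(), decoded.GetVolumeHandle(), decoded.GetReadOnly())
}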
+type CephFSPersistentVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + Monitors []string `protobuf:"bytes,1,rep,name=monitors" json:"monitors,omitempty"` + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path *string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` + // Optional: User is the rados user name, default is admin + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + User *string `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretFile *string `protobuf:"bytes,4,opt,name=secretFile" json:"secretFile,omitempty"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretRef *SecretReference `protobuf:"bytes,5,opt,name=secretRef" json:"secretRef,omitempty"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + ReadOnly *bool `protobuf:"varint,6,opt,name=readOnly" json:"readOnly,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } +func (m *CephFSPersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*CephFSPersistentVolumeSource) ProtoMessage() {} +func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{10} +} + +func (m *CephFSPersistentVolumeSource) GetMonitors() []string { + if m != nil { + return m.Monitors + } + return nil +} + +func (m *CephFSPersistentVolumeSource) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +func (m *CephFSPersistentVolumeSource) GetUser() string { + if m != nil && m.User != nil { + return *m.User + } + return "" +} + +func (m *CephFSPersistentVolumeSource) GetSecretFile() string { + if m != nil && m.SecretFile != nil { + return *m.SecretFile + } + return "" +} + +func (m *CephFSPersistentVolumeSource) GetSecretRef() *SecretReference { + if m != nil { + return m.SecretRef + } + return nil +} + +func (m *CephFSPersistentVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. 
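Illustrative aside, not part of the vendored diff: CephFSPersistentVolumeSource above mirrors the pod-level CephFSVolumeSource that follows, except its SecretRef is a *SecretReference rather than a *LocalObjectReference, so a persistent volume can reference a secret in another namespace. A minimal construction sketch; the monitor addresses, user, and secret are invented, and the SecretReference field names (Name, Namespace) are assumed from the upstream API rather than shown in this hunk:

package main

import (
	"fmt"

	corev1 "github.com/ericchiang/k8s/apis/core/v1"
)

func main() {
	user := "admin"                  // hypothetical Ceph user
	secretName := "ceph-secret"      // hypothetical secret holding the keyring
	secretNamespace := "kube-system" // hypothetical namespace of that secret

	src := &corev1.CephFSPersistentVolumeSource{
		Monitors: []string{"10.0.0.1:6789", "10.0.0.2:6789"}, // hypothetical monitors
		User:     &user,
		// Field names on SecretReference are assumed; only the type appears in this diff.
		SecretRef: &corev1.SecretReference{Name: &secretName, Namespace: &secretNamespace},
	}

	fmt.Println(src.GetUser(), len(src.GetMonitors()))
}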
type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it Monitors []string `protobuf:"bytes,1,rep,name=monitors" json:"monitors,omitempty"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path *string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional User *string `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile *string `protobuf:"bytes,4,opt,name=secretFile" json:"secretFile,omitempty"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `protobuf:"bytes,5,opt,name=secretRef" json:"secretRef,omitempty"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly *bool `protobuf:"varint,6,opt,name=readOnly" json:"readOnly,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -551,7 +753,7 @@ type CephFSVolumeSource struct { func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (m *CephFSVolumeSource) String() string { return proto.CompactTextString(m) } func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *CephFSVolumeSource) GetMonitors() []string { if m != nil { @@ -601,17 +803,17 @@ func (m *CephFSVolumeSource) GetReadOnly() bool { // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md VolumeID *string `protobuf:"bytes,1,opt,name=volumeID" json:"volumeID,omitempty"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional FsType *string `protobuf:"bytes,2,opt,name=fsType" json:"fsType,omitempty"` // Optional: Defaults to false (read/write). 
ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional ReadOnly *bool `protobuf:"varint,3,opt,name=readOnly" json:"readOnly,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -620,7 +822,7 @@ type CinderVolumeSource struct { func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (m *CinderVolumeSource) String() string { return proto.CompactTextString(m) } func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *CinderVolumeSource) GetVolumeID() string { if m != nil && m.VolumeID != nil { @@ -643,6 +845,28 @@ func (m *CinderVolumeSource) GetReadOnly() bool { return false } +// ClientIPConfig represents the configurations of Client IP based session affinity. +type ClientIPConfig struct { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). + // +optional + TimeoutSeconds *int32 `protobuf:"varint,1,opt,name=timeoutSeconds" json:"timeoutSeconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (m *ClientIPConfig) String() string { return proto.CompactTextString(m) } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ClientIPConfig) GetTimeoutSeconds() int32 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return 0 +} + // Information about the condition of a component. type ComponentCondition struct { // Type of condition for a component. @@ -665,7 +889,7 @@ type ComponentCondition struct { func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (m *ComponentCondition) String() string { return proto.CompactTextString(m) } func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ComponentCondition) GetType() string { if m != nil && m.Type != nil { @@ -698,11 +922,13 @@ func (m *ComponentCondition) GetError() string { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. type ComponentStatus struct { // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of component conditions observed // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []*ComponentCondition `protobuf:"bytes,2,rep,name=conditions" json:"conditions,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -710,9 +936,9 @@ type ComponentStatus struct { func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (m *ComponentStatus) String() string { return proto.CompactTextString(m) } func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -func (m *ComponentStatus) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ComponentStatus) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -729,9 +955,9 @@ func (m *ComponentStatus) GetConditions() []*ComponentCondition { // Status of all the conditions for the component as a list of ComponentStatus objects. type ComponentStatusList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of ComponentStatus objects. Items []*ComponentStatus `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -740,9 +966,9 @@ type ComponentStatusList struct { func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (m *ComponentStatusList) String() string { return proto.CompactTextString(m) } func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -func (m *ComponentStatusList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ComponentStatusList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -759,11 +985,11 @@ func (m *ComponentStatusList) GetItems() []*ComponentStatus { // ConfigMap holds configuration data for pods to consume. type ConfigMap struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Data contains the configuration data. 
- // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. // +optional Data map[string]string `protobuf:"bytes,2,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` @@ -772,9 +998,9 @@ type ConfigMap struct { func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (m *ConfigMap) String() string { return proto.CompactTextString(m) } func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -func (m *ConfigMap) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ConfigMap) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -805,7 +1031,7 @@ type ConfigMapEnvSource struct { func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (m *ConfigMapEnvSource) String() string { return proto.CompactTextString(m) } func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *ConfigMapEnvSource) GetLocalObjectReference() *LocalObjectReference { if m != nil { @@ -836,7 +1062,7 @@ type ConfigMapKeySelector struct { func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (m *ConfigMapKeySelector) String() string { return proto.CompactTextString(m) } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ConfigMapKeySelector) GetLocalObjectReference() *LocalObjectReference { if m != nil { @@ -861,9 +1087,9 @@ func (m *ConfigMapKeySelector) GetOptional() bool { // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is the list of ConfigMaps. 
Items []*ConfigMap `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -872,9 +1098,9 @@ type ConfigMapList struct { func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (m *ConfigMapList) String() string { return proto.CompactTextString(m) } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } -func (m *ConfigMapList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ConfigMapList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -915,7 +1141,7 @@ type ConfigMapProjection struct { func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (m *ConfigMapProjection) String() string { return proto.CompactTextString(m) } func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ConfigMapProjection) GetLocalObjectReference() *LocalObjectReference { if m != nil { @@ -971,7 +1197,7 @@ type ConfigMapVolumeSource struct { func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (m *ConfigMapVolumeSource) String() string { return proto.CompactTextString(m) } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ConfigMapVolumeSource) GetLocalObjectReference() *LocalObjectReference { if m != nil { @@ -1008,7 +1234,9 @@ type Container struct { // Cannot be updated. Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Docker image name. - // More info: http://kubernetes.io/docs/user-guide/images + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. // +optional Image *string `protobuf:"bytes,2,opt,name=image" json:"image,omitempty"` // Entrypoint array. Not executed within a shell. @@ -1018,7 +1246,7 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Command []string `protobuf:"bytes,3,rep,name=command" json:"command,omitempty"` // Arguments to the entrypoint. @@ -1028,7 +1256,7 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. 
- // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` // Container's working directory. @@ -1045,6 +1273,8 @@ type Container struct { // accessible from the network. // Cannot be updated. // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge Ports []*ContainerPort `protobuf:"bytes,6,rep,name=ports" json:"ports,omitempty"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys @@ -1057,26 +1287,36 @@ type Container struct { // List of environment variables to set in the container. // Cannot be updated. // +optional + // +patchMergeKey=name + // +patchStrategy=merge Env []*EnvVar `protobuf:"bytes,7,rep,name=env" json:"env,omitempty"` // Compute Resources required by this container. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional Resources *ResourceRequirements `protobuf:"bytes,8,opt,name=resources" json:"resources,omitempty"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge VolumeMounts []*VolumeMount `protobuf:"bytes,9,rep,name=volumeMounts" json:"volumeMounts,omitempty"` + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []*VolumeDevice `protobuf:"bytes,21,rep,name=volumeDevices" json:"volumeDevices,omitempty"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional LivenessProbe *Probe `protobuf:"bytes,10,opt,name=livenessProbe" json:"livenessProbe,omitempty"` // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional ReadinessProbe *Probe `protobuf:"bytes,11,opt,name=readinessProbe" json:"readinessProbe,omitempty"` // Actions that the management system should take in response to container lifecycle events. @@ -1105,11 +1345,12 @@ type Container struct { // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/images#updating-images + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images // +optional ImagePullPolicy *string `protobuf:"bytes,14,opt,name=imagePullPolicy" json:"imagePullPolicy,omitempty"` // Security options the pod should run with. 
- // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional SecurityContext *SecurityContext `protobuf:"bytes,15,opt,name=securityContext" json:"securityContext,omitempty"` // Whether this container should allocate a buffer for stdin in the container runtime. If this @@ -1136,7 +1377,7 @@ type Container struct { func (m *Container) Reset() { *m = Container{} } func (m *Container) String() string { return proto.CompactTextString(m) } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *Container) GetName() string { if m != nil && m.Name != nil { @@ -1208,6 +1449,13 @@ func (m *Container) GetVolumeMounts() []*VolumeMount { return nil } +func (m *Container) GetVolumeDevices() []*VolumeDevice { + if m != nil { + return m.VolumeDevices + } + return nil +} + func (m *Container) GetLivenessProbe() *Probe { if m != nil { return m.LivenessProbe @@ -1292,7 +1540,7 @@ type ContainerImage struct { func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (m *ContainerImage) String() string { return proto.CompactTextString(m) } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *ContainerImage) GetNames() []string { if m != nil { @@ -1337,7 +1585,7 @@ type ContainerPort struct { func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (m *ContainerPort) String() string { return proto.CompactTextString(m) } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *ContainerPort) GetName() string { if m != nil && m.Name != nil { @@ -1393,7 +1641,7 @@ type ContainerState struct { func (m *ContainerState) Reset() { *m = ContainerState{} } func (m *ContainerState) String() string { return proto.CompactTextString(m) } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ContainerState) GetWaiting() *ContainerStateWaiting { if m != nil { @@ -1420,16 +1668,16 @@ func (m *ContainerState) GetTerminated() *ContainerStateTerminated { type ContainerStateRunning struct { // Time at which the container was last (re-)started // +optional - StartedAt *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,1,opt,name=startedAt" json:"startedAt,omitempty"` - XXX_unrecognized []byte `json:"-"` + StartedAt *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,1,opt,name=startedAt" json:"startedAt,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (m *ContainerStateRunning) String() string { return proto.CompactTextString(m) } func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func 
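The new GetVolumeDevices accessor above follows the same nil-safe pattern as the other generated getters, so callers can read optional fields without guarding every pointer. A minimal sketch, assuming a corev1 import alias:

func describeContainer(c *corev1.Container) (name string, devices int) {
	// Both accessors return zero values when c or the underlying field is nil.
	return c.GetName(), len(c.GetVolumeDevices())
}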
(*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } -func (m *ContainerStateRunning) GetStartedAt() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *ContainerStateRunning) GetStartedAt() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.StartedAt } @@ -1451,10 +1699,10 @@ type ContainerStateTerminated struct { Message *string `protobuf:"bytes,4,opt,name=message" json:"message,omitempty"` // Time at which previous execution of the container started // +optional - StartedAt *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,5,opt,name=startedAt" json:"startedAt,omitempty"` + StartedAt *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,5,opt,name=startedAt" json:"startedAt,omitempty"` // Time at which the container last terminated // +optional - FinishedAt *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=finishedAt" json:"finishedAt,omitempty"` + FinishedAt *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=finishedAt" json:"finishedAt,omitempty"` // Container's ID in the format 'docker://' // +optional ContainerID *string `protobuf:"bytes,7,opt,name=containerID" json:"containerID,omitempty"` @@ -1465,7 +1713,7 @@ func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminat func (m *ContainerStateTerminated) String() string { return proto.CompactTextString(m) } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{24} + return fileDescriptorGenerated, []int{28} } func (m *ContainerStateTerminated) GetExitCode() int32 { @@ -1496,14 +1744,14 @@ func (m *ContainerStateTerminated) GetMessage() string { return "" } -func (m *ContainerStateTerminated) GetStartedAt() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *ContainerStateTerminated) GetStartedAt() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.StartedAt } return nil } -func (m *ContainerStateTerminated) GetFinishedAt() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *ContainerStateTerminated) GetFinishedAt() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.FinishedAt } @@ -1531,7 +1779,7 @@ type ContainerStateWaiting struct { func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (m *ContainerStateWaiting) String() string { return proto.CompactTextString(m) } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *ContainerStateWaiting) GetReason() string { if m != nil && m.Reason != nil { @@ -1566,13 +1814,12 @@ type ContainerStatus struct { // garbage collection. This value will get capped at 5 by GC. RestartCount *int32 `protobuf:"varint,5,opt,name=restartCount" json:"restartCount,omitempty"` // The image the container is running. - // More info: http://kubernetes.io/docs/user-guide/images + // More info: https://kubernetes.io/docs/concepts/containers/images // TODO(dchen1107): Which image the container is running with? Image *string `protobuf:"bytes,6,opt,name=image" json:"image,omitempty"` // ImageID of the container's image. ImageID *string `protobuf:"bytes,7,opt,name=imageID" json:"imageID,omitempty"` // Container's ID in the format 'docker://'. 
- // More info: http://kubernetes.io/docs/user-guide/container-environment#container-information // +optional ContainerID *string `protobuf:"bytes,8,opt,name=containerID" json:"containerID,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1581,7 +1828,7 @@ type ContainerStatus struct { func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (m *ContainerStatus) String() string { return proto.CompactTextString(m) } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *ContainerStatus) GetName() string { if m != nil && m.Name != nil { @@ -1649,7 +1896,7 @@ type DaemonEndpoint struct { func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (m *DaemonEndpoint) String() string { return proto.CompactTextString(m) } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *DaemonEndpoint) GetPort() int32 { if m != nil && m.Port != nil { @@ -1682,6 +1929,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *string `protobuf:"bytes,4,opt,name=propagationPolicy" json:"propagationPolicy,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1690,7 +1941,7 @@ type DeleteOptions struct { func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func (m *DeleteOptions) String() string { return proto.CompactTextString(m) } func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *DeleteOptions) GetGracePeriodSeconds() int64 { if m != nil && m.GracePeriodSeconds != nil { @@ -1733,7 +1984,7 @@ type DownwardAPIProjection struct { func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (m *DownwardAPIProjection) String() string { return proto.CompactTextString(m) } func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *DownwardAPIProjection) GetItems() []*DownwardAPIVolumeFile { if m != nil { @@ -1765,7 +2016,7 @@ type DownwardAPIVolumeFile struct { func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (m *DownwardAPIVolumeFile) String() string { return proto.CompactTextString(m) } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *DownwardAPIVolumeFile) GetPath() string { if m != nil && m.Path != nil { @@ 
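The DeleteOptions hunk above documents the accepted PropagationPolicy values ('Orphan', 'Background', 'Foreground'). A minimal sketch of requesting foreground cascading deletion, assuming a corev1 import alias:

func foregroundDelete(gracePeriod int64) *corev1.DeleteOptions {
	policy := "Foreground" // one of the documented values: Orphan, Background, Foreground
	return &corev1.DeleteOptions{
		GracePeriodSeconds: &gracePeriod,
		PropagationPolicy:  &policy,
	}
}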
-1815,7 +2066,7 @@ func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource func (m *DownwardAPIVolumeSource) String() string { return proto.CompactTextString(m) } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptorGenerated, []int{35} } func (m *DownwardAPIVolumeSource) GetItems() []*DownwardAPIVolumeFile { @@ -1838,16 +2089,24 @@ type EmptyDirVolumeSource struct { // What type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + Medium *string `protobuf:"bytes,1,opt,name=medium" json:"medium,omitempty"` + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir // +optional - Medium *string `protobuf:"bytes,1,opt,name=medium" json:"medium,omitempty"` - XXX_unrecognized []byte `json:"-"` + SizeLimit *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=sizeLimit" json:"sizeLimit,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (m *EmptyDirVolumeSource) String() string { return proto.CompactTextString(m) } func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *EmptyDirVolumeSource) GetMedium() string { if m != nil && m.Medium != nil { @@ -1856,6 +2115,13 @@ func (m *EmptyDirVolumeSource) GetMedium() string { return "" } +func (m *EmptyDirVolumeSource) GetSizeLimit() *k8s_io_apimachinery_pkg_api_resource.Quantity { + if m != nil { + return m.SizeLimit + } + return nil +} + // EndpointAddress is a tuple that describes single IP address. type EndpointAddress struct { // The IP of this endpoint. 
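EmptyDirVolumeSource gains an optional SizeLimit alongside Medium, capping memory-backed emptyDir volumes. A minimal sketch that reads both through the nil-safe accessors, assuming a corev1 import alias (constructing the apimachinery resource.Quantity value itself is outside this hunk):

func emptyDirIsBoundedMemory(src *corev1.EmptyDirVolumeSource) bool {
	// True only for a memory-backed emptyDir that also declares a size limit.
	return src.GetMedium() == "Memory" && src.GetSizeLimit() != nil
}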
@@ -1880,7 +2146,7 @@ type EndpointAddress struct { func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (m *EndpointAddress) String() string { return proto.CompactTextString(m) } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *EndpointAddress) GetIp() string { if m != nil && m.Ip != nil { @@ -1930,7 +2196,7 @@ type EndpointPort struct { func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (m *EndpointPort) String() string { return proto.CompactTextString(m) } func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EndpointPort) GetName() string { if m != nil && m.Name != nil { @@ -1982,7 +2248,7 @@ type EndpointSubset struct { func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (m *EndpointSubset) String() string { return proto.CompactTextString(m) } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *EndpointSubset) GetAddresses() []*EndpointAddress { if m != nil { @@ -2019,9 +2285,9 @@ func (m *EndpointSubset) GetPorts() []*EndpointPort { // ] type Endpoints struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, // some of which are ready and some of which are not (because they come from @@ -2036,9 +2302,9 @@ type Endpoints struct { func (m *Endpoints) Reset() { *m = Endpoints{} } func (m *Endpoints) String() string { return proto.CompactTextString(m) } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } -func (m *Endpoints) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Endpoints) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -2055,9 +2321,9 @@ func (m *Endpoints) GetSubsets() []*EndpointSubset { // EndpointsList is a list of endpoints. type EndpointsList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of endpoints. 
Items []*Endpoints `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -2066,9 +2332,9 @@ type EndpointsList struct { func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (m *EndpointsList) String() string { return proto.CompactTextString(m) } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } -func (m *EndpointsList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *EndpointsList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -2099,7 +2365,7 @@ type EnvFromSource struct { func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (m *EnvFromSource) String() string { return proto.CompactTextString(m) } func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EnvFromSource) GetPrefix() string { if m != nil && m.Prefix != nil { @@ -2145,7 +2411,7 @@ type EnvVar struct { func (m *EnvVar) Reset() { *m = EnvVar{} } func (m *EnvVar) String() string { return proto.CompactTextString(m) } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *EnvVar) GetName() string { if m != nil && m.Name != nil { @@ -2171,11 +2437,11 @@ func (m *EnvVar) GetValueFrom() *EnvVarSource { // EnvVarSource represents a source for the value of an EnvVar. type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.podIP. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. // +optional FieldRef *ObjectFieldSelector `protobuf:"bytes,1,opt,name=fieldRef" json:"fieldRef,omitempty"` // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. // +optional ResourceFieldRef *ResourceFieldSelector `protobuf:"bytes,2,opt,name=resourceFieldRef" json:"resourceFieldRef,omitempty"` // Selects a key of a ConfigMap. @@ -2190,7 +2456,7 @@ type EnvVarSource struct { func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (m *EnvVarSource) String() string { return proto.CompactTextString(m) } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *EnvVarSource) GetFieldRef() *ObjectFieldSelector { if m != nil { @@ -2221,11 +2487,10 @@ func (m *EnvVarSource) GetSecretKeyRef() *SecretKeySelector { } // Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. type Event struct { // Standard object's metadata. 
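The EnvVarSource comments above are widened to allow status.hostIP via fieldRef and the ephemeral-storage limits/requests via resourceFieldRef. A minimal sketch of exposing the host IP to a container, assuming a corev1 import alias and that ObjectFieldSelector exposes a FieldPath string pointer as in the upstream API (its definition is not in this hunk):

func hostIPEnvVar() *corev1.EnvVar {
	name, path := "HOST_IP", "status.hostIP"
	return &corev1.EnvVar{
		Name: &name,
		ValueFrom: &corev1.EnvVarSource{
			// FieldPath name assumed from the upstream API.
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: &path},
		},
	}
}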
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // The object that this event is about. InvolvedObject *ObjectReference `protobuf:"bytes,2,opt,name=involvedObject" json:"involvedObject,omitempty"` // This should be a short, machine understandable string that gives the reason @@ -2242,25 +2507,43 @@ type Event struct { Source *EventSource `protobuf:"bytes,5,opt,name=source" json:"source,omitempty"` // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) // +optional - FirstTimestamp *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=firstTimestamp" json:"firstTimestamp,omitempty"` + FirstTimestamp *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=firstTimestamp" json:"firstTimestamp,omitempty"` // The time at which the most recent occurrence of this event was recorded. // +optional - LastTimestamp *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=lastTimestamp" json:"lastTimestamp,omitempty"` + LastTimestamp *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=lastTimestamp" json:"lastTimestamp,omitempty"` // The number of times this event has occurred. // +optional Count *int32 `protobuf:"varint,8,opt,name=count" json:"count,omitempty"` // Type of this event (Normal, Warning), new types could be added in the future // +optional - Type *string `protobuf:"bytes,9,opt,name=type" json:"type,omitempty"` - XXX_unrecognized []byte `json:"-"` + Type *string `protobuf:"bytes,9,opt,name=type" json:"type,omitempty"` + // Time when this Event was first observed. + // +optional + EventTime *k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime `protobuf:"bytes,10,opt,name=eventTime" json:"eventTime,omitempty"` + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `protobuf:"bytes,11,opt,name=series" json:"series,omitempty"` + // What action was taken/failed regarding to the Regarding object. + // +optional + Action *string `protobuf:"bytes,12,opt,name=action" json:"action,omitempty"` + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference `protobuf:"bytes,13,opt,name=related" json:"related,omitempty"` + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingComponent *string `protobuf:"bytes,14,opt,name=reportingComponent" json:"reportingComponent,omitempty"` + // ID of the controller instance, e.g. `kubelet-xyzf`. 
+ // +optional + ReportingInstance *string `protobuf:"bytes,15,opt,name=reportingInstance" json:"reportingInstance,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } -func (m *Event) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Event) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -2295,14 +2578,14 @@ func (m *Event) GetSource() *EventSource { return nil } -func (m *Event) GetFirstTimestamp() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *Event) GetFirstTimestamp() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.FirstTimestamp } return nil } -func (m *Event) GetLastTimestamp() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *Event) GetLastTimestamp() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTimestamp } @@ -2323,12 +2606,54 @@ func (m *Event) GetType() string { return "" } +func (m *Event) GetEventTime() *k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime { + if m != nil { + return m.EventTime + } + return nil +} + +func (m *Event) GetSeries() *EventSeries { + if m != nil { + return m.Series + } + return nil +} + +func (m *Event) GetAction() string { + if m != nil && m.Action != nil { + return *m.Action + } + return "" +} + +func (m *Event) GetRelated() *ObjectReference { + if m != nil { + return m.Related + } + return nil +} + +func (m *Event) GetReportingComponent() string { + if m != nil && m.ReportingComponent != nil { + return *m.ReportingComponent + } + return "" +} + +func (m *Event) GetReportingInstance() string { + if m != nil && m.ReportingInstance != nil { + return *m.ReportingInstance + } + return "" +} + // EventList is a list of events. type EventList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of events Items []*Event `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -2337,9 +2662,9 @@ type EventList struct { func (m *EventList) Reset() { *m = EventList{} } func (m *EventList) String() string { return proto.CompactTextString(m) } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } -func (m *EventList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *EventList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -2353,6 +2678,44 @@ func (m *EventList) GetItems() []*Event { return nil } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. 
+type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count *int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` + // Time of the last occurence observed + LastObservedTime *k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime `protobuf:"bytes,2,opt,name=lastObservedTime" json:"lastObservedTime,omitempty"` + // State of this Series: Ongoing or Finished + State *string `protobuf:"bytes,3,opt,name=state" json:"state,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (m *EventSeries) String() string { return proto.CompactTextString(m) } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } + +func (m *EventSeries) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *EventSeries) GetLastObservedTime() *k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime { + if m != nil { + return m.LastObservedTime + } + return nil +} + +func (m *EventSeries) GetState() string { + if m != nil && m.State != nil { + return *m.State + } + return "" +} + // EventSource contains information for an event. type EventSource struct { // Component from which the event is generated. @@ -2367,7 +2730,7 @@ type EventSource struct { func (m *EventSource) Reset() { *m = EventSource{} } func (m *EventSource) String() string { return proto.CompactTextString(m) } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } func (m *EventSource) GetComponent() string { if m != nil && m.Component != nil { @@ -2398,7 +2761,7 @@ type ExecAction struct { func (m *ExecAction) Reset() { *m = ExecAction{} } func (m *ExecAction) String() string { return proto.CompactTextString(m) } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *ExecAction) GetCommand() []string { if m != nil { @@ -2411,9 +2774,11 @@ func (m *ExecAction) GetCommand() []string { // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. type FCVolumeSource struct { - // Required: FC target worldwide names (WWNs) + // Optional: FC target worldwide names (WWNs) + // +optional TargetWWNs []string `protobuf:"bytes,1,rep,name=targetWWNs" json:"targetWWNs,omitempty"` - // Required: FC target lun number + // Optional: FC target lun number + // +optional Lun *int32 `protobuf:"varint,2,opt,name=lun" json:"lun,omitempty"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. @@ -2424,14 +2789,18 @@ type FCVolumeSource struct { // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional - ReadOnly *bool `protobuf:"varint,4,opt,name=readOnly" json:"readOnly,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReadOnly *bool `protobuf:"varint,4,opt,name=readOnly" json:"readOnly,omitempty"` + // Optional: FC volume world wide identifiers (wwids) + // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
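Event grows deduplication-oriented fields (EventTime, Series, Action, Related, ReportingComponent, ReportingInstance), and the new EventSeries type records a series' running count, last observed time and state. A minimal sketch of a series-aware occurrence count, assuming a corev1 import alias:

func occurrenceCount(e *corev1.Event) int32 {
	// Prefer the series counter when the event is part of a series; fall back
	// to the legacy per-event Count otherwise.
	if s := e.GetSeries(); s != nil {
		return s.GetCount()
	}
	return e.GetCount()
}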
+ // +optional + Wwids []string `protobuf:"bytes,5,rep,name=wwids" json:"wwids,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (m *FCVolumeSource) String() string { return proto.CompactTextString(m) } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *FCVolumeSource) GetTargetWWNs() []string { if m != nil { @@ -2461,8 +2830,15 @@ func (m *FCVolumeSource) GetReadOnly() bool { return false } +func (m *FCVolumeSource) GetWwids() []string { + if m != nil { + return m.Wwids + } + return nil +} + // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. Driver *string `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"` @@ -2491,7 +2867,7 @@ type FlexVolumeSource struct { func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (m *FlexVolumeSource) String() string { return proto.CompactTextString(m) } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *FlexVolumeSource) GetDriver() string { if m != nil && m.Driver != nil { @@ -2545,7 +2921,7 @@ type FlockerVolumeSource struct { func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (m *FlockerVolumeSource) String() string { return proto.CompactTextString(m) } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *FlockerVolumeSource) GetDatasetName() string { if m != nil && m.DatasetName != nil { @@ -2569,12 +2945,12 @@ func (m *FlockerVolumeSource) GetDatasetUUID() string { // PDs support ownership management and SELinux relabeling. type GCEPersistentDiskVolumeSource struct { // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk PdName *string `protobuf:"bytes,1,opt,name=pdName" json:"pdName,omitempty"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FsType *string `protobuf:"bytes,2,opt,name=fsType" json:"fsType,omitempty"` @@ -2582,12 +2958,12 @@ type GCEPersistentDiskVolumeSource struct { // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". 
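FCVolumeSource now accepts WWIDs as an alternative to the targetWWNs/lun pair; the comment above notes that the two addressing modes must not be combined. A minimal sketch of both forms, assuming a corev1 import alias:

func fcByWWID(wwid string) *corev1.FCVolumeSource {
	return &corev1.FCVolumeSource{Wwids: []string{wwid}}
}

func fcByTargetWWN(wwn string, lun int32) *corev1.FCVolumeSource {
	return &corev1.FCVolumeSource{TargetWWNs: []string{wwn}, Lun: &lun}
}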
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional Partition *int32 `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional ReadOnly *bool `protobuf:"varint,4,opt,name=readOnly" json:"readOnly,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -2597,7 +2973,7 @@ func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDisk func (m *GCEPersistentDiskVolumeSource) String() string { return proto.CompactTextString(m) } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptorGenerated, []int{53} } func (m *GCEPersistentDiskVolumeSource) GetPdName() string { @@ -2649,7 +3025,7 @@ type GitRepoVolumeSource struct { func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (m *GitRepoVolumeSource) String() string { return proto.CompactTextString(m) } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *GitRepoVolumeSource) GetRepository() string { if m != nil && m.Repository != nil { @@ -2676,14 +3052,14 @@ func (m *GitRepoVolumeSource) GetDirectory() string { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod Endpoints *string `protobuf:"bytes,1,opt,name=endpoints" json:"endpoints,omitempty"` // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod Path *string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly *bool `protobuf:"varint,3,opt,name=readOnly" json:"readOnly,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -2692,7 +3068,7 @@ type GlusterfsVolumeSource struct { func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (m *GlusterfsVolumeSource) String() string { return proto.CompactTextString(m) } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *GlusterfsVolumeSource) GetEndpoints() string { if m != nil && m.Endpoints != nil { @@ -2723,7 +3099,7 @@ type HTTPGetAction struct { // Name or number of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - Port *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=port" json:"port,omitempty"` + Port *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=port" json:"port,omitempty"` // Host name to connect to, defaults to the pod IP. You probably want to set // "Host" in httpHeaders instead. // +optional @@ -2741,7 +3117,7 @@ type HTTPGetAction struct { func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (m *HTTPGetAction) String() string { return proto.CompactTextString(m) } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *HTTPGetAction) GetPath() string { if m != nil && m.Path != nil { @@ -2750,7 +3126,7 @@ func (m *HTTPGetAction) GetPath() string { return "" } -func (m *HTTPGetAction) GetPort() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *HTTPGetAction) GetPort() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.Port } @@ -2790,7 +3166,7 @@ type HTTPHeader struct { func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (m *HTTPHeader) String() string { return proto.CompactTextString(m) } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *HTTPHeader) GetName() string { if m != nil && m.Name != nil { @@ -2827,7 +3203,7 @@ type Handler struct { func (m *Handler) Reset() { *m = Handler{} } func (m *Handler) String() string { return proto.CompactTextString(m) } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *Handler) GetExec() *ExecAction { if m != nil { @@ -2850,19 +3226,54 @@ func (m *Handler) GetTcpSocket() *TCPSocketAction { return nil } +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + // IP address of the host file entry. + Ip *string `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"` + // Hostnames for the above IP address. 
+ Hostnames []string `protobuf:"bytes,2,rep,name=hostnames" json:"hostnames,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HostAlias) Reset() { *m = HostAlias{} } +func (m *HostAlias) String() string { return proto.CompactTextString(m) } +func (*HostAlias) ProtoMessage() {} +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } + +func (m *HostAlias) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *HostAlias) GetHostnames() []string { + if m != nil { + return m.Hostnames + } + return nil +} + // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. type HostPathVolumeSource struct { // Path of the directory on the host. - // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath - Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // If the path is a symlink, it will follow the link to the real path. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // Type for HostPath Volume + // Defaults to "" + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (m *HostPathVolumeSource) String() string { return proto.CompactTextString(m) } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } func (m *HostPathVolumeSource) GetPath() string { if m != nil && m.Path != nil { @@ -2871,24 +3282,163 @@ func (m *HostPathVolumeSource) GetPath() string { return "" } +func (m *HostPathVolumeSource) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal *string `protobuf:"bytes,1,opt,name=targetPortal" json:"targetPortal,omitempty"` + // Target iSCSI Qualified Name. + Iqn *string `protobuf:"bytes,2,opt,name=iqn" json:"iqn,omitempty"` + // iSCSI Target Lun number. + Lun *int32 `protobuf:"varint,3,opt,name=lun" json:"lun,omitempty"` + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + IscsiInterface *string `protobuf:"bytes,4,opt,name=iscsiInterface" json:"iscsiInterface,omitempty"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FsType *string `protobuf:"bytes,5,opt,name=fsType" json:"fsType,omitempty"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. 
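Two additions above: HostAlias injects extra entries into the pod's hosts file, and HostPathVolumeSource gains an optional Type. A minimal sketch, assuming a corev1 import alias (accepted Type values such as "DirectoryOrCreate" come from the upstream API, not this hunk):

func hostsEntry(ip string, names ...string) *corev1.HostAlias {
	return &corev1.HostAlias{Ip: &ip, Hostnames: names}
}

func typedHostPath(path, hostPathType string) *corev1.HostPathVolumeSource {
	return &corev1.HostPathVolumeSource{Path: &path, Type: &hostPathType}
}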
+ // Defaults to false. + // +optional + ReadOnly *bool `protobuf:"varint,6,opt,name=readOnly" json:"readOnly,omitempty"` + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `protobuf:"bytes,7,rep,name=portals" json:"portals,omitempty"` + // whether support iSCSI Discovery CHAP authentication + // +optional + ChapAuthDiscovery *bool `protobuf:"varint,8,opt,name=chapAuthDiscovery" json:"chapAuthDiscovery,omitempty"` + // whether support iSCSI Session CHAP authentication + // +optional + ChapAuthSession *bool `protobuf:"varint,11,opt,name=chapAuthSession" json:"chapAuthSession,omitempty"` + // CHAP Secret for iSCSI target and initiator authentication + // +optional + SecretRef *SecretReference `protobuf:"bytes,10,opt,name=secretRef" json:"secretRef,omitempty"` + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string `protobuf:"bytes,12,opt,name=initiatorName" json:"initiatorName,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (m *ISCSIPersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{61} +} + +func (m *ISCSIPersistentVolumeSource) GetTargetPortal() string { + if m != nil && m.TargetPortal != nil { + return *m.TargetPortal + } + return "" +} + +func (m *ISCSIPersistentVolumeSource) GetIqn() string { + if m != nil && m.Iqn != nil { + return *m.Iqn + } + return "" +} + +func (m *ISCSIPersistentVolumeSource) GetLun() int32 { + if m != nil && m.Lun != nil { + return *m.Lun + } + return 0 +} + +func (m *ISCSIPersistentVolumeSource) GetIscsiInterface() string { + if m != nil && m.IscsiInterface != nil { + return *m.IscsiInterface + } + return "" +} + +func (m *ISCSIPersistentVolumeSource) GetFsType() string { + if m != nil && m.FsType != nil { + return *m.FsType + } + return "" +} + +func (m *ISCSIPersistentVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + +func (m *ISCSIPersistentVolumeSource) GetPortals() []string { + if m != nil { + return m.Portals + } + return nil +} + +func (m *ISCSIPersistentVolumeSource) GetChapAuthDiscovery() bool { + if m != nil && m.ChapAuthDiscovery != nil { + return *m.ChapAuthDiscovery + } + return false +} + +func (m *ISCSIPersistentVolumeSource) GetChapAuthSession() bool { + if m != nil && m.ChapAuthSession != nil { + return *m.ChapAuthSession + } + return false +} + +func (m *ISCSIPersistentVolumeSource) GetSecretRef() *SecretReference { + if m != nil { + return m.SecretRef + } + return nil +} + +func (m *ISCSIPersistentVolumeSource) GetInitiatorName() string { + if m != nil && m.InitiatorName != nil { + return *m.InitiatorName + } + return "" +} + // Represents an ISCSI disk. // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIVolumeSource struct { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). TargetPortal *string `protobuf:"bytes,1,opt,name=targetPortal" json:"targetPortal,omitempty"` // Target iSCSI Qualified Name. Iqn *string `protobuf:"bytes,2,opt,name=iqn" json:"iqn,omitempty"` - // iSCSI target lun number. + // iSCSI Target Lun number. Lun *int32 `protobuf:"varint,3,opt,name=lun" json:"lun,omitempty"` - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional IscsiInterface *string `protobuf:"bytes,4,opt,name=iscsiInterface" json:"iscsiInterface,omitempty"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#iscsi + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FsType *string `protobuf:"bytes,5,opt,name=fsType" json:"fsType,omitempty"` @@ -2896,17 +3446,31 @@ type ISCSIVolumeSource struct { // Defaults to false. // +optional ReadOnly *bool `protobuf:"varint,6,opt,name=readOnly" json:"readOnly,omitempty"` - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional - Portals []string `protobuf:"bytes,7,rep,name=portals" json:"portals,omitempty"` - XXX_unrecognized []byte `json:"-"` + Portals []string `protobuf:"bytes,7,rep,name=portals" json:"portals,omitempty"` + // whether support iSCSI Discovery CHAP authentication + // +optional + ChapAuthDiscovery *bool `protobuf:"varint,8,opt,name=chapAuthDiscovery" json:"chapAuthDiscovery,omitempty"` + // whether support iSCSI Session CHAP authentication + // +optional + ChapAuthSession *bool `protobuf:"varint,11,opt,name=chapAuthSession" json:"chapAuthSession,omitempty"` + // CHAP Secret for iSCSI target and initiator authentication + // +optional + SecretRef *LocalObjectReference `protobuf:"bytes,10,opt,name=secretRef" json:"secretRef,omitempty"` + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. 
+ // +optional + InitiatorName *string `protobuf:"bytes,12,opt,name=initiatorName" json:"initiatorName,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (m *ISCSIVolumeSource) String() string { return proto.CompactTextString(m) } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *ISCSIVolumeSource) GetTargetPortal() string { if m != nil && m.TargetPortal != nil { @@ -2957,6 +3521,34 @@ func (m *ISCSIVolumeSource) GetPortals() []string { return nil } +func (m *ISCSIVolumeSource) GetChapAuthDiscovery() bool { + if m != nil && m.ChapAuthDiscovery != nil { + return *m.ChapAuthDiscovery + } + return false +} + +func (m *ISCSIVolumeSource) GetChapAuthSession() bool { + if m != nil && m.ChapAuthSession != nil { + return *m.ChapAuthSession + } + return false +} + +func (m *ISCSIVolumeSource) GetSecretRef() *LocalObjectReference { + if m != nil { + return m.SecretRef + } + return nil +} + +func (m *ISCSIVolumeSource) GetInitiatorName() string { + if m != nil && m.InitiatorName != nil { + return *m.InitiatorName + } + return "" +} + // Maps a string key to a path within a volume. type KeyToPath struct { // The key to project. @@ -2978,7 +3570,7 @@ type KeyToPath struct { func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (m *KeyToPath) String() string { return proto.CompactTextString(m) } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *KeyToPath) GetKey() string { if m != nil && m.Key != nil { @@ -3008,7 +3600,7 @@ type Lifecycle struct { // PostStart is called immediately after a container is created. If the handler fails, // the container is terminated and restarted according to its restart policy. // Other management of the container blocks until the hook completes. - // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PostStart *Handler `protobuf:"bytes,1,opt,name=postStart" json:"postStart,omitempty"` // PreStop is called immediately before a container is terminated. @@ -3016,7 +3608,7 @@ type Lifecycle struct { // The reason for termination is passed to the handler. // Regardless of the outcome of the handler, the container is eventually terminated. // Other management of the container blocks until the hook completes. 
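Both iSCSI sources gain CHAP authentication fields and a custom InitiatorName; note that the new cluster-scoped ISCSIPersistentVolumeSource references its CHAP secret through *SecretReference, while the pod-level ISCSIVolumeSource above keeps a namespace-local *LocalObjectReference. A minimal sketch of the pod-level form, assuming a corev1 import alias and that LocalObjectReference carries a Name string pointer as in the upstream API:

func chapISCSI(portal, iqn string, lun int32) *corev1.ISCSIVolumeSource {
	enabled := true
	secret := "chap-secret" // Name field assumed from the upstream API
	return &corev1.ISCSIVolumeSource{
		TargetPortal:      &portal,
		Iqn:               &iqn,
		Lun:               &lun,
		ChapAuthDiscovery: &enabled,
		ChapAuthSession:   &enabled,
		SecretRef:         &corev1.LocalObjectReference{Name: &secret},
	}
}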
- // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PreStop *Handler `protobuf:"bytes,2,opt,name=preStop" json:"preStop,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3025,7 +3617,7 @@ type Lifecycle struct { func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (m *Lifecycle) String() string { return proto.CompactTextString(m) } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *Lifecycle) GetPostStart() *Handler { if m != nil { @@ -3044,11 +3636,11 @@ func (m *Lifecycle) GetPreStop() *Handler { // LimitRange sets resource usage limits for each kind of resource in a Namespace. type LimitRange struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the limits enforced. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *LimitRangeSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3057,9 +3649,9 @@ type LimitRange struct { func (m *LimitRange) Reset() { *m = LimitRange{} } func (m *LimitRange) String() string { return proto.CompactTextString(m) } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } -func (m *LimitRange) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *LimitRange) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -3080,26 +3672,26 @@ type LimitRangeItem struct { Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` // Max usage constraints on this kind by resource name. // +optional - Max map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=max" json:"max,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Max map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=max" json:"max,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Min usage constraints on this kind by resource name. // +optional - Min map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,rep,name=min" json:"min,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Min map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,3,rep,name=min" json:"min,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Default resource requirement limit value by resource name if resource limit is omitted. 
// +optional - Default map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,4,rep,name=default" json:"default,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Default map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,4,rep,name=default" json:"default,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. // +optional - DefaultRequest map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,5,rep,name=defaultRequest" json:"defaultRequest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + DefaultRequest map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,5,rep,name=defaultRequest" json:"defaultRequest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. // +optional - MaxLimitRequestRatio map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,6,rep,name=maxLimitRequestRatio" json:"maxLimitRequestRatio,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + MaxLimitRequestRatio map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,6,rep,name=maxLimitRequestRatio" json:"maxLimitRequestRatio,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (m *LimitRangeItem) String() string { return proto.CompactTextString(m) } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *LimitRangeItem) GetType() string { if m != nil && m.Type != nil { @@ -3108,35 +3700,35 @@ func (m *LimitRangeItem) GetType() string { return "" } -func (m *LimitRangeItem) GetMax() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *LimitRangeItem) GetMax() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Max } return nil } -func (m *LimitRangeItem) GetMin() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *LimitRangeItem) GetMin() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Min } return nil } -func (m *LimitRangeItem) GetDefault() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *LimitRangeItem) GetDefault() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Default } return nil } -func (m *LimitRangeItem) GetDefaultRequest() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *LimitRangeItem) GetDefaultRequest() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.DefaultRequest } return nil } -func (m *LimitRangeItem) GetMaxLimitRequestRatio() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *LimitRangeItem) GetMaxLimitRequestRatio() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { 
return m.MaxLimitRequestRatio } @@ -3146,11 +3738,11 @@ func (m *LimitRangeItem) GetMaxLimitRequestRatio() map[string]*k8s_io_kubernetes // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ Items []*LimitRange `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -3158,9 +3750,9 @@ type LimitRangeList struct { func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (m *LimitRangeList) String() string { return proto.CompactTextString(m) } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } -func (m *LimitRangeList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *LimitRangeList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -3184,7 +3776,7 @@ type LimitRangeSpec struct { func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (m *LimitRangeSpec) String() string { return proto.CompactTextString(m) } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *LimitRangeSpec) GetLimits() []*LimitRangeItem { if m != nil { @@ -3196,27 +3788,27 @@ func (m *LimitRangeSpec) GetLimits() []*LimitRangeItem { // List holds a list of objects, which may not be known by the server. type List struct { // Standard list metadata. 
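The LimitRangeItem maps above key resource names to *resource.Quantity pointers. A minimal sketch of walking them, assuming it sits in the same package as these generated types with "fmt" imported; only getters visible above are used, and the Quantity values are printed opaquely with %v rather than assuming anything about their internals:

func printLimitRange(lr *LimitRange) {
	if lr == nil || lr.Spec == nil {
		return
	}
	for _, item := range lr.Spec.GetLimits() {
		fmt.Printf("limit type %q\n", item.GetType())
		for name, q := range item.GetMax() {
			fmt.Printf("  max     %s = %v\n", name, q)
		}
		for name, q := range item.GetMin() {
			fmt.Printf("  min     %s = %v\n", name, q)
		}
		for name, q := range item.GetDefault() {
			fmt.Printf("  default %s = %v\n", name, q)
		}
	}
}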
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of objects - Items []*k8s_io_kubernetes_pkg_runtime.RawExtension `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` + Items []*k8s_io_apimachinery_pkg_runtime.RawExtension `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *List) Reset() { *m = List{} } func (m *List) String() string { return proto.CompactTextString(m) } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } -func (m *List) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *List) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } return nil } -func (m *List) GetItems() []*k8s_io_kubernetes_pkg_runtime.RawExtension { +func (m *List) GetItems() []*k8s_io_apimachinery_pkg_runtime.RawExtension { if m != nil { return m.Items } @@ -3235,6 +3827,9 @@ type ListOptions struct { // Defaults to everything. // +optional FieldSelector *string `protobuf:"bytes,2,opt,name=fieldSelector" json:"fieldSelector,omitempty"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized *bool `protobuf:"varint,6,opt,name=includeUninitialized" json:"includeUninitialized,omitempty"` // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. 
// +optional @@ -3256,7 +3851,7 @@ type ListOptions struct { func (m *ListOptions) Reset() { *m = ListOptions{} } func (m *ListOptions) String() string { return proto.CompactTextString(m) } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *ListOptions) GetLabelSelector() string { if m != nil && m.LabelSelector != nil { @@ -3272,6 +3867,13 @@ func (m *ListOptions) GetFieldSelector() string { return "" } +func (m *ListOptions) GetIncludeUninitialized() bool { + if m != nil && m.IncludeUninitialized != nil { + return *m.IncludeUninitialized + } + return false +} + func (m *ListOptions) GetWatch() bool { if m != nil && m.Watch != nil { return *m.Watch @@ -3310,7 +3912,7 @@ type LoadBalancerIngress struct { func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (m *LoadBalancerIngress) String() string { return proto.CompactTextString(m) } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LoadBalancerIngress) GetIp() string { if m != nil && m.Ip != nil { @@ -3338,7 +3940,7 @@ type LoadBalancerStatus struct { func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (m *LoadBalancerStatus) String() string { return proto.CompactTextString(m) } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LoadBalancerStatus) GetIngress() []*LoadBalancerIngress { if m != nil { @@ -3351,7 +3953,7 @@ func (m *LoadBalancerStatus) GetIngress() []*LoadBalancerIngress { // referenced object inside the same namespace. type LocalObjectReference struct { // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // TODO: Add other useful fields. apiVersion, kind, uid? 
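Because every optional field in this generated API is a pointer, callers populate ListOptions by taking the address of local values. A small sketch under the same same-package assumption, showing the newly added IncludeUninitialized flag next to the existing selector and watch fields (the selector value is hypothetical):

func exampleListOptions() *ListOptions {
	labelSelector := "app=nginx" // hypothetical label selector
	includeUninitialized := true // opt in to partially initialized resources
	watch := false
	return &ListOptions{
		LabelSelector:        &labelSelector,
		IncludeUninitialized: &includeUninitialized,
		Watch:                &watch,
	}
}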
// +optional Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -3361,7 +3963,7 @@ type LocalObjectReference struct { func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (m *LocalObjectReference) String() string { return proto.CompactTextString(m) } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LocalObjectReference) GetName() string { if m != nil && m.Name != nil { @@ -3370,19 +3972,40 @@ func (m *LocalObjectReference) GetName() string { return "" } +// Local represents directly-attached storage with node affinity +type LocalVolumeSource struct { + // The full path to the volume on the node + // For alpha, this path must be a directory + // Once block as a source is supported, then this path can point to a block device + Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } +func (m *LocalVolumeSource) String() string { return proto.CompactTextString(m) } +func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } + +func (m *LocalVolumeSource) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { // Server is the hostname or IP address of the NFS server. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Server *string `protobuf:"bytes,1,opt,name=server" json:"server,omitempty"` // Path that is exported by the NFS server. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Path *string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` // ReadOnly here will force // the NFS export to be mounted with read-only permissions. // Defaults to false. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional ReadOnly *bool `protobuf:"varint,3,opt,name=readOnly" json:"readOnly,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3391,7 +4014,7 @@ type NFSVolumeSource struct { func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (m *NFSVolumeSource) String() string { return proto.CompactTextString(m) } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *NFSVolumeSource) GetServer() string { if m != nil && m.Server != nil { @@ -3418,15 +4041,15 @@ func (m *NFSVolumeSource) GetReadOnly() bool { // Use of multiple namespaces is optional. type Namespace struct { // Standard object's metadata. 
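A sketch of filling in the two volume source types above; the server name and paths are hypothetical values, and per the LocalVolumeSource comment the local path must currently point at a directory:

func exampleVolumeSources() (*NFSVolumeSource, *LocalVolumeSource) {
	server := "nfs.example.internal" // hypothetical NFS server
	nfsPath := "/exports/shared"
	readOnly := true
	localPath := "/mnt/disks/ssd0" // hypothetical directory on the node
	nfs := &NFSVolumeSource{Server: &server, Path: &nfsPath, ReadOnly: &readOnly}
	local := &LocalVolumeSource{Path: &localPath}
	return nfs, local
}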
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *NamespaceSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *NamespaceStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3435,9 +4058,9 @@ type Namespace struct { func (m *Namespace) Reset() { *m = Namespace{} } func (m *Namespace) String() string { return proto.CompactTextString(m) } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } -func (m *Namespace) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Namespace) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -3461,11 +4084,11 @@ func (m *Namespace) GetStatus() *NamespaceStatus { // NamespaceList is a list of Namespaces. type NamespaceList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is the list of Namespace objects in the list. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ Items []*Namespace `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -3473,9 +4096,9 @@ type NamespaceList struct { func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (m *NamespaceList) String() string { return proto.CompactTextString(m) } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } -func (m *NamespaceList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *NamespaceList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -3492,7 +4115,7 @@ func (m *NamespaceList) GetItems() []*Namespace { // NamespaceSpec describes the attributes on a Namespace. 
type NamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Finalizers []string `protobuf:"bytes,1,rep,name=finalizers" json:"finalizers,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3501,7 +4124,7 @@ type NamespaceSpec struct { func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (m *NamespaceSpec) String() string { return proto.CompactTextString(m) } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceSpec) GetFinalizers() []string { if m != nil { @@ -3513,7 +4136,7 @@ func (m *NamespaceSpec) GetFinalizers() []string { // NamespaceStatus is information about the current status of a Namespace. type NamespaceStatus struct { // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase *string `protobuf:"bytes,1,opt,name=phase" json:"phase,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3522,7 +4145,7 @@ type NamespaceStatus struct { func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (m *NamespaceStatus) String() string { return proto.CompactTextString(m) } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *NamespaceStatus) GetPhase() string { if m != nil && m.Phase != nil { @@ -3535,17 +4158,17 @@ func (m *NamespaceStatus) GetPhase() string { // Each node will have a unique identifier in the cache (i.e. in etcd). type Node struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the behavior of a node. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *NodeSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Most recently observed status of the node. // Populated by the system. // Read-only. 
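The generated getters check for a nil receiver before dereferencing (see the bodies above), so accessors can be chained without explicit nil guards. A tiny sketch, again assuming the same package:

func namespacePhase(ns *Namespace) string {
	// GetStatus returns nil when ns is nil, and GetPhase returns "" when the
	// status (or its Phase field) is nil, so this chain never panics.
	return ns.GetStatus().GetPhase()
}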
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *NodeStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3554,9 +4177,9 @@ type Node struct { func (m *Node) Reset() { *m = Node{} } func (m *Node) String() string { return proto.CompactTextString(m) } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } -func (m *Node) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Node) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -3589,7 +4212,7 @@ type NodeAddress struct { func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (m *NodeAddress) String() string { return proto.CompactTextString(m) } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NodeAddress) GetType() string { if m != nil && m.Type != nil { @@ -3631,7 +4254,7 @@ type NodeAffinity struct { func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (m *NodeAffinity) String() string { return proto.CompactTextString(m) } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeAffinity) GetRequiredDuringSchedulingIgnoredDuringExecution() *NodeSelector { if m != nil { @@ -3655,10 +4278,10 @@ type NodeCondition struct { Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // Last time we got an update on a given condition. // +optional - LastHeartbeatTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastHeartbeatTime" json:"lastHeartbeatTime,omitempty"` + LastHeartbeatTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastHeartbeatTime" json:"lastHeartbeatTime,omitempty"` // Last time the condition transit from one status to another. // +optional - LastTransitionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` // (brief) reason for the condition's last transition. 
// +optional Reason *string `protobuf:"bytes,5,opt,name=reason" json:"reason,omitempty"` @@ -3671,7 +4294,7 @@ type NodeCondition struct { func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (m *NodeCondition) String() string { return proto.CompactTextString(m) } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeCondition) GetType() string { if m != nil && m.Type != nil { @@ -3687,14 +4310,14 @@ func (m *NodeCondition) GetStatus() string { return "" } -func (m *NodeCondition) GetLastHeartbeatTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *NodeCondition) GetLastHeartbeatTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastHeartbeatTime } return nil } -func (m *NodeCondition) GetLastTransitionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *NodeCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTransitionTime } @@ -3715,6 +4338,24 @@ func (m *NodeCondition) GetMessage() string { return "" } +// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +type NodeConfigSource struct { + ConfigMapRef *ObjectReference `protobuf:"bytes,1,opt,name=configMapRef" json:"configMapRef,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (m *NodeConfigSource) String() string { return proto.CompactTextString(m) } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } + +func (m *NodeConfigSource) GetConfigMapRef() *ObjectReference { + if m != nil { + return m.ConfigMapRef + } + return nil +} + // NodeDaemonEndpoints lists ports opened by daemons running on the Node. type NodeDaemonEndpoints struct { // Endpoint on which Kubelet is listening. @@ -3726,7 +4367,7 @@ type NodeDaemonEndpoints struct { func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (m *NodeDaemonEndpoints) String() string { return proto.CompactTextString(m) } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeDaemonEndpoints) GetKubeletEndpoint() *DaemonEndpoint { if m != nil { @@ -3738,9 +4379,9 @@ func (m *NodeDaemonEndpoints) GetKubeletEndpoint() *DaemonEndpoint { // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { // Standard list metadata. 
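Node conditions are exposed as a flat slice, so looking one up is a linear scan over GetType(). A minimal helper sketch; the "Ready" value mentioned in the comment is only an example condition type, not something defined in this file:

func findNodeCondition(conds []*NodeCondition, condType string) *NodeCondition {
	// Typical call: findNodeCondition(node.Status.Conditions, "Ready"),
	// where "Ready" is used purely as an example condition type.
	for _, c := range conds {
		if c.GetType() == condType {
			return c
		}
	}
	return nil
}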
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of nodes Items []*Node `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -3749,9 +4390,9 @@ type NodeList struct { func (m *NodeList) Reset() { *m = NodeList{} } func (m *NodeList) String() string { return proto.CompactTextString(m) } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } -func (m *NodeList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *NodeList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -3776,7 +4417,7 @@ type NodeProxyOptions struct { func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (m *NodeProxyOptions) String() string { return proto.CompactTextString(m) } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeProxyOptions) GetPath() string { if m != nil && m.Path != nil { @@ -3789,16 +4430,16 @@ func (m *NodeProxyOptions) GetPath() string { // see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. 
type NodeResources struct { // Capacity represents the available resources of a node - Capacity map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Capacity map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` } func (m *NodeResources) Reset() { *m = NodeResources{} } func (m *NodeResources) String() string { return proto.CompactTextString(m) } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } -func (m *NodeResources) GetCapacity() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *NodeResources) GetCapacity() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Capacity } @@ -3817,7 +4458,7 @@ type NodeSelector struct { func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (m *NodeSelector) String() string { return proto.CompactTextString(m) } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeSelector) GetNodeSelectorTerms() []*NodeSelectorTerm { if m != nil { @@ -3848,7 +4489,7 @@ func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement func (m *NodeSelectorRequirement) String() string { return proto.CompactTextString(m) } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{81} + return fileDescriptorGenerated, []int{90} } func (m *NodeSelectorRequirement) GetKey() string { @@ -3882,7 +4523,7 @@ type NodeSelectorTerm struct { func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (m *NodeSelectorTerm) String() string { return proto.CompactTextString(m) } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } func (m *NodeSelectorTerm) GetMatchExpressions() []*NodeSelectorRequirement { if m != nil { @@ -3904,19 +4545,23 @@ type NodeSpec struct { // +optional ProviderID *string `protobuf:"bytes,3,opt,name=providerID" json:"providerID,omitempty"` // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration // +optional Unschedulable *bool `protobuf:"varint,4,opt,name=unschedulable" json:"unschedulable,omitempty"` // If specified, the node's taints. 
// +optional - Taints []*Taint `protobuf:"bytes,5,rep,name=taints" json:"taints,omitempty"` - XXX_unrecognized []byte `json:"-"` + Taints []*Taint `protobuf:"bytes,5,rep,name=taints" json:"taints,omitempty"` + // If specified, the source to get node configuration from + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // +optional + ConfigSource *NodeConfigSource `protobuf:"bytes,6,opt,name=configSource" json:"configSource,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (m *NodeSpec) String() string { return proto.CompactTextString(m) } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeSpec) GetPodCIDR() string { if m != nil && m.PodCIDR != nil { @@ -3953,35 +4598,46 @@ func (m *NodeSpec) GetTaints() []*Taint { return nil } +func (m *NodeSpec) GetConfigSource() *NodeConfigSource { + if m != nil { + return m.ConfigSource + } + return nil +} + // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional - Capacity map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Capacity map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Allocatable represents the resources of a node that are available for scheduling. // Defaults to Capacity. // +optional - Allocatable map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=allocatable" json:"allocatable,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Allocatable map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=allocatable" json:"allocatable,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase // The field is never populated, and now is deprecated. // +optional Phase *string `protobuf:"bytes,3,opt,name=phase" json:"phase,omitempty"` // Conditions is an array of current observed node conditions. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []*NodeCondition `protobuf:"bytes,4,rep,name=conditions" json:"conditions,omitempty"` // List of addresses reachable to the node. // Queried from cloud provider, if available. 
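A sketch of wiring the new ConfigSource field on NodeSpec, assuming the same package; ConfigMapRef is an ObjectReference (defined further down in this file), the namespace and ConfigMap name are hypothetical, and per the field comment the DynamicKubeletConfig feature gate must be enabled for the Kubelet to honour it:

func exampleNodeSpecWithConfigSource() *NodeSpec {
	kind := "ConfigMap"
	namespace := "kube-system"    // hypothetical namespace
	name := "node-kubelet-config" // hypothetical ConfigMap name
	unschedulable := false
	return &NodeSpec{
		Unschedulable: &unschedulable,
		ConfigSource: &NodeConfigSource{
			ConfigMapRef: &ObjectReference{Kind: &kind, Namespace: &namespace, Name: &name},
		},
	}
}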
- // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses // +optional + // +patchMergeKey=type + // +patchStrategy=merge Addresses []*NodeAddress `protobuf:"bytes,5,rep,name=addresses" json:"addresses,omitempty"` // Endpoints of daemons running on the Node. // +optional DaemonEndpoints *NodeDaemonEndpoints `protobuf:"bytes,6,opt,name=daemonEndpoints" json:"daemonEndpoints,omitempty"` // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // More info: https://kubernetes.io/docs/concepts/nodes/node/#info // +optional NodeInfo *NodeSystemInfo `protobuf:"bytes,7,opt,name=nodeInfo" json:"nodeInfo,omitempty"` // List of container images on this node @@ -3999,16 +4655,16 @@ type NodeStatus struct { func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (m *NodeStatus) String() string { return proto.CompactTextString(m) } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } -func (m *NodeStatus) GetCapacity() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *NodeStatus) GetCapacity() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Capacity } return nil } -func (m *NodeStatus) GetAllocatable() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *NodeStatus) GetAllocatable() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Allocatable } @@ -4074,11 +4730,11 @@ func (m *NodeStatus) GetVolumesAttached() []*AttachedVolume { // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. type NodeSystemInfo struct { // MachineID reported by the node. For unique machine identification - // in the cluster this field is prefered. Learn more from man(5) + // in the cluster this field is preferred. Learn more from man(5) // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html MachineID *string `protobuf:"bytes,1,opt,name=machineID" json:"machineID,omitempty"` // SystemUUID reported by the node. For unique machine identification - // MachineID is prefered. This field is specific to Red Hat hosts + // MachineID is preferred. This field is specific to Red Hat hosts // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html SystemUUID *string `protobuf:"bytes,2,opt,name=systemUUID" json:"systemUUID,omitempty"` // Boot ID reported by the node. 
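A sketch that reads the capacity/allocatable maps and the condition list through the NodeStatus accessors above (same-package assumption, "fmt" imported; Quantity values are again printed opaquely with %v):

func printNodeStatus(st *NodeStatus) {
	if st == nil {
		return
	}
	for name, q := range st.GetCapacity() {
		fmt.Printf("capacity    %s = %v\n", name, q)
	}
	for name, q := range st.GetAllocatable() {
		fmt.Printf("allocatable %s = %v\n", name, q)
	}
	for _, c := range st.Conditions {
		fmt.Printf("condition   %s = %s\n", c.GetType(), c.GetStatus())
	}
}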
@@ -4103,7 +4759,7 @@ type NodeSystemInfo struct { func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (m *NodeSystemInfo) String() string { return proto.CompactTextString(m) } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *NodeSystemInfo) GetMachineID() string { if m != nil && m.MachineID != nil { @@ -4188,7 +4844,7 @@ type ObjectFieldSelector struct { func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (m *ObjectFieldSelector) String() string { return proto.CompactTextString(m) } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *ObjectFieldSelector) GetApiVersion() string { if m != nil && m.ApiVersion != nil { @@ -4206,7 +4862,7 @@ func (m *ObjectFieldSelector) GetFieldPath() string { // ObjectMeta is metadata that all persisted resources must have, which includes all objects // users must create. -// DEPRECATED: Use k8s.io.kubernetes/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. // +k8s:openapi-gen=false type ObjectMeta struct { // Name must be unique within a namespace. Is required when creating resources, although @@ -4214,7 +4870,7 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // GenerateName is an optional prefix, used by the server, to generate a unique @@ -4231,7 +4887,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional GenerateName *string `protobuf:"bytes,2,opt,name=generateName" json:"generateName,omitempty"` // Namespace defines the space within each name must be unique. An empty namespace is @@ -4241,7 +4897,7 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ // +optional Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` // SelfLink is a URL representing this object. @@ -4255,7 +4911,7 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids // +optional Uid *string `protobuf:"bytes,5,opt,name=uid" json:"uid,omitempty"` // An opaque value that represents the internal version of this object that can @@ -4267,7 +4923,7 @@ type ObjectMeta struct { // Populated by the system. 
// Read-only. // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion *string `protobuf:"bytes,6,opt,name=resourceVersion" json:"resourceVersion,omitempty"` // A sequence number representing a specific generation of the desired state. @@ -4281,9 +4937,9 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - CreationTimestamp *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,8,opt,name=creationTimestamp" json:"creationTimestamp,omitempty"` + CreationTimestamp *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,8,opt,name=creationTimestamp" json:"creationTimestamp,omitempty"` // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible @@ -4300,9 +4956,9 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - DeletionTimestamp *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,9,opt,name=deletionTimestamp" json:"deletionTimestamp,omitempty"` + DeletionTimestamp *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,9,opt,name=deletionTimestamp" json:"deletionTimestamp,omitempty"` // Number of seconds allowed for this object to gracefully terminate before // it will be removed from the system. Only set when deletionTimestamp is also set. // May only be shortened. @@ -4312,13 +4968,13 @@ type ObjectMeta struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Labels map[string]string `protobuf:"bytes,11,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ // +optional Annotations map[string]string `protobuf:"bytes,12,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // List of objects depended by this object. If ALL objects in the list have @@ -4326,12 +4982,25 @@ type ObjectMeta struct { // then an entry in this list will point to this controller, with the controller field set to true. // There cannot be more than one managing controller. 
// +optional - OwnerReferences []*k8s_io_kubernetes_pkg_apis_meta_v1.OwnerReference `protobuf:"bytes,13,rep,name=ownerReferences" json:"ownerReferences,omitempty"` + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []*k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference `protobuf:"bytes,13,rep,name=ownerReferences" json:"ownerReferences,omitempty"` + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *k8s_io_apimachinery_pkg_apis_meta_v1.Initializers `protobuf:"bytes,16,opt,name=initializers" json:"initializers,omitempty"` // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. // +optional + // +patchStrategy=merge Finalizers []string `protobuf:"bytes,14,rep,name=finalizers" json:"finalizers,omitempty"` // The name of the cluster which the object belongs to. // This is used to distinguish resources with same name and namespace in different clusters. @@ -4344,7 +5013,7 @@ type ObjectMeta struct { func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (m *ObjectMeta) String() string { return proto.CompactTextString(m) } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *ObjectMeta) GetName() string { if m != nil && m.Name != nil { @@ -4395,14 +5064,14 @@ func (m *ObjectMeta) GetGeneration() int64 { return 0 } -func (m *ObjectMeta) GetCreationTimestamp() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *ObjectMeta) GetCreationTimestamp() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.CreationTimestamp } return nil } -func (m *ObjectMeta) GetDeletionTimestamp() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *ObjectMeta) GetDeletionTimestamp() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.DeletionTimestamp } @@ -4430,13 +5099,20 @@ func (m *ObjectMeta) GetAnnotations() map[string]string { return nil } -func (m *ObjectMeta) GetOwnerReferences() []*k8s_io_kubernetes_pkg_apis_meta_v1.OwnerReference { +func (m *ObjectMeta) GetOwnerReferences() []*k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference { if m != nil { return m.OwnerReferences } return nil } +func (m *ObjectMeta) GetInitializers() *k8s_io_apimachinery_pkg_apis_meta_v1.Initializers { + if m != nil { + return m.Initializers + } + return nil +} + func (m *ObjectMeta) GetFinalizers() []string { if m != nil { return m.Finalizers @@ -4452,28 +5128,29 @@ func (m *ObjectMeta) GetClusterName() string { } // ObjectReference contains enough information to let you inspect or modify the referred object. 
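This in-package ObjectMeta is marked deprecated in favour of the apimachinery one, but the new Initializers field follows the rule spelled out in its comment: nil (or empty) means the object has been completely initialized. A conservative sketch that checks only the nil case, since the meta/v1 Initializers type itself is defined outside this file:

func isInitialized(meta *ObjectMeta) bool {
	// Only the nil case is checked here; inspecting a pending-but-empty list
	// would require the apimachinery Initializers type, which this file does
	// not define.
	return meta.GetInitializers() == nil
}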
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` // Namespace of the referent. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ // +optional Namespace *string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids // +optional Uid *string `protobuf:"bytes,4,opt,name=uid" json:"uid,omitempty"` // API version of the referent. // +optional ApiVersion *string `protobuf:"bytes,5,opt,name=apiVersion" json:"apiVersion,omitempty"` // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion *string `protobuf:"bytes,6,opt,name=resourceVersion" json:"resourceVersion,omitempty"` // If referring to a piece of an object instead of an entire object, this string @@ -4492,7 +5169,7 @@ type ObjectReference struct { func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (m *ObjectReference) String() string { return proto.CompactTextString(m) } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *ObjectReference) GetKind() string { if m != nil && m.Kind != nil { @@ -4545,21 +5222,21 @@ func (m *ObjectReference) GetFieldPath() string { // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. -// More info: http://kubernetes.io/docs/user-guide/persistent-volumes +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes type PersistentVolume struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. 
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional Spec *PersistentVolumeSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional Status *PersistentVolumeStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -4568,9 +5245,9 @@ type PersistentVolume struct { func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (m *PersistentVolume) String() string { return proto.CompactTextString(m) } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } -func (m *PersistentVolume) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PersistentVolume) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -4594,16 +5271,16 @@ func (m *PersistentVolume) GetStatus() *PersistentVolumeStatus { // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional Spec *PersistentVolumeClaimSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status represents the current information/status of a persistent volume claim. // Read-only. 
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional Status *PersistentVolumeClaimStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -4612,9 +5289,9 @@ type PersistentVolumeClaim struct { func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (m *PersistentVolumeClaim) String() string { return proto.CompactTextString(m) } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } -func (m *PersistentVolumeClaim) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PersistentVolumeClaim) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -4635,14 +5312,84 @@ func (m *PersistentVolumeClaim) GetStatus() *PersistentVolumeClaimStatus { return nil } +// PersistentVolumeClaimCondition contails details about state of pvc +type PersistentVolumeClaimCondition struct { + Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` + // Last time we probed the condition. + // +optional + LastProbeTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastProbeTime" json:"lastProbeTime,omitempty"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + // Unique, this should be a short, machine understandable string that gives the reason + // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // persistent volume is being resized. + // +optional + Reason *string `protobuf:"bytes,5,opt,name=reason" json:"reason,omitempty"` + // Human-readable message indicating details about last transition. 
+ // +optional + Message *string `protobuf:"bytes,6,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func (m *PersistentVolumeClaimCondition) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{100} +} + +func (m *PersistentVolumeClaimCondition) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PersistentVolumeClaimCondition) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +func (m *PersistentVolumeClaimCondition) GetLastProbeTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { + if m != nil { + return m.LastProbeTime + } + return nil +} + +func (m *PersistentVolumeClaimCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { + if m != nil { + return m.LastTransitionTime + } + return nil +} + +func (m *PersistentVolumeClaimCondition) GetReason() string { + if m != nil && m.Reason != nil { + return *m.Reason + } + return "" +} + +func (m *PersistentVolumeClaimCondition) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // A list of persistent volume claims. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims Items []*PersistentVolumeClaim `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -4651,10 +5398,10 @@ func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaim func (m *PersistentVolumeClaimList) String() string { return proto.CompactTextString(m) } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{91} + return fileDescriptorGenerated, []int{101} } -func (m *PersistentVolumeClaimList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *PersistentVolumeClaimList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -4672,23 +5419,28 @@ func (m *PersistentVolumeClaimList) GetItems() []*PersistentVolumeClaim { // and allows a Source for provider-specific attributes type PersistentVolumeClaimSpec struct { // AccessModes contains the desired access modes the volume should have. 
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional AccessModes []string `protobuf:"bytes,1,rep,name=accessModes" json:"accessModes,omitempty"` // A label query over volumes to consider for binding. // +optional - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,4,opt,name=selector" json:"selector,omitempty"` + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,4,opt,name=selector" json:"selector,omitempty"` // Resources represents the minimum resources the volume should have. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional Resources *ResourceRequirements `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` // VolumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName *string `protobuf:"bytes,3,opt,name=volumeName" json:"volumeName,omitempty"` // Name of the StorageClass required by the claim. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional StorageClassName *string `protobuf:"bytes,5,opt,name=storageClassName" json:"storageClassName,omitempty"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *string `protobuf:"bytes,6,opt,name=volumeMode" json:"volumeMode,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -4696,7 +5448,7 @@ func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaim func (m *PersistentVolumeClaimSpec) String() string { return proto.CompactTextString(m) } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{92} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimSpec) GetAccessModes() []string { @@ -4706,7 +5458,7 @@ func (m *PersistentVolumeClaimSpec) GetAccessModes() []string { return nil } -func (m *PersistentVolumeClaimSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *PersistentVolumeClaimSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } @@ -4734,26 +5486,39 @@ func (m *PersistentVolumeClaimSpec) GetStorageClassName() string { return "" } +func (m *PersistentVolumeClaimSpec) GetVolumeMode() string { + if m != nil && m.VolumeMode != nil { + return *m.VolumeMode + } + return "" +} + // PersistentVolumeClaimStatus is the current status of a persistent volume claim. type PersistentVolumeClaimStatus struct { // Phase represents the current phase of PersistentVolumeClaim. // +optional Phase *string `protobuf:"bytes,1,opt,name=phase" json:"phase,omitempty"` // AccessModes contains the actual access modes the volume backing the PVC has. 
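A sketch of a claim spec built from the fields above; the StorageClass name is hypothetical, the access mode string is only an example value, and "Filesystem" is the mode the volumeMode comment describes as implied when the field is omitted:

func examplePVCSpec() *PersistentVolumeClaimSpec {
	storageClass := "standard" // hypothetical StorageClass name
	volumeMode := "Filesystem" // the implied default per the field comment
	return &PersistentVolumeClaimSpec{
		AccessModes:      []string{"ReadWriteOnce"}, // example access mode
		StorageClassName: &storageClass,
		VolumeMode:       &volumeMode,
	}
}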
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional AccessModes []string `protobuf:"bytes,2,rep,name=accessModes" json:"accessModes,omitempty"` // Represents the actual resources of the underlying volume. // +optional - Capacity map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Capacity map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,3,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Current Condition of persistent volume claim. If underlying persistent volume is being + // resized then the Condition will be set to 'ResizeStarted'. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []*PersistentVolumeClaimCondition `protobuf:"bytes,4,rep,name=conditions" json:"conditions,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (m *PersistentVolumeClaimStatus) String() string { return proto.CompactTextString(m) } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{93} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimStatus) GetPhase() string { @@ -4770,20 +5535,27 @@ func (m *PersistentVolumeClaimStatus) GetAccessModes() []string { return nil } -func (m *PersistentVolumeClaimStatus) GetCapacity() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *PersistentVolumeClaimStatus) GetCapacity() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Capacity } return nil } +func (m *PersistentVolumeClaimStatus) GetConditions() []*PersistentVolumeClaimCondition { + if m != nil { + return m.Conditions + } + return nil +} + // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. // This volume finds the bound PV and mounts that volume for the pod. A // PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another // type of volume that is owned by someone else (the system). type PersistentVolumeClaimVolumeSource struct { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims ClaimName *string `protobuf:"bytes,1,opt,name=claimName" json:"claimName,omitempty"` // Will force the ReadOnly setting in VolumeMounts. // Default false. 
@@ -4796,7 +5568,7 @@ func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVol func (m *PersistentVolumeClaimVolumeSource) String() string { return proto.CompactTextString(m) } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{94} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeClaimVolumeSource) GetClaimName() string { @@ -4816,11 +5588,11 @@ func (m *PersistentVolumeClaimVolumeSource) GetReadOnly() bool { // PersistentVolumeList is a list of PersistentVolume items. type PersistentVolumeList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of persistent volumes. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes Items []*PersistentVolume `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -4828,9 +5600,9 @@ type PersistentVolumeList struct { func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (m *PersistentVolumeList) String() string { return proto.CompactTextString(m) } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } -func (m *PersistentVolumeList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *PersistentVolumeList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -4849,45 +5621,45 @@ func (m *PersistentVolumeList) GetItems() []*PersistentVolume { type PersistentVolumeSource struct { // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GcePersistentDisk *GCEPersistentDiskVolumeSource `protobuf:"bytes,1,opt,name=gcePersistentDisk" json:"gcePersistentDisk,omitempty"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AwsElasticBlockStore *AWSElasticBlockStoreVolumeSource `protobuf:"bytes,2,opt,name=awsElasticBlockStore" json:"awsElasticBlockStore,omitempty"` // HostPath represents a directory on the host. // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
- // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // +optional HostPath *HostPathVolumeSource `protobuf:"bytes,3,opt,name=hostPath" json:"hostPath,omitempty"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `protobuf:"bytes,4,opt,name=glusterfs" json:"glusterfs,omitempty"` // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional Nfs *NFSVolumeSource `protobuf:"bytes,5,opt,name=nfs" json:"nfs,omitempty"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional - Rbd *RBDVolumeSource `protobuf:"bytes,6,opt,name=rbd" json:"rbd,omitempty"` + Rbd *RBDPersistentVolumeSource `protobuf:"bytes,6,opt,name=rbd" json:"rbd,omitempty"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - Iscsi *ISCSIVolumeSource `protobuf:"bytes,7,opt,name=iscsi" json:"iscsi,omitempty"` + Iscsi *ISCSIPersistentVolumeSource `protobuf:"bytes,7,opt,name=iscsi" json:"iscsi,omitempty"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `protobuf:"bytes,8,opt,name=cinder" json:"cinder,omitempty"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional - Cephfs *CephFSVolumeSource `protobuf:"bytes,9,opt,name=cephfs" json:"cephfs,omitempty"` + Cephfs *CephFSPersistentVolumeSource `protobuf:"bytes,9,opt,name=cephfs" json:"cephfs,omitempty"` // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional Fc *FCVolumeSource `protobuf:"bytes,10,opt,name=fc" json:"fc,omitempty"` @@ -4895,13 +5667,12 @@ type PersistentVolumeSource struct { // +optional Flocker *FlockerVolumeSource `protobuf:"bytes,11,opt,name=flocker" json:"flocker,omitempty"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `protobuf:"bytes,12,opt,name=flexVolume" json:"flexVolume,omitempty"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
// +optional - AzureFile *AzureFileVolumeSource `protobuf:"bytes,13,opt,name=azureFile" json:"azureFile,omitempty"` + AzureFile *AzureFilePersistentVolumeSource `protobuf:"bytes,13,opt,name=azureFile" json:"azureFile,omitempty"` // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `protobuf:"bytes,14,opt,name=vsphereVolume" json:"vsphereVolume,omitempty"` @@ -4918,14 +5689,26 @@ type PersistentVolumeSource struct { PortworxVolume *PortworxVolumeSource `protobuf:"bytes,18,opt,name=portworxVolume" json:"portworxVolume,omitempty"` // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional - ScaleIO *ScaleIOVolumeSource `protobuf:"bytes,19,opt,name=scaleIO" json:"scaleIO,omitempty"` - XXX_unrecognized []byte `json:"-"` + ScaleIO *ScaleIOPersistentVolumeSource `protobuf:"bytes,19,opt,name=scaleIO" json:"scaleIO,omitempty"` + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource `protobuf:"bytes,20,opt,name=local" json:"local,omitempty"` + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + Storageos *StorageOSPersistentVolumeSource `protobuf:"bytes,21,opt,name=storageos" json:"storageos,omitempty"` + // CSI represents storage that handled by an external CSI driver + // +optional + Csi *CSIPersistentVolumeSource `protobuf:"bytes,22,opt,name=csi" json:"csi,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } -func (m *PersistentVolumeSource) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeSource) ProtoMessage() {} -func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (m *PersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{106} +} func (m *PersistentVolumeSource) GetGcePersistentDisk() *GCEPersistentDiskVolumeSource { if m != nil { @@ -4962,14 +5745,14 @@ func (m *PersistentVolumeSource) GetNfs() *NFSVolumeSource { return nil } -func (m *PersistentVolumeSource) GetRbd() *RBDVolumeSource { +func (m *PersistentVolumeSource) GetRbd() *RBDPersistentVolumeSource { if m != nil { return m.Rbd } return nil } -func (m *PersistentVolumeSource) GetIscsi() *ISCSIVolumeSource { +func (m *PersistentVolumeSource) GetIscsi() *ISCSIPersistentVolumeSource { if m != nil { return m.Iscsi } @@ -4983,7 +5766,7 @@ func (m *PersistentVolumeSource) GetCinder() *CinderVolumeSource { return nil } -func (m *PersistentVolumeSource) GetCephfs() *CephFSVolumeSource { +func (m *PersistentVolumeSource) GetCephfs() *CephFSPersistentVolumeSource { if m != nil { return m.Cephfs } @@ -5011,7 +5794,7 @@ func (m *PersistentVolumeSource) GetFlexVolume() *FlexVolumeSource { return nil } -func (m *PersistentVolumeSource) GetAzureFile() *AzureFileVolumeSource { +func (m *PersistentVolumeSource) GetAzureFile() *AzureFilePersistentVolumeSource { if m != nil { return m.AzureFile } @@ -5053,50 +5836,81 @@ func (m *PersistentVolumeSource) GetPortworxVolume() *PortworxVolumeSource { 
return nil } -func (m *PersistentVolumeSource) GetScaleIO() *ScaleIOVolumeSource { +func (m *PersistentVolumeSource) GetScaleIO() *ScaleIOPersistentVolumeSource { if m != nil { return m.ScaleIO } return nil } +func (m *PersistentVolumeSource) GetLocal() *LocalVolumeSource { + if m != nil { + return m.Local + } + return nil +} + +func (m *PersistentVolumeSource) GetStorageos() *StorageOSPersistentVolumeSource { + if m != nil { + return m.Storageos + } + return nil +} + +func (m *PersistentVolumeSource) GetCsi() *CSIPersistentVolumeSource { + if m != nil { + return m.Csi + } + return nil +} + // PersistentVolumeSpec is the specification of a persistent volume. type PersistentVolumeSpec struct { // A description of the persistent volume's resources and capacity. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional - Capacity map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Capacity map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=capacity" json:"capacity,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // The actual volume backing the persistent volume. PersistentVolumeSource *PersistentVolumeSource `protobuf:"bytes,2,opt,name=persistentVolumeSource" json:"persistentVolumeSource,omitempty"` // AccessModes contains all ways the volume can be mounted. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes // +optional AccessModes []string `protobuf:"bytes,3,rep,name=accessModes" json:"accessModes,omitempty"` // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding // +optional ClaimRef *ObjectReference `protobuf:"bytes,4,opt,name=claimRef" json:"claimRef,omitempty"` // What happens to a persistent volume when released from its claim. // Valid options are Retain (default) and Recycle. // Recycling must be supported by the volume plugin underlying this persistent volume. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming // +optional PersistentVolumeReclaimPolicy *string `protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy" json:"persistentVolumeReclaimPolicy,omitempty"` // Name of StorageClass to which this persistent volume belongs. Empty value // means that this volume does not belong to any StorageClass. // +optional StorageClassName *string `protobuf:"bytes,6,opt,name=storageClassName" json:"storageClassName,omitempty"` + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options + // +optional + MountOptions []string `protobuf:"bytes,7,rep,name=mountOptions" json:"mountOptions,omitempty"` + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *string `protobuf:"bytes,8,opt,name=volumeMode" json:"volumeMode,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (m *PersistentVolumeSpec) String() string { return proto.CompactTextString(m) } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } -func (m *PersistentVolumeSpec) GetCapacity() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *PersistentVolumeSpec) GetCapacity() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Capacity } @@ -5138,10 +5952,24 @@ func (m *PersistentVolumeSpec) GetStorageClassName() string { return "" } +func (m *PersistentVolumeSpec) GetMountOptions() []string { + if m != nil { + return m.MountOptions + } + return nil +} + +func (m *PersistentVolumeSpec) GetVolumeMode() string { + if m != nil && m.VolumeMode != nil { + return *m.VolumeMode + } + return "" +} + // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase // +optional Phase *string `protobuf:"bytes,1,opt,name=phase" json:"phase,omitempty"` // A human-readable message indicating details about why the volume is in this state. 
@@ -5154,10 +5982,12 @@ type PersistentVolumeStatus struct { XXX_unrecognized []byte `json:"-"` } -func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } -func (m *PersistentVolumeStatus) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeStatus) ProtoMessage() {} -func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (m *PersistentVolumeStatus) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{108} +} func (m *PersistentVolumeStatus) GetPhase() string { if m != nil && m.Phase != nil { @@ -5195,7 +6025,7 @@ func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersiste func (m *PhotonPersistentDiskVolumeSource) String() string { return proto.CompactTextString(m) } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{99} + return fileDescriptorGenerated, []int{109} } func (m *PhotonPersistentDiskVolumeSource) GetPdID() string { @@ -5216,18 +6046,18 @@ func (m *PhotonPersistentDiskVolumeSource) GetFsType() string { // by clients and scheduled onto hosts. type Pod struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *PodSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *PodStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -5236,9 +6066,9 @@ type Pod struct { func (m *Pod) Reset() { *m = Pod{} } func (m *Pod) String() string { return proto.CompactTextString(m) } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } -func (m *Pod) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Pod) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -5261,16 +6091,6 @@ func (m *Pod) GetStatus() *PodStatus { // Pod affinity is a group of inter pod affinity scheduling rules. type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. 
- // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` // If the affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. // If the affinity requirements specified by this field cease to be met @@ -5297,7 +6117,7 @@ type PodAffinity struct { func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (m *PodAffinity) String() string { return proto.CompactTextString(m) } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodAffinity) GetRequiredDuringSchedulingIgnoredDuringExecution() []*PodAffinityTerm { if m != nil { @@ -5317,12 +6137,12 @@ func (m *PodAffinity) GetPreferredDuringSchedulingIgnoredDuringExecution() []*We // relative to the given namespace(s)) that this pod should be // co-located (affinity) or not co-located (anti-affinity) with, // where co-located is defined as running on a node whose value of -// the label with key tches that of any node on which +// the label with key matches that of any node on which // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. // +optional - LabelSelector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=labelSelector" json:"labelSelector,omitempty"` + LabelSelector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=labelSelector" json:"labelSelector,omitempty"` // namespaces specifies which namespaces the labelSelector applies to (matches against); // null or empty list means "this pod's namespace" Namespaces []string `protobuf:"bytes,2,rep,name=namespaces" json:"namespaces,omitempty"` @@ -5330,10 +6150,7 @@ type PodAffinityTerm struct { // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional + // Empty topologyKey is not allowed. 
TopologyKey *string `protobuf:"bytes,3,opt,name=topologyKey" json:"topologyKey,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -5341,9 +6158,9 @@ type PodAffinityTerm struct { func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (m *PodAffinityTerm) String() string { return proto.CompactTextString(m) } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } -func (m *PodAffinityTerm) GetLabelSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *PodAffinityTerm) GetLabelSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.LabelSelector } @@ -5366,16 +6183,6 @@ func (m *PodAffinityTerm) GetTopologyKey() string { // Pod anti affinity is a group of inter pod anti affinity scheduling rules. type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` // If the anti-affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. // If the anti-affinity requirements specified by this field cease to be met @@ -5402,7 +6209,7 @@ type PodAntiAffinity struct { func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (m *PodAntiAffinity) String() string { return proto.CompactTextString(m) } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{103} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAntiAffinity) GetRequiredDuringSchedulingIgnoredDuringExecution() []*PodAffinityTerm { if m != nil { @@ -5451,7 +6258,7 @@ type PodAttachOptions struct { func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (m *PodAttachOptions) String() string { return proto.CompactTextString(m) } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodAttachOptions) GetStdin() bool { if m != nil && m.Stdin != nil { @@ -5492,18 +6299,18 @@ func (m *PodAttachOptions) GetContainer() string { type PodCondition struct { // Type is the type of the condition. // Currently only Ready. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` // Status is the status of the condition. // Can be True, False, Unknown. 
- // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // Last time we probed the condition. // +optional - LastProbeTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastProbeTime" json:"lastProbeTime,omitempty"` + LastProbeTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastProbeTime" json:"lastProbeTime,omitempty"` // Last time the condition transitioned from one status to another. // +optional - LastTransitionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` // Unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason *string `protobuf:"bytes,5,opt,name=reason" json:"reason,omitempty"` @@ -5516,7 +6323,7 @@ type PodCondition struct { func (m *PodCondition) Reset() { *m = PodCondition{} } func (m *PodCondition) String() string { return proto.CompactTextString(m) } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodCondition) GetType() string { if m != nil && m.Type != nil { @@ -5532,14 +6339,14 @@ func (m *PodCondition) GetStatus() string { return "" } -func (m *PodCondition) GetLastProbeTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *PodCondition) GetLastProbeTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastProbeTime } return nil } -func (m *PodCondition) GetLastTransitionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *PodCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTransitionTime } @@ -5560,6 +6367,82 @@ func (m *PodCondition) GetMessage() string { return "" } +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string `protobuf:"bytes,1,rep,name=nameservers" json:"nameservers,omitempty"` + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string `protobuf:"bytes,2,rep,name=searches" json:"searches,omitempty"` + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. 
+ // +optional + Options []*PodDNSConfigOption `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (m *PodDNSConfig) String() string { return proto.CompactTextString(m) } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodDNSConfig) GetNameservers() []string { + if m != nil { + return m.Nameservers + } + return nil +} + +func (m *PodDNSConfig) GetSearches() []string { + if m != nil { + return m.Searches + } + return nil +} + +func (m *PodDNSConfig) GetOptions() []*PodDNSConfigOption { + if m != nil { + return m.Options + } + return nil +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // +optional + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (m *PodDNSConfigOption) String() string { return proto.CompactTextString(m) } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } + +func (m *PodDNSConfigOption) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *PodDNSConfigOption) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + // PodExecOptions is the query options to a Pod's remote exec call. // --- // TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging @@ -5593,7 +6476,7 @@ type PodExecOptions struct { func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (m *PodExecOptions) String() string { return proto.CompactTextString(m) } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodExecOptions) GetStdin() bool { if m != nil && m.Stdin != nil { @@ -5640,11 +6523,11 @@ func (m *PodExecOptions) GetCommand() []string { // PodList is a list of Pods. type PodList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of pods. 
- // More info: http://kubernetes.io/docs/user-guide/pods + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md Items []*Pod `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -5652,9 +6535,9 @@ type PodList struct { func (m *PodList) Reset() { *m = PodList{} } func (m *PodList) String() string { return proto.CompactTextString(m) } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } -func (m *PodList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *PodList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -5690,7 +6573,7 @@ type PodLogOptions struct { // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. // +optional - SinceTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,5,opt,name=sinceTime" json:"sinceTime,omitempty"` + SinceTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,5,opt,name=sinceTime" json:"sinceTime,omitempty"` // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. // +optional @@ -5710,7 +6593,7 @@ type PodLogOptions struct { func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (m *PodLogOptions) String() string { return proto.CompactTextString(m) } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodLogOptions) GetContainer() string { if m != nil && m.Container != nil { @@ -5740,7 +6623,7 @@ func (m *PodLogOptions) GetSinceSeconds() int64 { return 0 } -func (m *PodLogOptions) GetSinceTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *PodLogOptions) GetSinceTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.SinceTime } @@ -5785,7 +6668,7 @@ type PodPortForwardOptions struct { func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (m *PodPortForwardOptions) String() string { return proto.CompactTextString(m) } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodPortForwardOptions) GetPorts() []int32 { if m != nil { @@ -5805,7 +6688,7 @@ type PodProxyOptions struct { func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (m *PodProxyOptions) String() string { return proto.CompactTextString(m) } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodProxyOptions) GetPath() string { if m != nil && m.Path != nil { @@ -5862,7 +6745,7 @@ type PodSecurityContext struct { func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (m *PodSecurityContext) String() string { return proto.CompactTextString(m) } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{111} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodSecurityContext) GetSeLinuxOptions() *SELinuxOptions { if m != nil { @@ -5904,16 +6787,16 @@ func (m *PodSecurityContext) GetFsGroup() int64 { type PodSignature struct { // Reference to controller whose pods should avoid this node. // +optional - PodController *k8s_io_kubernetes_pkg_apis_meta_v1.OwnerReference `protobuf:"bytes,1,opt,name=podController" json:"podController,omitempty"` - XXX_unrecognized []byte `json:"-"` + PodController *k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference `protobuf:"bytes,1,opt,name=podController" json:"podController,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PodSignature) Reset() { *m = PodSignature{} } func (m *PodSignature) String() string { return proto.CompactTextString(m) } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } -func (m *PodSignature) GetPodController() *k8s_io_kubernetes_pkg_apis_meta_v1.OwnerReference { +func (m *PodSignature) GetPodController() *k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference { if m != nil { return m.PodController } @@ -5923,8 +6806,10 @@ func (m *PodSignature) GetPodController() *k8s_io_kubernetes_pkg_apis_meta_v1.Ow // PodSpec is a description of a pod. type PodSpec struct { // List of volumes that can be mounted by containers belonging to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes + // More info: https://kubernetes.io/docs/concepts/storage/volumes // +optional + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys Volumes []*Volume `protobuf:"bytes,1,rep,name=volumes" json:"volumes,omitempty"` // List of initialization containers belonging to the pod. // Init containers are executed in order prior to containers being started. If any @@ -5938,18 +6823,21 @@ type PodSpec struct { // in a similar fashion. // Init containers cannot currently be added or removed. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge InitContainers []*Container `protobuf:"bytes,20,rep,name=initContainers" json:"initContainers,omitempty"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers + // +patchMergeKey=name + // +patchStrategy=merge Containers []*Container `protobuf:"bytes,2,rep,name=containers" json:"containers,omitempty"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. - // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional RestartPolicy *string `protobuf:"bytes,3,opt,name=restartPolicy" json:"restartPolicy,omitempty"` // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. @@ -5966,19 +6854,22 @@ type PodSpec struct { // Value must be a positive integer. 
// +optional ActiveDeadlineSeconds *int64 `protobuf:"varint,5,opt,name=activeDeadlineSeconds" json:"activeDeadlineSeconds,omitempty"` - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. // +optional DnsPolicy *string `protobuf:"bytes,6,opt,name=dnsPolicy" json:"dnsPolicy,omitempty"` // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional NodeSelector map[string]string `protobuf:"bytes,7,rep,name=nodeSelector" json:"nodeSelector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional ServiceAccountName *string `protobuf:"bytes,8,opt,name=serviceAccountName" json:"serviceAccountName,omitempty"` // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. @@ -6017,8 +6908,10 @@ type PodSpec struct { // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod // +optional + // +patchMergeKey=name + // +patchStrategy=merge ImagePullSecrets []*LocalObjectReference `protobuf:"bytes,15,rep,name=imagePullSecrets" json:"imagePullSecrets,omitempty"` // Specifies the hostname of the Pod // If not specified, the pod's hostname will be set to a system-defined value. @@ -6037,14 +6930,40 @@ type PodSpec struct { SchedulerName *string `protobuf:"bytes,19,opt,name=schedulerName" json:"schedulerName,omitempty"` // If specified, the pod's tolerations. // +optional - Tolerations []*Toleration `protobuf:"bytes,22,rep,name=tolerations" json:"tolerations,omitempty"` + Tolerations []*Toleration `protobuf:"bytes,22,rep,name=tolerations" json:"tolerations,omitempty"` + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. 
+ // +optional + // +patchMergeKey=ip + // +patchStrategy=merge + HostAliases []*HostAlias `protobuf:"bytes,23,rep,name=hostAliases" json:"hostAliases,omitempty"` + // If specified, indicates the pod's priority. "SYSTEM" is a special keyword + // which indicates the highest priority. Any other name must be defined by + // creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName *string `protobuf:"bytes,24,opt,name=priorityClassName" json:"priorityClassName,omitempty"` + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + Priority *int32 `protobuf:"varint,25,opt,name=priority" json:"priority,omitempty"` + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. + // +optional + DnsConfig *PodDNSConfig `protobuf:"bytes,26,opt,name=dnsConfig" json:"dnsConfig,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *PodSpec) Reset() { *m = PodSpec{} } func (m *PodSpec) String() string { return proto.CompactTextString(m) } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodSpec) GetVolumes() []*Volume { if m != nil { @@ -6200,22 +7119,52 @@ func (m *PodSpec) GetTolerations() []*Toleration { return nil } +func (m *PodSpec) GetHostAliases() []*HostAlias { + if m != nil { + return m.HostAliases + } + return nil +} + +func (m *PodSpec) GetPriorityClassName() string { + if m != nil && m.PriorityClassName != nil { + return *m.PriorityClassName + } + return "" +} + +func (m *PodSpec) GetPriority() int32 { + if m != nil && m.Priority != nil { + return *m.Priority + } + return 0 +} + +func (m *PodSpec) GetDnsConfig() *PodDNSConfig { + if m != nil { + return m.DnsConfig + } + return nil +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. type PodStatus struct { // Current condition of the pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase // +optional Phase *string `protobuf:"bytes,1,opt,name=phase" json:"phase,omitempty"` // Current service state of pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []*PodCondition `protobuf:"bytes,2,rep,name=conditions" json:"conditions,omitempty"` // A human readable message indicating details about why the pod is in this condition. // +optional Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' + // e.g. 
'Evicted' // +optional Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` // IP address of the host to which the pod is assigned. Empty if not yet scheduled. @@ -6228,15 +7177,15 @@ type PodStatus struct { // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - StartTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=startTime" json:"startTime,omitempty"` + StartTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=startTime" json:"startTime,omitempty"` // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have // startTime set. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status InitContainerStatuses []*ContainerStatus `protobuf:"bytes,10,rep,name=initContainerStatuses" json:"initContainerStatuses,omitempty"` // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional ContainerStatuses []*ContainerStatus `protobuf:"bytes,8,rep,name=containerStatuses" json:"containerStatuses,omitempty"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements @@ -6250,7 +7199,7 @@ type PodStatus struct { func (m *PodStatus) Reset() { *m = PodStatus{} } func (m *PodStatus) String() string { return proto.CompactTextString(m) } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodStatus) GetPhase() string { if m != nil && m.Phase != nil { @@ -6294,7 +7243,7 @@ func (m *PodStatus) GetPodIP() string { return "" } -func (m *PodStatus) GetStartTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *PodStatus) GetStartTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.StartTime } @@ -6325,14 +7274,14 @@ func (m *PodStatus) GetQosClass() string { // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *PodStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -6341,9 +7290,9 @@ type PodStatusResult struct { func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (m *PodStatusResult) String() string { return proto.CompactTextString(m) } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } -func (m *PodStatusResult) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PodStatusResult) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -6360,11 +7309,11 @@ func (m *PodStatusResult) GetStatus() *PodStatus { // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template *PodTemplateSpec `protobuf:"bytes,2,opt,name=template" json:"template,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -6373,9 +7322,9 @@ type PodTemplate struct { func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (m *PodTemplate) String() string { return proto.CompactTextString(m) } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } -func (m *PodTemplate) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PodTemplate) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -6392,9 +7341,9 @@ func (m *PodTemplate) GetTemplate() *PodTemplateSpec { // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of pod templates Items []*PodTemplate `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -6403,9 +7352,9 @@ type PodTemplateList struct { func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (m *PodTemplateList) String() string { return proto.CompactTextString(m) } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } -func (m *PodTemplateList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *PodTemplateList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -6422,11 +7371,11 @@ func (m *PodTemplateList) GetItems() []*PodTemplate { // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Specification of the desired behavior of the pod. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *PodSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -6435,9 +7384,9 @@ type PodTemplateSpec struct { func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (m *PodTemplateSpec) String() string { return proto.CompactTextString(m) } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } -func (m *PodTemplateSpec) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PodTemplateSpec) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -6469,7 +7418,7 @@ type PortworxVolumeSource struct { func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (m *PortworxVolumeSource) String() string { return proto.CompactTextString(m) } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *PortworxVolumeSource) GetVolumeID() string { if m != nil && m.VolumeID != nil { @@ -6504,7 +7453,7 @@ type Preconditions struct { func (m *Preconditions) Reset() { *m = Preconditions{} } func (m *Preconditions) String() string { return proto.CompactTextString(m) } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *Preconditions) GetUid() string { if m != nil && m.Uid != nil { @@ -6519,7 +7468,7 @@ type PreferAvoidPodsEntry struct { PodSignature *PodSignature `protobuf:"bytes,1,opt,name=podSignature" json:"podSignature,omitempty"` // Time at which this entry was added to the list. // +optional - EvictionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,2,opt,name=evictionTime" json:"evictionTime,omitempty"` + EvictionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,2,opt,name=evictionTime" json:"evictionTime,omitempty"` // (brief) reason why this entry was added to the list. 
// +optional Reason *string `protobuf:"bytes,3,opt,name=reason" json:"reason,omitempty"` @@ -6532,7 +7481,7 @@ type PreferAvoidPodsEntry struct { func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (m *PreferAvoidPodsEntry) String() string { return proto.CompactTextString(m) } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PreferAvoidPodsEntry) GetPodSignature() *PodSignature { if m != nil { @@ -6541,7 +7490,7 @@ func (m *PreferAvoidPodsEntry) GetPodSignature() *PodSignature { return nil } -func (m *PreferAvoidPodsEntry) GetEvictionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *PreferAvoidPodsEntry) GetEvictionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.EvictionTime } @@ -6576,7 +7525,7 @@ func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm func (m *PreferredSchedulingTerm) String() string { return proto.CompactTextString(m) } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{122} + return fileDescriptorGenerated, []int{134} } func (m *PreferredSchedulingTerm) GetWeight() int32 { @@ -6599,12 +7548,12 @@ type Probe struct { // The action taken to determine the health of a container Handler *Handler `protobuf:"bytes,1,opt,name=handler" json:"handler,omitempty"` // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional InitialDelaySeconds *int32 `protobuf:"varint,2,opt,name=initialDelaySeconds" json:"initialDelaySeconds,omitempty"` // Number of seconds after which the probe times out. // Defaults to 1 second. Minimum value is 1. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional TimeoutSeconds *int32 `protobuf:"varint,3,opt,name=timeoutSeconds" json:"timeoutSeconds,omitempty"` // How often (in seconds) to perform the probe. 
@@ -6625,7 +7574,7 @@ type Probe struct { func (m *Probe) Reset() { *m = Probe{} } func (m *Probe) String() string { return proto.CompactTextString(m) } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *Probe) GetHandler() *Handler { if m != nil { @@ -6686,7 +7635,7 @@ type ProjectedVolumeSource struct { func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (m *ProjectedVolumeSource) String() string { return proto.CompactTextString(m) } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *ProjectedVolumeSource) GetSources() []*VolumeProjection { if m != nil { @@ -6729,7 +7678,7 @@ type QuobyteVolumeSource struct { func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (m *QuobyteVolumeSource) String() string { return proto.CompactTextString(m) } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *QuobyteVolumeSource) GetRegistry() string { if m != nil && m.Registry != nil { @@ -6766,46 +7715,154 @@ func (m *QuobyteVolumeSource) GetGroup() string { return "" } +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDPersistentVolumeSource struct { + // A collection of Ceph monitors. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + Monitors []string `protobuf:"bytes,1,rep,name=monitors" json:"monitors,omitempty"` + // The rados image name. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + Image *string `protobuf:"bytes,2,opt,name=image" json:"image,omitempty"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FsType *string `protobuf:"bytes,3,opt,name=fsType" json:"fsType,omitempty"` + // The rados pool name. + // Default is rbd. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + Pool *string `protobuf:"bytes,4,opt,name=pool" json:"pool,omitempty"` + // The rados user name. + // Default is admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + User *string `protobuf:"bytes,5,opt,name=user" json:"user,omitempty"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring *string `protobuf:"bytes,6,opt,name=keyring" json:"keyring,omitempty"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. 
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *SecretReference `protobuf:"bytes,7,opt,name=secretRef" json:"secretRef,omitempty"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly *bool `protobuf:"varint,8,opt,name=readOnly" json:"readOnly,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (m *RBDPersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{138} +} + +func (m *RBDPersistentVolumeSource) GetMonitors() []string { + if m != nil { + return m.Monitors + } + return nil +} + +func (m *RBDPersistentVolumeSource) GetImage() string { + if m != nil && m.Image != nil { + return *m.Image + } + return "" +} + +func (m *RBDPersistentVolumeSource) GetFsType() string { + if m != nil && m.FsType != nil { + return *m.FsType + } + return "" +} + +func (m *RBDPersistentVolumeSource) GetPool() string { + if m != nil && m.Pool != nil { + return *m.Pool + } + return "" +} + +func (m *RBDPersistentVolumeSource) GetUser() string { + if m != nil && m.User != nil { + return *m.User + } + return "" +} + +func (m *RBDPersistentVolumeSource) GetKeyring() string { + if m != nil && m.Keyring != nil { + return *m.Keyring + } + return "" +} + +func (m *RBDPersistentVolumeSource) GetSecretRef() *SecretReference { + if m != nil { + return m.SecretRef + } + return nil +} + +func (m *RBDPersistentVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it Monitors []string `protobuf:"bytes,1,rep,name=monitors" json:"monitors,omitempty"` // The rados image name. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it Image *string `protobuf:"bytes,2,opt,name=image" json:"image,omitempty"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#rbd + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FsType *string `protobuf:"bytes,3,opt,name=fsType" json:"fsType,omitempty"` // The rados pool name. // Default is rbd. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional Pool *string `protobuf:"bytes,4,opt,name=pool" json:"pool,omitempty"` // The rados user name. // Default is admin. 
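For orientation, a minimal sketch of how the new RBDPersistentVolumeSource message added above might be populated by client code. It assumes the snippet compiles alongside the generated types; the strPtr/boolPtr helpers are local to the example, and all values are placeholders, not anything taken from this diff.

// Illustrative only: builds an RBD persistent volume source whose credentials
// come from a namespaced SecretReference (which overrides Keyring when set).
func strPtr(s string) *string { return &s }
func boolPtr(b bool) *bool    { return &b }

func exampleRBDPersistentVolumeSource() *RBDPersistentVolumeSource {
	return &RBDPersistentVolumeSource{
		Monitors: []string{"192.0.2.10:6789", "192.0.2.11:6789"}, // placeholder monitor addresses
		Image:    strPtr("example-image"),
		Pool:     strPtr("rbd"),   // documented default
		User:     strPtr("admin"), // documented default
		FsType:   strPtr("ext4"),
		SecretRef: &SecretReference{
			Name:      strPtr("ceph-secret"),
			Namespace: strPtr("default"),
		},
		ReadOnly: boolPtr(true),
	}
}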
- // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional User *string `protobuf:"bytes,5,opt,name=user" json:"user,omitempty"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional Keyring *string `protobuf:"bytes,6,opt,name=keyring" json:"keyring,omitempty"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `protobuf:"bytes,7,opt,name=secretRef" json:"secretRef,omitempty"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly *bool `protobuf:"varint,8,opt,name=readOnly" json:"readOnly,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -6814,7 +7871,7 @@ type RBDVolumeSource struct { func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (m *RBDVolumeSource) String() string { return proto.CompactTextString(m) } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *RBDVolumeSource) GetMonitors() []string { if m != nil { @@ -6875,9 +7932,9 @@ func (m *RBDVolumeSource) GetReadOnly() bool { // RangeAllocation is not a public type. type RangeAllocation struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Range is string that identifies the range represented by 'data'. Range *string `protobuf:"bytes,2,opt,name=range" json:"range,omitempty"` // Data is a bit array containing all allocated addresses in the previous segment. 
@@ -6888,9 +7945,9 @@ type RangeAllocation struct { func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (m *RangeAllocation) String() string { return proto.CompactTextString(m) } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } -func (m *RangeAllocation) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *RangeAllocation) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -6915,18 +7972,18 @@ func (m *RangeAllocation) GetData() []byte { type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the specification of the desired behavior of the replication controller. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *ReplicationControllerSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is the most recently observed status of the replication controller. // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *ReplicationControllerStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -6935,9 +7992,9 @@ type ReplicationController struct { func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (m *ReplicationController) String() string { return proto.CompactTextString(m) } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } -func (m *ReplicationController) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ReplicationController) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -6966,7 +8023,7 @@ type ReplicationControllerCondition struct { Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // The last time the condition transitioned from one status to another. 
// +optional - LastTransitionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` // The reason for the condition's last transition. // +optional Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` @@ -6980,7 +8037,7 @@ func (m *ReplicationControllerCondition) Reset() { *m = ReplicationContr func (m *ReplicationControllerCondition) String() string { return proto.CompactTextString(m) } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{129} + return fileDescriptorGenerated, []int{142} } func (m *ReplicationControllerCondition) GetType() string { @@ -6997,7 +8054,7 @@ func (m *ReplicationControllerCondition) GetStatus() string { return "" } -func (m *ReplicationControllerCondition) GetLastTransitionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *ReplicationControllerCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTransitionTime } @@ -7021,11 +8078,11 @@ func (m *ReplicationControllerCondition) GetMessage() string { // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of replication controllers. - // More info: http://kubernetes.io/docs/user-guide/replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller Items []*ReplicationController `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -7034,10 +8091,10 @@ func (m *ReplicationControllerList) Reset() { *m = ReplicationController func (m *ReplicationControllerList) String() string { return proto.CompactTextString(m) } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{130} + return fileDescriptorGenerated, []int{143} } -func (m *ReplicationControllerList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ReplicationControllerList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -7056,7 +8113,7 @@ type ReplicationControllerSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. 
- // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller // +optional Replicas *int32 `protobuf:"varint,1,opt,name=replicas" json:"replicas,omitempty"` // Minimum number of seconds for which a newly created pod should be ready @@ -7068,12 +8125,12 @@ type ReplicationControllerSpec struct { // If Selector is empty, it is defaulted to the labels present on the Pod template. // Label keys and values that must match in order to be controlled by this replication // controller, if empty defaulted to labels on Pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional Selector map[string]string `protobuf:"bytes,2,rep,name=selector" json:"selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template *PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -7083,7 +8140,7 @@ func (m *ReplicationControllerSpec) Reset() { *m = ReplicationController func (m *ReplicationControllerSpec) String() string { return proto.CompactTextString(m) } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{131} + return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerSpec) GetReplicas() int32 { @@ -7118,7 +8175,7 @@ func (m *ReplicationControllerSpec) GetTemplate() *PodTemplateSpec { // controller. type ReplicationControllerStatus struct { // Replicas is the most recently oberved number of replicas. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller Replicas *int32 `protobuf:"varint,1,opt,name=replicas" json:"replicas,omitempty"` // The number of pods that have labels matching the labels of the pod template of the replication controller. // +optional @@ -7134,6 +8191,8 @@ type ReplicationControllerStatus struct { ObservedGeneration *int64 `protobuf:"varint,3,opt,name=observedGeneration" json:"observedGeneration,omitempty"` // Represents the latest available observations of a replication controller's current state. 
// +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []*ReplicationControllerCondition `protobuf:"bytes,6,rep,name=conditions" json:"conditions,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -7142,7 +8201,7 @@ func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControll func (m *ReplicationControllerStatus) String() string { return proto.CompactTextString(m) } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{132} + return fileDescriptorGenerated, []int{145} } func (m *ReplicationControllerStatus) GetReplicas() int32 { @@ -7196,14 +8255,14 @@ type ResourceFieldSelector struct { Resource *string `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` // Specifies the output format of the exposed resources, defaults to "1" // +optional - Divisor *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=divisor" json:"divisor,omitempty"` - XXX_unrecognized []byte `json:"-"` + Divisor *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,3,opt,name=divisor" json:"divisor,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (m *ResourceFieldSelector) String() string { return proto.CompactTextString(m) } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } func (m *ResourceFieldSelector) GetContainerName() string { if m != nil && m.ContainerName != nil { @@ -7219,7 +8278,7 @@ func (m *ResourceFieldSelector) GetResource() string { return "" } -func (m *ResourceFieldSelector) GetDivisor() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceFieldSelector) GetDivisor() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Divisor } @@ -7229,15 +8288,15 @@ func (m *ResourceFieldSelector) GetDivisor() *k8s_io_kubernetes_pkg_api_resource // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the desired quota. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *ResourceQuotaSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status defines the actual enforced quota and its current usage. 
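Since the hunk above adds +patchMergeKey=type / +patchStrategy=merge to ReplicationControllerStatus.Conditions, here is a small sketch of the by-type lookup that merge key implies. It assumes compilation alongside the generated types; the helper name is made up for illustration.

// conditionByType returns the first condition whose Type matches, echoing the
// +patchMergeKey=type annotation above (conditions are keyed by their type).
func conditionByType(st *ReplicationControllerStatus, t string) *ReplicationControllerCondition {
	if st == nil {
		return nil
	}
	for _, c := range st.Conditions {
		// GetType is nil-safe, so a nil entry simply never matches.
		if c.GetType() == t {
			return c
		}
	}
	return nil
}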
- // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *ResourceQuotaStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -7246,9 +8305,9 @@ type ResourceQuota struct { func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (m *ResourceQuota) String() string { return proto.CompactTextString(m) } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } -func (m *ResourceQuota) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ResourceQuota) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -7272,11 +8331,11 @@ func (m *ResourceQuota) GetStatus() *ResourceQuotaStatus { // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ Items []*ResourceQuota `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -7284,9 +8343,9 @@ type ResourceQuotaList struct { func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (m *ResourceQuotaList) String() string { return proto.CompactTextString(m) } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } -func (m *ResourceQuotaList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ResourceQuotaList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -7303,9 +8362,9 @@ func (m *ResourceQuotaList) GetItems() []*ResourceQuota { // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. type ResourceQuotaSpec struct { // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - Hard map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=hard" json:"hard,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Hard map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=hard" json:"hard,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // A collection of filters that must match each object tracked by a quota. 
// If not specified, the quota matches all objects. // +optional @@ -7316,9 +8375,9 @@ type ResourceQuotaSpec struct { func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (m *ResourceQuotaSpec) String() string { return proto.CompactTextString(m) } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } -func (m *ResourceQuotaSpec) GetHard() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceQuotaSpec) GetHard() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Hard } @@ -7335,28 +8394,28 @@ func (m *ResourceQuotaSpec) GetScopes() []string { // ResourceQuotaStatus defines the enforced hard limits and observed use. type ResourceQuotaStatus struct { // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - Hard map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=hard" json:"hard,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Hard map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=hard" json:"hard,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Used is the current observed total usage of the resource in the namespace. // +optional - Used map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=used" json:"used,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Used map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=used" json:"used,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (m *ResourceQuotaStatus) String() string { return proto.CompactTextString(m) } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } -func (m *ResourceQuotaStatus) GetHard() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceQuotaStatus) GetHard() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Hard } return nil } -func (m *ResourceQuotaStatus) GetUsed() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceQuotaStatus) GetUsed() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Used } @@ -7366,31 +8425,31 @@ func (m *ResourceQuotaStatus) GetUsed() map[string]*k8s_io_kubernetes_pkg_api_re // ResourceRequirements describes the compute resource requirements. type ResourceRequirements struct { // Limits describes the maximum amount of compute resources allowed. 
- // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional - Limits map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=limits" json:"limits,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Limits map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,1,rep,name=limits" json:"limits,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional - Requests map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=requests" json:"requests,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Requests map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,rep,name=requests" json:"requests,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (m *ResourceRequirements) String() string { return proto.CompactTextString(m) } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } -func (m *ResourceRequirements) GetLimits() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceRequirements) GetLimits() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Limits } return nil } -func (m *ResourceRequirements) GetRequests() map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *ResourceRequirements) GetRequests() map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Requests } @@ -7417,7 +8476,7 @@ type SELinuxOptions struct { func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (m *SELinuxOptions) String() string { return proto.CompactTextString(m) } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *SELinuxOptions) GetUser() string { if m != nil && m.User != nil { @@ -7447,6 +8506,119 @@ func (m *SELinuxOptions) GetLevel() string { return "" } +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume +type ScaleIOPersistentVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway *string `protobuf:"bytes,1,opt,name=gateway" json:"gateway,omitempty"` + // The name of the storage system as configured in ScaleIO. + System *string `protobuf:"bytes,2,opt,name=system" json:"system,omitempty"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. 
+ SecretRef *SecretReference `protobuf:"bytes,3,opt,name=secretRef" json:"secretRef,omitempty"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SslEnabled *bool `protobuf:"varint,4,opt,name=sslEnabled" json:"sslEnabled,omitempty"` + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + ProtectionDomain *string `protobuf:"bytes,5,opt,name=protectionDomain" json:"protectionDomain,omitempty"` + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + StoragePool *string `protobuf:"bytes,6,opt,name=storagePool" json:"storagePool,omitempty"` + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // +optional + StorageMode *string `protobuf:"bytes,7,opt,name=storageMode" json:"storageMode,omitempty"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName *string `protobuf:"bytes,8,opt,name=volumeName" json:"volumeName,omitempty"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FsType *string `protobuf:"bytes,9,opt,name=fsType" json:"fsType,omitempty"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool `protobuf:"varint,10,opt,name=readOnly" json:"readOnly,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (m *ScaleIOPersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{153} +} + +func (m *ScaleIOPersistentVolumeSource) GetGateway() string { + if m != nil && m.Gateway != nil { + return *m.Gateway + } + return "" +} + +func (m *ScaleIOPersistentVolumeSource) GetSystem() string { + if m != nil && m.System != nil { + return *m.System + } + return "" +} + +func (m *ScaleIOPersistentVolumeSource) GetSecretRef() *SecretReference { + if m != nil { + return m.SecretRef + } + return nil +} + +func (m *ScaleIOPersistentVolumeSource) GetSslEnabled() bool { + if m != nil && m.SslEnabled != nil { + return *m.SslEnabled + } + return false +} + +func (m *ScaleIOPersistentVolumeSource) GetProtectionDomain() string { + if m != nil && m.ProtectionDomain != nil { + return *m.ProtectionDomain + } + return "" +} + +func (m *ScaleIOPersistentVolumeSource) GetStoragePool() string { + if m != nil && m.StoragePool != nil { + return *m.StoragePool + } + return "" +} + +func (m *ScaleIOPersistentVolumeSource) GetStorageMode() string { + if m != nil && m.StorageMode != nil { + return *m.StorageMode + } + return "" +} + +func (m *ScaleIOPersistentVolumeSource) GetVolumeName() string { + if m != nil && m.VolumeName != nil { + return *m.VolumeName + } + return "" +} + +func (m *ScaleIOPersistentVolumeSource) GetFsType() string { + if m != nil && m.FsType != nil { + return *m.FsType + } + return "" +} + +func (m *ScaleIOPersistentVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + // ScaleIOVolumeSource represents a persistent ScaleIO volume type ScaleIOVolumeSource struct { // The host address of the ScaleIO API Gateway. 
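The generated accessors shown above are nil-safe, so optional ScaleIO settings and the chained SecretReference can be read without explicit nil checks. A minimal sketch under the same same-package assumption; helper names are illustrative and the "ThinProvisioned" literal comes from the field comment above.

// scaleIOIsThin reports whether the volume asks for thin provisioning;
// GetStorageMode returns "" when the receiver or the field is nil.
func scaleIOIsThin(m *ScaleIOPersistentVolumeSource) bool {
	return m.GetStorageMode() == "ThinProvisioned"
}

// scaleIOSecretName returns the referenced secret name, or "" when no secret
// is set; chaining is safe because the generated getters tolerate nil receivers.
func scaleIOSecretName(m *ScaleIOPersistentVolumeSource) string {
	return m.GetSecretRef().GetName()
}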
@@ -7459,13 +8631,13 @@ type ScaleIOVolumeSource struct { // Flag to enable/disable SSL communication with Gateway, default false // +optional SslEnabled *bool `protobuf:"varint,4,opt,name=sslEnabled" json:"sslEnabled,omitempty"` - // The name of the Protection Domain for the configured storage (defaults to "default"). + // The name of the ScaleIO Protection Domain for the configured storage. // +optional ProtectionDomain *string `protobuf:"bytes,5,opt,name=protectionDomain" json:"protectionDomain,omitempty"` - // The Storage Pool associated with the protection domain (defaults to "default"). + // The ScaleIO Storage Pool associated with the protection domain. // +optional StoragePool *string `protobuf:"bytes,6,opt,name=storagePool" json:"storagePool,omitempty"` - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // +optional StorageMode *string `protobuf:"bytes,7,opt,name=storageMode" json:"storageMode,omitempty"` // The name of a volume already created in the ScaleIO system @@ -7486,7 +8658,7 @@ type ScaleIOVolumeSource struct { func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (m *ScaleIOVolumeSource) String() string { return proto.CompactTextString(m) } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *ScaleIOVolumeSource) GetGateway() string { if m != nil && m.Gateway != nil { @@ -7562,14 +8734,13 @@ func (m *ScaleIOVolumeSource) GetReadOnly() bool { // the Data field must be less than MaxSecretSize bytes. type Secret struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // Described in https://tools.ietf.org/html/rfc4648#section-4 + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 // +optional Data map[string][]byte `protobuf:"bytes,2,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // stringData allows specifying non-binary secret data in string form. 
@@ -7588,9 +8759,9 @@ type Secret struct { func (m *Secret) Reset() { *m = Secret{} } func (m *Secret) String() string { return proto.CompactTextString(m) } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } -func (m *Secret) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Secret) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -7635,7 +8806,7 @@ type SecretEnvSource struct { func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (m *SecretEnvSource) String() string { return proto.CompactTextString(m) } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } func (m *SecretEnvSource) GetLocalObjectReference() *LocalObjectReference { if m != nil { @@ -7666,7 +8837,7 @@ type SecretKeySelector struct { func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (m *SecretKeySelector) String() string { return proto.CompactTextString(m) } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *SecretKeySelector) GetLocalObjectReference() *LocalObjectReference { if m != nil { @@ -7692,11 +8863,11 @@ func (m *SecretKeySelector) GetOptional() bool { // SecretList is a list of Secret. type SecretList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of secret objects. 
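The reworded Secret.Data comment above now states that each key must consist of alphanumeric characters, '-', '_' or '.'. A hedged sketch of a matching check, derived only from that comment (regexp import assumed, names are illustrative):

// validSecretDataKey mirrors the key rule quoted in the Secret.Data comment:
// alphanumeric characters, '-', '_' or '.' only (no other constraints implied here).
var secretDataKeyRE = regexp.MustCompile(`^[-._a-zA-Z0-9]+$`)

func validSecretDataKey(k string) bool {
	return secretDataKeyRE.MatchString(k)
}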
- // More info: http://kubernetes.io/docs/user-guide/secrets + // More info: https://kubernetes.io/docs/concepts/configuration/secret Items []*Secret `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -7704,9 +8875,9 @@ type SecretList struct { func (m *SecretList) Reset() { *m = SecretList{} } func (m *SecretList) String() string { return proto.CompactTextString(m) } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } -func (m *SecretList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *SecretList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -7746,7 +8917,7 @@ type SecretProjection struct { func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (m *SecretProjection) String() string { return proto.CompactTextString(m) } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *SecretProjection) GetLocalObjectReference() *LocalObjectReference { if m != nil { @@ -7769,6 +8940,37 @@ func (m *SecretProjection) GetOptional() bool { return false } +// SecretReference represents a Secret Reference. It has enough information to retrieve secret +// in any namespace +type SecretReference struct { + // Name is unique within a namespace to reference a secret resource. + // +optional + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Namespace defines the space within which the secret name must be unique. + // +optional + Namespace *string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (m *SecretReference) String() string { return proto.CompactTextString(m) } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } + +func (m *SecretReference) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *SecretReference) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + // Adapts a Secret into a volume. // // The contents of the target Secret's Data field will be presented in a volume @@ -7776,7 +8978,7 @@ func (m *SecretProjection) GetOptional() bool { // Secret volumes support ownership management and SELinux relabeling. type SecretVolumeSource struct { // Name of the secret in the pod's namespace to use. 
- // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional SecretName *string `protobuf:"bytes,1,opt,name=secretName" json:"secretName,omitempty"` // If unspecified, each key-value pair in the Data field of the referenced @@ -7804,7 +9006,7 @@ type SecretVolumeSource struct { func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (m *SecretVolumeSource) String() string { return proto.CompactTextString(m) } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecretVolumeSource) GetSecretName() string { if m != nil && m.SecretName != nil { @@ -7870,14 +9072,22 @@ type SecurityContext struct { // Whether this container has a read-only root filesystem. // Default is false. // +optional - ReadOnlyRootFilesystem *bool `protobuf:"varint,6,opt,name=readOnlyRootFilesystem" json:"readOnlyRootFilesystem,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReadOnlyRootFilesystem *bool `protobuf:"varint,6,opt,name=readOnlyRootFilesystem" json:"readOnlyRootFilesystem,omitempty"` + // AllowPrivilegeEscalation controls whether a process can gain more + // privileges than its parent process. This bool directly controls if + // the no_new_privs flag will be set on the container process. + // AllowPrivilegeEscalation is true always when the container is: + // 1) run as Privileged + // 2) has CAP_SYS_ADMIN + // +optional + AllowPrivilegeEscalation *bool `protobuf:"varint,7,opt,name=allowPrivilegeEscalation" json:"allowPrivilegeEscalation,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (m *SecurityContext) String() string { return proto.CompactTextString(m) } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SecurityContext) GetCapabilities() *Capabilities { if m != nil { @@ -7921,6 +9131,13 @@ func (m *SecurityContext) GetReadOnlyRootFilesystem() bool { return false } +func (m *SecurityContext) GetAllowPrivilegeEscalation() bool { + if m != nil && m.AllowPrivilegeEscalation != nil { + return *m.AllowPrivilegeEscalation + } + return false +} + // SerializedReference is a reference to serialized object. type SerializedReference struct { // The reference to an object in the system. @@ -7932,7 +9149,7 @@ type SerializedReference struct { func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (m *SerializedReference) String() string { return proto.CompactTextString(m) } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *SerializedReference) GetReference() *ObjectReference { if m != nil { @@ -7946,17 +9163,17 @@ func (m *SerializedReference) GetReference() *ObjectReference { // will answer requests sent through the proxy. type Service struct { // Standard object's metadata. 
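With the AllowPrivilegeEscalation field added above, SecurityContext now exposes two nil-safe boolean getters that can be combined directly. A minimal sketch under the same same-package assumption; the helper name is illustrative, and note that the generated getter reports false when the field is unset.

// runsLockedDown reports whether a container both mounts its root filesystem
// read-only and has privilege escalation disabled. An unset
// AllowPrivilegeEscalation reads as false here, because that is what the
// generated getter returns for a nil field.
func runsLockedDown(sc *SecurityContext) bool {
	return sc.GetReadOnlyRootFilesystem() && !sc.GetAllowPrivilegeEscalation()
}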
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the behavior of a service. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *ServiceSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *ServiceStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -7965,9 +9182,9 @@ type Service struct { func (m *Service) Reset() { *m = Service{} } func (m *Service) String() string { return proto.CompactTextString(m) } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } -func (m *Service) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Service) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -7994,17 +9211,19 @@ func (m *Service) GetStatus() *ServiceStatus { // * a set of secrets type ServiceAccount struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://kubernetes.io/docs/user-guide/secrets + // More info: https://kubernetes.io/docs/concepts/configuration/secret // +optional + // +patchMergeKey=name + // +patchStrategy=merge Secrets []*ObjectReference `protobuf:"bytes,2,rep,name=secrets" json:"secrets,omitempty"` // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret + // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod // +optional ImagePullSecrets []*LocalObjectReference `protobuf:"bytes,3,rep,name=imagePullSecrets" json:"imagePullSecrets,omitempty"` // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. 
@@ -8017,9 +9236,9 @@ type ServiceAccount struct { func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (m *ServiceAccount) String() string { return proto.CompactTextString(m) } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } -func (m *ServiceAccount) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ServiceAccount) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -8050,11 +9269,11 @@ func (m *ServiceAccount) GetAutomountServiceAccountToken() bool { // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of ServiceAccounts. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ Items []*ServiceAccount `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -8062,9 +9281,9 @@ type ServiceAccountList struct { func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (m *ServiceAccountList) String() string { return proto.CompactTextString(m) } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } -func (m *ServiceAccountList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ServiceAccountList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -8081,9 +9300,9 @@ func (m *ServiceAccountList) GetItems() []*ServiceAccount { // ServiceList holds a list of services. type ServiceList struct { // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of services Items []*Service `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -8092,9 +9311,9 @@ type ServiceList struct { func (m *ServiceList) Reset() { *m = ServiceList{} } func (m *ServiceList) String() string { return proto.CompactTextString(m) } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } -func (m *ServiceList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ServiceList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -8129,14 +9348,14 @@ type ServicePort struct { // of the 'port' field is used (an identity map). // This field is ignored for services with clusterIP=None, and should be // omitted or set equal to the 'port' field. - // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service // +optional - TargetPort *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,4,opt,name=targetPort" json:"targetPort,omitempty"` + TargetPort *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,4,opt,name=targetPort" json:"targetPort,omitempty"` // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. // Usually assigned by the system. If specified, it will be allocated to the service // if unused or else creation of the service will fail. // Default is to auto-allocate a port if the ServiceType of this Service requires one. 
- // More info: http://kubernetes.io/docs/user-guide/services#type--nodeport + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional NodePort *int32 `protobuf:"varint,5,opt,name=nodePort" json:"nodePort,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -8145,7 +9364,7 @@ type ServicePort struct { func (m *ServicePort) Reset() { *m = ServicePort{} } func (m *ServicePort) String() string { return proto.CompactTextString(m) } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServicePort) GetName() string { if m != nil && m.Name != nil { @@ -8168,7 +9387,7 @@ func (m *ServicePort) GetPort() int32 { return 0 } -func (m *ServicePort) GetTargetPort() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *ServicePort) GetTargetPort() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.TargetPort } @@ -8197,7 +9416,7 @@ type ServiceProxyOptions struct { func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (m *ServiceProxyOptions) String() string { return proto.CompactTextString(m) } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceProxyOptions) GetPath() string { if m != nil && m.Path != nil { @@ -8209,14 +9428,16 @@ func (m *ServiceProxyOptions) GetPath() string { // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +patchMergeKey=port + // +patchStrategy=merge Ports []*ServicePort `protobuf:"bytes,1,rep,name=ports" json:"ports,omitempty"` // Route service traffic to pods with label keys and values matching this // selector. If empty or not present, the service is assumed to have an // external process managing its endpoints, which Kubernetes will not // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. // Ignored if type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#overview + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ // +optional Selector map[string]string `protobuf:"bytes,2,rep,name=selector" json:"selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // clusterIP is the IP address of the service and is usually assigned @@ -8227,7 +9448,7 @@ type ServiceSpec struct { // can be specified for headless services when proxying is not required. // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if // type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional ClusterIP *string `protobuf:"bytes,3,opt,name=clusterIP" json:"clusterIP,omitempty"` // type determines how the Service is exposed. Defaults to ClusterIP. 
Valid @@ -8243,31 +9464,21 @@ type ServiceSpec struct { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: http://kubernetes.io/docs/user-guide/services#overview + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types // +optional Type *string `protobuf:"bytes,4,opt,name=type" json:"type,omitempty"` // externalIPs is a list of IP addresses for which nodes in the cluster // will also accept traffic for this service. These IPs are not managed by // Kubernetes. The user is responsible for ensuring that traffic arrives // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. A previous form of this - // functionality exists as the deprecatedPublicIPs field. When using this - // field, callers should also clear the deprecatedPublicIPs field. + // that are not part of the Kubernetes system. // +optional ExternalIPs []string `protobuf:"bytes,5,rep,name=externalIPs" json:"externalIPs,omitempty"` - // deprecatedPublicIPs is deprecated and replaced by the externalIPs field - // with almost the exact same semantics. This field is retained in the v1 - // API for compatibility until at least 8/20/2016. It will be removed from - // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are - // set, deprecatedPublicIPs is used. - // +k8s:conversion-gen=false - // +optional - DeprecatedPublicIPs []string `protobuf:"bytes,6,rep,name=deprecatedPublicIPs" json:"deprecatedPublicIPs,omitempty"` // Supports "ClientIP" and "None". Used to maintain session affinity. // Enable client IP based session affinity. // Must be ClientIP or None. // Defaults to None. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional SessionAffinity *string `protobuf:"bytes,7,opt,name=sessionAffinity" json:"sessionAffinity,omitempty"` // Only applies to Service Type: LoadBalancer @@ -8280,21 +9491,51 @@ type ServiceSpec struct { // If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. This field will be ignored if the // cloud-provider does not support the feature." - // More info: http://kubernetes.io/docs/user-guide/services-firewalls + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ // +optional LoadBalancerSourceRanges []string `protobuf:"bytes,9,rep,name=loadBalancerSourceRanges" json:"loadBalancerSourceRanges,omitempty"` // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. - // +optional - ExternalName *string `protobuf:"bytes,10,opt,name=externalName" json:"externalName,omitempty"` - XXX_unrecognized []byte `json:"-"` + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. + // +optional + ExternalName *string `protobuf:"bytes,10,opt,name=externalName" json:"externalName,omitempty"` + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. 
"Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + ExternalTrafficPolicy *string `protobuf:"bytes,11,opt,name=externalTrafficPolicy" json:"externalTrafficPolicy,omitempty"` + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + HealthCheckNodePort *int32 `protobuf:"varint,12,opt,name=healthCheckNodePort" json:"healthCheckNodePort,omitempty"` + // publishNotReadyAddresses, when set to true, indicates that DNS implementations + // must publish the notReadyAddresses of subsets for the Endpoints associated with + // the Service. The default value is false. + // The primary use case for setting this field is to use a StatefulSet's Headless Service + // to propagate SRV records for its Pods without respect to their readiness for purpose + // of peer discovery. + // This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints + // when that annotation is deprecated and all clients have been converted to use this + // field. + // +optional + PublishNotReadyAddresses *bool `protobuf:"varint,13,opt,name=publishNotReadyAddresses" json:"publishNotReadyAddresses,omitempty"` + // sessionAffinityConfig contains the configurations of session affinity. + // +optional + SessionAffinityConfig *SessionAffinityConfig `protobuf:"bytes,14,opt,name=sessionAffinityConfig" json:"sessionAffinityConfig,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (m *ServiceSpec) String() string { return proto.CompactTextString(m) } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceSpec) GetPorts() []*ServicePort { if m != nil { @@ -8331,13 +9572,6 @@ func (m *ServiceSpec) GetExternalIPs() []string { return nil } -func (m *ServiceSpec) GetDeprecatedPublicIPs() []string { - if m != nil { - return m.DeprecatedPublicIPs - } - return nil -} - func (m *ServiceSpec) GetSessionAffinity() string { if m != nil && m.SessionAffinity != nil { return *m.SessionAffinity @@ -8366,6 +9600,34 @@ func (m *ServiceSpec) GetExternalName() string { return "" } +func (m *ServiceSpec) GetExternalTrafficPolicy() string { + if m != nil && m.ExternalTrafficPolicy != nil { + return *m.ExternalTrafficPolicy + } + return "" +} + +func (m *ServiceSpec) GetHealthCheckNodePort() int32 { + if m != nil && m.HealthCheckNodePort != nil { + return *m.HealthCheckNodePort + } + return 0 +} + +func (m *ServiceSpec) GetPublishNotReadyAddresses() bool { + if m != nil && m.PublishNotReadyAddresses != nil { + return *m.PublishNotReadyAddresses + } + return false +} + +func (m *ServiceSpec) GetSessionAffinityConfig() *SessionAffinityConfig { + if m != nil { + return m.SessionAffinityConfig + } + return nil +} + // ServiceStatus represents the current status of a service. 
type ServiceStatus struct { // LoadBalancer contains the current status of the load-balancer, @@ -8378,7 +9640,7 @@ type ServiceStatus struct { func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (m *ServiceStatus) String() string { return proto.CompactTextString(m) } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *ServiceStatus) GetLoadBalancer() *LoadBalancerStatus { if m != nil { @@ -8387,8 +9649,171 @@ func (m *ServiceStatus) GetLoadBalancer() *LoadBalancerStatus { return nil } +// SessionAffinityConfig represents the configurations of session affinity. +type SessionAffinityConfig struct { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + ClientIP *ClientIPConfig `protobuf:"bytes,1,opt,name=clientIP" json:"clientIP,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } +func (m *SessionAffinityConfig) String() string { return proto.CompactTextString(m) } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } + +func (m *SessionAffinityConfig) GetClientIP() *ClientIPConfig { + if m != nil { + return m.ClientIP + } + return nil +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName *string `protobuf:"bytes,1,opt,name=volumeName" json:"volumeName,omitempty"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace *string `protobuf:"bytes,2,opt,name=volumeNamespace" json:"volumeNamespace,omitempty"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FsType *string `protobuf:"bytes,3,opt,name=fsType" json:"fsType,omitempty"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool `protobuf:"varint,4,opt,name=readOnly" json:"readOnly,omitempty"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. 
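The hunk above adds externalTrafficPolicy, healthCheckNodePort, publishNotReadyAddresses, and sessionAffinityConfig to ServiceSpec. A minimal sketch (not part of the diff) of how the regenerated types might be populated follows; the import path and alias corev1 "github.com/ericchiang/k8s/apis/core/v1" and the pointer helpers are assumptions for illustration, while the field and getter names come from the generated code above.

package main

import (
	"fmt"

	corev1 "github.com/ericchiang/k8s/apis/core/v1" // assumed import path
)

// Helpers: every generated field is a pointer.
func strPtr(s string) *string { return &s }
func i32Ptr(i int32) *int32   { return &i }
func boolPtr(b bool) *bool    { return &b }

func main() {
	spec := &corev1.ServiceSpec{
		Type: strPtr("LoadBalancer"),
		Ports: []*corev1.ServicePort{
			{Name: strPtr("http"), Port: i32Ptr(80)},
		},
		// Fields added in this revision of the generated API:
		ExternalTrafficPolicy:    strPtr("Local"), // route to node-local endpoints, preserving client source IPs
		HealthCheckNodePort:      i32Ptr(32123),   // only relevant with Type=LoadBalancer and the Local policy
		PublishNotReadyAddresses: boolPtr(false),
		SessionAffinity:          strPtr("ClientIP"),
		SessionAffinityConfig: &corev1.SessionAffinityConfig{
			ClientIP: &corev1.ClientIPConfig{}, // ClientIPConfig is defined elsewhere in this generated file
		},
	}
	fmt.Println(spec.GetExternalTrafficPolicy(), spec.GetHealthCheckNodePort())
}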
+ // +optional + SecretRef *ObjectReference `protobuf:"bytes,5,opt,name=secretRef" json:"secretRef,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (m *StorageOSPersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{173} +} + +func (m *StorageOSPersistentVolumeSource) GetVolumeName() string { + if m != nil && m.VolumeName != nil { + return *m.VolumeName + } + return "" +} + +func (m *StorageOSPersistentVolumeSource) GetVolumeNamespace() string { + if m != nil && m.VolumeNamespace != nil { + return *m.VolumeNamespace + } + return "" +} + +func (m *StorageOSPersistentVolumeSource) GetFsType() string { + if m != nil && m.FsType != nil { + return *m.FsType + } + return "" +} + +func (m *StorageOSPersistentVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + +func (m *StorageOSPersistentVolumeSource) GetSecretRef() *ObjectReference { + if m != nil { + return m.SecretRef + } + return nil +} + +// Represents a StorageOS persistent volume resource. +type StorageOSVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName *string `protobuf:"bytes,1,opt,name=volumeName" json:"volumeName,omitempty"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace *string `protobuf:"bytes,2,opt,name=volumeNamespace" json:"volumeNamespace,omitempty"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FsType *string `protobuf:"bytes,3,opt,name=fsType" json:"fsType,omitempty"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool `protobuf:"varint,4,opt,name=readOnly" json:"readOnly,omitempty"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. 
+ // +optional + SecretRef *LocalObjectReference `protobuf:"bytes,5,opt,name=secretRef" json:"secretRef,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (m *StorageOSVolumeSource) String() string { return proto.CompactTextString(m) } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } + +func (m *StorageOSVolumeSource) GetVolumeName() string { + if m != nil && m.VolumeName != nil { + return *m.VolumeName + } + return "" +} + +func (m *StorageOSVolumeSource) GetVolumeNamespace() string { + if m != nil && m.VolumeNamespace != nil { + return *m.VolumeNamespace + } + return "" +} + +func (m *StorageOSVolumeSource) GetFsType() string { + if m != nil && m.FsType != nil { + return *m.FsType + } + return "" +} + +func (m *StorageOSVolumeSource) GetReadOnly() bool { + if m != nil && m.ReadOnly != nil { + return *m.ReadOnly + } + return false +} + +func (m *StorageOSVolumeSource) GetSecretRef() *LocalObjectReference { + if m != nil { + return m.SecretRef + } + return nil +} + +// Sysctl defines a kernel parameter to be set type Sysctl struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Name of a property to set + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Value of a property to set Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -8396,7 +9821,7 @@ type Sysctl struct { func (m *Sysctl) Reset() { *m = Sysctl{} } func (m *Sysctl) String() string { return proto.CompactTextString(m) } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *Sysctl) GetName() string { if m != nil && m.Name != nil { @@ -8417,24 +9842,34 @@ type TCPSocketAction struct { // Number or name of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - Port *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=port" json:"port,omitempty"` - XXX_unrecognized []byte `json:"-"` + Port *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=port" json:"port,omitempty"` + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + Host *string `protobuf:"bytes,2,opt,name=host" json:"host,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (m *TCPSocketAction) String() string { return proto.CompactTextString(m) } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } -func (m *TCPSocketAction) GetPort() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *TCPSocketAction) GetPort() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.Port } return nil } -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. 
+func (m *TCPSocketAction) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +// The node this Taint is attached to has the "effect" on +// any pod that does not tolerate the Taint. type Taint struct { // Required. The taint key to be applied to a node. Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` @@ -8448,14 +9883,14 @@ type Taint struct { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. // +optional - TimeAdded *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=timeAdded" json:"timeAdded,omitempty"` - XXX_unrecognized []byte `json:"-"` + TimeAdded *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,4,opt,name=timeAdded" json:"timeAdded,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Taint) Reset() { *m = Taint{} } func (m *Taint) String() string { return proto.CompactTextString(m) } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *Taint) GetKey() string { if m != nil && m.Key != nil { @@ -8478,7 +9913,7 @@ func (m *Taint) GetEffect() string { return "" } -func (m *Taint) GetTimeAdded() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *Taint) GetTimeAdded() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.TimeAdded } @@ -8518,7 +9953,7 @@ type Toleration struct { func (m *Toleration) Reset() { *m = Toleration{} } func (m *Toleration) String() string { return proto.CompactTextString(m) } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *Toleration) GetKey() string { if m != nil && m.Key != nil { @@ -8559,7 +9994,7 @@ func (m *Toleration) GetTolerationSeconds() int64 { type Volume struct { // Volume's name. // Must be a DNS_LABEL and unique within the pod. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // VolumeSource represents the location and type of the mounted volume. // If not specified, the Volume is implied to be an EmptyDir. @@ -8571,7 +10006,7 @@ type Volume struct { func (m *Volume) Reset() { *m = Volume{} } func (m *Volume) String() string { return proto.CompactTextString(m) } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } func (m *Volume) GetName() string { if m != nil && m.Name != nil { @@ -8587,6 +10022,34 @@ func (m *Volume) GetVolumeSource() *VolumeSource { return nil } +// volumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // devicePath is the path inside of the container that the device will be mapped to. 
+ DevicePath *string `protobuf:"bytes,2,opt,name=devicePath" json:"devicePath,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (m *VolumeDevice) String() string { return proto.CompactTextString(m) } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } + +func (m *VolumeDevice) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *VolumeDevice) GetDevicePath() string { + if m != nil && m.DevicePath != nil { + return *m.DevicePath + } + return "" +} + // VolumeMount describes a mounting of a Volume within a container. type VolumeMount struct { // This must match the Name of a Volume. @@ -8601,14 +10064,21 @@ type VolumeMount struct { // Path within the volume from which the container's volume should be mounted. // Defaults to "" (volume's root). // +optional - SubPath *string `protobuf:"bytes,4,opt,name=subPath" json:"subPath,omitempty"` + SubPath *string `protobuf:"bytes,4,opt,name=subPath" json:"subPath,omitempty"` + // mountPropagation determines how mounts are propagated from the host + // to container and the other way around. + // When not set, MountPropagationHostToContainer is used. + // This field is alpha in 1.8 and can be reworked or removed in a future + // release. + // +optional + MountPropagation *string `protobuf:"bytes,5,opt,name=mountPropagation" json:"mountPropagation,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (m *VolumeMount) String() string { return proto.CompactTextString(m) } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *VolumeMount) GetName() string { if m != nil && m.Name != nil { @@ -8638,6 +10108,13 @@ func (m *VolumeMount) GetSubPath() string { return "" } +func (m *VolumeMount) GetMountPropagation() string { + if m != nil && m.MountPropagation != nil { + return *m.MountPropagation + } + return "" +} + // Projection that may be projected along with other supported volume types type VolumeProjection struct { // information about the secret data to project @@ -8652,7 +10129,7 @@ type VolumeProjection struct { func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (m *VolumeProjection) String() string { return proto.CompactTextString(m) } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *VolumeProjection) GetSecret() *SecretProjection { if m != nil { @@ -8682,62 +10159,61 @@ type VolumeSource struct { // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. - // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // --- // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not // mount host directories as read/write. 
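The new VolumeDevice message and the mountPropagation field on VolumeMount above cover raw block devices and mount-propagation control. A minimal sketch (not part of the diff), under the same assumed corev1 import path; "HostToContainer" mirrors the default named in the generated comment.

package main

import (
	"fmt"

	corev1 "github.com/ericchiang/k8s/apis/core/v1" // assumed import path
)

func strPtr(s string) *string { return &s }

func main() {
	// Raw block device: the name must match a persistentVolumeClaim in the pod.
	dev := &corev1.VolumeDevice{
		Name:       strPtr("block-pvc"),
		DevicePath: strPtr("/dev/xvda"),
	}

	// Filesystem mount using the new propagation field (alpha in 1.8);
	// leaving it unset falls back to the HostToContainer behaviour.
	mnt := &corev1.VolumeMount{
		Name:             strPtr("data"),
		MountPath:        strPtr("/var/lib/data"),
		MountPropagation: strPtr("HostToContainer"),
	}

	fmt.Println(dev.GetDevicePath(), mnt.GetMountPropagation())
}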
// +optional HostPath *HostPathVolumeSource `protobuf:"bytes,1,opt,name=hostPath" json:"hostPath,omitempty"` // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional EmptyDir *EmptyDirVolumeSource `protobuf:"bytes,2,opt,name=emptyDir" json:"emptyDir,omitempty"` // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GcePersistentDisk *GCEPersistentDiskVolumeSource `protobuf:"bytes,3,opt,name=gcePersistentDisk" json:"gcePersistentDisk,omitempty"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AwsElasticBlockStore *AWSElasticBlockStoreVolumeSource `protobuf:"bytes,4,opt,name=awsElasticBlockStore" json:"awsElasticBlockStore,omitempty"` // GitRepo represents a git repository at a particular revision. // +optional GitRepo *GitRepoVolumeSource `protobuf:"bytes,5,opt,name=gitRepo" json:"gitRepo,omitempty"` // Secret represents a secret that should populate this volume. - // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional Secret *SecretVolumeSource `protobuf:"bytes,6,opt,name=secret" json:"secret,omitempty"` // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional Nfs *NFSVolumeSource `protobuf:"bytes,7,opt,name=nfs" json:"nfs,omitempty"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md // +optional Iscsi *ISCSIVolumeSource `protobuf:"bytes,8,opt,name=iscsi" json:"iscsi,omitempty"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `protobuf:"bytes,9,opt,name=glusterfs" json:"glusterfs,omitempty"` // PersistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `protobuf:"bytes,10,opt,name=persistentVolumeClaim" json:"persistentVolumeClaim,omitempty"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional Rbd *RBDVolumeSource `protobuf:"bytes,11,opt,name=rbd" json:"rbd,omitempty"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `protobuf:"bytes,12,opt,name=flexVolume" json:"flexVolume,omitempty"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `protobuf:"bytes,13,opt,name=cinder" json:"cinder,omitempty"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -8776,14 +10252,17 @@ type VolumeSource struct { PortworxVolume *PortworxVolumeSource `protobuf:"bytes,24,opt,name=portworxVolume" json:"portworxVolume,omitempty"` // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional - ScaleIO *ScaleIOVolumeSource `protobuf:"bytes,25,opt,name=scaleIO" json:"scaleIO,omitempty"` - XXX_unrecognized []byte `json:"-"` + ScaleIO *ScaleIOVolumeSource `protobuf:"bytes,25,opt,name=scaleIO" json:"scaleIO,omitempty"` + // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + Storageos *StorageOSVolumeSource `protobuf:"bytes,27,opt,name=storageos" json:"storageos,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (m *VolumeSource) String() string { return proto.CompactTextString(m) } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *VolumeSource) GetHostPath() *HostPathVolumeSource { if m != nil { @@ -8967,6 +10446,13 @@ func (m *VolumeSource) GetScaleIO() *ScaleIOVolumeSource { return nil } +func (m *VolumeSource) GetStorageos() *StorageOSVolumeSource { + if m != nil { + return m.Storageos + } + return nil +} + // Represents a vSphere volume resource. type VsphereVirtualDiskVolumeSource struct { // Path that identifies vSphere volume vmdk @@ -8975,7 +10461,13 @@ type VsphereVirtualDiskVolumeSource struct { // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional - FsType *string `protobuf:"bytes,2,opt,name=fsType" json:"fsType,omitempty"` + FsType *string `protobuf:"bytes,2,opt,name=fsType" json:"fsType,omitempty"` + // Storage Policy Based Management (SPBM) profile name. + // +optional + StoragePolicyName *string `protobuf:"bytes,3,opt,name=storagePolicyName" json:"storagePolicyName,omitempty"` + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. 
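With the storageos field added to VolumeSource above, the new StorageOSVolumeSource can be wired into a Volume. A minimal sketch (not part of the diff), under the same assumed corev1 import path; the volume, namespace, and secret names are placeholders.

package main

import (
	"fmt"

	corev1 "github.com/ericchiang/k8s/apis/core/v1" // assumed import path
)

func strPtr(s string) *string { return &s }
func boolPtr(b bool) *bool    { return &b }

func main() {
	vol := &corev1.Volume{
		Name: strPtr("storageos-data"), // placeholder
		VolumeSource: &corev1.VolumeSource{
			Storageos: &corev1.StorageOSVolumeSource{
				VolumeName:      strPtr("redis-vol01"), // placeholder
				VolumeNamespace: strPtr("default"),     // "default" if StorageOS namespaces are not used
				FsType:          strPtr("ext4"),
				ReadOnly:        boolPtr(false),
				SecretRef:       &corev1.LocalObjectReference{Name: strPtr("storageos-api")}, // placeholder secret
			},
		},
	}
	fmt.Println(vol.GetVolumeSource().GetStorageos().GetVolumeName())
}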
+ // +optional + StoragePolicyID *string `protobuf:"bytes,4,opt,name=storagePolicyID" json:"storagePolicyID,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -8983,7 +10475,7 @@ func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDi func (m *VsphereVirtualDiskVolumeSource) String() string { return proto.CompactTextString(m) } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{165} + return fileDescriptorGenerated, []int{184} } func (m *VsphereVirtualDiskVolumeSource) GetVolumePath() string { @@ -9000,6 +10492,20 @@ func (m *VsphereVirtualDiskVolumeSource) GetFsType() string { return "" } +func (m *VsphereVirtualDiskVolumeSource) GetStoragePolicyName() string { + if m != nil && m.StoragePolicyName != nil { + return *m.StoragePolicyName + } + return "" +} + +func (m *VsphereVirtualDiskVolumeSource) GetStoragePolicyID() string { + if m != nil && m.StoragePolicyID != nil { + return *m.StoragePolicyID + } + return "" +} + // The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) type WeightedPodAffinityTerm struct { // weight associated with matching the corresponding podAffinityTerm, @@ -9014,7 +10520,7 @@ func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm func (m *WeightedPodAffinityTerm) String() string { return proto.CompactTextString(m) } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{166} + return fileDescriptorGenerated, []int{185} } func (m *WeightedPodAffinityTerm) GetWeight() int32 { @@ -9032,173 +10538,192 @@ func (m *WeightedPodAffinityTerm) GetPodAffinityTerm() *PodAffinityTerm { } func init() { - proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.AWSElasticBlockStoreVolumeSource") - proto.RegisterType((*Affinity)(nil), "github.com/ericchiang.k8s.api.v1.Affinity") - proto.RegisterType((*AttachedVolume)(nil), "github.com/ericchiang.k8s.api.v1.AttachedVolume") - proto.RegisterType((*AvoidPods)(nil), "github.com/ericchiang.k8s.api.v1.AvoidPods") - proto.RegisterType((*AzureDiskVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.AzureDiskVolumeSource") - proto.RegisterType((*AzureFileVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.AzureFileVolumeSource") - proto.RegisterType((*Binding)(nil), "github.com/ericchiang.k8s.api.v1.Binding") - proto.RegisterType((*Capabilities)(nil), "github.com/ericchiang.k8s.api.v1.Capabilities") - proto.RegisterType((*CephFSVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.CephFSVolumeSource") - proto.RegisterType((*CinderVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.CinderVolumeSource") - proto.RegisterType((*ComponentCondition)(nil), "github.com/ericchiang.k8s.api.v1.ComponentCondition") - proto.RegisterType((*ComponentStatus)(nil), "github.com/ericchiang.k8s.api.v1.ComponentStatus") - proto.RegisterType((*ComponentStatusList)(nil), "github.com/ericchiang.k8s.api.v1.ComponentStatusList") - proto.RegisterType((*ConfigMap)(nil), "github.com/ericchiang.k8s.api.v1.ConfigMap") - proto.RegisterType((*ConfigMapEnvSource)(nil), "github.com/ericchiang.k8s.api.v1.ConfigMapEnvSource") - proto.RegisterType((*ConfigMapKeySelector)(nil), "github.com/ericchiang.k8s.api.v1.ConfigMapKeySelector") - proto.RegisterType((*ConfigMapList)(nil), 
"github.com/ericchiang.k8s.api.v1.ConfigMapList") - proto.RegisterType((*ConfigMapProjection)(nil), "github.com/ericchiang.k8s.api.v1.ConfigMapProjection") - proto.RegisterType((*ConfigMapVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.ConfigMapVolumeSource") - proto.RegisterType((*Container)(nil), "github.com/ericchiang.k8s.api.v1.Container") - proto.RegisterType((*ContainerImage)(nil), "github.com/ericchiang.k8s.api.v1.ContainerImage") - proto.RegisterType((*ContainerPort)(nil), "github.com/ericchiang.k8s.api.v1.ContainerPort") - proto.RegisterType((*ContainerState)(nil), "github.com/ericchiang.k8s.api.v1.ContainerState") - proto.RegisterType((*ContainerStateRunning)(nil), "github.com/ericchiang.k8s.api.v1.ContainerStateRunning") - proto.RegisterType((*ContainerStateTerminated)(nil), "github.com/ericchiang.k8s.api.v1.ContainerStateTerminated") - proto.RegisterType((*ContainerStateWaiting)(nil), "github.com/ericchiang.k8s.api.v1.ContainerStateWaiting") - proto.RegisterType((*ContainerStatus)(nil), "github.com/ericchiang.k8s.api.v1.ContainerStatus") - proto.RegisterType((*DaemonEndpoint)(nil), "github.com/ericchiang.k8s.api.v1.DaemonEndpoint") - proto.RegisterType((*DeleteOptions)(nil), "github.com/ericchiang.k8s.api.v1.DeleteOptions") - proto.RegisterType((*DownwardAPIProjection)(nil), "github.com/ericchiang.k8s.api.v1.DownwardAPIProjection") - proto.RegisterType((*DownwardAPIVolumeFile)(nil), "github.com/ericchiang.k8s.api.v1.DownwardAPIVolumeFile") - proto.RegisterType((*DownwardAPIVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.DownwardAPIVolumeSource") - proto.RegisterType((*EmptyDirVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.EmptyDirVolumeSource") - proto.RegisterType((*EndpointAddress)(nil), "github.com/ericchiang.k8s.api.v1.EndpointAddress") - proto.RegisterType((*EndpointPort)(nil), "github.com/ericchiang.k8s.api.v1.EndpointPort") - proto.RegisterType((*EndpointSubset)(nil), "github.com/ericchiang.k8s.api.v1.EndpointSubset") - proto.RegisterType((*Endpoints)(nil), "github.com/ericchiang.k8s.api.v1.Endpoints") - proto.RegisterType((*EndpointsList)(nil), "github.com/ericchiang.k8s.api.v1.EndpointsList") - proto.RegisterType((*EnvFromSource)(nil), "github.com/ericchiang.k8s.api.v1.EnvFromSource") - proto.RegisterType((*EnvVar)(nil), "github.com/ericchiang.k8s.api.v1.EnvVar") - proto.RegisterType((*EnvVarSource)(nil), "github.com/ericchiang.k8s.api.v1.EnvVarSource") - proto.RegisterType((*Event)(nil), "github.com/ericchiang.k8s.api.v1.Event") - proto.RegisterType((*EventList)(nil), "github.com/ericchiang.k8s.api.v1.EventList") - proto.RegisterType((*EventSource)(nil), "github.com/ericchiang.k8s.api.v1.EventSource") - proto.RegisterType((*ExecAction)(nil), "github.com/ericchiang.k8s.api.v1.ExecAction") - proto.RegisterType((*FCVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.FCVolumeSource") - proto.RegisterType((*FlexVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.FlexVolumeSource") - proto.RegisterType((*FlockerVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.FlockerVolumeSource") - proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.GCEPersistentDiskVolumeSource") - proto.RegisterType((*GitRepoVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.GitRepoVolumeSource") - proto.RegisterType((*GlusterfsVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.GlusterfsVolumeSource") - proto.RegisterType((*HTTPGetAction)(nil), "github.com/ericchiang.k8s.api.v1.HTTPGetAction") - 
proto.RegisterType((*HTTPHeader)(nil), "github.com/ericchiang.k8s.api.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "github.com/ericchiang.k8s.api.v1.Handler") - proto.RegisterType((*HostPathVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.HostPathVolumeSource") - proto.RegisterType((*ISCSIVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.ISCSIVolumeSource") - proto.RegisterType((*KeyToPath)(nil), "github.com/ericchiang.k8s.api.v1.KeyToPath") - proto.RegisterType((*Lifecycle)(nil), "github.com/ericchiang.k8s.api.v1.Lifecycle") - proto.RegisterType((*LimitRange)(nil), "github.com/ericchiang.k8s.api.v1.LimitRange") - proto.RegisterType((*LimitRangeItem)(nil), "github.com/ericchiang.k8s.api.v1.LimitRangeItem") - proto.RegisterType((*LimitRangeList)(nil), "github.com/ericchiang.k8s.api.v1.LimitRangeList") - proto.RegisterType((*LimitRangeSpec)(nil), "github.com/ericchiang.k8s.api.v1.LimitRangeSpec") - proto.RegisterType((*List)(nil), "github.com/ericchiang.k8s.api.v1.List") - proto.RegisterType((*ListOptions)(nil), "github.com/ericchiang.k8s.api.v1.ListOptions") - proto.RegisterType((*LoadBalancerIngress)(nil), "github.com/ericchiang.k8s.api.v1.LoadBalancerIngress") - proto.RegisterType((*LoadBalancerStatus)(nil), "github.com/ericchiang.k8s.api.v1.LoadBalancerStatus") - proto.RegisterType((*LocalObjectReference)(nil), "github.com/ericchiang.k8s.api.v1.LocalObjectReference") - proto.RegisterType((*NFSVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.NFSVolumeSource") - proto.RegisterType((*Namespace)(nil), "github.com/ericchiang.k8s.api.v1.Namespace") - proto.RegisterType((*NamespaceList)(nil), "github.com/ericchiang.k8s.api.v1.NamespaceList") - proto.RegisterType((*NamespaceSpec)(nil), "github.com/ericchiang.k8s.api.v1.NamespaceSpec") - proto.RegisterType((*NamespaceStatus)(nil), "github.com/ericchiang.k8s.api.v1.NamespaceStatus") - proto.RegisterType((*Node)(nil), "github.com/ericchiang.k8s.api.v1.Node") - proto.RegisterType((*NodeAddress)(nil), "github.com/ericchiang.k8s.api.v1.NodeAddress") - proto.RegisterType((*NodeAffinity)(nil), "github.com/ericchiang.k8s.api.v1.NodeAffinity") - proto.RegisterType((*NodeCondition)(nil), "github.com/ericchiang.k8s.api.v1.NodeCondition") - proto.RegisterType((*NodeDaemonEndpoints)(nil), "github.com/ericchiang.k8s.api.v1.NodeDaemonEndpoints") - proto.RegisterType((*NodeList)(nil), "github.com/ericchiang.k8s.api.v1.NodeList") - proto.RegisterType((*NodeProxyOptions)(nil), "github.com/ericchiang.k8s.api.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), "github.com/ericchiang.k8s.api.v1.NodeResources") - proto.RegisterType((*NodeSelector)(nil), "github.com/ericchiang.k8s.api.v1.NodeSelector") - proto.RegisterType((*NodeSelectorRequirement)(nil), "github.com/ericchiang.k8s.api.v1.NodeSelectorRequirement") - proto.RegisterType((*NodeSelectorTerm)(nil), "github.com/ericchiang.k8s.api.v1.NodeSelectorTerm") - proto.RegisterType((*NodeSpec)(nil), "github.com/ericchiang.k8s.api.v1.NodeSpec") - proto.RegisterType((*NodeStatus)(nil), "github.com/ericchiang.k8s.api.v1.NodeStatus") - proto.RegisterType((*NodeSystemInfo)(nil), "github.com/ericchiang.k8s.api.v1.NodeSystemInfo") - proto.RegisterType((*ObjectFieldSelector)(nil), "github.com/ericchiang.k8s.api.v1.ObjectFieldSelector") - proto.RegisterType((*ObjectMeta)(nil), "github.com/ericchiang.k8s.api.v1.ObjectMeta") - proto.RegisterType((*ObjectReference)(nil), "github.com/ericchiang.k8s.api.v1.ObjectReference") - proto.RegisterType((*PersistentVolume)(nil), 
"github.com/ericchiang.k8s.api.v1.PersistentVolume") - proto.RegisterType((*PersistentVolumeClaim)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeClaim") - proto.RegisterType((*PersistentVolumeClaimList)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeClaimList") - proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeClaimSpec") - proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeClaimStatus") - proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeClaimVolumeSource") - proto.RegisterType((*PersistentVolumeList)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeList") - proto.RegisterType((*PersistentVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeSource") - proto.RegisterType((*PersistentVolumeSpec)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeSpec") - proto.RegisterType((*PersistentVolumeStatus)(nil), "github.com/ericchiang.k8s.api.v1.PersistentVolumeStatus") - proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.PhotonPersistentDiskVolumeSource") - proto.RegisterType((*Pod)(nil), "github.com/ericchiang.k8s.api.v1.Pod") - proto.RegisterType((*PodAffinity)(nil), "github.com/ericchiang.k8s.api.v1.PodAffinity") - proto.RegisterType((*PodAffinityTerm)(nil), "github.com/ericchiang.k8s.api.v1.PodAffinityTerm") - proto.RegisterType((*PodAntiAffinity)(nil), "github.com/ericchiang.k8s.api.v1.PodAntiAffinity") - proto.RegisterType((*PodAttachOptions)(nil), "github.com/ericchiang.k8s.api.v1.PodAttachOptions") - proto.RegisterType((*PodCondition)(nil), "github.com/ericchiang.k8s.api.v1.PodCondition") - proto.RegisterType((*PodExecOptions)(nil), "github.com/ericchiang.k8s.api.v1.PodExecOptions") - proto.RegisterType((*PodList)(nil), "github.com/ericchiang.k8s.api.v1.PodList") - proto.RegisterType((*PodLogOptions)(nil), "github.com/ericchiang.k8s.api.v1.PodLogOptions") - proto.RegisterType((*PodPortForwardOptions)(nil), "github.com/ericchiang.k8s.api.v1.PodPortForwardOptions") - proto.RegisterType((*PodProxyOptions)(nil), "github.com/ericchiang.k8s.api.v1.PodProxyOptions") - proto.RegisterType((*PodSecurityContext)(nil), "github.com/ericchiang.k8s.api.v1.PodSecurityContext") - proto.RegisterType((*PodSignature)(nil), "github.com/ericchiang.k8s.api.v1.PodSignature") - proto.RegisterType((*PodSpec)(nil), "github.com/ericchiang.k8s.api.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "github.com/ericchiang.k8s.api.v1.PodStatus") - proto.RegisterType((*PodStatusResult)(nil), "github.com/ericchiang.k8s.api.v1.PodStatusResult") - proto.RegisterType((*PodTemplate)(nil), "github.com/ericchiang.k8s.api.v1.PodTemplate") - proto.RegisterType((*PodTemplateList)(nil), "github.com/ericchiang.k8s.api.v1.PodTemplateList") - proto.RegisterType((*PodTemplateSpec)(nil), "github.com/ericchiang.k8s.api.v1.PodTemplateSpec") - proto.RegisterType((*PortworxVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.PortworxVolumeSource") - proto.RegisterType((*Preconditions)(nil), "github.com/ericchiang.k8s.api.v1.Preconditions") - proto.RegisterType((*PreferAvoidPodsEntry)(nil), "github.com/ericchiang.k8s.api.v1.PreferAvoidPodsEntry") - proto.RegisterType((*PreferredSchedulingTerm)(nil), "github.com/ericchiang.k8s.api.v1.PreferredSchedulingTerm") - proto.RegisterType((*Probe)(nil), "github.com/ericchiang.k8s.api.v1.Probe") - 
proto.RegisterType((*ProjectedVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.ProjectedVolumeSource") - proto.RegisterType((*QuobyteVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.QuobyteVolumeSource") - proto.RegisterType((*RBDVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.RBDVolumeSource") - proto.RegisterType((*RangeAllocation)(nil), "github.com/ericchiang.k8s.api.v1.RangeAllocation") - proto.RegisterType((*ReplicationController)(nil), "github.com/ericchiang.k8s.api.v1.ReplicationController") - proto.RegisterType((*ReplicationControllerCondition)(nil), "github.com/ericchiang.k8s.api.v1.ReplicationControllerCondition") - proto.RegisterType((*ReplicationControllerList)(nil), "github.com/ericchiang.k8s.api.v1.ReplicationControllerList") - proto.RegisterType((*ReplicationControllerSpec)(nil), "github.com/ericchiang.k8s.api.v1.ReplicationControllerSpec") - proto.RegisterType((*ReplicationControllerStatus)(nil), "github.com/ericchiang.k8s.api.v1.ReplicationControllerStatus") - proto.RegisterType((*ResourceFieldSelector)(nil), "github.com/ericchiang.k8s.api.v1.ResourceFieldSelector") - proto.RegisterType((*ResourceQuota)(nil), "github.com/ericchiang.k8s.api.v1.ResourceQuota") - proto.RegisterType((*ResourceQuotaList)(nil), "github.com/ericchiang.k8s.api.v1.ResourceQuotaList") - proto.RegisterType((*ResourceQuotaSpec)(nil), "github.com/ericchiang.k8s.api.v1.ResourceQuotaSpec") - proto.RegisterType((*ResourceQuotaStatus)(nil), "github.com/ericchiang.k8s.api.v1.ResourceQuotaStatus") - proto.RegisterType((*ResourceRequirements)(nil), "github.com/ericchiang.k8s.api.v1.ResourceRequirements") - proto.RegisterType((*SELinuxOptions)(nil), "github.com/ericchiang.k8s.api.v1.SELinuxOptions") - proto.RegisterType((*ScaleIOVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.ScaleIOVolumeSource") - proto.RegisterType((*Secret)(nil), "github.com/ericchiang.k8s.api.v1.Secret") - proto.RegisterType((*SecretEnvSource)(nil), "github.com/ericchiang.k8s.api.v1.SecretEnvSource") - proto.RegisterType((*SecretKeySelector)(nil), "github.com/ericchiang.k8s.api.v1.SecretKeySelector") - proto.RegisterType((*SecretList)(nil), "github.com/ericchiang.k8s.api.v1.SecretList") - proto.RegisterType((*SecretProjection)(nil), "github.com/ericchiang.k8s.api.v1.SecretProjection") - proto.RegisterType((*SecretVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.SecretVolumeSource") - proto.RegisterType((*SecurityContext)(nil), "github.com/ericchiang.k8s.api.v1.SecurityContext") - proto.RegisterType((*SerializedReference)(nil), "github.com/ericchiang.k8s.api.v1.SerializedReference") - proto.RegisterType((*Service)(nil), "github.com/ericchiang.k8s.api.v1.Service") - proto.RegisterType((*ServiceAccount)(nil), "github.com/ericchiang.k8s.api.v1.ServiceAccount") - proto.RegisterType((*ServiceAccountList)(nil), "github.com/ericchiang.k8s.api.v1.ServiceAccountList") - proto.RegisterType((*ServiceList)(nil), "github.com/ericchiang.k8s.api.v1.ServiceList") - proto.RegisterType((*ServicePort)(nil), "github.com/ericchiang.k8s.api.v1.ServicePort") - proto.RegisterType((*ServiceProxyOptions)(nil), "github.com/ericchiang.k8s.api.v1.ServiceProxyOptions") - proto.RegisterType((*ServiceSpec)(nil), "github.com/ericchiang.k8s.api.v1.ServiceSpec") - proto.RegisterType((*ServiceStatus)(nil), "github.com/ericchiang.k8s.api.v1.ServiceStatus") - proto.RegisterType((*Sysctl)(nil), "github.com/ericchiang.k8s.api.v1.Sysctl") - proto.RegisterType((*TCPSocketAction)(nil), "github.com/ericchiang.k8s.api.v1.TCPSocketAction") - 
proto.RegisterType((*Taint)(nil), "github.com/ericchiang.k8s.api.v1.Taint") - proto.RegisterType((*Toleration)(nil), "github.com/ericchiang.k8s.api.v1.Toleration") - proto.RegisterType((*Volume)(nil), "github.com/ericchiang.k8s.api.v1.Volume") - proto.RegisterType((*VolumeMount)(nil), "github.com/ericchiang.k8s.api.v1.VolumeMount") - proto.RegisterType((*VolumeProjection)(nil), "github.com/ericchiang.k8s.api.v1.VolumeProjection") - proto.RegisterType((*VolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.VolumeSource") - proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "github.com/ericchiang.k8s.api.v1.VsphereVirtualDiskVolumeSource") - proto.RegisterType((*WeightedPodAffinityTerm)(nil), "github.com/ericchiang.k8s.api.v1.WeightedPodAffinityTerm") + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + 
proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.api.core.v1.DeleteOptions") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*ListOptions)(nil), "k8s.io.api.core.v1.ListOptions") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + 
proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.api.core.v1.ObjectMeta") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), 
"k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + proto.RegisterType((*ScaleIOVolumeSource)(nil), 
"k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -9414,6 +10939,61 @@ func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.Kind != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i += copy(dAtA[i:], *m.Kind) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SecretName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretName))) + i += copy(dAtA[i:], 
*m.SecretName) + } + if m.ShareName != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareName))) + i += copy(dAtA[i:], *m.ShareName) + } + if m.ReadOnly != nil { + dAtA[i] = 0x18 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SecretNamespace != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i += copy(dAtA[i:], *m.SecretNamespace) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -9504,6 +11084,49 @@ func (m *Binding) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Driver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Driver))) + i += copy(dAtA[i:], *m.Driver) + } + if m.VolumeHandle != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeHandle))) + i += copy(dAtA[i:], *m.VolumeHandle) + } + if m.ReadOnly != nil { + dAtA[i] = 0x18 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9555,7 +11178,7 @@ func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -9565,7 +11188,7 @@ func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { +func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -9629,6 +11252,80 @@ func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Path != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + if m.User != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.User))) + i += copy(dAtA[i:], *m.User) + } + if m.SecretFile != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretFile))) + i += copy(dAtA[i:], *m.SecretFile) + } + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n7, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.ReadOnly != nil { + dAtA[i] = 0x30 + i++ + if 
*m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9672,6 +11369,32 @@ func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TimeoutSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9736,11 +11459,11 @@ func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n7, err := m.Metadata.MarshalTo(dAtA[i:]) + n8, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } if len(m.Conditions) > 0 { for _, msg := range m.Conditions { @@ -9779,11 +11502,11 @@ func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n8, err := m.Metadata.MarshalTo(dAtA[i:]) + n9, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -9822,11 +11545,11 @@ func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n9, err := m.Metadata.MarshalTo(dAtA[i:]) + n10, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } if len(m.Data) > 0 { for k, _ := range m.Data { @@ -9870,11 +11593,11 @@ func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n10, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n11, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } if m.Optional != nil { dAtA[i] = 0x10 @@ -9911,11 +11634,11 @@ func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n11, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n12, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } if m.Key != nil { dAtA[i] = 0x12 @@ -9958,11 +11681,11 @@ func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n12, err := m.Metadata.MarshalTo(dAtA[i:]) + n13, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -10001,11 +11724,11 @@ func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n13, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n14, err := 
m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -10054,11 +11777,11 @@ func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n14, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n15, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n15 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -10184,11 +11907,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n15, err := m.Resources.MarshalTo(dAtA[i:]) + n16, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n16 } if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { @@ -10206,31 +11929,31 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n16, err := m.LivenessProbe.MarshalTo(dAtA[i:]) + n17, err := m.LivenessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 } if m.ReadinessProbe != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n17, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) + n18, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 } if m.Lifecycle != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n18, err := m.Lifecycle.MarshalTo(dAtA[i:]) + n19, err := m.Lifecycle.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } if m.TerminationMessagePath != nil { dAtA[i] = 0x6a @@ -10248,11 +11971,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n19, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n20, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n20 } if m.Stdin != nil { dAtA[i] = 0x80 @@ -10312,6 +12035,20 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TerminationMessagePolicy))) i += copy(dAtA[i:], *m.TerminationMessagePolicy) } + if len(m.VolumeDevices) > 0 { + for _, msg := range m.VolumeDevices { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -10427,31 +12164,31 @@ func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n20, err := m.Waiting.MarshalTo(dAtA[i:]) + n21, err := m.Waiting.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n21 } if m.Running != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n21, err := m.Running.MarshalTo(dAtA[i:]) + n22, err := m.Running.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.Terminated != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n22, err := m.Terminated.MarshalTo(dAtA[i:]) + n23, err := m.Terminated.MarshalTo(dAtA[i:]) if err != nil { return 0, err } 
- i += n22 + i += n23 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -10478,11 +12215,11 @@ func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n23, err := m.StartedAt.MarshalTo(dAtA[i:]) + n24, err := m.StartedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -10531,21 +12268,21 @@ func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n24, err := m.StartedAt.MarshalTo(dAtA[i:]) + n25, err := m.StartedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 } if m.FinishedAt != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) - n25, err := m.FinishedAt.MarshalTo(dAtA[i:]) + n26, err := m.FinishedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 } if m.ContainerID != nil { dAtA[i] = 0x3a @@ -10617,21 +12354,21 @@ func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) - n26, err := m.State.MarshalTo(dAtA[i:]) + n27, err := m.State.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 } if m.LastState != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastState.Size())) - n27, err := m.LastState.MarshalTo(dAtA[i:]) + n28, err := m.LastState.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 } if m.Ready != nil { dAtA[i] = 0x20 @@ -10722,11 +12459,11 @@ func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) - n28, err := m.Preconditions.MarshalTo(dAtA[i:]) + n29, err := m.Preconditions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 } if m.OrphanDependents != nil { dAtA[i] = 0x18 @@ -10808,21 +12545,21 @@ func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n29, err := m.FieldRef.MarshalTo(dAtA[i:]) + n30, err := m.FieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 } if m.ResourceFieldRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n30, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + n31, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } if m.Mode != nil { dAtA[i] = 0x20 @@ -10894,6 +12631,16 @@ func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Medium))) i += copy(dAtA[i:], *m.Medium) } + if m.SizeLimit != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) + n32, err := m.SizeLimit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -10925,11 +12672,11 @@ func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n31, err := m.TargetRef.MarshalTo(dAtA[i:]) + n33, err := m.TargetRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n33 } if m.Hostname != nil { dAtA[i] 
= 0x1a @@ -11063,11 +12810,11 @@ func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n32, err := m.Metadata.MarshalTo(dAtA[i:]) + n34, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n34 } if len(m.Subsets) > 0 { for _, msg := range m.Subsets { @@ -11106,11 +12853,11 @@ func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n33, err := m.Metadata.MarshalTo(dAtA[i:]) + n35, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n35 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -11155,21 +12902,21 @@ func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n34, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + n36, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n36 } if m.SecretRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n35, err := m.SecretRef.MarshalTo(dAtA[i:]) + n37, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n37 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -11208,11 +12955,11 @@ func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n36, err := m.ValueFrom.MarshalTo(dAtA[i:]) + n38, err := m.ValueFrom.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n38 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -11239,41 +12986,41 @@ func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n37, err := m.FieldRef.MarshalTo(dAtA[i:]) + n39, err := m.FieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n39 } if m.ResourceFieldRef != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n38, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + n40, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n40 } if m.ConfigMapKeyRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n39, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) + n41, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n41 } if m.SecretKeyRef != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n40, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) + n42, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n42 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -11300,21 +13047,21 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n41, err := m.Metadata.MarshalTo(dAtA[i:]) + n43, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n43 } if m.InvolvedObject != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n42, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + n44, err := m.InvolvedObject.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i 
+= n44 } if m.Reason != nil { dAtA[i] = 0x1a @@ -11332,31 +13079,31 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n43, err := m.Source.MarshalTo(dAtA[i:]) + n45, err := m.Source.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n45 } if m.FirstTimestamp != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n44, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + n46, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n46 } if m.LastTimestamp != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n45, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + n47, err := m.LastTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n47 } if m.Count != nil { dAtA[i] = 0x40 @@ -11369,6 +13116,54 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) i += copy(dAtA[i:], *m.Type) } + if m.EventTime != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n48, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if m.Series != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n49, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if m.Action != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Action))) + i += copy(dAtA[i:], *m.Action) + } + if m.Related != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n50, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if m.ReportingComponent != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReportingComponent))) + i += copy(dAtA[i:], *m.ReportingComponent) + } + if m.ReportingInstance != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReportingInstance))) + i += copy(dAtA[i:], *m.ReportingInstance) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -11394,11 +13189,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n46, err := m.Metadata.MarshalTo(dAtA[i:]) + n51, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n51 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -11418,6 +13213,48 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Count != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + } + if m.LastObservedTime != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n52, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + } + if m.State != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.State))) + i += copy(dAtA[i:], *m.State) + } + if 
m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *EventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11538,6 +13375,21 @@ func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { } i++ } + if len(m.Wwids) > 0 { + for _, s := range m.Wwids { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -11575,11 +13427,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n47, err := m.SecretRef.MarshalTo(dAtA[i:]) + n53, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n53 } if m.ReadOnly != nil { dAtA[i] = 0x20 @@ -11802,11 +13654,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n48, err := m.Port.MarshalTo(dAtA[i:]) + n54, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n54 } if m.Host != nil { dAtA[i] = 0x1a @@ -11890,31 +13742,73 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n49, err := m.Exec.MarshalTo(dAtA[i:]) + n55, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n55 } if m.HttpGet != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HttpGet.Size())) - n50, err := m.HttpGet.MarshalTo(dAtA[i:]) + n56, err := m.HttpGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n56 } if m.TcpSocket != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TcpSocket.Size())) - n51, err := m.TcpSocket.MarshalTo(dAtA[i:]) + n57, err := m.TcpSocket.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n57 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HostAlias) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Ip != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ip))) + i += copy(dAtA[i:], *m.Ip) + } + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -11943,6 +13837,123 @@ func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) i += copy(dAtA[i:], *m.Path) } + if m.Type != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TargetPortal != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TargetPortal))) + i += copy(dAtA[i:], *m.TargetPortal) + } + if m.Iqn != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Iqn))) + i += copy(dAtA[i:], *m.Iqn) + } + if m.Lun != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) + } + if m.IscsiInterface != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IscsiInterface))) + i += copy(dAtA[i:], *m.IscsiInterface) + } + if m.FsType != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FsType))) + i += copy(dAtA[i:], *m.FsType) + } + if m.ReadOnly != nil { + dAtA[i] = 0x30 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.ChapAuthDiscovery != nil { + dAtA[i] = 0x40 + i++ + if *m.ChapAuthDiscovery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + if m.ChapAuthSession != nil { + dAtA[i] = 0x58 + i++ + if *m.ChapAuthSession { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -12018,6 +14029,42 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.ChapAuthDiscovery != nil { + dAtA[i] = 0x40 + i++ + if *m.ChapAuthDiscovery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n59, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + if m.ChapAuthSession != nil { + dAtA[i] = 0x58 + i++ + if *m.ChapAuthSession { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -12081,21 +14128,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n52, err := m.PostStart.MarshalTo(dAtA[i:]) + n60, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n60 } if m.PreStop != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n53, err := m.PreStop.MarshalTo(dAtA[i:]) + n61, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n61 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -12122,21 +14169,21 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.Metadata.Size())) - n54, err := m.Metadata.MarshalTo(dAtA[i:]) + n62, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n62 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n55, err := m.Spec.MarshalTo(dAtA[i:]) + n63, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n63 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -12185,11 +14232,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n56, err := v.MarshalTo(dAtA[i:]) + n64, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n64 } } } @@ -12213,11 +14260,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n57, err := v.MarshalTo(dAtA[i:]) + n65, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n65 } } } @@ -12241,11 +14288,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n58, err := v.MarshalTo(dAtA[i:]) + n66, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n66 } } } @@ -12269,11 +14316,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n59, err := v.MarshalTo(dAtA[i:]) + n67, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n67 } } } @@ -12297,11 +14344,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n60, err := v.MarshalTo(dAtA[i:]) + n68, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n68 } } } @@ -12330,11 +14377,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n61, err := m.Metadata.MarshalTo(dAtA[i:]) + n69, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n69 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -12406,11 +14453,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n62, err := m.Metadata.MarshalTo(dAtA[i:]) + n70, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n70 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -12478,6 +14525,16 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } + if m.IncludeUninitialized != nil { + dAtA[i] = 0x30 + i++ + if *m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -12577,6 +14634,33 @@ func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Path != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12639,31 +14723,31 @@ func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n63, err := m.Metadata.MarshalTo(dAtA[i:]) + n71, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n71 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n64, err := m.Spec.MarshalTo(dAtA[i:]) + n72, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n72 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n65, err := m.Status.MarshalTo(dAtA[i:]) + n73, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n73 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -12690,11 +14774,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n66, err := m.Metadata.MarshalTo(dAtA[i:]) + n74, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n74 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -12796,31 +14880,31 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n67, err := m.Metadata.MarshalTo(dAtA[i:]) + n75, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n75 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n68, err := m.Spec.MarshalTo(dAtA[i:]) + n76, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n76 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n69, err := m.Status.MarshalTo(dAtA[i:]) + n77, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n77 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -12880,11 +14964,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n70, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + n78, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n78 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -12935,21 +15019,21 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n71, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + n79, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n71 + i += n79 } if m.LastTransitionTime != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n72, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n80, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n72 + i += n80 } if m.Reason != nil { 
dAtA[i] = 0x2a @@ -12969,6 +15053,37 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ConfigMapRef != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) + n81, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n81 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12988,11 +15103,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n73, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + n82, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n73 + i += n82 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -13019,11 +15134,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n74, err := m.Metadata.MarshalTo(dAtA[i:]) + n83, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n83 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -13105,11 +15220,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n75, err := v.MarshalTo(dAtA[i:]) + n84, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n84 } } } @@ -13288,6 +15403,16 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.ConfigSource != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) + n85, err := m.ConfigSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n85 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -13329,11 +15454,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n76, err := v.MarshalTo(dAtA[i:]) + n86, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n86 } } } @@ -13357,11 +15482,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n77, err := v.MarshalTo(dAtA[i:]) + n87, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n87 } } } @@ -13399,21 +15524,21 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n78, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + n88, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n88 } if m.NodeInfo != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n79, err := m.NodeInfo.MarshalTo(dAtA[i:]) + n89, err := m.NodeInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n89 } if len(m.Images) > 0 { for _, msg := range m.Images { @@ -13634,21 
+15759,21 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n80, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n90, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n80 + i += n90 } if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n81, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n91, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n81 + i += n91 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -13722,6 +15847,18 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterName))) i += copy(dAtA[i:], *m.ClusterName) } + if m.Initializers != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) + n92, err := m.Initializers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n92 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -13810,31 +15947,31 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n82, err := m.Metadata.MarshalTo(dAtA[i:]) + n93, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n82 + i += n93 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n83, err := m.Spec.MarshalTo(dAtA[i:]) + n94, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n83 + i += n94 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n84, err := m.Status.MarshalTo(dAtA[i:]) + n95, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n84 + i += n95 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -13861,31 +15998,96 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n85, err := m.Metadata.MarshalTo(dAtA[i:]) + n96, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n85 + i += n96 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n86, err := m.Spec.MarshalTo(dAtA[i:]) + n97, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n86 + i += n97 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n87, err := m.Status.MarshalTo(dAtA[i:]) + n98, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n87 + i += n98 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) + i += copy(dAtA[i:], 
*m.Status) + } + if m.LastProbeTime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n99, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n99 + } + if m.LastTransitionTime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n100, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n100 + } + if m.Reason != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i += copy(dAtA[i:], *m.Reason) + } + if m.Message != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -13912,11 +16114,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n88, err := m.Metadata.MarshalTo(dAtA[i:]) + n101, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n88 + i += n101 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -13970,11 +16172,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n89, err := m.Resources.MarshalTo(dAtA[i:]) + n102, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n89 + i += n102 } if m.VolumeName != nil { dAtA[i] = 0x1a @@ -13986,11 +16188,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n90, err := m.Selector.MarshalTo(dAtA[i:]) + n103, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n90 + i += n103 } if m.StorageClassName != nil { dAtA[i] = 0x2a @@ -13998,6 +16200,12 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) i += copy(dAtA[i:], *m.StorageClassName) } + if m.VolumeMode != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -14060,14 +16268,26 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n91, err := v.MarshalTo(dAtA[i:]) + n104, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n91 + i += n104 } } } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -14130,11 +16350,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n92, err := m.Metadata.MarshalTo(dAtA[i:]) + n105, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n92 + i += n105 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -14173,151 +16393,151 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.GcePersistentDisk.Size())) - n93, err := m.GcePersistentDisk.MarshalTo(dAtA[i:]) + n106, err := m.GcePersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n93 + i += n106 } if m.AwsElasticBlockStore != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AwsElasticBlockStore.Size())) - n94, err := m.AwsElasticBlockStore.MarshalTo(dAtA[i:]) + n107, err := m.AwsElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n94 + i += n107 } if m.HostPath != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n95, err := m.HostPath.MarshalTo(dAtA[i:]) + n108, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n95 + i += n108 } if m.Glusterfs != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n96, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n109, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n96 + i += n109 } if m.Nfs != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Nfs.Size())) - n97, err := m.Nfs.MarshalTo(dAtA[i:]) + n110, err := m.Nfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n97 + i += n110 } if m.Rbd != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Rbd.Size())) - n98, err := m.Rbd.MarshalTo(dAtA[i:]) + n111, err := m.Rbd.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n98 + i += n111 } if m.Iscsi != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Iscsi.Size())) - n99, err := m.Iscsi.MarshalTo(dAtA[i:]) + n112, err := m.Iscsi.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n99 + i += n112 } if m.Cinder != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n100, err := m.Cinder.MarshalTo(dAtA[i:]) + n113, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n100 + i += n113 } if m.Cephfs != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cephfs.Size())) - n101, err := m.Cephfs.MarshalTo(dAtA[i:]) + n114, err := m.Cephfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n101 + i += n114 } if m.Fc != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Fc.Size())) - n102, err := m.Fc.MarshalTo(dAtA[i:]) + n115, err := m.Fc.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n102 + i += n115 } if m.Flocker != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n103, err := m.Flocker.MarshalTo(dAtA[i:]) + n116, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n103 + i += n116 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n104, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n117, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n104 + i += n117 } if m.AzureFile != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n105, err := m.AzureFile.MarshalTo(dAtA[i:]) + n118, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n105 + i += n118 } if m.VsphereVolume != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n106, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n119, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n106 + i += n119 } if m.Quobyte != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, 
i, uint64(m.Quobyte.Size())) - n107, err := m.Quobyte.MarshalTo(dAtA[i:]) + n120, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n107 + i += n120 } if m.AzureDisk != nil { dAtA[i] = 0x82 @@ -14325,11 +16545,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n108, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n121, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n108 + i += n121 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0x8a @@ -14337,11 +16557,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n109, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n122, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n109 + i += n122 } if m.PortworxVolume != nil { dAtA[i] = 0x92 @@ -14349,11 +16569,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n110, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n123, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n110 + i += n123 } if m.ScaleIO != nil { dAtA[i] = 0x9a @@ -14361,11 +16581,47 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n111, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n124, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n111 + i += n124 + } + if m.Local != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) + n125, err := m.Local.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n125 + } + if m.Storageos != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Storageos.Size())) + n126, err := m.Storageos.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n126 + } + if m.Csi != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Csi.Size())) + n127, err := m.Csi.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -14408,11 +16664,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n112, err := v.MarshalTo(dAtA[i:]) + n128, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n112 + i += n128 } } } @@ -14420,11 +16676,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n113, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + n129, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n113 + i += n129 } if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { @@ -14445,11 +16701,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n114, err := m.ClaimRef.MarshalTo(dAtA[i:]) + n130, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n114 + i += n130 } if m.PersistentVolumeReclaimPolicy 
!= nil { dAtA[i] = 0x2a @@ -14463,6 +16719,27 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) i += copy(dAtA[i:], *m.StorageClassName) } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.VolumeMode != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -14560,31 +16837,31 @@ func (m *Pod) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n115, err := m.Metadata.MarshalTo(dAtA[i:]) + n131, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n115 + i += n131 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n116, err := m.Spec.MarshalTo(dAtA[i:]) + n132, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n116 + i += n132 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n117, err := m.Status.MarshalTo(dAtA[i:]) + n133, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n117 + i += n133 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -14656,11 +16933,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n118, err := m.LabelSelector.MarshalTo(dAtA[i:]) + n134, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n118 + i += n134 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -14832,21 +17109,21 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n119, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n135, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n119 + i += n135 } if m.LastTransitionTime != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n120, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n136, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n120 + i += n136 } if m.Reason != nil { dAtA[i] = 0x2a @@ -14866,6 +17143,102 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := 
range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -14967,11 +17340,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n121, err := m.Metadata.MarshalTo(dAtA[i:]) + n137, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n121 + i += n137 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -15041,11 +17414,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n122, err := m.SinceTime.MarshalTo(dAtA[i:]) + n138, err := m.SinceTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n138 } if m.Timestamps != nil { dAtA[i] = 0x30 @@ -15147,11 +17520,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SeLinuxOptions.Size())) - n123, err := m.SeLinuxOptions.MarshalTo(dAtA[i:]) + n139, err := m.SeLinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n139 } if m.RunAsUser != nil { dAtA[i] = 0x10 @@ -15205,11 +17578,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n124, err := m.PodController.MarshalTo(dAtA[i:]) + n140, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n140 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -15347,11 +17720,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n125, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n141, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n141 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -15387,11 +17760,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n126, err := m.Affinity.MarshalTo(dAtA[i:]) + n142, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n142 } if m.SchedulerName != nil { dAtA[i] = 0x9a @@ -15441,6 +17814,47 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.HostAliases) > 0 { + for _, msg := range m.HostAliases { + dAtA[i] = 0xba + i++ + 
dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.PriorityClassName != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PriorityClassName))) + i += copy(dAtA[i:], *m.PriorityClassName) + } + if m.Priority != nil { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + } + if m.DnsConfig != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DnsConfig.Size())) + n143, err := m.DnsConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -15508,11 +17922,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n127, err := m.StartTime.MarshalTo(dAtA[i:]) + n144, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n144 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -15569,21 +17983,21 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n128, err := m.Metadata.MarshalTo(dAtA[i:]) + n145, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 + i += n145 } if m.Status != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n129, err := m.Status.MarshalTo(dAtA[i:]) + n146, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n146 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -15610,21 +18024,21 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n130, err := m.Metadata.MarshalTo(dAtA[i:]) + n147, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 + i += n147 } if m.Template != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n131, err := m.Template.MarshalTo(dAtA[i:]) + n148, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n131 + i += n148 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -15651,11 +18065,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n132, err := m.Metadata.MarshalTo(dAtA[i:]) + n149, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n132 + i += n149 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -15694,21 +18108,21 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n133, err := m.Metadata.MarshalTo(dAtA[i:]) + n150, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n133 + i += n150 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n134, err := m.Spec.MarshalTo(dAtA[i:]) + n151, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n151 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -15805,21 +18219,21 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA 
[]byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n135, err := m.PodSignature.MarshalTo(dAtA[i:]) + n152, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n135 + i += n152 } if m.EvictionTime != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n136, err := m.EvictionTime.MarshalTo(dAtA[i:]) + n153, err := m.EvictionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n136 + i += n153 } if m.Reason != nil { dAtA[i] = 0x1a @@ -15863,11 +18277,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n137, err := m.Preference.MarshalTo(dAtA[i:]) + n154, err := m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n154 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -15894,11 +18308,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n138, err := m.Handler.MarshalTo(dAtA[i:]) + n155, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 + i += n155 } if m.InitialDelaySeconds != nil { dAtA[i] = 0x10 @@ -16024,6 +18438,92 @@ func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Image != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Image))) + i += copy(dAtA[i:], *m.Image) + } + if m.FsType != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FsType))) + i += copy(dAtA[i:], *m.FsType) + } + if m.Pool != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pool))) + i += copy(dAtA[i:], *m.Pool) + } + if m.User != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.User))) + i += copy(dAtA[i:], *m.User) + } + if m.Keyring != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Keyring))) + i += copy(dAtA[i:], *m.Keyring) + } + if m.SecretRef != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n156, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n156 + } + if m.ReadOnly != nil { + dAtA[i] = 0x40 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16088,11 +18588,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n139, err := m.SecretRef.MarshalTo(dAtA[i:]) + n157, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err 
} - i += n139 + i += n157 } if m.ReadOnly != nil { dAtA[i] = 0x40 @@ -16129,11 +18629,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n140, err := m.Metadata.MarshalTo(dAtA[i:]) + n158, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n140 + i += n158 } if m.Range != nil { dAtA[i] = 0x12 @@ -16172,31 +18672,31 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n141, err := m.Metadata.MarshalTo(dAtA[i:]) + n159, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n141 + i += n159 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n142, err := m.Spec.MarshalTo(dAtA[i:]) + n160, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n142 + i += n160 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n143, err := m.Status.MarshalTo(dAtA[i:]) + n161, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n143 + i += n161 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -16235,11 +18735,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n144, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n162, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n144 + i += n162 } if m.Reason != nil { dAtA[i] = 0x22 @@ -16278,11 +18778,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n145, err := m.Metadata.MarshalTo(dAtA[i:]) + n163, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n145 + i += n163 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -16343,11 +18843,11 @@ func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n146, err := m.Template.MarshalTo(dAtA[i:]) + n164, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n146 + i += n164 } if m.MinReadySeconds != nil { dAtA[i] = 0x20 @@ -16449,11 +18949,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n147, err := m.Divisor.MarshalTo(dAtA[i:]) + n165, err := m.Divisor.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n147 + i += n165 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -16480,31 +18980,31 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n148, err := m.Metadata.MarshalTo(dAtA[i:]) + n166, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n148 + i += n166 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n149, err := m.Spec.MarshalTo(dAtA[i:]) + n167, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n149 + i += n167 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n150, err := 
m.Status.MarshalTo(dAtA[i:]) + n168, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n150 + i += n168 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -16531,11 +19031,11 @@ func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n151, err := m.Metadata.MarshalTo(dAtA[i:]) + n169, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n151 + i += n169 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -16590,11 +19090,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n152, err := v.MarshalTo(dAtA[i:]) + n170, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n152 + i += n170 } } } @@ -16654,11 +19154,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n153, err := v.MarshalTo(dAtA[i:]) + n171, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n153 + i += n171 } } } @@ -16682,11 +19182,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n154, err := v.MarshalTo(dAtA[i:]) + n172, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n154 + i += n172 } } } @@ -16731,11 +19231,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n155, err := v.MarshalTo(dAtA[i:]) + n173, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n155 + i += n173 } } } @@ -16759,11 +19259,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n156, err := v.MarshalTo(dAtA[i:]) + n174, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n156 + i += n174 } } } @@ -16818,6 +19318,99 @@ func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Gateway != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Gateway))) + i += copy(dAtA[i:], *m.Gateway) + } + if m.System != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.System))) + i += copy(dAtA[i:], *m.System) + } + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n175, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n175 + } + if m.SslEnabled != nil { + dAtA[i] = 0x20 + i++ + if *m.SslEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ProtectionDomain != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProtectionDomain))) + i += copy(dAtA[i:], *m.ProtectionDomain) + } + if m.StoragePool != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StoragePool))) + i += copy(dAtA[i:], *m.StoragePool) + } + if m.StorageMode != nil { + dAtA[i] = 0x3a + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageMode))) + i += copy(dAtA[i:], *m.StorageMode) + } + if m.VolumeName != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeName))) + i += copy(dAtA[i:], *m.VolumeName) + } + if m.FsType != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FsType))) + i += copy(dAtA[i:], *m.FsType) + } + if m.ReadOnly != nil { + dAtA[i] = 0x50 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16849,11 +19442,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n157, err := m.SecretRef.MarshalTo(dAtA[i:]) + n176, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n157 + i += n176 } if m.SslEnabled != nil { dAtA[i] = 0x20 @@ -16930,11 +19523,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n158, err := m.Metadata.MarshalTo(dAtA[i:]) + n177, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n158 + i += n177 } if len(m.Data) > 0 { for k, _ := range m.Data { @@ -17007,11 +19600,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n159, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n178, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n159 + i += n178 } if m.Optional != nil { dAtA[i] = 0x10 @@ -17048,11 +19641,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n160, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n179, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n160 + i += n179 } if m.Key != nil { dAtA[i] = 0x12 @@ -17095,11 +19688,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n161, err := m.Metadata.MarshalTo(dAtA[i:]) + n180, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n161 + i += n180 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -17138,11 +19731,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n162, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n181, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n162 + i += n181 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -17172,6 +19765,39 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i += 
copy(dAtA[i:], *m.Name) + } + if m.Namespace != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace))) + i += copy(dAtA[i:], *m.Namespace) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -17245,11 +19871,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n163, err := m.Capabilities.MarshalTo(dAtA[i:]) + n182, err := m.Capabilities.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n163 + i += n182 } if m.Privileged != nil { dAtA[i] = 0x10 @@ -17265,11 +19891,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SeLinuxOptions.Size())) - n164, err := m.SeLinuxOptions.MarshalTo(dAtA[i:]) + n183, err := m.SeLinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n164 + i += n183 } if m.RunAsUser != nil { dAtA[i] = 0x20 @@ -17296,6 +19922,16 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x38 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -17321,11 +19957,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n165, err := m.Reference.MarshalTo(dAtA[i:]) + n184, err := m.Reference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n165 + i += n184 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -17352,31 +19988,31 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n166, err := m.Metadata.MarshalTo(dAtA[i:]) + n185, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n166 + i += n185 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n167, err := m.Spec.MarshalTo(dAtA[i:]) + n186, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n167 + i += n186 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n168, err := m.Status.MarshalTo(dAtA[i:]) + n187, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n168 + i += n187 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -17403,11 +20039,11 @@ func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n169, err := m.Metadata.MarshalTo(dAtA[i:]) + n188, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n169 + i += n188 } if len(m.Secrets) > 0 { for _, msg := range m.Secrets { @@ -17468,11 +20104,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n170, err := m.Metadata.MarshalTo(dAtA[i:]) + n189, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n189 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -17511,11 +20147,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, 
error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n171, err := m.Metadata.MarshalTo(dAtA[i:]) + n190, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n190 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -17571,11 +20207,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n172, err := m.TargetPort.MarshalTo(dAtA[i:]) + n191, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 + i += n191 } if m.NodePort != nil { dAtA[i] = 0x28 @@ -17686,21 +20322,6 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } - if len(m.DeprecatedPublicIPs) > 0 { - for _, s := range m.DeprecatedPublicIPs { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if m.SessionAffinity != nil { dAtA[i] = 0x3a i++ @@ -17734,6 +20355,37 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExternalName))) i += copy(dAtA[i:], *m.ExternalName) } + if m.ExternalTrafficPolicy != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExternalTrafficPolicy))) + i += copy(dAtA[i:], *m.ExternalTrafficPolicy) + } + if m.HealthCheckNodePort != nil { + dAtA[i] = 0x60 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.HealthCheckNodePort)) + } + if m.PublishNotReadyAddresses != nil { + dAtA[i] = 0x68 + i++ + if *m.PublishNotReadyAddresses { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SessionAffinityConfig != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) + n192, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n192 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -17759,11 +20411,160 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n173, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n193, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n193 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClientIP != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) + n194, err := m.ClientIP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n194 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.VolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(*m.VolumeName))) + i += copy(dAtA[i:], *m.VolumeName) + } + if m.VolumeNamespace != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeNamespace))) + i += copy(dAtA[i:], *m.VolumeNamespace) + } + if m.FsType != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FsType))) + i += copy(dAtA[i:], *m.FsType) + } + if m.ReadOnly != nil { + dAtA[i] = 0x20 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n195, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n173 + i += n195 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.VolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeName))) + i += copy(dAtA[i:], *m.VolumeName) + } + if m.VolumeNamespace != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeNamespace))) + i += copy(dAtA[i:], *m.VolumeNamespace) + } + if m.FsType != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FsType))) + i += copy(dAtA[i:], *m.FsType) + } + if m.ReadOnly != nil { + dAtA[i] = 0x20 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n196, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n196 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -17823,11 +20624,17 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n174, err := m.Port.MarshalTo(dAtA[i:]) + n197, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n174 + i += n197 + } + if m.Host != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Host))) + i += copy(dAtA[i:], *m.Host) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -17872,11 +20679,11 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n175, err := m.TimeAdded.MarshalTo(dAtA[i:]) + n198, err := m.TimeAdded.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n175 + i += n198 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -17959,11 +20766,44 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n176, err := m.VolumeSource.MarshalTo(dAtA[i:]) + n199, err := m.VolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n176 + i += n199 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if m.DevicePath != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DevicePath))) + i += copy(dAtA[i:], *m.DevicePath) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -18014,6 +20854,12 @@ func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SubPath))) i += copy(dAtA[i:], *m.SubPath) } + if m.MountPropagation != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i += copy(dAtA[i:], *m.MountPropagation) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -18039,31 +20885,31 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n177, err := m.Secret.MarshalTo(dAtA[i:]) + n200, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n177 + i += n200 } if m.DownwardAPI != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n178, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n201, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n178 + i += n201 } if m.ConfigMap != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n179, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n202, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n179 + i += n202 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -18090,151 +20936,151 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n180, err := m.HostPath.MarshalTo(dAtA[i:]) + n203, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n180 + i += n203 } if m.EmptyDir != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n181, err := m.EmptyDir.MarshalTo(dAtA[i:]) + n204, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n181 + i += n204 } if m.GcePersistentDisk != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GcePersistentDisk.Size())) - n182, err := m.GcePersistentDisk.MarshalTo(dAtA[i:]) + n205, err := m.GcePersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n182 + i += n205 } if m.AwsElasticBlockStore != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AwsElasticBlockStore.Size())) - n183, err := m.AwsElasticBlockStore.MarshalTo(dAtA[i:]) + n206, err := m.AwsElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n183 + i += n206 } if m.GitRepo != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n184, err := m.GitRepo.MarshalTo(dAtA[i:]) + n207, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n184 + i += n207 } if m.Secret != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n185, err := m.Secret.MarshalTo(dAtA[i:]) + n208, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n185 + i += n208 } if m.Nfs != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.Nfs.Size())) - n186, err := m.Nfs.MarshalTo(dAtA[i:]) + n209, err := m.Nfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n186 + i += n209 } if m.Iscsi != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Iscsi.Size())) - n187, err := m.Iscsi.MarshalTo(dAtA[i:]) + n210, err := m.Iscsi.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n187 + i += n210 } if m.Glusterfs != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n188, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n211, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n188 + i += n211 } if m.PersistentVolumeClaim != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n189, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + n212, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n189 + i += n212 } if m.Rbd != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Rbd.Size())) - n190, err := m.Rbd.MarshalTo(dAtA[i:]) + n213, err := m.Rbd.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n190 + i += n213 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n191, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n214, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n191 + i += n214 } if m.Cinder != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n192, err := m.Cinder.MarshalTo(dAtA[i:]) + n215, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n192 + i += n215 } if m.Cephfs != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cephfs.Size())) - n193, err := m.Cephfs.MarshalTo(dAtA[i:]) + n216, err := m.Cephfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n193 + i += n216 } if m.Flocker != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n194, err := m.Flocker.MarshalTo(dAtA[i:]) + n217, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n194 + i += n217 } if m.DownwardAPI != nil { dAtA[i] = 0x82 @@ -18242,11 +21088,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n195, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n218, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n195 + i += n218 } if m.Fc != nil { dAtA[i] = 0x8a @@ -18254,11 +21100,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Fc.Size())) - n196, err := m.Fc.MarshalTo(dAtA[i:]) + n219, err := m.Fc.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n196 + i += n219 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -18266,11 +21112,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n197, err := m.AzureFile.MarshalTo(dAtA[i:]) + n220, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n197 + i += n220 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -18278,11 +21124,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n198, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n221, err := 
m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n198 + i += n221 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -18290,11 +21136,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n199, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n222, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n199 + i += n222 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -18302,11 +21148,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n200, err := m.Quobyte.MarshalTo(dAtA[i:]) + n223, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n200 + i += n223 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -18314,11 +21160,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n201, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n224, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n201 + i += n224 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -18326,11 +21172,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n202, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n225, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n202 + i += n225 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -18338,11 +21184,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n203, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n226, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n203 + i += n226 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -18350,11 +21196,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n204, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n227, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n204 + i += n227 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -18362,11 +21208,23 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n205, err := m.Projected.MarshalTo(dAtA[i:]) + n228, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n205 + i += n228 + } + if m.Storageos != nil { + dAtA[i] = 0xda + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Storageos.Size())) + n229, err := m.Storageos.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n229 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -18401,6 +21259,18 @@ func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FsType))) i += copy(dAtA[i:], *m.FsType) } + if m.StoragePolicyName != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StoragePolicyName))) + i += copy(dAtA[i:], *m.StoragePolicyName) + } + if m.StoragePolicyID != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StoragePolicyID))) + i += copy(dAtA[i:], *m.StoragePolicyID) + } if m.XXX_unrecognized != nil { i += 
copy(dAtA[i:], m.XXX_unrecognized) } @@ -18431,11 +21301,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n206, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n230, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n206 + i += n230 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -18443,24 +21313,6 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -18568,6 +21420,34 @@ func (m *AzureDiskVolumeSource) Size() (n int) { if m.ReadOnly != nil { n += 2 } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.SecretName != nil { + l = len(*m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ShareName != nil { + l = len(*m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -18611,6 +21491,26 @@ func (m *Binding) Size() (n int) { return n } +func (m *CSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.Driver != nil { + l = len(*m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VolumeHandle != nil { + l = len(*m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Capabilities) Size() (n int) { var l int _ = l @@ -18632,6 +21532,40 @@ func (m *Capabilities) Size() (n int) { return n } +func (m *CephFSPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.User != nil { + l = len(*m.User) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretFile != nil { + l = len(*m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *CephFSVolumeSource) Size() (n int) { var l int _ = l @@ -18686,6 +21620,18 @@ func (m *CinderVolumeSource) Size() (n int) { return n } +func (m *ClientIPConfig) Size() (n int) { + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + 
sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ComponentCondition) Size() (n int) { var l int _ = l @@ -18964,6 +21910,12 @@ func (m *Container) Size() (n int) { l = len(*m.TerminationMessagePolicy) n += 2 + l + sovGenerated(uint64(l)) } + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -19239,6 +22191,10 @@ func (m *EmptyDirVolumeSource) Size() (n int) { l = len(*m.Medium) n += 1 + l + sovGenerated(uint64(l)) } + if m.SizeLimit != nil { + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -19460,6 +22416,30 @@ func (m *Event) Size() (n int) { l = len(*m.Type) n += 1 + l + sovGenerated(uint64(l)) } + if m.EventTime != nil { + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Action != nil { + l = len(*m.Action) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReportingComponent != nil { + l = len(*m.ReportingComponent) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReportingInstance != nil { + l = len(*m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -19485,6 +22465,26 @@ func (m *EventList) Size() (n int) { return n } +func (m *EventSeries) Size() (n int) { + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) + } + if m.LastObservedTime != nil { + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.State != nil { + l = len(*m.State) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *EventSource) Size() (n int) { var l int _ = l @@ -19536,6 +22536,12 @@ func (m *FCVolumeSource) Size() (n int) { if m.ReadOnly != nil { n += 2 } + if len(m.Wwids) > 0 { + for _, s := range m.Wwids { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -19724,6 +22730,25 @@ func (m *Handler) Size() (n int) { return n } +func (m *HostAlias) Size() (n int) { + var l int + _ = l + if m.Ip != nil { + l = len(*m.Ip) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *HostPathVolumeSource) Size() (n int) { var l int _ = l @@ -19731,6 +22756,61 @@ func (m *HostPathVolumeSource) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.TargetPortal != nil { + l = len(*m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Iqn != nil { + l = len(*m.Iqn) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) + } + if m.IscsiInterface != nil { + l = len(*m.IscsiInterface) + n += 1 + l + 
sovGenerated(uint64(l)) + } + if m.FsType != nil { + l = len(*m.FsType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ChapAuthDiscovery != nil { + n += 2 + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ChapAuthSession != nil { + n += 2 + } + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -19768,6 +22848,20 @@ func (m *ISCSIVolumeSource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.ChapAuthDiscovery != nil { + n += 2 + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ChapAuthSession != nil { + n += 2 + } + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -19980,6 +23074,9 @@ func (m *ListOptions) Size() (n int) { if m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } + if m.IncludeUninitialized != nil { + n += 2 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -20031,6 +23128,19 @@ func (m *LocalObjectReference) Size() (n int) { return n } +func (m *LocalVolumeSource) Size() (n int) { + var l int + _ = l + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *NFSVolumeSource) Size() (n int) { var l int _ = l @@ -20209,6 +23319,19 @@ func (m *NodeCondition) Size() (n int) { return n } +func (m *NodeConfigSource) Size() (n int) { + var l int + _ = l + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *NodeDaemonEndpoints) Size() (n int) { var l int _ = l @@ -20353,6 +23476,10 @@ func (m *NodeSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.ConfigSource != nil { + l = m.ConfigSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -20575,6 +23702,10 @@ func (m *ObjectMeta) Size() (n int) { l = len(*m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) } + if m.Initializers != nil { + l = m.Initializers.Size() + n += 2 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -20660,6 +23791,39 @@ func (m *PersistentVolumeClaim) Size() (n int) { return n } +func (m *PersistentVolumeClaimCondition) Size() (n int) { + var l int + _ = l + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = len(*m.Status) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastProbeTime != nil { + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PersistentVolumeClaimList) Size() (n int) { var l int _ = l @@ -20704,6 +23868,10 @@ 
func (m *PersistentVolumeClaimSpec) Size() (n int) { l = len(*m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -20736,6 +23904,12 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -20856,6 +24030,18 @@ func (m *PersistentVolumeSource) Size() (n int) { l = m.ScaleIO.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Storageos != nil { + l = m.Storageos.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Csi != nil { + l = m.Csi.Size() + n += 2 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -20900,6 +24086,16 @@ func (m *PersistentVolumeSpec) Size() (n int) { l = len(*m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -21088,6 +24284,50 @@ func (m *PodCondition) Size() (n int) { return n } +func (m *PodDNSConfig) Size() (n int) { + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PodExecOptions) Size() (n int) { var l int _ = l @@ -21339,6 +24579,23 @@ func (m *PodSpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.PriorityClassName != nil { + l = len(*m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + if m.DnsConfig != nil { + l = m.DnsConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -21618,7 +24875,7 @@ func (m *QuobyteVolumeSource) Size() (n int) { return n } -func (m *RBDVolumeSource) Size() (n int) { +func (m *RBDPersistentVolumeSource) Size() (n int) { var l int _ = l if len(m.Monitors) > 0 { @@ -21660,70 +24917,41 @@ func (m *RBDVolumeSource) Size() (n int) { return n } -func (m *RangeAllocation) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Range != nil { - l = len(*m.Range) - n += 1 + l + 
sovGenerated(uint64(l)) - } - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReplicationController) Size() (n int) { +func (m *RBDVolumeSource) Size() (n int) { var l int _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.Spec != nil { - l = m.Spec.Size() + if m.Image != nil { + l = len(*m.Image) n += 1 + l + sovGenerated(uint64(l)) } - if m.Status != nil { - l = m.Status.Size() + if m.FsType != nil { + l = len(*m.FsType) n += 1 + l + sovGenerated(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReplicationControllerCondition) Size() (n int) { - var l int - _ = l - if m.Type != nil { - l = len(*m.Type) + if m.Pool != nil { + l = len(*m.Pool) n += 1 + l + sovGenerated(uint64(l)) } - if m.Status != nil { - l = len(*m.Status) + if m.User != nil { + l = len(*m.User) n += 1 + l + sovGenerated(uint64(l)) } - if m.LastTransitionTime != nil { - l = m.LastTransitionTime.Size() + if m.Keyring != nil { + l = len(*m.Keyring) n += 1 + l + sovGenerated(uint64(l)) } - if m.Reason != nil { - l = len(*m.Reason) + if m.SecretRef != nil { + l = m.SecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Message != nil { - l = len(*m.Message) - n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnly != nil { + n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -21731,95 +24959,19 @@ func (m *ReplicationControllerCondition) Size() (n int) { return n } -func (m *ReplicationControllerList) Size() (n int) { +func (m *RangeAllocation) Size() (n int) { var l int _ = l if m.Metadata != nil { l = m.Metadata.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReplicationControllerSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MinReadySeconds != nil { - n += 1 + sovGenerated(uint64(*m.MinReadySeconds)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReplicationControllerStatus) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.FullyLabeledReplicas != nil { - n += 1 + sovGenerated(uint64(*m.FullyLabeledReplicas)) - } - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.ReadyReplicas != nil { - n += 1 + sovGenerated(uint64(*m.ReadyReplicas)) - } - if m.AvailableReplicas != nil { - n += 1 + sovGenerated(uint64(*m.AvailableReplicas)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResourceFieldSelector) 
Size() (n int) { - var l int - _ = l - if m.ContainerName != nil { - l = len(*m.ContainerName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = len(*m.Resource) + if m.Range != nil { + l = len(*m.Range) n += 1 + l + sovGenerated(uint64(l)) } - if m.Divisor != nil { - l = m.Divisor.Size() + if m.Data != nil { + l = len(m.Data) n += 1 + l + sovGenerated(uint64(l)) } if m.XXX_unrecognized != nil { @@ -21828,7 +24980,154 @@ func (m *ResourceFieldSelector) Size() (n int) { return n } -func (m *ResourceQuota) Size() (n int) { +func (m *ReplicationController) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + var l int + _ = l + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = len(*m.Status) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicationControllerList) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MinReadySeconds != nil { + n += 1 + sovGenerated(uint64(*m.MinReadySeconds)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.FullyLabeledReplicas != nil { + n += 1 + sovGenerated(uint64(*m.FullyLabeledReplicas)) + } + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.ReadyReplicas != nil { + n += 1 + sovGenerated(uint64(*m.ReadyReplicas)) + } + if m.AvailableReplicas != nil { + n += 1 + sovGenerated(uint64(*m.AvailableReplicas)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + var l int + _ = l + if m.ContainerName != nil { + l = len(*m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = 
len(*m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Divisor != nil { + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResourceQuota) Size() (n int) { var l int _ = l if m.Metadata != nil { @@ -21991,6 +25290,53 @@ func (m *SELinuxOptions) Size() (n int) { return n } +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.Gateway != nil { + l = len(*m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.System != nil { + l = len(*m.System) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SslEnabled != nil { + n += 2 + } + if m.ProtectionDomain != nil { + l = len(*m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StoragePool != nil { + l = len(*m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageMode != nil { + l = len(*m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VolumeName != nil { + l = len(*m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FsType != nil { + l = len(*m.FsType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ScaleIOVolumeSource) Size() (n int) { var l int _ = l @@ -22152,6 +25498,23 @@ func (m *SecretProjection) Size() (n int) { return n } +func (m *SecretReference) Size() (n int) { + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Namespace != nil { + l = len(*m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *SecretVolumeSource) Size() (n int) { var l int _ = l @@ -22200,6 +25563,9 @@ func (m *SecurityContext) Size() (n int) { if m.ReadOnlyRootFilesystem != nil { n += 2 } + if m.AllowPrivilegeEscalation != nil { + n += 2 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -22377,12 +25743,6 @@ func (m *ServiceSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.DeprecatedPublicIPs) > 0 { - for _, s := range m.DeprecatedPublicIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } if m.SessionAffinity != nil { l = len(*m.SessionAffinity) n += 1 + l + sovGenerated(uint64(l)) @@ -22401,6 +25761,20 @@ func (m *ServiceSpec) Size() (n int) { l = len(*m.ExternalName) n += 1 + l + sovGenerated(uint64(l)) } + if m.ExternalTrafficPolicy != nil { + l = len(*m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HealthCheckNodePort != nil { + n += 1 + sovGenerated(uint64(*m.HealthCheckNodePort)) + } + if m.PublishNotReadyAddresses != nil { + n += 2 + } + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -22420,6 +25794,75 @@ func (m *ServiceStatus) Size() (n int) { return n } +func (m *SessionAffinityConfig) Size() (n int) { + var l int + _ = l + if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StorageOSPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.VolumeName != nil { + l = len(*m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) 
+ } + if m.VolumeNamespace != nil { + l = len(*m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FsType != nil { + l = len(*m.FsType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + var l int + _ = l + if m.VolumeName != nil { + l = len(*m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VolumeNamespace != nil { + l = len(*m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FsType != nil { + l = len(*m.FsType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Sysctl) Size() (n int) { var l int _ = l @@ -22444,6 +25887,10 @@ func (m *TCPSocketAction) Size() (n int) { l = m.Port.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Host != nil { + l = len(*m.Host) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -22520,6 +25967,23 @@ func (m *Volume) Size() (n int) { return n } +func (m *VolumeDevice) Size() (n int) { + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DevicePath != nil { + l = len(*m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *VolumeMount) Size() (n int) { var l int _ = l @@ -22538,6 +26002,10 @@ func (m *VolumeMount) Size() (n int) { l = len(*m.SubPath) n += 1 + l + sovGenerated(uint64(l)) } + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -22672,6 +26140,10 @@ func (m *VolumeSource) Size() (n int) { l = m.Projected.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.Storageos != nil { + l = m.Storageos.Size() + n += 2 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -22689,6 +26161,14 @@ func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { l = len(*m.FsType) n += 1 + l + sovGenerated(uint64(l)) } + if m.StoragePolicyName != nil { + l = len(*m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StoragePolicyID != nil { + l = len(*m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -23389,90 +26869,9 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.ReadOnly = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SecretName = &s - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23498,145 +26897,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.ShareName = &s - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Binding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Target == nil { - m.Target = &ObjectReference{} - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = &s iNdEx = postIndex default: iNdEx = preIndex @@ -23660,7 +26921,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } -func (m *Capabilities) Unmarshal(dAtA []byte) error { +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23683,15 +26944,15 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23716,11 +26977,12 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.SecretName = &s iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23745,7 +27007,59 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Drop = append(m.Drop, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.ShareName = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -23769,7 +27083,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23792,15 +27106,15 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23825,11 +27139,12 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.SecretName = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23855,13 +27170,13 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.ShareName = &s iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", 
wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23871,27 +27186,69 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.ReadOnly = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.User = &s - iNdEx = postIndex - case 4: + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23901,25 +27258,28 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SecretFile = &s + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23943,34 +27303,13 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} + if m.Target == nil { + m.Target = &ObjectReference{} } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23993,7 +27332,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24016,15 +27355,15 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24050,11 +27389,11 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.VolumeID = &s + m.Driver = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24080,7 +27419,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.FsType = &s + m.VolumeHandle = &s iNdEx = postIndex case 3: if wireType != 0 { @@ -24125,7 +27464,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24148,15 +27487,15 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24181,12 +27520,11 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.Add = append(m.Add, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 
{ @@ -24211,12 +27549,62 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Status = &s + m.Drop = append(m.Drop, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24241,12 +27629,11 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Message = &s + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24272,64 +27659,43 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Path = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.User = &s + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24339,28 +27705,25 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.SecretFile = &s iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24384,11 +27747,34 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, &ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24411,7 +27797,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24434,17 +27820,17 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { fieldNum 
:= int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24454,28 +27840,114 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.User = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretFile = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24499,11 +27971,34 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24526,7 +28021,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMap) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24549,17 +28044,17 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24569,30 +28064,27 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeID = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24602,34 +28094,27 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + s := string(dAtA[iNdEx:postIndex]) + m.FsType = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLenmapkey uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24639,71 +28124,13 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Data == nil { - m.Data = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Data[mapkey] = mapvalue - } else { - var mapvalue string - m.Data[mapkey] = mapvalue - } - iNdEx = postIndex + b := bool(v != 0) + m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24726,7 +28153,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24749,50 +28176,17 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LocalObjectReference == nil { - m.LocalObjectReference = &LocalObjectReference{} - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24802,13 +28196,12 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + m.TimeoutSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24831,7 +28224,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24854,17 +28247,17 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24874,28 +28267,25 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LocalObjectReference == nil { - m.LocalObjectReference = &LocalObjectReference{} - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24921,13 +28311,13 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Key = &s + m.Status = &s iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24937,13 
+28327,52 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24966,7 +28395,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapList) Unmarshal(dAtA []byte) error { +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24989,10 +28418,10 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -25022,7 +28451,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -25030,7 +28459,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25054,8 +28483,8 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, &ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25081,7 +28510,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25104,15 +28533,15 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - 
return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25136,10 +28565,10 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LocalObjectReference == nil { - m.LocalObjectReference = &LocalObjectReference{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25169,32 +28598,11 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &KeyToPath{}) + m.Items = append(m.Items, &ComponentStatus{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -25217,7 +28625,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMap) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25240,15 +28648,15 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25272,16 +28680,16 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LocalObjectReference == nil { - m.LocalObjectReference = &LocalObjectReference{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25305,52 +28713,98 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if m.Data == nil { + m.Data = make(map[string]string) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - b := bool(v != 0) - m.Optional = &b + m.Data[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -25373,7 +28827,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Container) Unmarshal(dAtA []byte) error { +func (m 
*ConfigMapEnvSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25396,17 +28850,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25416,27 +28870,30 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + if m.LocalObjectReference == nil { + m.LocalObjectReference = &LocalObjectReference{} + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25446,27 +28903,69 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Image = &s - iNdEx = postIndex - case 3: + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
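// Illustrative sketch, not part of the vendored file: how the map[string]string
// handling generated for ConfigMap.Data above decodes one entry. Every protobuf
// map entry is a length-delimited sub-message whose field 1 holds the key and
// field 2 the value; a missing field simply leaves the empty string. The helper
// name decodeStringMapEntry is hypothetical, and the fmt/io identifiers assume
// the imports already present in this generated file.
func decodeStringMapEntry(entry []byte) (key, value string, err error) {
	i := 0
	readVarint := func() (uint64, error) {
		var v uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, fmt.Errorf("integer overflow")
			}
			if i >= len(entry) {
				return 0, io.ErrUnexpectedEOF
			}
			b := entry[i]
			i++
			v |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				return v, nil
			}
		}
	}
	for i < len(entry) {
		tag, err := readVarint() // key varint: field number in the high bits, wire type in the low 3
		if err != nil {
			return "", "", err
		}
		if wt := int(tag & 0x7); wt != 2 {
			return "", "", fmt.Errorf("unexpected wire type %d in map entry", wt)
		}
		n, err := readVarint() // length prefix of the string payload
		if err != nil {
			return "", "", err
		}
		end := i + int(n)
		if end < i || end > len(entry) {
			return "", "", io.ErrUnexpectedEOF
		}
		s := string(entry[i:end])
		i = end
		switch int32(tag >> 3) {
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value, nil
}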
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25476,24 +28975,28 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + if m.LocalObjectReference == nil { + m.LocalObjectReference = &LocalObjectReference{} + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25518,13 +29021,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.Key = &s iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25534,25 +29038,67 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.WorkingDir = &s - iNdEx = postIndex - case 6: + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
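// Illustrative sketch, not part of the vendored file: the generated code above
// stores proto3 optional booleans (and other optional scalars) through pointers
// so that "unset" stays distinguishable from the zero value; any non-zero varint
// decodes to true, exactly as in `b := bool(v != 0); m.Optional = &b`.
// optionalBool is a hypothetical helper shown only for illustration.
func optionalBool(v int) *bool {
	b := v != 0
	return &b
}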
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25576,14 +29122,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, &ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25607,14 +29155,65 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, &EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25638,16 +29237,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resources == nil { - m.Resources = &ResourceRequirements{} + if m.LocalObjectReference == nil { + m.LocalObjectReference = &LocalObjectReference{} } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25671,16 +29270,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25690,28 +29289,67 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.LivenessProbe == nil { - m.LivenessProbe = &Probe{} + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 11: + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25735,16 +29373,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &Probe{} + if m.LocalObjectReference == nil { + m.LocalObjectReference = &LocalObjectReference{} } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25768,18 +29406,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lifecycle == nil { - m.Lifecycle = &Lifecycle{} - } - if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25789,27 +29425,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.TerminationMessagePath = &s - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25819,27 +29445,502 @@ func (m 
*Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.ImagePullPolicy = &s - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) - } - var msglen int + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Image = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 
4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.WorkingDir = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, &EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TerminationMessagePath = &s + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ImagePullPolicy = &s + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25992,6 +30093,37 @@ func (m *Container) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.TerminationMessagePolicy = &s iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, &VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -26501,7 +30633,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.StartedAt == nil { - m.StartedAt = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.StartedAt = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -26685,7 +30817,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.StartedAt == nil { - m.StartedAt = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.StartedAt = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -26718,7 +30850,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.FinishedAt == nil { - m.FinishedAt = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.FinishedAt = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -27801,6 +31933,39 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.Medium = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SizeLimit == nil { + m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -28328,7 +32493,7 @@ func (m *Endpoints) 
Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -28443,7 +32608,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -29032,7 +33197,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -29191,7 +33356,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.FirstTimestamp == nil { - m.FirstTimestamp = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.FirstTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -29224,7 +33389,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastTimestamp == nil { - m.LastTimestamp = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -29280,60 +33445,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.Type = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29357,16 +33471,16 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + if m.EventTime == nil { + m.EventTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29390,65 +33504,16 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &Event{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Series == nil { + m.Series = &EventSeries{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
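// Illustrative sketch, not part of the vendored file: every decoding loop above
// begins by reading one key varint and splitting it into the field number and
// wire type, as in `fieldNum := int32(wire >> 3); wireType := int(wire & 0x7)`.
// Field numbers the decoder does not recognize (for example the Event fields
// introduced by newer API versions) fall through to the default case, are
// skipped with skipGenerated, and are preserved verbatim in XXX_unrecognized.
// splitKey is a hypothetical name used only for illustration.
func splitKey(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}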
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29474,13 +33539,13 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Component = &s + m.Action = &s iNdEx = postIndex - case 2: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29490,76 +33555,28 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Host = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Related == nil { + m.Related = &ObjectReference{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReportingComponent", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29584,111 +33601,12 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.ReportingComponent = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - m.Lun = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29714,29 +33632,8 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.FsType = &s + m.ReportingInstance = &s iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -29759,7 +33656,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EventList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29782,75 +33679,15 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EventList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Driver = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FsType = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29874,37 +33711,16 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} + if m.Metadata == nil { + m.Metadata = 
&k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29928,94 +33744,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Options == nil { - m.Options = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue + m.Items = append(m.Items, &Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -30040,7 +33771,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EventSeries) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30063,17 +33794,37 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30083,25 +33834,28 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DatasetName = &s + if m.LastObservedTime == nil { + m.LastObservedTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30127,7 +33881,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.DatasetUUID = &s + m.State = &s iNdEx = postIndex default: iNdEx = preIndex @@ -30151,7 +33905,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30174,15 +33928,15 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PdName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30208,11 +33962,11 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.PdName 
= &s + m.Component = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30238,49 +33992,8 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.FsType = &s + m.Host = &s iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Partition = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30303,7 +34016,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ExecAction) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30326,75 +34039,15 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Repository = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Revision = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Directory", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30419,8 +34072,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Directory = &s + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -30444,7 +34096,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { +func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30467,15 +34119,15 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30500,12 +34152,31 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Endpoints = &s + m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Lun = &v + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30531,9 +34202,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.FsType = &s iNdEx = postIndex - case 3: + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } @@ -30554,6 +34225,35 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.ReadOnly = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Wwids", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Wwids = append(m.Wwids, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err 
:= skipGenerated(dAtA[iNdEx:]) @@ -30576,7 +34276,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { +func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30599,15 +34299,15 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30633,13 +34333,13 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.Driver = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30649,30 +34349,27 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.FsType = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30682,27 +34379,30 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Host = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} } - var stringLen uint64 + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -30712,25 +34412,16 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Scheme = &s - iNdEx = postIndex + b := bool(v != 0) + m.ReadOnly = &b case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpHeaders", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30754,10 +34445,97 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HttpHeaders = append(m.HttpHeaders, &HTTPHeader{}) - if err := m.HttpHeaders[len(m.HttpHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -30781,7 +34559,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPHeader) Unmarshal(dAtA []byte) error { +func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30804,15 +34582,15 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30838,11 +34616,11 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + m.DatasetName = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30868,7 +34646,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Value = &s + m.DatasetUUID = &s iNdEx = postIndex default: iNdEx = preIndex @@ -30892,7 +34670,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } return nil } -func (m *Handler) Unmarshal(dAtA []byte) error { +func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30915,17 +34693,17 @@ func (m *Handler) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Handler: wiretype end group for non-group") + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PdName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30935,30 +34713,27 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Exec == nil { - m.Exec = &ExecAction{} - } - if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.PdName = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpGet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30968,30 +34743,27 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 
0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.HttpGet == nil { - m.HttpGet = &HTTPGetAction{} - } - if err := m.HttpGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.FsType = &s iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TcpSocket", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31001,25 +34773,33 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TcpSocket == nil { - m.TcpSocket = &TCPSocketAction{} + m.Partition = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - if err := m.TcpSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + b := bool(v != 0) + m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31042,7 +34822,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31065,15 +34845,15 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31099,7 +34879,67 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.Repository = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Revision = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Directory = &s iNdEx = postIndex default: iNdEx = preIndex @@ -31123,7 +34963,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31146,15 +34986,15 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31180,11 +35020,11 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.TargetPortal = &s + m.Endpoints = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Iqn", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31210,13 +35050,13 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Iqn = &s + m.Path = &s iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var v int32 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31226,15 +35066,67 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Lun = &v - case 4: + b := bool(v != 0) + m.ReadOnly = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IscsiInterface", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31260,11 +35152,44 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.IscsiInterface = &s + m.Path = &s iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31290,13 +35215,13 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.FsType = &s + m.Host = &s iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31306,18 +35231,27 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ReadOnly = &b - case 7: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Scheme = &s + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HttpHeaders", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -31327,20 +35261,22 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + m.HttpHeaders = append(m.HttpHeaders, &HTTPHeader{}) + if err := m.HttpHeaders[len(m.HttpHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -31364,7 +35300,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *KeyToPath) Unmarshal(dAtA []byte) error { +func (m *HTTPHeader) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31387,15 +35323,15 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31421,11 +35357,11 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Key = &s + m.Name = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31451,28 +35387,8 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.Value = &s iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31495,7 +35411,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } return nil } -func (m *Lifecycle) Unmarshal(dAtA []byte) error { +func (m *Handler) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31518,15 +35434,15 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + return fmt.Errorf("proto: Handler: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31550,16 +35466,16 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PostStart == nil { - m.PostStart = &Handler{} + if m.Exec == nil { + m.Exec = &ExecAction{} } - if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HttpGet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31583,10 +35499,43 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PreStop == nil { - m.PreStop = &Handler{} + if m.HttpGet == nil { + m.HttpGet = &HTTPGetAction{} } - if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HttpGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TcpSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TcpSocket == nil { + m.TcpSocket = &TCPSocketAction{} + } + if err := m.TcpSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31612,7 +35561,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } return nil } -func (m *LimitRange) Unmarshal(dAtA []byte) error { +func (m *HostAlias) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31635,17 +35584,17 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") + return fmt.Errorf("proto: HostAlias: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31655,30 +35604,27 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := 
m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Ip = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31688,24 +35634,20 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Spec == nil { - m.Spec = &LimitRangeSpec{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -31729,7 +35671,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } return nil } -func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { +func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31752,15 +35694,15 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") + return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31786,13 +35728,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.Path = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31802,34 +35744,78 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) } - var stringLenmapkey uint64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31839,81 +35825,27 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Max == nil { - m.Max = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Max[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Max[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.TargetPortal = &s iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Iqn", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31923,19 +35855,27 @@ func (m *LimitRangeItem) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + s := string(dAtA[iNdEx:postIndex]) + m.Iqn = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31945,12 +35885,17 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + m.Lun = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IscsiInterface", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31960,81 +35905,27 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Min == nil { - m.Min = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Min[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Min[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.IscsiInterface = &s iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32044,34 +35935,27 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << 
shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + s := string(dAtA[iNdEx:postIndex]) + m.FsType = &s + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLenmapkey uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32081,81 +35965,18 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Default == nil { - m.Default = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Default[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Default[mapkey] = mapvalue - } - iNdEx = postIndex - case 5: + b := bool(v != 0) + m.ReadOnly = &b + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32165,34 +35986,26 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChapAuthDiscovery", wireType) } - var stringLenmapkey uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32202,79 +36015,16 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.DefaultRequest == nil { - m.DefaultRequest = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DefaultRequest[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.DefaultRequest[mapkey] = mapvalue - } - iNdEx = postIndex - case 6: + b := bool(v != 0) + m.ChapAuthDiscovery = &b + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32298,7 +36048,18 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChapAuthSession", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32308,12 +36069,18 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + b := bool(v != 0) + m.ChapAuthSession = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ 
-32323,75 +36090,21 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.MaxLimitRequestRatio == nil { - m.MaxLimitRequestRatio = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.MaxLimitRequestRatio[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.MaxLimitRequestRatio[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -32415,7 +36128,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } return nil } -func (m *LimitRangeList) Unmarshal(dAtA []byte) error { +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32438,17 +36151,17 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32458,30 +36171,27 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if 
m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.TargetPortal = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Iqn", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32491,79 +36201,27 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &LimitRange{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Iqn = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32573,79 +36231,17 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limits = append(m.Limits, &LimitRangeItem{}) - if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *List) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: List: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Lun = &v + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IscsiInterface", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32655,30 +36251,27 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.IscsiInterface = &s iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32688,79 +36281,27 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &k8s_io_kubernetes_pkg_runtime.RawExtension{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.FsType = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32770,25 +36311,16 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.LabelSelector = &s - iNdEx = postIndex - case 2: + b := bool(v != 0) + m.ReadOnly = &b + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32813,12 +36345,11 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FieldSelector = &s + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ChapAuthDiscovery", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -32836,12 +36367,12 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.Watch = &b - case 4: + m.ChapAuthDiscovery = &b + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32851,27 +36382,30 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceVersion = &s + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = 
postIndex - case 5: + case 11: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ChapAuthSession", wireType) } - var v int64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32881,12 +36415,43 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v + b := bool(v != 0) + m.ChapAuthSession = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32909,7 +36474,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { +func (m *KeyToPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32932,15 +36497,15 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group") + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32966,11 +36531,11 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Ip = &s + m.Key = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32996,8 +36561,28 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Hostname = &s + m.Path = &s iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33020,7 +36605,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } return nil } -func (m 
*LoadBalancerStatus) Unmarshal(dAtA []byte) error { +func (m *Lifecycle) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33043,15 +36628,15 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33075,8 +36660,43 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, &LoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PostStart == nil { + m.PostStart = &Handler{} + } + if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreStop == nil { + m.PreStop = &Handler{} + } + if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33102,7 +36722,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { +func (m *LimitRange) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33125,17 +36745,17 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") + return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33145,21 +36765,57 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if 
postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &LimitRangeSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -33183,7 +36839,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33206,15 +36862,15 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33240,13 +36896,13 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Server = &s + m.Type = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33256,97 +36912,241 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if m.Max == nil { + m.Max = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Max[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ReadOnly = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Namespace) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Min == nil { + m.Min = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Min[mapkey] = mapvalue + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33370,16 +37170,106 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + if m.Default == nil { + m.Default = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err 
+ var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Default[mapkey] = mapvalue iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33403,16 +37293,106 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Spec == nil { - m.Spec = &NamespaceSpec{} + if m.DefaultRequest == nil { + m.DefaultRequest = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + 
intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.DefaultRequest[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33436,12 +37416,102 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &NamespaceStatus{} + if m.MaxLimitRequestRatio == nil { + m.MaxLimitRequestRatio = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = 
postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.MaxLimitRequestRatio[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -33465,7 +37535,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } return nil } -func (m *NamespaceList) Unmarshal(dAtA []byte) error { +func (m *LimitRangeList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33488,10 +37558,10 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group") + return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -33521,7 +37591,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -33553,7 +37623,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &Namespace{}) + m.Items = append(m.Items, &LimitRange{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -33580,7 +37650,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { +func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33603,17 +37673,17 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group") + return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33623,20 +37693,22 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + m.Limits = append(m.Limits, &LimitRangeItem{}) + if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + 
} iNdEx = postIndex default: iNdEx = preIndex @@ -33660,7 +37732,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { +func (m *List) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33683,17 +37755,17 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group") + return fmt.Errorf("proto: List: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33703,21 +37775,55 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Phase = &s + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -33741,7 +37847,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *Node) Unmarshal(dAtA []byte) error { +func (m *ListOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33764,17 +37870,17 @@ func (m *Node) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Node: wiretype end group for non-group") + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 
{ if shift >= 64 { return ErrIntOverflowGenerated @@ -33784,30 +37890,27 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.LabelSelector = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33817,30 +37920,48 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Spec == nil { - m.Spec = &NodeSpec{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.FieldSelector = &s iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Watch = &b + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33850,25 +37971,63 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &NodeStatus{} + s := string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.TimeoutSeconds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IncludeUninitialized = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33891,7 +38050,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeAddress) Unmarshal(dAtA []byte) error { +func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33914,15 +38073,15 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group") + return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33948,11 +38107,11 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.Ip = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33978,7 +38137,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Address = &s + m.Hostname = &s iNdEx = postIndex default: iNdEx = preIndex @@ -34002,7 +38161,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeAffinity) Unmarshal(dAtA []byte) error { +func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34025,48 +38184,15 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group") + return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { - m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} - 
} - if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34090,8 +38216,8 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, &PreferredSchedulingTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, &LoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34117,7 +38243,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeCondition) Unmarshal(dAtA []byte) error { +func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34140,15 +38266,15 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group") + return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34174,43 +38300,64 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.Name = &s iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Status = &s - iNdEx = postIndex - case 3: + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34220,30 +38367,78 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastHeartbeatTime == nil { - m.LastHeartbeatTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} - } - if err := m.LastHeartbeatTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 4: + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34253,28 +38448,25 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastTransitionTime == nil { - m.LastTransitionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Server = &s iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34300,13 +38492,13 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Reason = &s + m.Path = &s iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34316,22 +38508,13 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Message = &s - iNdEx = postIndex + b := bool(v != 0) + m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34354,7 +38537,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { +func (m *Namespace) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34377,15 +38560,15 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType 
:= int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34409,10 +38592,76 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KubeletEndpoint == nil { - m.KubeletEndpoint = &DaemonEndpoint{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } - if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NamespaceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &NamespaceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34438,7 +38687,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeList) Unmarshal(dAtA []byte) error { +func (m *NamespaceList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34461,10 +38710,10 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeList: wiretype end group for non-group") + return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -34494,7 +38743,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := 
m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -34526,7 +38775,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &Node{}) + m.Items = append(m.Items, &Namespace{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -34553,7 +38802,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { +func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34576,15 +38825,15 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34609,8 +38858,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -34634,7 +38882,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeResources) Unmarshal(dAtA []byte) error { +func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34657,17 +38905,17 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") + return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34677,112 +38925,21 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Capacity == nil { - m.Capacity = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Capacity[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.Phase = &s iNdEx = postIndex default: iNdEx = preIndex @@ -34806,7 +38963,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeSelector) Unmarshal(dAtA []byte) error { +func (m *Node) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34829,15 +38986,15 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + return fmt.Errorf("proto: Node: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34861,8 +39018,76 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NodeSelectorTerms = append(m.NodeSelectorTerms, &NodeSelectorTerm{}) - if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NodeSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &NodeStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34888,7 +39113,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { +func (m *NodeAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34911,15 +39136,15 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34945,11 +39170,11 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Key = &s + m.Type = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34975,36 +39200,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Operator = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + m.Address = &s iNdEx = postIndex default: iNdEx = preIndex @@ -35028,7 +39224,7 @@ func (m 
*NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { +func (m *NodeAffinity) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35051,15 +39247,15 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35083,21 +39279,54 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchExpressions = append(m.MatchExpressions, &NodeSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { + m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} + } + if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) } - if skippy < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, &PreferredSchedulingTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
@@ -35110,7 +39339,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeSpec) Unmarshal(dAtA []byte) error { +func (m *NodeCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35133,15 +39362,15 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35167,11 +39396,11 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.PodCIDR = &s + m.Type = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35197,11 +39426,77 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.ExternalID = &s + m.Status = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastHeartbeatTime == nil { + m.LastHeartbeatTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastHeartbeatTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35227,13 +39522,13 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.ProviderID = &s + m.Reason = 
&s iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35243,16 +39538,76 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Unschedulable = &b - case 5: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeConfigSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35276,8 +39631,10 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Taints = append(m.Taints, &Taint{}) - if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ObjectReference{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35303,7 +39660,765 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeStatus) Unmarshal(dAtA []byte) error { +func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field KubeletEndpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KubeletEndpoint == nil { + m.KubeletEndpoint = &DaemonEndpoint{} + } + if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &Node{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) + } + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeSelectorTerms = append(m.NodeSelectorTerms, &NodeSelectorTerm{}) + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Key = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Operator = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, &NodeSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35326,17 +40441,17 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35346,19 +40461,27 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + s := string(dAtA[iNdEx:postIndex]) + m.PodCIDR = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35368,12 +40491,27 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ExternalID = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35383,79 +40521,46 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Capacity == nil { - m.Capacity = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) + s := string(dAtA[iNdEx:postIndex]) + m.ProviderID = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType) } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postmsgIndex - m.Capacity[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Capacity[mapkey] = mapvalue } - iNdEx = postIndex - case 2: + b := bool(v != 0) + m.Unschedulable = &b + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35479,7 +40584,16 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + m.Taints = append(m.Taints, &Taint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigSource", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35489,12 +40603,81 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigSource == nil { + m.ConfigSource = &NodeConfigSource{} + } + if err := m.ConfigSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35504,26 +40687,26 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Allocatable == nil { - m.Allocatable = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) + if m.Capacity == nil { + m.Capacity = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35533,12 +40716,120 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) + } + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35548,31 +40839,85 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - iNdEx = postmsgIndex - m.Allocatable[mapkey] = mapvalue - } else { - var mapvalue 
*k8s_io_kubernetes_pkg_api_resource.Quantity - m.Allocatable[mapkey] = mapvalue } + m.Allocatable[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -36563,7 +41908,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.CreationTimestamp == nil { - m.CreationTimestamp = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.CreationTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -36596,7 +41941,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.DeletionTimestamp == nil { - m.DeletionTimestamp = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.DeletionTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -36648,51 +41993,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36702,41 +42010,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 12: if wireType != 2 { @@ -36764,51 +42111,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36818,41 +42128,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 13: if wireType != 2 { @@ -36880,7 +42229,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.OwnerReferences = append(m.OwnerReferences, &k8s_io_kubernetes_pkg_apis_meta_v1.OwnerReference{}) + m.OwnerReferences = append(m.OwnerReferences, &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{}) if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -36944,6 +42293,39 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.ClusterName = &s iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Initializers == nil { + m.Initializers = &k8s_io_apimachinery_pkg_apis_meta_v1.Initializers{} + } + if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -37283,7 +42665,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -37433,7 +42815,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err 
@@ -37465,18 +42847,228 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Spec == nil { - m.Spec = &PersistentVolumeClaimSpec{} + if m.Spec == nil { + m.Spec = &PersistentVolumeClaimSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &PersistentVolumeClaimStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Status = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastProbeTime == nil { + m.LastProbeTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -37486,24 +43078,51 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &PersistentVolumeClaimStatus{} + s := string(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s iNdEx = postIndex default: iNdEx = preIndex @@ -37583,7 +43202,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -37790,7 +43409,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) 
error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -37826,6 +43445,36 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.StorageClassName = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -37962,51 +43611,14 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { - m.Capacity = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) + m.Capacity = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38016,45 +43628,115 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return 
io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + } + m.Capacity[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postmsgIndex - m.Capacity[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Capacity[mapkey] = mapvalue + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, &PersistentVolumeClaimCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -38237,7 +43919,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38517,7 +44199,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Rbd == nil { - m.Rbd = &RBDVolumeSource{} + m.Rbd = &RBDPersistentVolumeSource{} } if err := m.Rbd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38550,7 +44232,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Iscsi == nil { - m.Iscsi = &ISCSIVolumeSource{} + m.Iscsi = &ISCSIPersistentVolumeSource{} } if err := m.Iscsi.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38616,7 +44298,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Cephfs == nil { - m.Cephfs = &CephFSVolumeSource{} + m.Cephfs = &CephFSPersistentVolumeSource{} } if err := 
m.Cephfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38748,7 +44430,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.AzureFile == nil { - m.AzureFile = &AzureFileVolumeSource{} + m.AzureFile = &AzureFilePersistentVolumeSource{} } if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38879,16 +44561,115 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PhotonPersistentDisk == nil { - m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOPersistentVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Local == nil { + m.Local = &LocalVolumeSource{} } - if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Local.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Storageos", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -38912,16 +44693,16 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PortworxVolume == nil { - m.PortworxVolume = &PortworxVolumeSource{} + if m.Storageos == nil { + m.Storageos = 
&StorageOSPersistentVolumeSource{} } - if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Storageos.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Csi", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -38945,10 +44726,10 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ScaleIO == nil { - m.ScaleIO = &ScaleIOVolumeSource{} + if m.Csi == nil { + m.Csi = &CSIPersistentVolumeSource{} } - if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Csi.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39029,51 +44810,14 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { - m.Capacity = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) + m.Capacity = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39083,46 +44827,85 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Capacity[mapkey] = mapvalue } + m.Capacity[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -39279,6 +45062,65 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.StorageClassName = &s iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -39609,7 +45451,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -39872,7 +45714,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.LabelSelector = 
&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40353,7 +46195,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastProbeTime == nil { - m.LastProbeTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastProbeTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40386,7 +46228,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastTransitionTime == nil { - m.LastTransitionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40474,6 +46316,257 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &PodDNSConfigOption{}) + if err := 
m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodExecOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -40724,7 +46817,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40931,7 +47024,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -41050,25 +47143,67 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - m.Ports = append(m.Ports, v) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -41276,25 +47411,67 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { b := bool(v != 0) m.RunAsNonRoot = &b case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break + if packedLen < 0 { + return ErrInvalidLengthGenerated } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) } - m.SupplementalGroups = append(m.SupplementalGroups, v) case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FsGroup", wireType) @@ -41393,7 +47570,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.PodController == nil { - m.PodController = &k8s_io_kubernetes_pkg_apis_meta_v1.OwnerReference{} + m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} } if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -41540,83 +47717,13 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.RestartPolicy = &s - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TerminationGracePeriodSeconds = &v - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DnsPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.DnsPolicy = &s + m.RestartPolicy = &s iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) } - var msglen int + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41626,19 +47733,37 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + 
m.TerminationGracePeriodSeconds = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DnsPolicy", wireType) } - var keykey uint64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41648,12 +47773,27 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DnsPolicy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41663,26 +47803,26 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.NodeSelector == nil { m.NodeSelector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41692,41 +47832,80 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.NodeSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.NodeSelector[mapkey] = mapvalue } + m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -42151,6 +48330,120 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostAliases = append(m.HostAliases, &HostAlias{}) + if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PriorityClassName = &s + iNdEx = postIndex + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DnsConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DnsConfig == nil { + m.DnsConfig = &PodDNSConfig{} + } + if err := m.DnsConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42410,7 +48703,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -42586,7 +48879,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -42703,7 +48996,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -42820,7 +49113,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -42935,7 +49228,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -43298,7 +49591,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.EvictionTime == nil { - m.EvictionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.EvictionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.EvictionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -43697,17 +49990,119 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, &VolumeProjection{}) 
+ if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43717,28 +50112,57 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Sources = append(m.Sources, &VolumeProjection{}) - if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Registry = &s iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Volume = &s + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var v int32 + var v 
int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43748,12 +50172,73 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.DefaultMode = &v + b := bool(v != 0) + m.ReadOnly = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.User = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Group = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -43776,7 +50261,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { +func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -43799,15 +50284,15 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: RBDPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RBDPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -43832,12 +50317,11 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Registry = &s + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -43863,13 +50347,13 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Volume = &s + m.Image = &s iNdEx = postIndex case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43879,14 +50363,53 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ReadOnly = &b + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FsType = &s + iNdEx = postIndex case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Pool = &s + iNdEx = postIndex + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } @@ -43916,9 +50439,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.User = &s iNdEx = postIndex - case 5: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -43944,8 +50467,62 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Group = &s + m.Keyring = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44308,7 +50885,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = 
&k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -44453,7 +51030,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -44663,7 +51240,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastTransitionTime == nil { - m.LastTransitionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -44807,7 +51384,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -44941,51 +51518,14 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44995,41 +51535,80 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -45404,7 +51983,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Divisor == nil { - m.Divisor = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.Divisor = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -45488,7 +52067,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -45638,7 +52217,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -45656,24 +52235,524 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &ResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hard == nil { + m.Hard = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) + } + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Hard[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hard == nil { + m.Hard = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) + } + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Hard[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Used == nil { + m.Used = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) + } + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, 
&ResourceQuota{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Used[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -45697,7 +52776,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -45720,15 +52799,15 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -45752,51 +52831,14 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Hard == nil { - m.Hard = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) + if m.Limits == nil { + m.Limits = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45806,52 +52848,91 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := 
iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Hard[mapkey] = mapvalue } + m.Limits[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45861,20 +52942,114 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + if m.Requests == nil { + m.Requests = make(map[string]*k8s_io_apimachinery_pkg_api_resource.Quantity) + } + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_api_resource.Quantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Requests[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -45898,7 +53073,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { +func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -45921,17 +53096,17 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") + return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45941,34 +53116,27 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + s := string(dAtA[iNdEx:postIndex]) + m.User = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) } - var stringLenmapkey uint64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45978,81 +53146,27 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= 
(uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Hard == nil { - m.Hard = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Hard[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.Role = &s iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46062,34 +53176,27 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) } - var stringLenmapkey uint64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46099,75 +53206,21 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + 
postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Used == nil { - m.Used = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Used[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Used[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.Level = &s iNdEx = postIndex default: iNdEx = preIndex @@ -46191,7 +53244,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { +func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -46214,17 +53267,17 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46234,34 +53287,27 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + s := string(dAtA[iNdEx:postIndex]) + m.Gateway = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) } - var stringLenmapkey uint64 + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46271,79 +53317,25 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Limits == nil { - m.Limits = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Limits[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Limits[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.System = &s iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -46367,7 +53359,18 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SslEnabled", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46377,12 +53380,18 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + b := bool(v != 0) + m.SslEnabled = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46392,130 +53401,55 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - 
intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Requests == nil { - m.Requests = make(map[string]*k8s_io_kubernetes_pkg_api_resource.Quantity) + s := string(dAtA[iNdEx:postIndex]) + m.ProtectionDomain = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postmsgIndex - m.Requests[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_api_resource.Quantity - m.Requests[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.StoragePool = &s + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -46541,11 +53475,11 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.User = &s + m.StorageMode = &s iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -46571,11 +53505,11 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Role = &s + m.VolumeName = &s iNdEx = postIndex - case 3: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -46601,13 +53535,13 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.FsType = &s iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46617,22 +53551,13 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Level = &s - iNdEx = postIndex + b := bool(v != 0) + m.ReadOnly = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47047,7 +53972,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -47079,7 +54004,104 @@ func (m 
*Secret) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + if m.Data == nil { + m.Data = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47089,12 +54111,27 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47104,26 +54141,26 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := 
string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Data == nil { - m.Data = make(map[string][]byte) + if m.StringData == nil { + m.StringData = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47133,46 +54170,273 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated + } + m.StringData[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.Data[mapkey] = mapvalue - } else { - var mapvalue []byte - m.Data[mapkey] = mapvalue + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LocalObjectReference == nil { + m.LocalObjectReference = &LocalObjectReference{} + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LocalObjectReference == nil { + m.LocalObjectReference = &LocalObjectReference{} + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -47198,13 +54462,13 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.Key = &s iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47214,19 +54478,69 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - var keykey uint64 + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47236,12 +54550,30 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47251,69 +54583,21 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.StringData == nil { - m.StringData = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.StringData[mapkey] = mapvalue - } else { - var mapvalue string - m.StringData[mapkey] = mapvalue + m.Items = append(m.Items, &Secret{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -47338,7 +54622,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { +func (m *SecretProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -47361,10 +54645,10 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") + return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -47401,6 +54685,37 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } @@ -47443,7 +54758,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { +func (m *SecretReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -47466,17 +54781,17 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") + return fmt.Errorf("proto: SecretReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47486,28 +54801,25 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LocalObjectReference == nil { - m.LocalObjectReference = &LocalObjectReference{} - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - 
} + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -47533,29 +54845,8 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Key = &s + m.Namespace = &s iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47578,7 +54869,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretList) Unmarshal(dAtA []byte) error { +func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -47601,17 +54892,17 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47621,24 +54912,21 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.SecretName = &s iNdEx = postIndex case 2: if wireType != 2 { @@ -47666,11 +54954,52 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &Secret{}) + m.Items = append(m.Items, &KeyToPath{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47693,7 +55022,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretProjection) Unmarshal(dAtA []byte) error { +func (m *SecurityContext) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -47716,15 +55045,15 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") + return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -47748,16 +55077,37 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LocalObjectReference == nil { - m.LocalObjectReference = &LocalObjectReference{} + if m.Capabilities == nil { + m.Capabilities = &Capabilities{} } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Privileged = &b + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SeLinuxOptions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -47781,14 +55131,36 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SeLinuxOptions == nil { + m.SeLinuxOptions = &SELinuxOptions{} + } + if err := m.SeLinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 5: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -47806,7 +55178,49 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.Optional = &b + m.RunAsNonRoot = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnlyRootFilesystem = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47829,7 +55243,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { +func (m *SerializedReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -47852,17 +55266,17 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47872,25 +55286,79 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SecretName = &s + if m.Reference == nil { + m.Reference = &ObjectReference{} + } + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -47914,16 +55382,18 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47933,17 +55403,30 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.DefaultMode = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ServiceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47953,13 +55436,25 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &ServiceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47982,7 +55477,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecurityContext) Unmarshal(dAtA []byte) error { +func (m *ServiceAccount) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48005,15 +55500,15 @@ func 
(m *SecurityContext) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -48037,37 +55532,16 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Capabilities == nil { - m.Capabilities = &Capabilities{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } - if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Privileged = &b - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeLinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -48091,18 +55565,16 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SeLinuxOptions == nil { - m.SeLinuxOptions = &SELinuxOptions{} - } - if err := m.SeLinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Secrets = append(m.Secrets, &ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) } - var v int64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48112,36 +55584,26 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.RunAsUser = &v - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF } - b := bool(v != 0) - m.RunAsNonRoot = &b - case 6: + m.ImagePullSecrets = append(m.ImagePullSecrets, &LocalObjectReference{}) + if err := 
m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -48159,7 +55621,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.ReadOnlyRootFilesystem = &b + m.AutomountServiceAccountToken = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48182,7 +55644,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } return nil } -func (m *SerializedReference) Unmarshal(dAtA []byte) error { +func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48205,15 +55667,15 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -48237,10 +55699,41 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Reference == nil { - m.Reference = &ObjectReference{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } - if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &ServiceAccount{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -48266,7 +55759,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *Service) Unmarshal(dAtA []byte) error { +func (m *ServiceList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48289,10 +55782,10 @@ func (m *Service) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Service: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceList: illegal tag %d 
(wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -48322,7 +55815,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -48330,40 +55823,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &ServiceSpec{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -48387,10 +55847,8 @@ func (m *Service) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &ServiceStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &Service{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -48416,7 +55874,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceAccount) Unmarshal(dAtA []byte) error { +func (m *ServicePort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48439,17 +55897,17 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48459,30 +55917,27 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s iNdEx = 
postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48492,26 +55947,45 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Secrets = append(m.Secrets, &ObjectReference{}) - if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Protocol = &s iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -48535,16 +56009,18 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullSecrets = append(m.ImagePullSecrets, &LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TargetPort == nil { + m.TargetPort = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48554,13 +56030,12 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.AutomountServiceAccountToken = &b + m.NodePort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48583,7 +56058,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { +func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48606,50 +56081,17 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48659,22 +56101,21 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &ServiceAccount{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex default: iNdEx = preIndex @@ -48698,7 +56139,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceList) Unmarshal(dAtA []byte) error { +func (m *ServiceSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48721,17 +56162,255 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &ServicePort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ClusterIP = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48741,30 +56420,27 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.SessionAffinity = &s iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48774,77 +56450,54 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &Service{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.LoadBalancerIP = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServicePort) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -48870,11 +56523,11 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + m.ExternalName = &s iNdEx = postIndex - case 2: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -48900,11 +56553,11 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Protocol = &s + m.ExternalTrafficPolicy = &s iNdEx = postIndex - case 3: + case 12: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -48921,12 +56574,12 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { break } } - m.Port = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + m.HealthCheckNodePort = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48936,30 +56589,18 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetPort == nil { - m.TargetPort = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} - } - if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) + b := bool(v != 0) + m.PublishNotReadyAddresses = &b + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) } - 
var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48969,12 +56610,25 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.NodePort = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SessionAffinityConfig == nil { + m.SessionAffinityConfig = &SessionAffinityConfig{} + } + if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48997,7 +56651,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { +func (m *ServiceStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -49020,17 +56674,17 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -49040,21 +56694,24 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + if m.LoadBalancer == nil { + m.LoadBalancer = &LoadBalancerStatus{} + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -49078,7 +56735,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceSpec) Unmarshal(dAtA []byte) error { +func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -49101,15 +56758,15 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + return fmt.Errorf("proto: SessionAffinityConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SessionAffinityConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
ClientIP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -49133,53 +56790,69 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, &ServicePort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ClientIP == nil { + m.ClientIP = &ClientIPConfig{} + } + if err := m.ClientIP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - var stringLenmapkey uint64 + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -49189,74 +56862,25 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Selector == nil { - m.Selector = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue - } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeName = &s iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49282,11 +56906,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.ClusterIP = &s + m.VolumeNamespace = &s iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49312,13 +56936,13 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.FsType = &s iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -49328,26 +56952,18 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: + b := bool(v != 0) + m.ReadOnly = &b + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -49357,24 +56973,79 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(dAtA[iNdEx:postIndex])) + if m.SecretRef == nil { + m.SecretRef = &ObjectReference{} + } + if err := 
m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49400,11 +57071,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.SessionAffinity = &s + m.VolumeName = &s iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49430,11 +57101,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.LoadBalancerIP = &s + m.VolumeNamespace = &s iNdEx = postIndex - case 9: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49459,13 +57130,14 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.FsType = &s iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -49475,76 +57147,16 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ExternalName = &s - iNdEx = postIndex - default: - 
iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + b := bool(v != 0) + m.ReadOnly = &b + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -49568,10 +57180,10 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LoadBalancer == nil { - m.LoadBalancer = &LoadBalancerStatus{} + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -49764,12 +57376,42 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Host = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -49938,7 +57580,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.TimeAdded == nil { - m.TimeAdded = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -50271,6 +57913,117 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DevicePath = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeMount) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -50411,6 +58164,36 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.SubPath = &s iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.MountPropagation = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51470,6 +59253,39 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storageos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Storageos == nil { + m.Storageos = &StorageOSVolumeSource{} + } + if err := m.Storageos.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51581,6 +59397,66 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.FsType = &s iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StoragePolicyName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StoragePolicyID = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51812,520 +59688,583 @@ var ( ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/api/v1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 8157 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x8c, 0xe4, 0xd6, - 0x75, 0xe0, 0xb2, 0xaa, 0xba, 0xab, 0xeb, 0xf4, 0xfb, 0x76, 0xcf, 0xa8, 0xd4, 0x96, 0x07, 0x63, - 0x5a, 0x92, 0x47, 0xf2, 0xa8, 0x47, 0x33, 0x96, 0xf5, 0xb6, 0x46, 0x3d, 0xfd, 0x98, 0x69, 0xcd, - 0x4c, 0x4f, 0x89, 0xdd, 0x33, 0x63, 0x5b, 0xda, 0xf5, 0xb2, 0xc9, 0xdb, 0xd5, 0xdc, 0x66, 0x91, - 0x14, 0xc9, 0xea, 0x99, 0x16, 0xf6, 0x63, 0xd7, 0xc6, 0x7a, 0x9f, 0x58, 0x7b, 0xbd, 0xb1, 0x21, - 0x07, 0x46, 0xac, 0x24, 0x8e, 0x81, 0xc4, 0x06, 0xe2, 0x00, 0xf9, 0xcd, 0x87, 0x11, 0x24, 0xce, - 0x13, 0xb1, 0x3f, 0x82, 0x38, 0xc8, 0x47, 0x12, 0x07, 0xf0, 0x8f, 0x03, 0x04, 0x41, 0x82, 0x7c, - 0xe4, 0x01, 0x04, 0xf7, 0x45, 0xde, 0x4b, 0xb2, 0xba, 0x58, 0xa5, 0x6a, 0x49, 0x09, 0xf2, 0xc7, - 0x7b, 0xc8, 0x73, 0x78, 0xee, 0xeb, 0xdc, 0x73, 0xce, 0x3d, 0xf7, 0x5c, 0x38, 0x7f, 0xf0, 0x6c, - 0xb4, 0xec, 0xf8, 0x17, 0x0e, 0xba, 0xbb, 0x38, 0xf4, 0x70, 0x8c, 0xa3, 0x0b, 0xc1, 0x41, 0xfb, - 0x82, 0x19, 0x38, 0x17, 0x0e, 0x2f, 0x5e, 0x68, 0x63, 0x0f, 0x87, 0x66, 0x8c, 0xed, 0xe5, 0x20, - 0xf4, 0x63, 0x1f, 0x3d, 0xc4, 0xbe, 0x5e, 0x4e, 0xbf, 0x5e, 0x0e, 0x0e, 0xda, 0xcb, 0x66, 0xe0, - 0x2c, 0x1f, 0x5e, 0x5c, 0xba, 0xd4, 0x9b, 0x56, 0x88, 0x23, 0xbf, 0x1b, 0x5a, 0x38, 0x4b, 0xf1, - 0x18, 0x9c, 0xe8, 0x42, 0x07, 0xc7, 0x66, 0x01, 0x17, 0x4b, 0x4f, 0x14, 0xe3, 0x84, 0x5d, 0x2f, - 0x76, 0x3a, 0xf9, 0x5f, 0x3c, 0x75, 0xfc, 0xe7, 0x91, 0xb5, 0x8f, 0x3b, 0x66, 0x0e, 0xeb, 0x62, - 0x31, 0x56, 0x37, 0x76, 0xdc, 0x0b, 0x8e, 0x17, 0x47, 0x71, 0x98, 0x45, 0xd1, 0xbf, 0xa8, 0xc1, - 0xd9, 0x95, 0xbb, 0xdb, 0xeb, 0xae, 0x19, 0xc5, 0x8e, 0x75, 0xc5, 0xf5, 0xad, 0x83, 0xed, 0xd8, - 0x0f, 0xf1, 0x1d, 0xdf, 0xed, 0x76, 0xf0, 0x36, 0x6d, 0x00, 0xb4, 0x04, 0x13, 0x87, 0xb4, 0xbc, - 0xb9, 0xd6, 0xd4, 0xce, 0x6a, 0xe7, 0x1a, 0x46, 0x52, 0x46, 0xa7, 0x61, 0x7c, 0x2f, 0xda, 0x39, - 0x0a, 0x70, 0xb3, 0x42, 0xdf, 0xf0, 0x12, 0x7a, 0x08, 0x1a, 0x81, 0x19, 0xc6, 0x4e, 0xec, 0xf8, - 0x5e, 0xb3, 0x7a, 0x56, 0x3b, 0x37, 0x66, 0xa4, 0x00, 0x42, 0x31, 0xc4, 0xa6, 0x7d, 0xcb, 0x73, - 0x8f, 0x9a, 0xb5, 0xb3, 0xda, 0xb9, 0x09, 0x23, 0x29, 0xeb, 0xff, 0xa5, 0x02, 0x13, 0x2b, 0x7b, - 0x7b, 0x8e, 0xe7, 0xc4, 0x47, 0x68, 0x0b, 0xa6, 0x3c, 0xdf, 0xc6, 0xa2, 0x4c, 0x7f, 0x3f, 0x79, - 0xe9, 0xf1, 0xe5, 0xe3, 0x3a, 0x75, 0x79, 0x4b, 0xc2, 0x30, 0x14, 0x7c, 0x74, 0x1d, 0x26, 0x03, - 0xdf, 0x4e, 0xc8, 0x55, 0x28, 0xb9, 0xc7, 0x8e, 0x27, 0xd7, 0x4a, 0x11, 0x0c, 0x19, 0x1b, 0xdd, - 0x85, 0x59, 0x52, 0xf4, 0x62, 0x27, 0x21, 0x58, 0xa5, 0x04, 0x9f, 0xe8, 0x4f, 0x50, 0x42, 0x32, - 0xb2, 0x54, 0xf4, 0x35, 0x98, 0x59, 0x89, 0x63, 0xd3, 0xda, 0xc7, 0x36, 0xeb, 0x08, 0x84, 0xa0, - 0xe6, 0x99, 0x1d, 0xcc, 0x9b, 0x9f, 0x3e, 0xa3, 0x33, 0x00, 0x36, 0x3e, 0x74, 0x2c, 0xdc, 0x32, - 0xe3, 0x7d, 0xde, 0xfc, 0x12, 0x44, 0x77, 0xa0, 0xb1, 0x72, 0xe8, 0x3b, 0x76, 0xcb, 0xb7, 0x23, - 0xf4, 0x3a, 0xcc, 0x06, 0x21, 0xde, 0xc3, 0x61, 0x02, 0x6a, 0x6a, 0x67, 0xab, 0xe7, 0x26, 0x2f, - 0x5d, 0xea, 0xc3, 0xab, 0x8a, 0xb4, 0xee, 0xc5, 0x21, 0x61, 0x58, 0x85, 0xea, 0x3f, 0xaf, 0xc1, - 0xa9, 0x95, 0x37, 0xbb, 0x21, 0x5e, 0x73, 0xa2, 0x83, 0xec, 0xd8, 0xb1, 0x9d, 0xe8, 0x60, 0x2b, - 0x65, 0x3e, 0x29, 0xa3, 0x26, 0xd4, 0xc9, 0xf3, 0x6d, 0x63, 0x93, 0x73, 
0x2f, 0x8a, 0xe8, 0x2c, - 0x4c, 0x5a, 0xa6, 0xb5, 0xef, 0x78, 0xed, 0x9b, 0xbe, 0x8d, 0x69, 0xab, 0x36, 0x0c, 0x19, 0x24, - 0x8d, 0xbb, 0x9a, 0x32, 0xee, 0xe4, 0x91, 0x35, 0x96, 0x19, 0x59, 0x6f, 0x70, 0x26, 0x37, 0x1c, - 0x57, 0x1d, 0xe0, 0x67, 0x00, 0x22, 0x6c, 0x85, 0x38, 0x96, 0xd8, 0x94, 0x20, 0x64, 0x30, 0x47, - 0xfb, 0x66, 0x88, 0xe9, 0x6b, 0xc6, 0x6a, 0x0a, 0x50, 0x7e, 0x59, 0xcd, 0xfc, 0xf2, 0x6b, 0x1a, - 0xd4, 0xaf, 0x38, 0x9e, 0xed, 0x78, 0x6d, 0xf4, 0x0a, 0x4c, 0x10, 0xf1, 0x60, 0x9b, 0xb1, 0xc9, - 0xc7, 0xf1, 0x72, 0xef, 0xb6, 0x8f, 0x96, 0xc9, 0xb7, 0xa4, 0x07, 0x6e, 0xed, 0xfe, 0x27, 0x6c, - 0xc5, 0x37, 0x71, 0x6c, 0x1a, 0x09, 0x3e, 0x5a, 0x87, 0xf1, 0xd8, 0x0c, 0xdb, 0x38, 0xe6, 0x43, - 0xb8, 0xcf, 0x88, 0x63, 0x34, 0x0c, 0xd2, 0x69, 0xd8, 0xb3, 0xb0, 0xc1, 0x91, 0xf5, 0xa7, 0x60, - 0x6a, 0xd5, 0x0c, 0xcc, 0x5d, 0xc7, 0x75, 0x62, 0x07, 0x47, 0x68, 0x0e, 0xaa, 0xa6, 0x6d, 0xd3, - 0x91, 0xd1, 0x30, 0xc8, 0x23, 0x19, 0x78, 0x76, 0xe8, 0x07, 0xcd, 0x0a, 0x05, 0xd1, 0x67, 0xfd, - 0xc7, 0x1a, 0xa0, 0x55, 0x1c, 0xec, 0x6f, 0x6c, 0x67, 0xbb, 0xba, 0xe3, 0x7b, 0x4e, 0xec, 0x87, - 0x11, 0xa7, 0x90, 0x94, 0x09, 0x99, 0x20, 0x1d, 0xa5, 0xf4, 0x99, 0xc0, 0xba, 0x11, 0x0e, 0x79, - 0xef, 0xd2, 0xe7, 0xb4, 0x27, 0x48, 0x1f, 0xf1, 0xae, 0x95, 0x20, 0xa8, 0x05, 0x0d, 0x56, 0x32, - 0xf0, 0x1e, 0xed, 0xdf, 0xbe, 0x03, 0xf8, 0x86, 0x6f, 0x99, 0x6e, 0xb6, 0xfe, 0x29, 0x11, 0xa5, - 0xf7, 0xc6, 0x33, 0xbd, 0x67, 0x03, 0x5a, 0x75, 0x3c, 0x1b, 0x87, 0xef, 0x58, 0x1c, 0x1e, 0x37, - 0x46, 0x02, 0x40, 0xab, 0x7e, 0x27, 0xf0, 0x3d, 0xec, 0xc5, 0xab, 0xbe, 0x67, 0x33, 0x11, 0x89, - 0xa0, 0x16, 0x13, 0x3a, 0x7c, 0xc6, 0x93, 0x67, 0x42, 0x3d, 0x8a, 0xcd, 0xb8, 0x1b, 0x09, 0xea, - 0xac, 0x44, 0x26, 0x52, 0x07, 0x47, 0x91, 0xd9, 0x16, 0x53, 0x45, 0x14, 0xd1, 0x22, 0x8c, 0xe1, - 0x30, 0xf4, 0x43, 0xde, 0x94, 0xac, 0xa0, 0xff, 0xb2, 0x06, 0xb3, 0xc9, 0x2f, 0xb7, 0x19, 0x8d, - 0x51, 0x8e, 0xce, 0x16, 0x80, 0x25, 0x2a, 0x12, 0xd1, 0xa1, 0x33, 0x79, 0xe9, 0xc9, 0xe3, 0xbb, - 0x29, 0xdf, 0x02, 0x86, 0x44, 0x43, 0xff, 0x86, 0x06, 0x0b, 0x19, 0x8e, 0x6f, 0x38, 0x51, 0x8c, - 0xae, 0xe5, 0xb8, 0x3e, 0x5f, 0x86, 0x6b, 0x82, 0x9b, 0xe1, 0x79, 0x15, 0xc6, 0x9c, 0x18, 0x77, - 0x04, 0xbb, 0x4f, 0x94, 0x64, 0x97, 0xf1, 0x62, 0x30, 0x5c, 0xfd, 0x8f, 0x34, 0x68, 0xac, 0xfa, - 0xde, 0x9e, 0xd3, 0xbe, 0x69, 0x06, 0x23, 0x9e, 0xf0, 0x35, 0x4a, 0x87, 0x71, 0x77, 0xb1, 0x1f, - 0x77, 0x9c, 0x85, 0xe5, 0x35, 0x33, 0x36, 0x99, 0xcc, 0xa6, 0xe8, 0x4b, 0xcf, 0x40, 0x23, 0x01, - 0x91, 0xd9, 0x7e, 0x80, 0x8f, 0xf8, 0x08, 0x23, 0x8f, 0x64, 0xb8, 0x1c, 0x9a, 0x6e, 0x57, 0x8c, - 0x5e, 0x56, 0x78, 0xbe, 0xf2, 0xac, 0xa6, 0xbf, 0x45, 0xe6, 0xbc, 0x20, 0xbb, 0xee, 0x1d, 0xf2, - 0xb9, 0xb0, 0x07, 0x8b, 0x6e, 0xc1, 0x04, 0xe3, 0xd5, 0x1d, 0x66, 0x6a, 0x16, 0xd2, 0x23, 0xf3, - 0xc7, 0x0f, 0xc8, 0x50, 0x30, 0x5d, 0xca, 0xdb, 0x84, 0x91, 0x94, 0xf5, 0x6f, 0x6b, 0xb0, 0x98, - 0xb0, 0x76, 0x1d, 0x1f, 0x6d, 0x63, 0x17, 0x5b, 0xb1, 0x1f, 0xbe, 0x6b, 0xcc, 0xf1, 0x76, 0xac, - 0xa4, 0xed, 0x28, 0xb3, 0x5b, 0xcd, 0xb0, 0xfb, 0x96, 0x06, 0xd3, 0x09, 0xbb, 0x23, 0x1e, 0xc4, - 0x9f, 0x50, 0x07, 0xf1, 0x47, 0x4a, 0x0e, 0x13, 0x31, 0x7c, 0x7f, 0x48, 0x67, 0x19, 0x07, 0xb6, - 0x42, 0x9f, 0x54, 0x93, 0xc8, 0xa2, 0x77, 0xab, 0x21, 0x07, 0x63, 0xff, 0x3a, 0x3e, 0xda, 0xf1, - 0x89, 0xa6, 0xc3, 0xd9, 0x57, 0x5a, 0xbd, 0x96, 0x69, 0xf5, 0x7f, 0xd4, 0xe0, 0x54, 0x52, 0x35, - 0x45, 0x9c, 0xff, 0x0b, 0xa9, 0xdc, 0x59, 0x98, 0xb4, 0xf1, 0x9e, 0xd9, 0x75, 0xe3, 0x44, 0x25, - 0x1a, 0x33, 0x64, 0xd0, 0xb1, 0xd5, 0xff, 0xec, 0x04, 0x15, 0x4c, 0xb1, 0xe9, 0x78, 0x38, 0x2c, - 
0xd4, 0x26, 0x17, 0x61, 0xcc, 0xe9, 0x90, 0x15, 0x84, 0x4f, 0x7d, 0x5a, 0x20, 0x2b, 0x8b, 0xe5, - 0x77, 0x3a, 0xa6, 0x67, 0x37, 0xab, 0x74, 0x49, 0x17, 0x45, 0x42, 0xc3, 0x0c, 0xdb, 0x51, 0xb3, - 0xc6, 0x14, 0x03, 0xf2, 0x4c, 0x56, 0xef, 0x7b, 0x7e, 0x78, 0xe0, 0x78, 0xed, 0x35, 0x27, 0xa4, - 0xcb, 0x73, 0xc3, 0x90, 0x20, 0x68, 0x05, 0xc6, 0x02, 0x3f, 0x8c, 0xa3, 0xe6, 0x38, 0x6d, 0x82, - 0x8f, 0xf6, 0x1d, 0x9e, 0x8c, 0xdf, 0x96, 0x1f, 0xc6, 0x06, 0xc3, 0x44, 0x4f, 0x43, 0x15, 0x7b, - 0x87, 0xcd, 0x3a, 0x25, 0xf0, 0xf0, 0xf1, 0x04, 0xd6, 0xbd, 0xc3, 0x3b, 0x66, 0x68, 0x10, 0x04, - 0xa2, 0x38, 0x08, 0x83, 0x2e, 0x6a, 0x4e, 0x94, 0xe9, 0x5a, 0x83, 0x7f, 0x6e, 0xe0, 0x37, 0xba, - 0x4e, 0x88, 0x3b, 0xd8, 0x8b, 0x23, 0x23, 0x25, 0x82, 0x6e, 0xc2, 0x14, 0x5b, 0xf6, 0x6f, 0xfa, - 0x5d, 0x2f, 0x8e, 0x9a, 0x0d, 0xca, 0x52, 0x1f, 0x5b, 0xe2, 0x4e, 0x8a, 0x61, 0x28, 0xe8, 0x68, - 0x13, 0xa6, 0x5d, 0xe7, 0x10, 0x7b, 0x38, 0x8a, 0x5a, 0xa1, 0xbf, 0x8b, 0x9b, 0x40, 0x99, 0xfc, - 0x70, 0x3f, 0xf5, 0xdc, 0xdf, 0xc5, 0x86, 0x8a, 0x89, 0xae, 0xc3, 0x0c, 0x51, 0x2e, 0x9c, 0x94, - 0xd6, 0x64, 0x79, 0x5a, 0x19, 0x54, 0xb4, 0x0e, 0x0d, 0xd7, 0xd9, 0xc3, 0xd6, 0x91, 0xe5, 0xe2, - 0xe6, 0x14, 0xa5, 0xd3, 0x67, 0xe8, 0xde, 0x10, 0x9f, 0x1b, 0x29, 0x26, 0x7a, 0x1a, 0x4e, 0xc7, - 0x38, 0xec, 0x38, 0x9e, 0x49, 0x46, 0xe4, 0x4d, 0xa6, 0x9e, 0x50, 0xc3, 0x65, 0x9a, 0x0e, 0x93, - 0x1e, 0x6f, 0xd1, 0x39, 0x98, 0xa5, 0x23, 0xb1, 0xd5, 0x75, 0xdd, 0x96, 0xef, 0x3a, 0xd6, 0x51, - 0x73, 0x86, 0x22, 0x64, 0xc1, 0xc4, 0x1a, 0x8b, 0xb0, 0xd5, 0x0d, 0x9d, 0xf8, 0x88, 0x8c, 0x1c, - 0x7c, 0x3f, 0x6e, 0xce, 0x96, 0xd1, 0x8d, 0xb7, 0x55, 0x24, 0x23, 0x4b, 0x85, 0xcc, 0x8c, 0x28, - 0xb6, 0x1d, 0xaf, 0x39, 0x47, 0x27, 0x15, 0x2b, 0x50, 0x9b, 0x80, 0x3c, 0xdc, 0x22, 0xb2, 0x62, - 0x9e, 0xbe, 0x49, 0x01, 0x64, 0x49, 0x88, 0xe3, 0xa3, 0x26, 0xa2, 0x70, 0xf2, 0x88, 0xd6, 0xa1, - 0x8e, 0xbd, 0xc3, 0x8d, 0xd0, 0xef, 0x34, 0x17, 0xca, 0x8c, 0xfe, 0x75, 0xf6, 0x31, 0x13, 0x52, - 0x86, 0xc0, 0x45, 0xcf, 0x43, 0xb3, 0xa0, 0xa5, 0x58, 0xc3, 0x2c, 0xd2, 0x86, 0xe9, 0xf9, 0x9e, - 0x98, 0x95, 0xc9, 0x9c, 0xda, 0xec, 0x70, 0xf5, 0x90, 0x4c, 0x7e, 0xa1, 0xaf, 0xb3, 0x02, 0xad, - 0x9a, 0xf3, 0x26, 0xbe, 0x72, 0x14, 0x63, 0xa6, 0x69, 0x56, 0x8d, 0x14, 0xa0, 0x7f, 0x95, 0xad, - 0x5f, 0xe9, 0xd4, 0x2c, 0x14, 0x27, 0x4b, 0x30, 0xb1, 0xef, 0x47, 0x31, 0x79, 0x4f, 0x49, 0x8c, - 0x19, 0x49, 0x19, 0x3d, 0x0c, 0xd3, 0x96, 0x4c, 0x80, 0x0b, 0x33, 0x15, 0x48, 0x28, 0x50, 0x1f, - 0x85, 0xe5, 0xbb, 0x5c, 0x7b, 0x4d, 0xca, 0x44, 0x11, 0x26, 0xd4, 0x36, 0x5b, 0x5c, 0xc8, 0xf0, - 0x92, 0xfe, 0xf9, 0x8a, 0x54, 0x45, 0xa2, 0x9a, 0x61, 0x74, 0x13, 0xea, 0xf7, 0x4c, 0x27, 0x76, - 0xbc, 0x36, 0x97, 0xe8, 0x1f, 0x2b, 0x29, 0x75, 0x28, 0xfa, 0x5d, 0x86, 0x6a, 0x08, 0x1a, 0x84, - 0x5c, 0xd8, 0xf5, 0x3c, 0x42, 0xae, 0x32, 0x38, 0x39, 0x83, 0xa1, 0x1a, 0x82, 0x06, 0xba, 0x03, - 0x20, 0xba, 0x0b, 0xdb, 0xdc, 0x7b, 0xf0, 0xf4, 0x20, 0x14, 0x77, 0x12, 0x6c, 0x43, 0xa2, 0xa4, - 0x7f, 0x86, 0xae, 0x76, 0xf9, 0x3f, 0xa3, 0x0d, 0x32, 0x6c, 0xcd, 0x30, 0xc6, 0xf6, 0x4a, 0xcc, - 0x1b, 0xe4, 0x5c, 0x19, 0x65, 0x63, 0xc7, 0xe9, 0x10, 0xb3, 0x49, 0xa0, 0xea, 0xbf, 0x5a, 0x81, - 0x66, 0x2f, 0x4e, 0x48, 0xd7, 0xe1, 0xfb, 0x4e, 0xbc, 0x4a, 0x16, 0x2a, 0x8d, 0x75, 0xbe, 0x28, - 0x53, 0x1b, 0xc6, 0x69, 0x0b, 0x3d, 0x6e, 0xcc, 0xe0, 0x25, 0x02, 0x0f, 0xb1, 0x19, 0x71, 0x6f, - 0x51, 0xc3, 0xe0, 0x25, 0xd9, 0xb6, 0xa9, 0xa9, 0xb6, 0x8d, 0x52, 0x95, 0xb1, 0xa1, 0xab, 0x82, - 0xae, 0x01, 0xec, 0x39, 0x9e, 0x13, 0xed, 0x53, 0x42, 0xe3, 0x03, 0x12, 0x92, 0x70, 0xa9, 0xdb, - 0x22, 0x99, 0x60, 0x6b, 
0xcd, 0x3a, 0x77, 0x5b, 0xa4, 0x20, 0x7d, 0x33, 0xdb, 0x2f, 0x7c, 0x80, - 0x49, 0xd5, 0xd7, 0x7a, 0x55, 0xbf, 0xa2, 0x54, 0x5f, 0xff, 0x6e, 0x85, 0x18, 0x71, 0x12, 0xad, - 0x6e, 0x54, 0x38, 0x13, 0xaf, 0x10, 0xf1, 0x65, 0xc6, 0x98, 0x8f, 0xd7, 0xf3, 0x03, 0x8d, 0x57, - 0x86, 0x8a, 0x5e, 0x81, 0x86, 0x6b, 0x46, 0xd4, 0xd8, 0xc1, 0x7c, 0x94, 0x0e, 0x46, 0x27, 0x45, - 0x27, 0x32, 0x87, 0x2c, 0x31, 0xc2, 0xf1, 0xc7, 0x0a, 0x48, 0x87, 0xa9, 0x10, 0xd3, 0x3e, 0x59, - 0x25, 0xeb, 0x21, 0xed, 0xcf, 0x31, 0x43, 0x81, 0xa5, 0x2a, 0xca, 0x78, 0x46, 0x45, 0xa1, 0x0f, - 0x49, 0x83, 0x8b, 0x62, 0xb6, 0x3b, 0x26, 0xf2, 0xdd, 0xf1, 0x30, 0xcc, 0xac, 0x99, 0xb8, 0xe3, - 0x7b, 0xeb, 0x9e, 0x1d, 0xf8, 0x8e, 0x47, 0x65, 0x19, 0x15, 0x49, 0x6c, 0xd8, 0xd2, 0x67, 0xfd, - 0x2f, 0x35, 0x98, 0x5e, 0xc3, 0x2e, 0x8e, 0xf1, 0x2d, 0xaa, 0x4f, 0x45, 0x68, 0x19, 0x50, 0x3b, - 0x34, 0x2d, 0xdc, 0xc2, 0xa1, 0xe3, 0xdb, 0xdb, 0x98, 0x58, 0xaa, 0x11, 0xc5, 0xa9, 0x1a, 0x05, - 0x6f, 0xd0, 0xab, 0x30, 0x1d, 0x84, 0x58, 0xb1, 0x89, 0xb5, 0xfe, 0x4b, 0x40, 0x4b, 0x46, 0x31, - 0x54, 0x0a, 0xe8, 0x71, 0x98, 0xf3, 0xc3, 0x60, 0xdf, 0xf4, 0xd6, 0x70, 0x80, 0x3d, 0x9b, 0x68, - 0x27, 0xdc, 0xd4, 0xc8, 0xc1, 0xd1, 0x79, 0x98, 0x0f, 0x42, 0x3f, 0x30, 0xdb, 0x74, 0x51, 0xe0, - 0xab, 0x05, 0x9b, 0x4d, 0xf9, 0x17, 0xfa, 0x2e, 0x9c, 0x5a, 0xf3, 0xef, 0x79, 0xf7, 0xcc, 0xd0, - 0x5e, 0x69, 0x6d, 0x4a, 0x66, 0xc0, 0xa6, 0xd0, 0x60, 0x99, 0xe7, 0xb0, 0x8f, 0xe4, 0x93, 0x68, - 0x30, 0xad, 0x67, 0xc3, 0x71, 0xb1, 0xb0, 0x34, 0xfe, 0x4a, 0x53, 0x7e, 0x92, 0x7e, 0x90, 0x78, - 0x8a, 0x34, 0xc9, 0x53, 0x74, 0x13, 0x26, 0xf6, 0x1c, 0xec, 0xda, 0x06, 0xde, 0xe3, 0x2d, 0x77, - 0xb1, 0x8c, 0xbf, 0x6b, 0x83, 0xe0, 0x08, 0x6b, 0xd0, 0x48, 0x48, 0xa0, 0xcf, 0xc0, 0x9c, 0x50, - 0xe3, 0x36, 0x04, 0xd9, 0x6a, 0x19, 0x61, 0x6e, 0xc8, 0x58, 0x09, 0xe1, 0x1c, 0x31, 0x52, 0x87, - 0x0e, 0x91, 0x7d, 0x35, 0x36, 0x88, 0xc8, 0xb3, 0xfe, 0x79, 0x0d, 0x1e, 0xc8, 0xd5, 0x98, 0x9b, - 0x20, 0xa3, 0x6b, 0xd8, 0xac, 0x99, 0x50, 0xc9, 0x99, 0x09, 0xfa, 0x32, 0x2c, 0xae, 0x77, 0x82, - 0xf8, 0x68, 0xcd, 0x51, 0xdd, 0x5a, 0xa7, 0x61, 0xbc, 0x83, 0x6d, 0xa7, 0xdb, 0x11, 0x12, 0x88, - 0x95, 0xf4, 0x6f, 0x6a, 0x30, 0x2b, 0xa6, 0xc7, 0x8a, 0x6d, 0x87, 0x38, 0x8a, 0xd0, 0x0c, 0x54, - 0x9c, 0x80, 0x7f, 0x57, 0x71, 0x02, 0x74, 0x1d, 0x1a, 0xcc, 0xa3, 0x98, 0xf6, 0xd0, 0x80, 0x1e, - 0xc9, 0x14, 0x5f, 0xa8, 0x0e, 0x54, 0x90, 0xb1, 0xb5, 0x20, 0x29, 0x93, 0x77, 0x9e, 0x6f, 0x33, - 0x47, 0x2c, 0x57, 0x0a, 0x44, 0x59, 0x37, 0x60, 0x4a, 0xf0, 0xd9, 0x53, 0x2d, 0x21, 0xa3, 0x2b, - 0x55, 0x49, 0xe8, 0xb3, 0xa2, 0x68, 0x54, 0x55, 0x45, 0x83, 0x98, 0x8d, 0x33, 0x82, 0xe8, 0x76, - 0x77, 0x37, 0xc2, 0x31, 0xa9, 0xab, 0xc9, 0x9a, 0x01, 0x8b, 0x0e, 0x7b, 0xa2, 0x9f, 0x2a, 0xa7, - 0xb4, 0x9e, 0x91, 0xe2, 0xa3, 0xd7, 0x60, 0xde, 0xf3, 0x63, 0x83, 0x88, 0xc0, 0x95, 0x84, 0x68, - 0x65, 0x18, 0xa2, 0x79, 0x3a, 0xe8, 0x65, 0x61, 0x6e, 0x55, 0x29, 0xc1, 0xc7, 0xcb, 0x11, 0x94, - 0xac, 0x2d, 0xfd, 0xeb, 0x1a, 0x34, 0x04, 0x7c, 0xb4, 0x2e, 0xc2, 0x0d, 0xa8, 0x47, 0xb4, 0x3d, - 0x45, 0x75, 0xcf, 0x97, 0xe3, 0x8e, 0x75, 0x82, 0x21, 0x90, 0xa9, 0x37, 0x25, 0xe1, 0xf0, 0x3d, - 0xf5, 0xa6, 0x24, 0x5c, 0x08, 0x19, 0xf7, 0x3b, 0x94, 0x35, 0x49, 0x8b, 0x27, 0x53, 0x2c, 0x08, - 0xf1, 0x9e, 0x73, 0x5f, 0x4c, 0x31, 0x56, 0x42, 0x3b, 0x30, 0x65, 0x25, 0xbe, 0x98, 0x64, 0x06, - 0x3d, 0x59, 0xd2, 0x7b, 0x93, 0x78, 0xe3, 0x0c, 0x85, 0x0a, 0x19, 0xa8, 0xa9, 0xaf, 0xbc, 0x5a, - 0xd2, 0x14, 0x0a, 0x71, 0x9c, 0xd2, 0x4b, 0xf1, 0xf5, 0xff, 0x0c, 0xe3, 0xcc, 0x9c, 0xee, 0xe5, - 0x3c, 0xc8, 0xfb, 0x0d, 0xd1, 0x35, 0x68, 0xd0, 
0x07, 0x6a, 0xf4, 0x54, 0xcb, 0xec, 0xdc, 0xb1, - 0x5f, 0x88, 0xbf, 0x27, 0xc8, 0xfa, 0x5f, 0x57, 0xc8, 0xdc, 0x4e, 0xdf, 0x29, 0x2b, 0x82, 0x76, - 0x32, 0x2b, 0x42, 0x65, 0x94, 0x2b, 0xc2, 0xeb, 0x30, 0x6b, 0x49, 0x2e, 0xca, 0xb4, 0x47, 0x2e, - 0x95, 0xec, 0x64, 0xc9, 0xaf, 0x69, 0x64, 0x49, 0xa1, 0x6d, 0x98, 0x62, 0x3d, 0xc5, 0x49, 0xd7, - 0x28, 0xe9, 0x0b, 0x65, 0x3a, 0x5b, 0xa6, 0xab, 0x10, 0xd1, 0x7f, 0x52, 0x85, 0xb1, 0xf5, 0x43, - 0xec, 0xc5, 0x23, 0x9d, 0xf7, 0xb7, 0x61, 0xc6, 0xf1, 0x0e, 0x7d, 0xf7, 0x10, 0xdb, 0xec, 0xfd, - 0x70, 0xcb, 0x45, 0x86, 0xc8, 0x10, 0xd6, 0xc3, 0x0a, 0x8c, 0xb3, 0x3e, 0xe2, 0xa6, 0x43, 0x1f, - 0xc7, 0x0d, 0x6d, 0x09, 0x3e, 0x30, 0x39, 0x22, 0x6a, 0xc1, 0xcc, 0x9e, 0x13, 0x46, 0x31, 0xb1, - 0x03, 0xa2, 0xd8, 0xec, 0x04, 0x03, 0x1b, 0x0f, 0x19, 0x7c, 0xb4, 0x05, 0xd3, 0x44, 0x51, 0x4e, - 0x09, 0xd6, 0x07, 0x24, 0xa8, 0xa2, 0x93, 0x79, 0x69, 0x51, 0x75, 0x7a, 0x82, 0xae, 0x77, 0xac, - 0x90, 0x6c, 0x2d, 0x35, 0xd2, 0xad, 0x25, 0xfd, 0x8b, 0x44, 0xd2, 0x93, 0x3a, 0x8e, 0x58, 0x86, - 0x3e, 0xa7, 0xca, 0xd0, 0x0f, 0x97, 0x68, 0x65, 0x21, 0x3f, 0x2f, 0xc3, 0xa4, 0xd4, 0xea, 0xe8, - 0x21, 0x68, 0x58, 0x62, 0xd7, 0x85, 0x0b, 0x9f, 0x14, 0x40, 0xea, 0x44, 0x94, 0x04, 0xb1, 0xc1, - 0x48, 0x9e, 0xf5, 0x47, 0x01, 0xd6, 0xef, 0x63, 0x6b, 0x85, 0x69, 0xaf, 0x92, 0x2b, 0x53, 0x53, - 0x5c, 0x99, 0xfa, 0x21, 0xcc, 0x6c, 0xac, 0x66, 0x37, 0x84, 0x99, 0x3e, 0x72, 0xf7, 0xee, 0x96, - 0x70, 0x8e, 0x48, 0x10, 0x34, 0x07, 0x55, 0xb7, 0xeb, 0x71, 0x2d, 0x82, 0x3c, 0x4a, 0x1b, 0x7f, - 0xd5, 0x9e, 0x1b, 0x7f, 0xd9, 0x48, 0x87, 0x5f, 0xaf, 0xc0, 0xdc, 0x86, 0x8b, 0xef, 0x67, 0xd5, - 0x30, 0x3b, 0x74, 0x0e, 0x71, 0x28, 0xd6, 0x08, 0x56, 0xea, 0xb9, 0xb3, 0xd8, 0xca, 0x4b, 0xf9, - 0x11, 0xee, 0x88, 0x66, 0x58, 0x46, 0xb7, 0xa1, 0xce, 0x7c, 0xca, 0x51, 0x73, 0x8c, 0x76, 0xe8, - 0x0b, 0xc7, 0xff, 0x2b, 0x5b, 0xbd, 0x65, 0x6e, 0x41, 0xb1, 0x3d, 0x29, 0x41, 0x6b, 0xe9, 0x79, - 0x98, 0x92, 0x5f, 0x0c, 0xb4, 0x33, 0xf5, 0x29, 0x58, 0xd8, 0x70, 0x7d, 0xeb, 0x20, 0xb3, 0x4b, - 0x4b, 0x14, 0x61, 0x33, 0x36, 0x23, 0x65, 0x53, 0x5f, 0x06, 0x49, 0x5f, 0xdc, 0xbe, 0xbd, 0xb9, - 0xc6, 0x09, 0xcb, 0x20, 0xfd, 0x7f, 0x6a, 0xf0, 0xc1, 0xab, 0xab, 0xeb, 0x2d, 0x1c, 0x46, 0x4e, - 0x14, 0x63, 0x2f, 0xce, 0x85, 0x37, 0x90, 0x15, 0xdd, 0x96, 0x7e, 0xc0, 0x4b, 0x27, 0x10, 0x16, - 0xe3, 0xc3, 0xc2, 0x55, 0x27, 0x36, 0x70, 0xe0, 0x67, 0x47, 0x6a, 0x88, 0x03, 0x3f, 0x72, 0x62, - 0x3f, 0x14, 0x0d, 0x26, 0x41, 0x18, 0xc9, 0x43, 0x27, 0x22, 0xff, 0x63, 0xac, 0x24, 0x65, 0xc2, - 0x8c, 0xed, 0x84, 0x54, 0xf6, 0x1f, 0xf1, 0x61, 0x9b, 0x02, 0x74, 0x0c, 0xa7, 0xae, 0xba, 0xdd, - 0x28, 0xc6, 0xe1, 0x5e, 0xa4, 0xfc, 0xf2, 0x21, 0x68, 0x60, 0xa1, 0xeb, 0x88, 0x89, 0x98, 0x00, - 0x0a, 0x77, 0xfa, 0x8f, 0xdb, 0xfd, 0xfe, 0x53, 0x0d, 0xa6, 0xaf, 0xed, 0xec, 0xb4, 0xae, 0xe2, - 0x98, 0x4f, 0xd4, 0x22, 0x0b, 0xf0, 0x8a, 0xa4, 0xb7, 0xf7, 0x5e, 0x7e, 0xba, 0xb1, 0xe3, 0x2e, - 0xb3, 0x48, 0xa7, 0xe5, 0x4d, 0x2f, 0xbe, 0x15, 0x6e, 0xc7, 0xa1, 0xe3, 0xb5, 0xb9, 0x9e, 0x2f, - 0x44, 0x44, 0x35, 0x15, 0x11, 0xd4, 0x1b, 0x65, 0xed, 0xe3, 0xc4, 0x9a, 0xe0, 0x25, 0xf4, 0x0a, - 0x4c, 0xee, 0xc7, 0x71, 0x70, 0x0d, 0x9b, 0x36, 0x0e, 0xc5, 0x58, 0x3f, 0x77, 0xfc, 0x58, 0x27, - 0xb5, 0x60, 0x08, 0x86, 0x8c, 0xac, 0x3f, 0x0d, 0x90, 0xbe, 0x2a, 0xaf, 0x3e, 0xe9, 0x7f, 0xa2, - 0x41, 0xfd, 0x9a, 0xe9, 0xd9, 0x2e, 0x0e, 0xd1, 0x8b, 0x50, 0xc3, 0xf7, 0xb1, 0xd5, 0xdf, 0x63, - 0x47, 0xa5, 0x68, 0x22, 0xf4, 0x0c, 0x8a, 0x85, 0xd6, 0xa1, 0x4e, 0x18, 0xba, 0x9a, 0x84, 0x8b, - 0x7c, 0xb4, 0x7f, 0x4d, 0x92, 0xfe, 0x30, 0x04, 0x2e, 0xb5, 0xf2, 0xac, 
0x60, 0x9b, 0xcc, 0xb5, - 0xb8, 0x9c, 0x42, 0xb9, 0xb3, 0xda, 0x62, 0x9f, 0x73, 0x52, 0x29, 0xbe, 0xfe, 0x38, 0x2c, 0x5e, - 0xf3, 0xa3, 0xb8, 0x65, 0xc6, 0xfb, 0xca, 0xe8, 0x2a, 0xe8, 0x7d, 0xfd, 0x07, 0x1a, 0xcc, 0x6f, - 0x6e, 0xaf, 0x6e, 0xab, 0x56, 0xb3, 0x0e, 0x53, 0x4c, 0x24, 0x13, 0x8b, 0xc5, 0x74, 0x39, 0x86, - 0x02, 0x23, 0x82, 0xc4, 0x79, 0x43, 0x8c, 0x7c, 0xf2, 0x28, 0x44, 0x77, 0x35, 0x15, 0xdd, 0x8f, - 0xc2, 0x8c, 0x13, 0x59, 0x91, 0xb3, 0xe9, 0x91, 0xb1, 0x6e, 0x5a, 0x62, 0x2c, 0x64, 0xa0, 0xd2, - 0x9c, 0x1e, 0xeb, 0x29, 0xe2, 0x33, 0x11, 0x24, 0x64, 0xd1, 0x09, 0x28, 0x27, 0x11, 0xdd, 0xb2, - 0x6a, 0x18, 0xa2, 0xa8, 0xaf, 0x43, 0x23, 0xd9, 0xe3, 0x2b, 0x90, 0x77, 0x3d, 0x02, 0x66, 0x3a, - 0xe9, 0xde, 0x1f, 0x73, 0x2b, 0xfc, 0x3f, 0x0d, 0x1a, 0xc9, 0x86, 0x0b, 0x5a, 0x85, 0x46, 0xe0, - 0x53, 0x47, 0x5b, 0x28, 0xbc, 0xbb, 0x8f, 0xf4, 0xe9, 0x6a, 0x36, 0xc0, 0x8c, 0x14, 0x0f, 0x5d, - 0x86, 0x7a, 0x10, 0xe2, 0xed, 0x98, 0x46, 0xfd, 0x0c, 0x40, 0x42, 0x60, 0xe9, 0x3f, 0xad, 0x01, - 0xdc, 0x70, 0x3a, 0x4e, 0x6c, 0x98, 0x5e, 0x1b, 0x8f, 0x54, 0x7d, 0x7c, 0x19, 0x6a, 0x51, 0x80, - 0xad, 0x72, 0xbe, 0xcc, 0x94, 0x87, 0xed, 0x00, 0x5b, 0x06, 0xc5, 0xd4, 0xbf, 0x30, 0x01, 0x33, - 0xe9, 0x8b, 0xcd, 0x18, 0x77, 0x0a, 0x43, 0x6d, 0xae, 0x42, 0xb5, 0x63, 0xde, 0xe7, 0x5a, 0xcb, - 0xc7, 0xcb, 0xfe, 0x87, 0x90, 0x5b, 0xbe, 0x69, 0xde, 0x67, 0xcb, 0x1b, 0xa1, 0x40, 0x09, 0x39, - 0x1e, 0x37, 0xc1, 0x07, 0x24, 0xe4, 0x78, 0x82, 0x90, 0xe3, 0xa1, 0x6d, 0xa8, 0x73, 0x37, 0x0e, - 0xdd, 0x73, 0x9d, 0xbc, 0xf4, 0xdc, 0x40, 0xc4, 0xd6, 0x18, 0x2e, 0x5f, 0x78, 0x39, 0x25, 0xb4, - 0x0f, 0x33, 0xfc, 0xd1, 0xc0, 0x6f, 0x74, 0x71, 0x14, 0x73, 0x51, 0xf7, 0xf2, 0x30, 0xb4, 0x39, - 0x09, 0xf6, 0x8b, 0x0c, 0x5d, 0xf4, 0x26, 0x2c, 0x76, 0xcc, 0xfb, 0x0c, 0x91, 0x81, 0x0c, 0x33, - 0x76, 0x7c, 0xbe, 0x15, 0xbc, 0x31, 0x68, 0x0b, 0xe7, 0x08, 0xb1, 0xbf, 0x16, 0xfe, 0x63, 0xc9, - 0x86, 0x09, 0xd1, 0x29, 0x05, 0x53, 0xed, 0x8a, 0x2c, 0x7d, 0x8f, 0x1f, 0x54, 0xc2, 0xae, 0x5b, - 0x7e, 0xb5, 0x6b, 0x7a, 0xb1, 0x13, 0x1f, 0x49, 0x8a, 0x08, 0xfd, 0x0b, 0xef, 0xb1, 0x13, 0xfc, - 0xcb, 0x3e, 0x4c, 0xc9, 0x5d, 0x79, 0x82, 0x7f, 0xf2, 0x61, 0xa1, 0xa0, 0x63, 0x4f, 0xf0, 0x87, - 0x5d, 0x78, 0xb0, 0x67, 0xcf, 0x9e, 0xdc, 0x6f, 0x89, 0xb8, 0x92, 0x24, 0xc2, 0x88, 0xed, 0x9f, - 0x97, 0x54, 0xfb, 0xe7, 0x5c, 0xd9, 0x71, 0x2e, 0x8c, 0xa0, 0x3b, 0x32, 0x6f, 0x44, 0x8c, 0xa1, - 0x35, 0x18, 0x77, 0x09, 0x44, 0x38, 0x1f, 0xcf, 0x0f, 0x32, 0x75, 0x0c, 0x8e, 0xab, 0xff, 0x7f, - 0x0d, 0x6a, 0x23, 0xae, 0xea, 0x8a, 0x5a, 0xd5, 0x5e, 0x3a, 0x06, 0x0f, 0x62, 0x5f, 0x36, 0xcc, - 0x7b, 0xeb, 0xf7, 0x63, 0xec, 0x11, 0x55, 0x54, 0xd4, 0xf6, 0xbb, 0x1a, 0x4c, 0x12, 0xca, 0x62, - 0x9f, 0xe5, 0x61, 0x62, 0x0f, 0xef, 0x62, 0x57, 0xb8, 0x28, 0x78, 0xf7, 0xab, 0x40, 0xf2, 0xd5, - 0x9e, 0xec, 0x7f, 0xe1, 0x8b, 0xa6, 0x0a, 0x24, 0x4a, 0xd6, 0x3d, 0x33, 0xb6, 0xf6, 0xb9, 0x06, - 0xca, 0x0a, 0xe8, 0x1c, 0xcc, 0x8a, 0xc1, 0x71, 0x87, 0xa8, 0xf9, 0xbe, 0xc7, 0x57, 0xff, 0x2c, - 0x98, 0xa8, 0x09, 0x84, 0x6f, 0xbf, 0x1b, 0x8b, 0xfd, 0x9e, 0x31, 0xba, 0xdf, 0x93, 0x81, 0xea, - 0x2b, 0xb0, 0x70, 0xc3, 0x37, 0xed, 0x2b, 0xa6, 0x6b, 0x7a, 0x16, 0x0e, 0x37, 0xbd, 0x76, 0xa1, - 0xcb, 0x5c, 0xf6, 0x72, 0x57, 0x54, 0x2f, 0xb7, 0x6e, 0x02, 0x92, 0x49, 0xf0, 0xcd, 0xbd, 0xeb, - 0x50, 0x77, 0x18, 0x31, 0xde, 0xf3, 0x17, 0xfb, 0xd9, 0x79, 0x39, 0x2e, 0x0c, 0x41, 0x81, 0xa8, - 0x5f, 0x45, 0x76, 0x60, 0x91, 0x7a, 0xaa, 0x7f, 0x0a, 0x66, 0xb7, 0x32, 0xb1, 0xbe, 0x44, 0x6f, - 0xc6, 0xa1, 0x64, 0xa5, 0xb2, 0xd2, 0x30, 0xda, 0x7f, 0x83, 0x18, 0x4c, 0x51, 0x40, 0x34, 0xac, - 
0x51, 0x6a, 0x0a, 0x97, 0x15, 0x4d, 0xa1, 0x8f, 0xc2, 0x9b, 0xb0, 0x90, 0x2a, 0x0a, 0x68, 0x3d, - 0x09, 0xb6, 0x2d, 0xa5, 0xea, 0xa6, 0x24, 0x58, 0x44, 0x28, 0x47, 0xa6, 0x0e, 0xea, 0xe4, 0xdd, - 0x7b, 0xea, 0xa0, 0x4e, 0xb8, 0x10, 0xb3, 0xed, 0x82, 0xc4, 0x19, 0x15, 0x2d, 0x67, 0xe8, 0x4e, - 0xb8, 0xe9, 0x3a, 0x6f, 0xe2, 0x24, 0x86, 0x5b, 0x82, 0xe8, 0x1f, 0x81, 0xd9, 0x4c, 0x35, 0xc9, - 0xac, 0x0a, 0xf6, 0xcd, 0x48, 0x0c, 0x18, 0x56, 0xd0, 0xbf, 0xaf, 0x41, 0x6d, 0xcb, 0xb7, 0x47, - 0xdb, 0xa3, 0xcf, 0x2b, 0x3d, 0xfa, 0x68, 0xff, 0x33, 0x20, 0x52, 0x67, 0xbe, 0x9c, 0xe9, 0xcc, - 0x73, 0x25, 0xb0, 0xd5, 0x7e, 0x7c, 0x01, 0x26, 0xe9, 0xb9, 0x12, 0xbe, 0x03, 0x56, 0xa4, 0x33, - 0x36, 0xa1, 0xce, 0x77, 0x76, 0xc4, 0x5e, 0x3d, 0x2f, 0xea, 0xbf, 0x51, 0x81, 0x29, 0xf9, 0x54, - 0x0a, 0xfa, 0x92, 0x06, 0xcb, 0x21, 0x0b, 0x2c, 0xb3, 0xd7, 0xba, 0xc4, 0x48, 0xdd, 0xb6, 0xf6, - 0xb1, 0xdd, 0x75, 0x1d, 0xaf, 0xbd, 0xd9, 0xf6, 0xfc, 0x04, 0x4c, 0x6c, 0xb9, 0x2e, 0xf5, 0x16, - 0x94, 0x3e, 0xfa, 0x92, 0xf8, 0x73, 0x07, 0xfc, 0x03, 0xfa, 0xba, 0x06, 0x17, 0xd8, 0xc9, 0x8e, - 0xf2, 0x5c, 0x95, 0x52, 0x90, 0x5b, 0x82, 0x68, 0x4a, 0x6e, 0x07, 0x87, 0x1d, 0x63, 0xd0, 0xbf, - 0xe9, 0x5f, 0xaf, 0xc0, 0x34, 0xa9, 0xe2, 0x70, 0x61, 0xf2, 0x77, 0x60, 0xde, 0x35, 0xa3, 0xf8, - 0x1a, 0x36, 0xc3, 0x78, 0x17, 0x9b, 0xd4, 0x4f, 0xda, 0x7f, 0x3c, 0x64, 0x3c, 0xac, 0x79, 0x12, - 0xe8, 0x93, 0x80, 0xa8, 0xdb, 0x35, 0x34, 0xbd, 0x88, 0x72, 0x45, 0x09, 0xd7, 0x06, 0x24, 0x5c, - 0x40, 0x43, 0x72, 0x6b, 0x8f, 0xf5, 0x72, 0x6b, 0x8f, 0xab, 0x51, 0x21, 0x1d, 0x58, 0x20, 0x0d, - 0xa4, 0x46, 0x35, 0x90, 0xaa, 0xcf, 0x12, 0x06, 0x5c, 0x1c, 0x0b, 0x58, 0x7f, 0xd1, 0x43, 0xf7, - 0x9a, 0x15, 0x3a, 0x46, 0x96, 0x88, 0xfe, 0x7f, 0x35, 0x98, 0x20, 0xff, 0x1b, 0xb1, 0x60, 0x7b, - 0x56, 0x15, 0x6c, 0x7a, 0xff, 0x41, 0x2f, 0x64, 0xda, 0xa3, 0x30, 0x47, 0x8a, 0xad, 0xd0, 0xbf, - 0x7f, 0x24, 0xb4, 0x88, 0x1e, 0x2e, 0x05, 0x3a, 0x92, 0x8c, 0x24, 0x9e, 0xf3, 0x36, 0x4c, 0x58, - 0x66, 0x60, 0x5a, 0xec, 0x98, 0x59, 0x09, 0x03, 0x4b, 0x41, 0x5f, 0x5e, 0xe5, 0xb8, 0xcc, 0x0e, - 0x49, 0x48, 0x2d, 0x39, 0x30, 0xad, 0xbc, 0x3a, 0x41, 0x45, 0xd6, 0x65, 0x42, 0x26, 0xd1, 0x78, - 0x5e, 0x87, 0x79, 0x4f, 0x2a, 0x93, 0x29, 0x27, 0x54, 0x87, 0xe5, 0xf2, 0x62, 0x84, 0xce, 0xd4, - 0x3c, 0x21, 0xfd, 0x33, 0xf0, 0x80, 0x22, 0x6d, 0xd2, 0x30, 0xd9, 0x82, 0x2a, 0xd2, 0xd8, 0x64, - 0x1c, 0x9a, 0xa9, 0x76, 0x96, 0x94, 0xc9, 0x20, 0xa7, 0x75, 0x88, 0x78, 0x88, 0x31, 0x2f, 0xe9, - 0x5d, 0xd6, 0x95, 0xf2, 0x5f, 0x91, 0x09, 0x73, 0x1d, 0xa2, 0xb7, 0xad, 0xdf, 0x0f, 0x88, 0x60, - 0xa5, 0x8e, 0x68, 0xad, 0x8c, 0x08, 0xea, 0xc1, 0xaa, 0x91, 0x23, 0xa7, 0x7f, 0x8f, 0x0f, 0x69, - 0xba, 0x22, 0x52, 0xff, 0x8d, 0xbd, 0xba, 0xb9, 0x66, 0xf0, 0xda, 0x88, 0x22, 0x59, 0x2b, 0xf1, - 0xfd, 0x18, 0x87, 0x9e, 0xe9, 0x26, 0xce, 0x63, 0x09, 0x42, 0xde, 0x07, 0xa1, 0x7f, 0xe8, 0xd8, - 0x34, 0xf6, 0x88, 0xf9, 0x1c, 0x25, 0x08, 0x51, 0x5a, 0xbb, 0x5e, 0xc4, 0x04, 0x9e, 0xb9, 0xcb, - 0x0f, 0x3b, 0x4d, 0x18, 0x2a, 0x10, 0xbd, 0x00, 0xe3, 0xb1, 0x49, 0x1d, 0xad, 0x63, 0x65, 0xf6, - 0x4f, 0x76, 0xc8, 0xb7, 0x06, 0x47, 0xd1, 0xbf, 0x55, 0x07, 0x48, 0x57, 0x32, 0x64, 0xe4, 0x06, - 0xf8, 0xd3, 0x65, 0x57, 0xc1, 0x5e, 0xa3, 0x1b, 0xbd, 0x06, 0x93, 0xa6, 0xeb, 0xfa, 0x96, 0x19, - 0xd3, 0x3a, 0x54, 0xca, 0xce, 0x1b, 0x4e, 0x76, 0x25, 0xc5, 0x65, 0x94, 0x65, 0x6a, 0xa9, 0x6e, - 0x51, 0x95, 0x74, 0x0b, 0x74, 0x5d, 0x39, 0x5c, 0x54, 0x2b, 0x13, 0x4b, 0xab, 0x2c, 0x19, 0xf2, - 0xb9, 0x22, 0x74, 0x55, 0x0e, 0xe6, 0x18, 0x2b, 0x13, 0xc1, 0x2d, 0x29, 0x01, 0x6a, 0x20, 0xc7, - 0xac, 0xad, 0xca, 0x5c, 
0xbe, 0x19, 0x78, 0xb1, 0x3f, 0xb9, 0x8c, 0xb0, 0x36, 0xb2, 0x94, 0x88, - 0x60, 0x25, 0xf3, 0x6f, 0xd3, 0xdb, 0xf3, 0xf9, 0x8e, 0xe0, 0xf9, 0x12, 0x4d, 0x7c, 0x14, 0xc5, - 0xb8, 0x43, 0x70, 0x8c, 0x04, 0x9b, 0x18, 0x8f, 0x34, 0x3a, 0x2e, 0x6a, 0x4e, 0x94, 0x31, 0x1e, - 0xd5, 0x70, 0x61, 0x83, 0xe3, 0x22, 0x5d, 0x84, 0xbe, 0x47, 0x9b, 0xde, 0xed, 0x08, 0xd3, 0xd0, - 0xf7, 0x86, 0xa1, 0xc0, 0xc8, 0x8a, 0xc3, 0xcb, 0xe2, 0x28, 0x6b, 0x13, 0xca, 0xfc, 0x52, 0x3d, - 0xf8, 0x6a, 0x64, 0x89, 0xbc, 0x8b, 0xf2, 0x74, 0xc9, 0x85, 0xb9, 0xec, 0x00, 0x3d, 0x41, 0xe9, - 0xfd, 0xd9, 0x2a, 0xcc, 0xa8, 0xfd, 0x86, 0x1e, 0x82, 0x46, 0x87, 0x9e, 0x79, 0x4d, 0x8f, 0x1a, - 0xa6, 0x00, 0x7a, 0x56, 0x92, 0x7e, 0x2b, 0x6d, 0x5f, 0x49, 0x10, 0x22, 0x57, 0x77, 0x7d, 0x3f, - 0x4e, 0xa4, 0x0f, 0x2f, 0x11, 0xc9, 0x73, 0x40, 0x38, 0x73, 0x55, 0x83, 0x57, 0x05, 0x12, 0xc9, - 0xe7, 0x47, 0xb4, 0xdb, 0xb9, 0xee, 0x21, 0x8a, 0xe8, 0x59, 0x78, 0x20, 0x89, 0xa1, 0x34, 0x98, - 0x31, 0x2f, 0x28, 0x31, 0x65, 0xa4, 0xd7, 0x6b, 0x62, 0x42, 0x73, 0x05, 0x42, 0x20, 0xb0, 0x88, - 0xcd, 0x0c, 0x14, 0x3d, 0x0e, 0x73, 0x04, 0x42, 0x17, 0x71, 0xf1, 0x25, 0x8b, 0xde, 0xcc, 0xc1, - 0x89, 0x01, 0xcf, 0x56, 0x12, 0xa2, 0x3f, 0xd2, 0xca, 0xf3, 0x7d, 0xed, 0x2c, 0x98, 0x8c, 0x5a, - 0x33, 0xb4, 0xf6, 0x9d, 0x18, 0x5b, 0x71, 0x37, 0x64, 0x07, 0x2c, 0x1a, 0x86, 0x02, 0xd3, 0xb7, - 0x61, 0xa1, 0x20, 0x52, 0x84, 0x34, 0xb5, 0x19, 0x38, 0x82, 0x15, 0xbe, 0xcb, 0x96, 0x42, 0x48, - 0x47, 0x51, 0x67, 0x83, 0x74, 0x12, 0x3b, 0x05, 0xe8, 0x5f, 0xac, 0x03, 0xa4, 0x16, 0x4d, 0xe1, - 0x0e, 0x90, 0x0e, 0x53, 0xe2, 0x68, 0xbe, 0x74, 0xc8, 0x58, 0x81, 0x91, 0x9f, 0x78, 0xc2, 0xfa, - 0x12, 0xdb, 0x75, 0x09, 0x80, 0xac, 0xb0, 0x11, 0x76, 0xf7, 0x6e, 0x38, 0xde, 0x81, 0x88, 0x8c, - 0x13, 0x65, 0x32, 0x68, 0xbb, 0x8e, 0xcd, 0xfb, 0x91, 0x3c, 0x16, 0xb9, 0x3d, 0xc6, 0x8b, 0xdd, - 0x1e, 0x67, 0x00, 0x38, 0x17, 0xa2, 0xbf, 0xaa, 0x86, 0x04, 0x21, 0x4a, 0xb5, 0x15, 0x62, 0x53, - 0xa8, 0xac, 0x2c, 0x6c, 0x61, 0x62, 0x50, 0xa5, 0x3a, 0x47, 0x82, 0xd0, 0xb5, 0xc9, 0x98, 0x50, - 0xe8, 0x36, 0x06, 0xa5, 0x9b, 0x23, 0x81, 0x5e, 0x82, 0x25, 0x01, 0xbc, 0x9a, 0x0f, 0xe1, 0x05, - 0x5a, 0xbf, 0x63, 0xbe, 0x40, 0x37, 0x60, 0x9c, 0x7a, 0x9f, 0xa2, 0xe6, 0x24, 0x15, 0x67, 0x4f, - 0x95, 0x09, 0x5c, 0x21, 0xfd, 0xbe, 0x7c, 0x83, 0xa2, 0xb1, 0x75, 0x8e, 0xd3, 0xa0, 0xeb, 0xa7, - 0xe7, 0xf9, 0xb1, 0xc9, 0x56, 0xb3, 0xa9, 0x32, 0xeb, 0xa7, 0x44, 0x72, 0x25, 0xc5, 0x15, 0xeb, - 0x67, 0x0a, 0x41, 0xaf, 0xc3, 0xac, 0x7f, 0x8f, 0xcc, 0x42, 0xe1, 0xdd, 0x89, 0x9a, 0xd3, 0xfd, - 0xce, 0xfc, 0x4b, 0x36, 0xb8, 0x82, 0x6a, 0x64, 0x49, 0x65, 0x9c, 0x05, 0x33, 0x59, 0x67, 0x01, - 0x8d, 0xbe, 0x66, 0xfb, 0xc7, 0x74, 0x44, 0xcf, 0xf2, 0xe8, 0xeb, 0x14, 0xb4, 0xf4, 0x1c, 0x4c, - 0x4a, 0x6d, 0x32, 0xc8, 0xa6, 0xff, 0xd2, 0x4b, 0x30, 0x97, 0xad, 0xfb, 0x40, 0x41, 0x03, 0x3f, - 0xd0, 0x60, 0xb6, 0xc0, 0xf5, 0x75, 0xe0, 0xd0, 0xe8, 0x10, 0x3a, 0x2f, 0xc9, 0xb3, 0x3a, 0xe7, - 0x2a, 0xd9, 0x39, 0x27, 0x66, 0x72, 0x55, 0x9a, 0xc9, 0x7c, 0xae, 0xd5, 0xd2, 0xb9, 0xa6, 0x0a, - 0x8f, 0xb1, 0x9c, 0xf0, 0x28, 0x3f, 0x17, 0x15, 0x31, 0x53, 0xcf, 0x8a, 0x99, 0xbf, 0xd5, 0x60, - 0x2e, 0x0d, 0x55, 0xe0, 0x89, 0x23, 0x46, 0x1b, 0xb3, 0x29, 0x3b, 0x60, 0xfa, 0x25, 0x8e, 0xc8, - 0x70, 0x22, 0x39, 0x63, 0x6e, 0x64, 0x9c, 0x31, 0x4f, 0x0d, 0x48, 0x49, 0x75, 0xcc, 0x7c, 0xae, - 0x02, 0xa7, 0xb2, 0x9f, 0xac, 0xba, 0xa6, 0xd3, 0x19, 0x69, 0xdd, 0xaf, 0x2b, 0x75, 0x7f, 0x66, - 0x30, 0x8e, 0x29, 0x3b, 0x52, 0x03, 0xbc, 0x9a, 0x69, 0x80, 0xe7, 0x86, 0x21, 0xa7, 0xb6, 0xc2, - 0x77, 0x34, 0x78, 0xb0, 0xf0, 0xbb, 0x11, 0x5b, 
0xe6, 0x9b, 0xaa, 0x65, 0xfe, 0xb1, 0x21, 0x38, - 0x17, 0xa6, 0xfa, 0x2f, 0x55, 0x7a, 0xb0, 0x4c, 0x2d, 0xaf, 0xb3, 0x30, 0x69, 0x5a, 0x16, 0x8e, - 0xa2, 0x9b, 0xbe, 0x9d, 0x1c, 0x50, 0x93, 0x41, 0xea, 0x91, 0xce, 0xca, 0x28, 0x8e, 0x74, 0x9e, - 0x01, 0x60, 0xea, 0xe6, 0x56, 0x3a, 0xab, 0x25, 0x08, 0xba, 0x49, 0xd7, 0x58, 0xb6, 0xc7, 0x50, - 0xeb, 0xa7, 0xdd, 0x4b, 0xcd, 0x28, 0xef, 0x56, 0x18, 0x09, 0x09, 0xa2, 0xe6, 0x44, 0xb1, 0x1f, - 0x9a, 0x6d, 0x52, 0xed, 0x28, 0xa2, 0x3f, 0x65, 0xe2, 0x21, 0x07, 0xd7, 0x7f, 0xa1, 0x02, 0x1f, - 0x38, 0x66, 0x1c, 0x14, 0xfb, 0x61, 0xb3, 0x8d, 0x58, 0xc9, 0x37, 0xa2, 0x25, 0x19, 0x85, 0x6c, - 0x8f, 0xfa, 0xea, 0xd0, 0x83, 0xf1, 0xfd, 0xe0, 0x03, 0xf9, 0xf7, 0xf0, 0xa1, 0x42, 0x0e, 0xb3, - 0x11, 0x4c, 0x16, 0x01, 0x4a, 0x81, 0x5b, 0x29, 0x40, 0xd9, 0xaf, 0xa8, 0x64, 0xf6, 0x2b, 0xbe, - 0xa9, 0xc1, 0x62, 0x96, 0xfe, 0x88, 0x67, 0xd8, 0x9a, 0x3a, 0xc3, 0x96, 0x07, 0xeb, 0x0e, 0x31, - 0xb9, 0x7e, 0x73, 0x0a, 0x4e, 0xe7, 0x04, 0x27, 0xab, 0xbd, 0x03, 0xf3, 0x6d, 0xaa, 0xd5, 0x48, - 0x41, 0x6d, 0x9c, 0xe7, 0x3e, 0xd1, 0x7c, 0xc7, 0xc6, 0xc2, 0x19, 0x79, 0xaa, 0x28, 0x84, 0x45, - 0xf3, 0x5e, 0x94, 0xcb, 0x2e, 0xc5, 0x3b, 0xf9, 0xa5, 0x3e, 0x96, 0x60, 0x9f, 0xbc, 0x54, 0x46, - 0x21, 0x6d, 0xb4, 0xc5, 0x4f, 0x9e, 0x92, 0x35, 0xb2, 0x54, 0x3c, 0x64, 0x51, 0x18, 0x92, 0x91, - 0xd0, 0x40, 0xaf, 0x42, 0xa3, 0x2d, 0xe2, 0xe0, 0xf8, 0xac, 0xef, 0x23, 0xf5, 0x0a, 0xc3, 0xe6, - 0x8c, 0x94, 0x0a, 0xba, 0x0c, 0x55, 0x6f, 0x2f, 0xe2, 0x81, 0xc7, 0xfd, 0xf6, 0x95, 0xd4, 0x9d, - 0x37, 0x83, 0x60, 0x12, 0x02, 0xe1, 0xae, 0xcd, 0x3d, 0x0c, 0x7d, 0x08, 0x18, 0x57, 0xd6, 0x54, - 0x02, 0xe1, 0xae, 0x8d, 0xd6, 0x61, 0x8c, 0x46, 0x37, 0x71, 0x77, 0x42, 0x9f, 0x50, 0xf1, 0x5c, - 0xec, 0x95, 0xc1, 0xb0, 0xd1, 0x35, 0x18, 0xb7, 0x68, 0x82, 0x1c, 0xae, 0xf1, 0xf7, 0x3b, 0xb2, - 0x90, 0x4b, 0xa6, 0x63, 0x70, 0x7c, 0x4a, 0x09, 0x07, 0xfb, 0x7b, 0x11, 0xd7, 0xf1, 0xfb, 0x51, - 0xca, 0xa5, 0x1f, 0x32, 0x38, 0x3e, 0x7a, 0x11, 0x2a, 0x7b, 0x16, 0x3f, 0x3d, 0xdf, 0xc7, 0xd7, - 0xa0, 0x46, 0xfd, 0x1a, 0x95, 0x3d, 0x0b, 0x5d, 0x87, 0xfa, 0x1e, 0x8b, 0x26, 0xe5, 0x87, 0xe6, - 0x2f, 0xf6, 0x0b, 0x70, 0xcd, 0x85, 0x9e, 0x1a, 0x82, 0x02, 0xda, 0x02, 0xd8, 0x4b, 0x02, 0x60, - 0xf9, 0xe1, 0xf9, 0xe5, 0xc1, 0x02, 0x66, 0x0d, 0x89, 0x02, 0x19, 0x8a, 0xa6, 0x48, 0x60, 0x45, - 0xcf, 0xcd, 0xf7, 0x1d, 0x8a, 0x85, 0xf9, 0xae, 0x8c, 0x94, 0x0a, 0xda, 0x85, 0xe9, 0xc3, 0x28, - 0xd8, 0xc7, 0x62, 0x6a, 0xd1, 0xd3, 0xf5, 0x93, 0x97, 0x5e, 0xec, 0x93, 0xc6, 0x80, 0xa3, 0x38, - 0x61, 0xdc, 0x35, 0xdd, 0x9c, 0x24, 0x50, 0x49, 0x92, 0x36, 0x7d, 0xa3, 0xeb, 0xef, 0x1e, 0xc5, - 0x98, 0x9f, 0xc8, 0xef, 0xd3, 0xa6, 0xaf, 0xb2, 0x8f, 0xd5, 0x36, 0xe5, 0x14, 0x92, 0x36, 0xa0, - 0x52, 0x6b, 0xae, 0x74, 0x1b, 0xe4, 0x78, 0x4c, 0xa9, 0x10, 0x29, 0x15, 0xec, 0xfb, 0xb1, 0xef, - 0x65, 0x64, 0xe2, 0x7c, 0x19, 0x29, 0xd5, 0x2a, 0xc0, 0x54, 0xa5, 0x54, 0x11, 0x6d, 0xf4, 0x69, - 0x98, 0x09, 0xfc, 0x30, 0xbe, 0xe7, 0x87, 0x62, 0x78, 0xa0, 0x52, 0x5a, 0xb5, 0x82, 0xc3, 0xff, - 0x90, 0xa1, 0x44, 0xda, 0x3b, 0xb2, 0x4c, 0x17, 0x6f, 0xde, 0x6a, 0x2e, 0x94, 0x69, 0xef, 0x6d, - 0xf6, 0xb1, 0xda, 0xde, 0x9c, 0x82, 0xfe, 0xed, 0x5a, 0x7e, 0xc5, 0xa3, 0x0a, 0xda, 0xeb, 0x39, - 0x77, 0xf2, 0xcb, 0x83, 0x5b, 0x04, 0x3d, 0x1d, 0xcb, 0x2e, 0x9c, 0x0e, 0x0a, 0x97, 0x2f, 0xbe, - 0x76, 0x0c, 0x6a, 0x33, 0xb0, 0x5a, 0xf5, 0xa0, 0x99, 0xd5, 0x93, 0xaa, 0x79, 0x3d, 0x69, 0x13, - 0x26, 0xa8, 0x86, 0x90, 0x1e, 0xaf, 0x19, 0xf0, 0xc4, 0x4a, 0x82, 0x8e, 0xd6, 0xe0, 0x83, 0x59, - 0x36, 0x0c, 0x4c, 0xdf, 0xf2, 0x93, 0xb9, 0x4c, 0x07, 0x3c, 0xfe, 0xa3, 
0x42, 0xe5, 0x71, 0xbc, - 0x58, 0x79, 0x7c, 0x37, 0xf5, 0xaf, 0xff, 0x58, 0xa0, 0x76, 0x1c, 0xa7, 0xa1, 0xf6, 0x3c, 0xdf, - 0xde, 0xeb, 0x48, 0x8f, 0xbe, 0x05, 0x67, 0xfb, 0xcd, 0x39, 0xba, 0xe3, 0x67, 0x27, 0x3e, 0x53, - 0xfa, 0xdc, 0x2b, 0x24, 0x5f, 0xff, 0x7d, 0x0d, 0xaa, 0x2d, 0xdf, 0x1e, 0xa9, 0xb5, 0xf8, 0x9c, - 0x62, 0x2d, 0x3e, 0xd2, 0x37, 0x1d, 0xa4, 0x64, 0x1b, 0x5e, 0xce, 0xd8, 0x86, 0x1f, 0xe9, 0x8f, - 0xac, 0x5a, 0x82, 0xdf, 0xab, 0xc0, 0xa4, 0x94, 0xb2, 0x12, 0x7d, 0x79, 0x98, 0x50, 0x83, 0x6a, - 0xb9, 0x2c, 0x96, 0xfc, 0x1f, 0x74, 0x8b, 0xf0, 0x7d, 0x13, 0x6d, 0x70, 0x17, 0x3b, 0xed, 0xfd, - 0x18, 0xdb, 0x59, 0x06, 0x07, 0x8e, 0x36, 0xf8, 0xb6, 0x06, 0xb3, 0x19, 0x22, 0xe8, 0x6e, 0x51, - 0x44, 0xda, 0x50, 0x76, 0x60, 0x26, 0x88, 0xed, 0x0c, 0x40, 0xe2, 0x58, 0x12, 0x96, 0x9a, 0x04, - 0x21, 0x22, 0x2a, 0xf6, 0x03, 0xdf, 0xf5, 0xdb, 0x47, 0xd7, 0xb1, 0x38, 0xae, 0x21, 0x83, 0xf4, - 0xdf, 0xad, 0x30, 0x76, 0xa5, 0x4c, 0xa2, 0xff, 0xd6, 0xf9, 0x43, 0x77, 0xfe, 0xff, 0xd0, 0x60, - 0x8e, 0x10, 0xa1, 0xfb, 0x4e, 0x22, 0x92, 0x20, 0x49, 0x05, 0xa4, 0xc9, 0xa9, 0x80, 0x68, 0xbc, - 0x89, 0xed, 0x77, 0x63, 0x6e, 0x2e, 0xf2, 0x12, 0x87, 0xe3, 0x30, 0xe4, 0x61, 0x6f, 0xbc, 0x24, - 0x92, 0x03, 0xd5, 0xd2, 0xe4, 0x40, 0xf4, 0x6c, 0x1b, 0xdf, 0x33, 0xe1, 0xe2, 0x3f, 0x05, 0xe8, - 0x5f, 0xa9, 0xc0, 0x54, 0xcb, 0xb7, 0x87, 0x0b, 0x7a, 0xe1, 0x47, 0x0a, 0x69, 0x32, 0xa7, 0xa1, - 0x02, 0x5e, 0x54, 0xf4, 0xf7, 0x55, 0xb0, 0xcb, 0xdb, 0x1a, 0xcc, 0xb4, 0x7c, 0x9b, 0x74, 0xda, - 0x7b, 0xda, 0x43, 0xf2, 0xd9, 0xc2, 0x71, 0xf5, 0x6c, 0xe1, 0xff, 0xd1, 0xa0, 0xde, 0xf2, 0xed, - 0x11, 0xfb, 0x08, 0x9e, 0x51, 0x7d, 0x04, 0x1f, 0xea, 0x3b, 0x59, 0x85, 0x5b, 0xe0, 0x5b, 0x15, - 0x98, 0x26, 0xec, 0xf8, 0x6d, 0xd1, 0x60, 0x4a, 0xc5, 0xb4, 0x6c, 0xc5, 0xc8, 0xa2, 0xe9, 0xbb, - 0xae, 0x7f, 0x4f, 0x34, 0x1c, 0x2b, 0xb1, 0x9c, 0x09, 0xf8, 0xd0, 0xf1, 0xbb, 0x22, 0xeb, 0x48, - 0x52, 0x46, 0x3a, 0x4c, 0x45, 0x8e, 0x67, 0x61, 0xb1, 0xa7, 0x52, 0xa3, 0x7b, 0x2a, 0x0a, 0x8c, - 0xe6, 0xee, 0x21, 0x65, 0x3a, 0x78, 0x06, 0xcf, 0xdd, 0x23, 0x50, 0xe9, 0x41, 0x4d, 0xb1, 0xb5, - 0x13, 0xf1, 0xd3, 0x37, 0x12, 0x84, 0xd4, 0x2e, 0x36, 0x1d, 0xf7, 0x86, 0xe3, 0xe1, 0x88, 0x6f, - 0x5e, 0xa5, 0x00, 0x82, 0x4d, 0xc3, 0xa1, 0x59, 0xa6, 0xab, 0x09, 0xb6, 0xb7, 0x95, 0x42, 0xf4, - 0x27, 0xe0, 0x54, 0xcb, 0xb7, 0x89, 0xce, 0xbd, 0xe1, 0x87, 0xf7, 0xcc, 0xd0, 0x96, 0x46, 0x19, - 0xcb, 0xac, 0x40, 0x84, 0xe5, 0x98, 0xc8, 0x96, 0xf0, 0x08, 0x95, 0xbf, 0x7d, 0x43, 0x8f, 0xfe, - 0x4e, 0x03, 0xd4, 0xa2, 0x1b, 0x4a, 0x4a, 0x9a, 0xb1, 0x1d, 0x98, 0x89, 0xf0, 0x0d, 0xc7, 0xeb, - 0xde, 0xe7, 0xc8, 0xe5, 0x22, 0xb4, 0xb6, 0xd7, 0x65, 0x1c, 0x23, 0x43, 0x83, 0x34, 0x40, 0xd8, - 0xf5, 0x56, 0xa2, 0xdb, 0x11, 0x0e, 0x45, 0x2e, 0xaf, 0x04, 0x40, 0xb3, 0xee, 0x90, 0xc2, 0x96, - 0xef, 0x19, 0xbe, 0x1f, 0xf3, 0xae, 0x54, 0x60, 0x68, 0x19, 0x50, 0xd4, 0x0d, 0x02, 0x97, 0xba, - 0x4b, 0x4d, 0xf7, 0x6a, 0xe8, 0x77, 0x03, 0x16, 0x77, 0x51, 0x35, 0x0a, 0xde, 0x90, 0xb9, 0xb0, - 0x17, 0xd1, 0x67, 0x1e, 0x20, 0x2d, 0x8a, 0xfa, 0x3e, 0x15, 0x63, 0xdb, 0x4e, 0xdb, 0x33, 0xe3, - 0x6e, 0x48, 0x44, 0xc9, 0x74, 0x40, 0xc5, 0x5a, 0x1c, 0xfa, 0xae, 0x8b, 0xc3, 0xfe, 0x29, 0x17, - 0x7b, 0xee, 0x4e, 0xa9, 0x84, 0xf4, 0x7f, 0x68, 0xd0, 0x59, 0x47, 0xed, 0x94, 0x97, 0xa0, 0xce, - 0x63, 0x08, 0xf8, 0xd2, 0xf6, 0x70, 0x99, 0x14, 0x7d, 0x86, 0x40, 0x42, 0x57, 0x69, 0xbc, 0x09, - 0x9b, 0x0f, 0xe5, 0x13, 0x6b, 0xf2, 0xfd, 0x6f, 0x09, 0x15, 0x3d, 0x0c, 0xd3, 0x3c, 0x9d, 0x11, - 0xd7, 0xf3, 0xd9, 0x1a, 0xae, 0x02, 0x89, 0x75, 0x20, 0x25, 0x70, 0x2b, 0xd8, 0xa2, 0x64, 0xd3, - 
0xe9, 0xf8, 0x8f, 0xd0, 0x53, 0x70, 0xca, 0xb4, 0x62, 0xe7, 0x10, 0xaf, 0x61, 0xd3, 0x76, 0x1d, - 0x0f, 0xab, 0x31, 0xeb, 0xc5, 0x2f, 0xe9, 0x81, 0x50, 0x2f, 0xe2, 0xdc, 0x8d, 0xf3, 0x03, 0xa1, - 0x02, 0x80, 0x5e, 0x63, 0xb9, 0xd8, 0x13, 0xcd, 0x87, 0xe5, 0x60, 0x7c, 0xa6, 0x94, 0x72, 0xab, - 0xc4, 0x5f, 0x31, 0x5b, 0x4f, 0x21, 0x46, 0x47, 0x19, 0x0e, 0x0f, 0x1d, 0x0b, 0xaf, 0x58, 0xf4, - 0x94, 0x3a, 0x35, 0x68, 0xd8, 0xa6, 0x7f, 0xc1, 0x1b, 0xf4, 0x28, 0x99, 0x2d, 0x32, 0x94, 0xef, - 0xfa, 0x67, 0xa0, 0x4a, 0xc2, 0x18, 0x50, 0x13, 0xc6, 0x10, 0x95, 0x6a, 0xdf, 0x8f, 0xe2, 0x2d, - 0x4c, 0x8c, 0xe7, 0x03, 0xea, 0xef, 0x99, 0x30, 0x64, 0x10, 0x19, 0xcb, 0xd4, 0x0f, 0xb8, 0xb9, - 0x46, 0xbd, 0x37, 0x13, 0x86, 0x28, 0x8a, 0x37, 0x9b, 0xad, 0x55, 0xea, 0x88, 0xe1, 0x6f, 0x36, - 0x5b, 0xab, 0xe8, 0xd3, 0xf9, 0x3c, 0x84, 0x33, 0x65, 0x5c, 0x5a, 0x79, 0x91, 0x90, 0x4f, 0x45, - 0xf8, 0x1f, 0x60, 0x2e, 0x49, 0x7b, 0xc8, 0xf2, 0x37, 0x44, 0xcd, 0xd9, 0x32, 0x69, 0xdc, 0x0b, - 0xcf, 0x7c, 0xe7, 0x68, 0x29, 0x87, 0x12, 0xe6, 0x32, 0xa9, 0x77, 0x1e, 0x82, 0x46, 0xd4, 0xdd, - 0xb5, 0xfd, 0x8e, 0xe9, 0x78, 0xd4, 0x35, 0xd2, 0x30, 0x52, 0x00, 0xba, 0x02, 0x13, 0xa6, 0x48, - 0x82, 0x8f, 0xca, 0x04, 0x68, 0x27, 0xd9, 0xef, 0x13, 0x3c, 0x32, 0x41, 0x78, 0xec, 0x1b, 0xdf, - 0x33, 0x5e, 0x60, 0x13, 0x44, 0x01, 0xa2, 0x5b, 0x30, 0x43, 0x3e, 0x5f, 0x4d, 0xe7, 0xe4, 0xe2, - 0x60, 0x73, 0x32, 0x83, 0x8e, 0xae, 0xc0, 0x43, 0x66, 0x37, 0xf6, 0x3b, 0x64, 0xbc, 0x6c, 0x2b, - 0xa3, 0x67, 0xc7, 0x3f, 0xc0, 0x5e, 0xf3, 0x14, 0xed, 0xdf, 0x63, 0xbf, 0x41, 0xaf, 0x10, 0xed, - 0xdc, 0xe5, 0x31, 0x11, 0x51, 0xf3, 0x74, 0x99, 0xc3, 0x3e, 0x3b, 0x09, 0x82, 0x21, 0x23, 0x2f, - 0x5d, 0x86, 0xf9, 0xdc, 0x6c, 0x19, 0x68, 0x73, 0xfb, 0x6f, 0xaa, 0xd0, 0x48, 0xec, 0xc2, 0x1e, - 0x76, 0xf7, 0x2b, 0x05, 0x29, 0xba, 0x1f, 0xef, 0x3b, 0x40, 0x8b, 0x83, 0xe8, 0x7a, 0xa7, 0x1f, - 0x4f, 0x55, 0xba, 0x9a, 0xa2, 0xd2, 0xf5, 0xc8, 0xdf, 0xc8, 0xd6, 0x55, 0x7b, 0xb3, 0x25, 0x32, - 0xbc, 0xd1, 0x42, 0x92, 0xe8, 0x8f, 0x2a, 0x0b, 0xf5, 0xa1, 0x12, 0xfd, 0x51, 0x65, 0xe1, 0x35, - 0x98, 0xb7, 0xd4, 0x84, 0x79, 0x49, 0x1c, 0xdc, 0x13, 0x03, 0x64, 0xb3, 0xeb, 0x46, 0x46, 0x9e, - 0x0e, 0x99, 0x3a, 0x6f, 0xf8, 0x11, 0xf5, 0xb9, 0x70, 0x51, 0x94, 0x94, 0x91, 0x05, 0xa7, 0x94, - 0x31, 0x97, 0xfc, 0x1c, 0x86, 0xf9, 0x79, 0x31, 0x2d, 0xfd, 0x67, 0x98, 0xb5, 0xca, 0x3f, 0xc2, - 0x51, 0xd7, 0x8d, 0x47, 0x7c, 0xa0, 0x46, 0x36, 0x30, 0x86, 0x70, 0x4c, 0x7c, 0x43, 0xa3, 0x8e, - 0x89, 0x1d, 0xdc, 0x09, 0x5c, 0x96, 0x54, 0x70, 0x74, 0xcc, 0x6d, 0xc2, 0x44, 0xcc, 0xe9, 0x96, - 0x4b, 0x28, 0x23, 0x31, 0x42, 0x9d, 0x2f, 0x09, 0xba, 0xfe, 0x35, 0xd6, 0x8e, 0xe2, 0xed, 0x88, - 0x35, 0xf7, 0xcb, 0xaa, 0xe6, 0xfe, 0x58, 0x69, 0x2e, 0x85, 0x06, 0xff, 0x96, 0xca, 0x1e, 0x55, - 0x71, 0xde, 0x1f, 0xae, 0x2b, 0x7d, 0x0f, 0x16, 0x8b, 0xfc, 0xd3, 0x23, 0xbf, 0x30, 0xe1, 0x43, - 0x30, 0xad, 0xa4, 0x46, 0x14, 0x31, 0x37, 0x5a, 0x12, 0x73, 0xa3, 0xff, 0x58, 0x83, 0xc5, 0xa2, - 0xab, 0x4b, 0xd0, 0x16, 0x4c, 0x05, 0x92, 0x0e, 0x5a, 0xee, 0x54, 0x8d, 0xac, 0xb5, 0x1a, 0x0a, - 0x3e, 0xba, 0x01, 0x53, 0xf8, 0xd0, 0xb1, 0x12, 0x43, 0xb8, 0x32, 0xa0, 0x78, 0x52, 0xb0, 0x07, - 0x4f, 0x63, 0xa4, 0xff, 0x57, 0x0d, 0x1e, 0xe8, 0x71, 0xbc, 0x86, 0x50, 0xbb, 0x47, 0xdd, 0x21, - 0x3c, 0x9b, 0x25, 0x2f, 0xa1, 0x2d, 0x00, 0xe6, 0x0d, 0xa1, 0x79, 0xce, 0x2b, 0x65, 0xb6, 0xa5, - 0x72, 0x07, 0x02, 0x24, 0x0a, 0xfa, 0xdb, 0x15, 0x18, 0x63, 0xc9, 0xa2, 0x2f, 0x43, 0x7d, 0x9f, - 0x65, 0x03, 0x18, 0x2c, 0xfb, 0x80, 0xc0, 0x42, 0x4f, 0xc2, 0x02, 0x91, 0x6e, 0x8e, 0xe9, 0xae, - 0x61, 0xd7, 0x3c, 0x12, 
0x5a, 0x2b, 0x4b, 0xb4, 0x53, 0xf4, 0xaa, 0xe0, 0x58, 0x26, 0x4b, 0x8f, - 0x90, 0x81, 0x12, 0xe5, 0x22, 0xc8, 0xe9, 0xd1, 0x63, 0x86, 0x0a, 0xa4, 0x5e, 0xf5, 0x2e, 0x75, - 0xfb, 0xef, 0xec, 0x87, 0x38, 0xda, 0xf7, 0x5d, 0x9b, 0xa7, 0x22, 0xcd, 0xc1, 0xc9, 0xb7, 0x7b, - 0xa6, 0xe3, 0x76, 0x43, 0x9c, 0x7e, 0x3b, 0xce, 0xbe, 0xcd, 0xc2, 0xf5, 0xcf, 0x69, 0x70, 0x8a, - 0x67, 0xd2, 0x14, 0xa1, 0xcd, 0x7c, 0x72, 0x5c, 0x83, 0xba, 0x88, 0x61, 0x29, 0x75, 0x34, 0x83, - 0x21, 0xa7, 0x59, 0x39, 0x0d, 0x81, 0x5e, 0x22, 0xf5, 0xe3, 0x17, 0x34, 0x58, 0x28, 0xd8, 0x5c, - 0x63, 0x93, 0xad, 0xed, 0x44, 0x71, 0x92, 0x42, 0x26, 0x29, 0xd3, 0xd3, 0x19, 0x6c, 0x83, 0x8a, - 0x4f, 0x50, 0x56, 0x3a, 0x6e, 0x82, 0x26, 0x37, 0xbb, 0xd4, 0xa4, 0x9b, 0x5d, 0x16, 0x61, 0xac, - 0x9d, 0x18, 0x85, 0x0d, 0x83, 0x15, 0xf4, 0xff, 0x5d, 0x81, 0xd9, 0xcc, 0x06, 0xf5, 0xb1, 0xf7, - 0xc8, 0x14, 0x67, 0xa9, 0xef, 0x95, 0x7c, 0x89, 0x66, 0x7b, 0x4c, 0xd2, 0x47, 0xd3, 0xe7, 0x84, - 0xb7, 0x31, 0x89, 0xb7, 0x26, 0xd4, 0x0f, 0xf0, 0x51, 0xe8, 0x78, 0x6d, 0xe1, 0x61, 0xe2, 0x45, - 0x35, 0xbb, 0x52, 0x7d, 0xd4, 0xd9, 0x95, 0x26, 0x32, 0x82, 0xed, 0xbf, 0x6b, 0x30, 0x4b, 0x8f, - 0x6a, 0xf3, 0xb0, 0x73, 0xc7, 0xf7, 0x46, 0x2a, 0xdb, 0x17, 0x61, 0x2c, 0x24, 0xe4, 0x45, 0xeb, - 0xd1, 0x02, 0xbd, 0xe2, 0x87, 0x50, 0x27, 0x6d, 0x37, 0xc5, 0xee, 0x09, 0xa1, 0x41, 0x75, 0x06, - 0x0e, 0x5c, 0x87, 0x71, 0x91, 0x9a, 0xd6, 0xef, 0x5d, 0x50, 0x5d, 0x21, 0x3b, 0xc3, 0x07, 0xd5, - 0x15, 0x93, 0x53, 0x35, 0x96, 0x1f, 0x6a, 0x70, 0xa6, 0xf0, 0xbb, 0xe1, 0x5c, 0xb1, 0xc5, 0xae, - 0xd3, 0xea, 0x48, 0x5d, 0xa7, 0xb5, 0x5e, 0xeb, 0xc6, 0x98, 0xba, 0x6e, 0x7c, 0x47, 0x83, 0x07, - 0x0b, 0xab, 0xf6, 0x9e, 0xc6, 0x0b, 0x16, 0x72, 0x24, 0x34, 0x9f, 0xdf, 0xab, 0xf4, 0x60, 0x99, - 0xea, 0x40, 0x74, 0x5e, 0xd1, 0x97, 0x91, 0xc8, 0x39, 0x2e, 0xca, 0xc8, 0x94, 0xe2, 0xf6, 0x18, - 0x1f, 0xeb, 0x43, 0x8e, 0xb5, 0x65, 0xd5, 0x87, 0x91, 0xc6, 0xf2, 0xc9, 0x0a, 0x68, 0xf5, 0x1d, - 0x29, 0xa0, 0xe8, 0x1c, 0xcc, 0x76, 0x1c, 0x8f, 0xe6, 0x72, 0x55, 0xd7, 0xaa, 0x2c, 0x78, 0xe9, - 0x05, 0x98, 0x1e, 0xde, 0x4a, 0xfc, 0x83, 0x0a, 0x7c, 0xe0, 0x98, 0x49, 0x70, 0x6c, 0x83, 0x5e, - 0x82, 0xc5, 0xbd, 0xae, 0xeb, 0x1e, 0xd1, 0x1d, 0x2d, 0x6c, 0x1b, 0xe2, 0x3b, 0xb6, 0xe6, 0x14, - 0xbe, 0x43, 0xcb, 0x80, 0xfc, 0x5d, 0x9a, 0x26, 0xc0, 0xbe, 0x9a, 0x1e, 0x28, 0xa8, 0xb2, 0x9c, - 0xd9, 0xf9, 0x37, 0xcc, 0x5d, 0x66, 0xda, 0x47, 0x09, 0x71, 0xbe, 0x60, 0x2b, 0x40, 0x74, 0x1e, - 0xe6, 0xcd, 0x43, 0xd3, 0xa1, 0xa7, 0xe5, 0x92, 0x2f, 0xd9, 0x8a, 0x9d, 0x7f, 0x81, 0x5e, 0x57, - 0xac, 0x5e, 0x96, 0x7a, 0xe6, 0xc5, 0x21, 0x86, 0x42, 0xf1, 0x25, 0x55, 0x3f, 0xab, 0x11, 0xa1, - 0x59, 0x90, 0x10, 0x54, 0xb9, 0xf1, 0x40, 0x0a, 0x3a, 0x54, 0x81, 0xac, 0xc5, 0xa3, 0x34, 0xca, - 0x81, 0x2e, 0xc3, 0x3c, 0xd7, 0xe4, 0x06, 0xd4, 0x6d, 0xe7, 0xd0, 0x89, 0xfc, 0xb0, 0x44, 0xfe, - 0xf5, 0xfc, 0x0e, 0xbd, 0x40, 0xd6, 0x7f, 0xa2, 0xc1, 0xb4, 0xe0, 0xf1, 0xd5, 0xae, 0x1f, 0x9b, - 0x23, 0x15, 0xe8, 0xab, 0x8a, 0x40, 0xbf, 0x50, 0x2e, 0x1a, 0x97, 0xb2, 0x21, 0x09, 0xf2, 0xcd, - 0x8c, 0x20, 0xbf, 0x38, 0x08, 0x19, 0x55, 0x80, 0xbf, 0xad, 0xc1, 0xbc, 0xf2, 0xfe, 0xbd, 0x49, - 0x79, 0x52, 0xc4, 0xa9, 0x90, 0x6a, 0x7f, 0x9e, 0x65, 0x91, 0x4a, 0xb3, 0x9b, 0x50, 0xdb, 0x37, - 0x43, 0xbb, 0xdc, 0x41, 0xe4, 0x1c, 0xfa, 0xf2, 0x35, 0x33, 0xb4, 0xf9, 0xb5, 0x5f, 0x84, 0x0c, - 0x4b, 0x73, 0xe7, 0x07, 0xc9, 0xc6, 0x32, 0x2f, 0x2d, 0x61, 0x68, 0x24, 0x9f, 0x9e, 0x60, 0x50, - 0xc8, 0x7f, 0xab, 0xc2, 0x42, 0x41, 0x37, 0xa1, 0x5b, 0x4a, 0x2d, 0x5f, 0x18, 0xb8, 0x9f, 0x73, - 0xf5, 0xbc, 0x45, 0x95, 0x3b, 0x9b, 0x77, 0xc7, 
0x10, 0x04, 0x6f, 0x47, 0x58, 0x10, 0x24, 0x84, - 0xde, 0xa5, 0x06, 0x22, 0xbf, 0x49, 0xfe, 0x7c, 0x82, 0xfd, 0xf0, 0x76, 0x15, 0x16, 0x8b, 0x62, - 0xe0, 0xd1, 0x9d, 0x4c, 0x4e, 0xa1, 0x97, 0x06, 0x8f, 0xa3, 0x67, 0x89, 0x86, 0x92, 0xe3, 0x4d, - 0xb4, 0x80, 0x5e, 0x27, 0x12, 0x8d, 0x66, 0x72, 0x12, 0x53, 0xe4, 0xe5, 0x21, 0x28, 0xf3, 0x64, - 0x50, 0x9c, 0x76, 0x42, 0x71, 0xa9, 0x0d, 0x93, 0xd2, 0x4f, 0x4f, 0xb0, 0x7b, 0x1c, 0x22, 0x33, - 0x25, 0x1e, 0x4e, 0xb0, 0x8b, 0x76, 0x61, 0x46, 0xdd, 0xd1, 0x4b, 0x0c, 0x16, 0x4d, 0x32, 0x58, - 0x10, 0xd4, 0x42, 0xdf, 0x15, 0xab, 0x04, 0x7d, 0x4e, 0x34, 0xd1, 0xaa, 0xa4, 0x89, 0x2e, 0xc2, - 0x98, 0x8b, 0x0f, 0xb1, 0xb0, 0x80, 0x58, 0x41, 0xff, 0xa7, 0x0a, 0x2c, 0x14, 0xc4, 0xfc, 0x11, - 0x6d, 0xb1, 0x6d, 0xc6, 0xf8, 0x9e, 0x29, 0x6a, 0x26, 0x8a, 0x54, 0x7e, 0xb0, 0xb3, 0x95, 0x42, - 0xa3, 0x65, 0x47, 0x2a, 0x47, 0x9f, 0x7c, 0xf6, 0x0c, 0x40, 0x14, 0xb9, 0xeb, 0x1e, 0x59, 0xb6, - 0x6d, 0xbe, 0x01, 0x2f, 0x41, 0x88, 0xd1, 0x1d, 0x84, 0x7e, 0xcc, 0x6c, 0xdf, 0x35, 0xb6, 0x19, - 0xc1, 0xcf, 0x4c, 0x64, 0xe1, 0xc4, 0x20, 0xe6, 0xa1, 0x70, 0x2d, 0x62, 0x05, 0x32, 0xd3, 0x4e, - 0x06, 0x49, 0x5f, 0x50, 0x93, 0xb9, 0xae, 0x7c, 0x41, 0x2f, 0x55, 0x53, 0x8f, 0x84, 0x4c, 0xe4, - 0x8e, 0x84, 0xa4, 0xa6, 0x67, 0xa3, 0xa7, 0xff, 0x0a, 0x32, 0x66, 0xde, 0xdf, 0x57, 0x60, 0x9c, - 0xed, 0xb8, 0x8c, 0x74, 0xf1, 0xbd, 0xa2, 0x5c, 0x11, 0xb9, 0x5c, 0x26, 0xfb, 0x77, 0xf6, 0x7e, - 0xc8, 0xc2, 0x41, 0xb4, 0x03, 0x10, 0xd1, 0x3c, 0xaa, 0xe4, 0x63, 0x7e, 0xe0, 0xfe, 0xa9, 0x52, - 0xd4, 0xb7, 0x13, 0x34, 0xf6, 0x0f, 0x89, 0xce, 0x40, 0x37, 0x51, 0x4e, 0xc9, 0x93, 0xf1, 0x13, - 0x30, 0x9b, 0xa1, 0x3b, 0x90, 0xda, 0xfb, 0x65, 0x0d, 0x66, 0x33, 0x79, 0xee, 0xdf, 0x17, 0xb7, - 0x58, 0xfe, 0xa2, 0x06, 0xf3, 0xb9, 0x94, 0xec, 0xef, 0xd3, 0x2b, 0x2c, 0xbf, 0xa4, 0x01, 0x30, - 0x5e, 0x47, 0xac, 0x4f, 0x3d, 0xaf, 0xea, 0x53, 0x0f, 0x97, 0x19, 0x65, 0x42, 0x91, 0xfa, 0x43, - 0x0d, 0xe6, 0x18, 0xe4, 0x5f, 0xd7, 0xcd, 0x95, 0xbf, 0xa2, 0x01, 0x62, 0xf5, 0x1a, 0xe8, 0xce, - 0xea, 0xf7, 0xf4, 0xba, 0xc9, 0x1f, 0x54, 0xe8, 0x24, 0x53, 0xf6, 0xae, 0xb7, 0x60, 0xca, 0x92, - 0xee, 0x9a, 0x2e, 0xe7, 0x79, 0x97, 0x6f, 0xa7, 0x36, 0x14, 0x7c, 0x96, 0x60, 0xc5, 0x39, 0x74, - 0x5c, 0xdc, 0xa6, 0x1a, 0x1f, 0x5d, 0x29, 0x52, 0x48, 0x41, 0x3c, 0x4d, 0x75, 0xd4, 0xf1, 0x34, - 0xb5, 0x7e, 0xf1, 0x34, 0x63, 0x05, 0xf1, 0x34, 0x4f, 0xc3, 0x69, 0xb1, 0x12, 0x90, 0xf2, 0x86, - 0xe3, 0x62, 0xbe, 0xb6, 0xb2, 0xf0, 0xa5, 0x1e, 0x6f, 0xf5, 0x5d, 0x58, 0xd8, 0xc6, 0xa1, 0x43, - 0x8f, 0x57, 0xdb, 0xe9, 0xc8, 0xbb, 0x0e, 0x8d, 0x30, 0x33, 0xac, 0x07, 0xbd, 0x7a, 0x27, 0xf5, - 0xe4, 0xff, 0xb1, 0x06, 0x75, 0xbe, 0xbd, 0x3d, 0xd2, 0xa5, 0xe9, 0x13, 0x8a, 0x5d, 0xf8, 0x58, - 0xbf, 0x69, 0x4d, 0x19, 0x90, 0x2c, 0xc2, 0xd5, 0x8c, 0x45, 0xf8, 0xd1, 0x72, 0x04, 0x54, 0x5b, - 0xf0, 0xb7, 0x2b, 0x30, 0xa3, 0x6e, 0xdd, 0x8f, 0xb4, 0x8a, 0x57, 0xa1, 0x1e, 0xf1, 0x88, 0x8c, - 0x52, 0xf7, 0xf7, 0x64, 0x7b, 0x41, 0x60, 0x17, 0xc6, 0x78, 0x54, 0x47, 0x18, 0xe3, 0xd1, 0x2f, - 0xdc, 0xa1, 0xd6, 0x3f, 0xdc, 0x41, 0xff, 0x39, 0x2a, 0x93, 0x64, 0xf8, 0x88, 0x17, 0x82, 0x2b, - 0xaa, 0xf4, 0x3a, 0x5f, 0xaa, 0xc3, 0x39, 0x2b, 0x62, 0x41, 0xf8, 0x29, 0x0d, 0x26, 0xf9, 0x9b, - 0x11, 0x73, 0xf7, 0x82, 0xca, 0xdd, 0x23, 0xa5, 0xb8, 0x13, 0x6c, 0xfd, 0x5a, 0xca, 0xd6, 0x71, - 0xb7, 0x67, 0x26, 0x57, 0x52, 0x55, 0x32, 0x77, 0x5f, 0x8a, 0x2b, 0xac, 0xaa, 0xd2, 0x15, 0x56, - 0x5b, 0xe2, 0xbe, 0x0a, 0x7a, 0x77, 0x5d, 0x6d, 0xa8, 0x24, 0xf9, 0x12, 0x05, 0x11, 0x35, 0x45, - 0xa9, 0x31, 0x67, 0x5a, 0x52, 0xd6, 0x1f, 0xa3, 0x72, 0x88, 0xb2, 0xdf, 
0x2f, 0xd2, 0xf1, 0xad, - 0x5a, 0x52, 0xd5, 0x6d, 0x76, 0xb6, 0x41, 0x0a, 0x9b, 0x2c, 0x2b, 0x07, 0xe4, 0xdb, 0x7f, 0xb7, - 0x73, 0x8e, 0xdc, 0x67, 0x4a, 0xcb, 0x92, 0x9e, 0xae, 0x5b, 0x7a, 0x1a, 0x98, 0x1e, 0xcd, 0xdc, - 0x6c, 0x89, 0xbc, 0x2a, 0x09, 0x20, 0xd1, 0x88, 0x6b, 0x92, 0x46, 0x7c, 0x16, 0x26, 0x93, 0x4c, - 0x5f, 0x2d, 0x96, 0x37, 0xaa, 0x61, 0xc8, 0x20, 0xf4, 0x24, 0x2c, 0xd8, 0x38, 0x08, 0xb1, 0x65, - 0xc6, 0xd8, 0x6e, 0x75, 0x77, 0x5d, 0xc7, 0x22, 0x5f, 0xb2, 0xe0, 0xe0, 0xa2, 0x57, 0xe8, 0x1c, - 0xcc, 0x46, 0x2c, 0x05, 0x99, 0x88, 0x8c, 0xe2, 0xe6, 0x48, 0x16, 0x8c, 0x1e, 0x85, 0x19, 0x57, - 0x4e, 0xed, 0xda, 0xe2, 0x66, 0x49, 0x06, 0x8a, 0x9e, 0x87, 0xa6, 0x0c, 0xe1, 0x47, 0xa0, 0x4c, - 0xaf, 0x8d, 0x23, 0x9e, 0xb1, 0xa9, 0xe7, 0x7b, 0xb2, 0x90, 0x89, 0xea, 0x48, 0xa1, 0x73, 0x0a, - 0xec, 0x9d, 0x79, 0x9f, 0x31, 0x41, 0x96, 0xc4, 0x34, 0xda, 0x81, 0x29, 0x99, 0x1b, 0x3e, 0x43, - 0x9f, 0x2c, 0x9f, 0xe2, 0x96, 0x8b, 0x7b, 0x85, 0x8a, 0x7e, 0x09, 0xc6, 0xb7, 0x8f, 0x22, 0x2b, - 0x76, 0x07, 0xb8, 0x77, 0xe1, 0x36, 0xcc, 0x66, 0xee, 0x2d, 0x48, 0xae, 0x9f, 0xd0, 0x86, 0xbf, - 0x7e, 0x42, 0xff, 0x82, 0x06, 0x63, 0x34, 0x3f, 0x5b, 0xd9, 0x76, 0x22, 0xb6, 0x25, 0xde, 0xdb, - 0xc3, 0x96, 0xb8, 0xb2, 0x82, 0x97, 0xd0, 0x06, 0x34, 0x62, 0xa7, 0x83, 0x57, 0x6c, 0x9b, 0x9b, - 0xc8, 0x03, 0xc5, 0x43, 0x25, 0xa8, 0xfa, 0x57, 0x34, 0x80, 0x34, 0x08, 0x6d, 0xc0, 0xac, 0x7d, - 0x09, 0xcb, 0xd5, 0x62, 0x96, 0x6b, 0x0a, 0xcb, 0xe7, 0x61, 0x3e, 0x0d, 0x71, 0x53, 0x63, 0x51, - 0xf3, 0x2f, 0x74, 0x17, 0xc6, 0xf9, 0x51, 0xc6, 0xa2, 0x5e, 0xdb, 0x12, 0xd9, 0xc7, 0x94, 0x03, - 0x81, 0x8f, 0x97, 0xd9, 0x36, 0x17, 0xf7, 0xa4, 0xc9, 0xf8, 0x7a, 0x17, 0x26, 0xa5, 0x6b, 0xb9, - 0x7b, 0xc9, 0xe3, 0x5e, 0x29, 0x01, 0x68, 0x92, 0x2e, 0x82, 0x98, 0x1c, 0x38, 0x6f, 0x18, 0x29, - 0x00, 0x35, 0xe9, 0x3d, 0x77, 0xf4, 0x1d, 0x8f, 0xdc, 0xe0, 0x45, 0xfd, 0xb3, 0x15, 0x98, 0xcb, - 0x6e, 0xe6, 0xa3, 0x0d, 0x18, 0x67, 0x7a, 0x40, 0x7f, 0x7d, 0x24, 0xb5, 0x80, 0xa4, 0x60, 0x00, - 0x8e, 0x8d, 0x6e, 0xc3, 0xa4, 0x9d, 0x5e, 0x13, 0x59, 0xee, 0x2e, 0xb3, 0xc2, 0x4b, 0x3f, 0x0d, - 0x99, 0x0e, 0xba, 0x45, 0x0f, 0x0b, 0xb0, 0xbb, 0xc7, 0xca, 0x79, 0xe7, 0x93, 0x0b, 0xcc, 0x24, - 0x92, 0x29, 0x0d, 0xfd, 0xab, 0xf3, 0x30, 0xa5, 0x98, 0x35, 0xf2, 0xe9, 0x7d, 0x6d, 0x04, 0xa7, - 0xf7, 0xb7, 0x60, 0x02, 0xf3, 0xdb, 0x2e, 0xcb, 0xe5, 0x08, 0x29, 0xba, 0x1b, 0xd3, 0x48, 0x68, - 0x14, 0x27, 0x4f, 0xa8, 0xbe, 0xab, 0xc9, 0x13, 0x6a, 0x27, 0x98, 0x3c, 0xe1, 0x3a, 0xd4, 0xdb, - 0xec, 0x96, 0x21, 0x7e, 0x2a, 0xa3, 0x4f, 0xf7, 0x16, 0x5c, 0x49, 0x64, 0x08, 0x0a, 0xe8, 0x5a, - 0x32, 0x98, 0xc7, 0xcb, 0x08, 0xf3, 0xbc, 0x91, 0x9b, 0x0c, 0x67, 0x9e, 0x30, 0xa1, 0x3e, 0x74, - 0xc2, 0x84, 0x24, 0xdf, 0xc1, 0xc4, 0x3b, 0xca, 0x77, 0xa0, 0xe4, 0x82, 0x68, 0x8c, 0x24, 0x17, - 0x44, 0x17, 0x4e, 0x05, 0x45, 0x09, 0x4b, 0x78, 0x06, 0x83, 0xcb, 0x43, 0x64, 0x63, 0x51, 0x7e, - 0x55, 0x4c, 0x5d, 0x64, 0x90, 0x98, 0x1c, 0x3a, 0x83, 0xc4, 0xa8, 0x73, 0x1b, 0xa4, 0xa9, 0x24, - 0xa6, 0x47, 0x96, 0x4a, 0x62, 0xe6, 0x1d, 0xa6, 0x92, 0x90, 0x92, 0x41, 0xcc, 0xbe, 0xe3, 0x64, - 0x10, 0x77, 0x55, 0x91, 0xcc, 0x52, 0x17, 0x7c, 0x7c, 0xc0, 0xab, 0x7e, 0x39, 0x51, 0x45, 0x28, - 0xb3, 0x84, 0x17, 0xf3, 0x43, 0x26, 0xbc, 0x50, 0x72, 0x4a, 0xa0, 0x91, 0xe4, 0x94, 0x78, 0x55, - 0x5e, 0x25, 0x16, 0x4a, 0xde, 0x92, 0xcf, 0x3e, 0x57, 0x49, 0x26, 0x54, 0xf2, 0x69, 0x2a, 0x16, - 0x4f, 0x34, 0x4d, 0xc5, 0xa9, 0xd1, 0xa6, 0xa9, 0x38, 0x7d, 0xa2, 0x69, 0x2a, 0x1e, 0x78, 0x57, - 0xd3, 0x54, 0x34, 0x4f, 0x22, 0x4d, 0xc5, 0x83, 0xef, 0x34, 0x4d, 0x05, 0x69, 0xef, 0x40, 0xc4, - 
0x57, 0x36, 0x97, 0xca, 0xb4, 0x77, 0x61, 0x38, 0xa6, 0x91, 0x52, 0xd1, 0x3f, 0x09, 0x67, 0x8e, - 0x1f, 0x40, 0xe9, 0xe6, 0x50, 0x2b, 0xb5, 0x7c, 0x25, 0x48, 0xcf, 0x94, 0x03, 0xff, 0x4b, 0x83, - 0x07, 0x7a, 0x1c, 0x54, 0xee, 0x19, 0xb4, 0x7b, 0x17, 0x66, 0x03, 0xf5, 0xd3, 0xd2, 0x81, 0xee, - 0xca, 0x41, 0xe8, 0x2c, 0x95, 0x2b, 0x8b, 0xbf, 0xf5, 0xa3, 0x33, 0xda, 0xf7, 0x7f, 0x74, 0x46, - 0xfb, 0xb3, 0x1f, 0x9d, 0xd1, 0xde, 0xfa, 0x8b, 0x33, 0xff, 0xee, 0xd3, 0x95, 0xc3, 0x8b, 0xff, - 0x1c, 0x00, 0x00, 0xff, 0xff, 0xea, 0x43, 0xdb, 0x19, 0x65, 0x95, 0x00, 0x00, + // 9185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x8c, 0x24, 0x47, + 0x96, 0x10, 0x59, 0xd5, 0xdd, 0xd5, 0xf5, 0xfa, 0x73, 0x62, 0xba, 0xc7, 0xe5, 0xb6, 0xdd, 0x1e, + 0xa7, 0x67, 0xc7, 0xb3, 0x5e, 0xbb, 0xc7, 0x1e, 0x8f, 0x67, 0x6d, 0xcf, 0x7a, 0xc6, 0x3d, 0x5d, + 0xdd, 0x33, 0x6d, 0xcf, 0x47, 0x39, 0x7b, 0xec, 0x59, 0xdf, 0xc2, 0x2d, 0xd9, 0x99, 0xd1, 0xd5, + 0xb1, 0x9d, 0x95, 0x59, 0xce, 0xcc, 0xea, 0x9e, 0x5e, 0x1d, 0x12, 0x20, 0x74, 0xec, 0xb1, 0xa7, + 0x63, 0x41, 0xc7, 0xee, 0xb1, 0x5a, 0xf1, 0x75, 0x42, 0xc7, 0x0f, 0x8e, 0x0f, 0x21, 0xb1, 0x27, + 0x21, 0x40, 0x20, 0x10, 0xdc, 0x0f, 0x04, 0x48, 0x48, 0xab, 0x3b, 0x60, 0x01, 0x83, 0xc4, 0xfd, + 0xe0, 0x07, 0x27, 0x74, 0x08, 0xfe, 0xa1, 0xf8, 0xca, 0x8c, 0xc8, 0xaf, 0xae, 0xee, 0xa9, 0x19, + 0x1b, 0xdd, 0xfd, 0xcb, 0x78, 0x11, 0xef, 0x65, 0x7c, 0xbe, 0x78, 0xef, 0xc5, 0x8b, 0x17, 0x60, + 0xee, 0xbd, 0x15, 0xad, 0x90, 0xe0, 0xa2, 0xdd, 0x27, 0x17, 0x9d, 0x20, 0xc4, 0x17, 0xf7, 0x5f, + 0xbf, 0xd8, 0xc5, 0x3e, 0x0e, 0xed, 0x18, 0xbb, 0x2b, 0xfd, 0x30, 0x88, 0x03, 0x84, 0x78, 0x99, + 0x15, 0xbb, 0x4f, 0x56, 0x68, 0x99, 0x95, 0xfd, 0xd7, 0x97, 0xee, 0xa4, 0x78, 0xf8, 0x61, 0x8c, + 0xfd, 0x88, 0x04, 0x7e, 0xf4, 0xaa, 0xdd, 0x27, 0x11, 0x0e, 0xf7, 0x71, 0x78, 0xb1, 0xbf, 0xd7, + 0xa5, 0x79, 0x91, 0x5e, 0xe0, 0xe2, 0xfe, 0xeb, 0xdb, 0x38, 0xb6, 0x73, 0xbf, 0x58, 0xba, 0x9c, + 0x92, 0xeb, 0xd9, 0xce, 0x2e, 0xf1, 0x71, 0x78, 0x28, 0x69, 0x5c, 0x0c, 0x71, 0x14, 0x0c, 0x42, + 0x07, 0x1f, 0x0b, 0x2b, 0xba, 0xd8, 0xc3, 0xb1, 0x5d, 0xd0, 0x9c, 0xa5, 0x8b, 0x65, 0x58, 0xe1, + 0xc0, 0x8f, 0x49, 0x2f, 0xff, 0x9b, 0x2b, 0x47, 0x21, 0x44, 0xce, 0x2e, 0xee, 0xd9, 0x39, 0xbc, + 0x37, 0xca, 0xf0, 0x06, 0x31, 0xf1, 0x2e, 0x12, 0x3f, 0x8e, 0xe2, 0x30, 0x8b, 0x64, 0x7e, 0xcf, + 0x80, 0xb3, 0xab, 0x0f, 0xb6, 0xd6, 0x3d, 0x3b, 0x8a, 0x89, 0x73, 0xc3, 0x0b, 0x9c, 0xbd, 0xad, + 0x38, 0x08, 0xf1, 0xc7, 0x81, 0x37, 0xe8, 0xe1, 0x2d, 0xd6, 0x11, 0x68, 0x09, 0x26, 0xf7, 0x59, + 0x7a, 0xb3, 0xdd, 0x32, 0xce, 0x1a, 0x17, 0x9a, 0x56, 0x92, 0x46, 0x67, 0x60, 0x62, 0x27, 0xba, + 0x7f, 0xd8, 0xc7, 0xad, 0x1a, 0xcb, 0x11, 0x29, 0xf4, 0x2c, 0x34, 0xfb, 0x76, 0x18, 0x93, 0x98, + 0x04, 0x7e, 0xab, 0x7e, 0xd6, 0xb8, 0x30, 0x6e, 0xa5, 0x00, 0x4a, 0x31, 0xc4, 0xb6, 0x7b, 0xcf, + 0xf7, 0x0e, 0x5b, 0x63, 0x67, 0x8d, 0x0b, 0x93, 0x56, 0x92, 0x36, 0x3f, 0x33, 0x60, 0x72, 0x75, + 0x67, 0x87, 0xf8, 0x24, 0x3e, 0x44, 0x6d, 0x98, 0xf6, 0x03, 0x17, 0xcb, 0x34, 0xfb, 0xfd, 0xd4, + 0xa5, 0xb3, 0x2b, 0xf9, 0x39, 0xb2, 0x72, 0x57, 0x29, 0x67, 0x69, 0x58, 0x68, 0x15, 0xa6, 0xfa, + 0x81, 0x9b, 0x10, 0xa9, 0x31, 0x22, 0xcf, 0x17, 0x11, 0xe9, 0xa4, 0xc5, 0x2c, 0x15, 0x07, 0xdd, + 0x81, 0x39, 0x9a, 0xf4, 0x63, 0x92, 0x90, 0xa9, 0x33, 0x32, 0x2f, 0x96, 0x91, 0x51, 0x8a, 0x5a, + 0x59, 0x5c, 0xb3, 0x0d, 0xb3, 0xab, 0x71, 0x6c, 0x3b, 0xbb, 0xd8, 0xe5, 0x5d, 0x8d, 0x10, 0x8c, + 0xf9, 0x76, 0x0f, 0x8b, 0x0e, 0x66, 0xdf, 0x68, 0x19, 0xc0, 0xc5, 0xfb, 0xc4, 0xc1, 0x1d, 
0x3b, + 0xde, 0x15, 0x1d, 0xac, 0x40, 0xcc, 0x6f, 0x42, 0x73, 0x75, 0x3f, 0x20, 0x6e, 0x27, 0x70, 0x23, + 0x64, 0xc1, 0x5c, 0x3f, 0xc4, 0x3b, 0x38, 0x4c, 0x40, 0x2d, 0xe3, 0x6c, 0xfd, 0xc2, 0xd4, 0xa5, + 0x0b, 0x85, 0x35, 0xd4, 0x8b, 0xae, 0xfb, 0x71, 0x48, 0xab, 0xa9, 0x43, 0xcd, 0x1f, 0x1b, 0xb0, + 0xb8, 0xfa, 0xed, 0x41, 0x88, 0xdb, 0x24, 0xda, 0xcb, 0xce, 0x09, 0x97, 0x44, 0x7b, 0x77, 0xd3, + 0x2a, 0x27, 0x69, 0xd4, 0x82, 0x06, 0xfd, 0xfe, 0xc8, 0xda, 0x14, 0x75, 0x96, 0x49, 0x74, 0x16, + 0xa6, 0x1c, 0x36, 0x37, 0xbb, 0x77, 0x02, 0x17, 0xb3, 0x1e, 0x6c, 0x5a, 0x2a, 0x48, 0x99, 0x4f, + 0x63, 0xda, 0x7c, 0x52, 0x67, 0xcc, 0xb8, 0x3e, 0x63, 0x68, 0xd7, 0xed, 0x11, 0xdf, 0x6d, 0x4d, + 0xf0, 0xae, 0xa3, 0xdf, 0xe6, 0x5f, 0x37, 0xe0, 0x79, 0x56, 0xf3, 0x0d, 0xe2, 0xe1, 0x0e, 0x0e, + 0x23, 0x12, 0xc5, 0xd8, 0x8f, 0xb5, 0x36, 0x2c, 0x03, 0x44, 0xd8, 0x09, 0x71, 0xac, 0xb4, 0x42, + 0x81, 0xd0, 0x39, 0x1c, 0xed, 0xda, 0x21, 0x66, 0xd9, 0xbc, 0x25, 0x29, 0x40, 0xab, 0x51, 0x3d, + 0x53, 0xa3, 0x0b, 0x30, 0x97, 0xd2, 0x89, 0xfa, 0xb6, 0x23, 0x9b, 0x93, 0x05, 0x9b, 0x9f, 0x8a, + 0x0e, 0xa6, 0xd5, 0x7c, 0x32, 0x95, 0x33, 0x7f, 0xd9, 0x80, 0xc6, 0x0d, 0xe2, 0xbb, 0xc4, 0xef, + 0xa2, 0xdb, 0x30, 0x49, 0x19, 0x97, 0x6b, 0xc7, 0xb6, 0x58, 0x5b, 0xaf, 0x29, 0xb3, 0x25, 0xe1, + 0x23, 0x2b, 0xfd, 0xbd, 0x2e, 0x05, 0x44, 0x2b, 0xb4, 0x34, 0x9d, 0x3f, 0xf7, 0xb6, 0xbf, 0x85, + 0x9d, 0xf8, 0x0e, 0x8e, 0x6d, 0x2b, 0xa1, 0x80, 0xae, 0xc2, 0x44, 0x6c, 0x87, 0x5d, 0x1c, 0x8b, + 0x25, 0x56, 0xb8, 0x36, 0x38, 0xa6, 0x45, 0x27, 0x1a, 0xf6, 0x1d, 0x6c, 0x09, 0x14, 0x33, 0x82, + 0xa7, 0xd7, 0xb6, 0x36, 0x4b, 0x86, 0xea, 0x0c, 0x4c, 0xb8, 0x21, 0xd9, 0xc7, 0xa1, 0xe8, 0x09, + 0x91, 0x42, 0x26, 0x4c, 0x73, 0x56, 0x74, 0xcb, 0xf6, 0x5d, 0x4f, 0x76, 0x84, 0x06, 0xab, 0xec, + 0x8b, 0xcb, 0x30, 0xbd, 0x66, 0xf7, 0xed, 0x6d, 0xe2, 0x91, 0x98, 0xe0, 0x08, 0xcd, 0x43, 0xdd, + 0x76, 0x5d, 0xb6, 0x70, 0x9a, 0x16, 0xfd, 0xa4, 0x93, 0xcb, 0x0d, 0x83, 0x7e, 0xab, 0xc6, 0x40, + 0xec, 0xdb, 0xfc, 0x2f, 0x06, 0x3c, 0xbb, 0x86, 0xfb, 0xbb, 0x1b, 0x5b, 0x25, 0xd5, 0x5d, 0x82, + 0xc9, 0x5e, 0xe0, 0x93, 0x38, 0x08, 0x23, 0x41, 0x2b, 0x49, 0x53, 0x82, 0xfd, 0x74, 0x39, 0xb3, + 0x6f, 0x0a, 0x1b, 0x44, 0x38, 0x14, 0x0b, 0x82, 0x7d, 0xa7, 0x13, 0x80, 0x4e, 0x0d, 0x31, 0x7d, + 0x14, 0x08, 0x5a, 0x85, 0x26, 0x4f, 0x59, 0x78, 0x87, 0x2d, 0x89, 0x92, 0xfe, 0xde, 0x92, 0x85, + 0x44, 0x7f, 0xa7, 0x58, 0x5a, 0xcf, 0x4c, 0x64, 0x7a, 0xe6, 0x3f, 0x18, 0x80, 0x78, 0x1b, 0x9f, + 0x78, 0xcb, 0x36, 0xf2, 0x2d, 0x2b, 0xe4, 0x61, 0xb7, 0x03, 0xc7, 0xf6, 0xb2, 0xd3, 0x69, 0xc8, + 0xe6, 0xb9, 0x80, 0xd6, 0x88, 0xef, 0xe2, 0xf0, 0x91, 0x77, 0xba, 0xaa, 0xe9, 0xf5, 0x16, 0xcc, + 0xae, 0x79, 0x04, 0xfb, 0xf1, 0x66, 0x67, 0x2d, 0xf0, 0x77, 0x48, 0x17, 0x9d, 0x87, 0x59, 0xba, + 0x89, 0x07, 0x83, 0x78, 0x0b, 0x3b, 0x81, 0xcf, 0x98, 0x34, 0xdd, 0x1c, 0x33, 0x50, 0xb3, 0x0f, + 0x68, 0x2d, 0xe8, 0xf5, 0x03, 0x1f, 0xfb, 0xf1, 0x5a, 0xe0, 0xbb, 0x7c, 0xdf, 0x44, 0x30, 0x16, + 0xd3, 0x1a, 0x88, 0x4d, 0x82, 0x7e, 0xd3, 0x7a, 0x45, 0xb1, 0x1d, 0x0f, 0x22, 0x59, 0x2f, 0x9e, + 0xa2, 0x5c, 0xb8, 0x87, 0xa3, 0xc8, 0xee, 0x4a, 0x3e, 0x2b, 0x93, 0x68, 0x01, 0xc6, 0x71, 0x18, + 0x06, 0xa1, 0xe8, 0x7a, 0x9e, 0x30, 0x7f, 0xcd, 0x80, 0xb9, 0xe4, 0x97, 0x5b, 0x9c, 0xc6, 0x68, + 0xd9, 0xc3, 0x06, 0x80, 0x23, 0x9b, 0x12, 0xb1, 0x05, 0x35, 0x75, 0xe9, 0x7c, 0xd1, 0xc0, 0xe6, + 0x5b, 0x6e, 0x29, 0x98, 0xe6, 0x8f, 0x0c, 0x38, 0x9d, 0xa9, 0xe9, 0x6d, 0x12, 0xc5, 0xe8, 0xfd, + 0x5c, 0x6d, 0x57, 0x86, 0xab, 0x2d, 0xc5, 0xce, 0xd4, 0xf5, 0x6d, 0x18, 0x27, 0x31, 0xee, 0xc9, + 0x6a, 0xbe, 0x58, 
0x59, 0x4d, 0x5e, 0x07, 0x8b, 0x63, 0x98, 0xff, 0xda, 0x80, 0x26, 0x1f, 0xed, + 0x3b, 0x76, 0x7f, 0xe4, 0x1c, 0x76, 0x8c, 0x51, 0xe2, 0xb5, 0x7a, 0xa9, 0xb8, 0x56, 0xe2, 0xd7, + 0x2b, 0x6d, 0x3b, 0xb6, 0xf9, 0xc6, 0xce, 0x90, 0x96, 0xbe, 0x0a, 0xcd, 0x04, 0x44, 0x39, 0xdd, + 0x1e, 0x3e, 0x14, 0x33, 0x89, 0x7e, 0xd2, 0x69, 0xb1, 0x6f, 0x7b, 0x03, 0x39, 0xbf, 0x79, 0xe2, + 0x9d, 0xda, 0x5b, 0x86, 0xf9, 0x4b, 0x94, 0x17, 0x48, 0xb2, 0xeb, 0xfe, 0xbe, 0x58, 0x2d, 0x7f, + 0x18, 0x16, 0xbc, 0x82, 0x25, 0x28, 0x9a, 0x39, 0xfc, 0x92, 0x2d, 0xa4, 0x42, 0xd7, 0x55, 0xd0, + 0xa7, 0x03, 0x6e, 0x7b, 0xac, 0x46, 0x93, 0x56, 0x92, 0x36, 0xff, 0x9a, 0x01, 0x0b, 0x49, 0x85, + 0x3e, 0xc0, 0x87, 0x5b, 0xd8, 0xc3, 0x4e, 0x1c, 0x84, 0x8f, 0xb9, 0x4a, 0xa2, 0xcf, 0x6a, 0x69, + 0x9f, 0xa9, 0x95, 0xac, 0x67, 0x2a, 0xf9, 0x3d, 0x03, 0x66, 0x92, 0x4a, 0x8e, 0x7c, 0x82, 0xbe, + 0xa1, 0x4f, 0xd0, 0xe7, 0x2a, 0xa7, 0x82, 0x9c, 0x9a, 0xff, 0x8c, 0xad, 0x1c, 0x01, 0xec, 0x84, + 0x01, 0x6d, 0x1e, 0xe5, 0x2b, 0x8f, 0xb7, 0xdb, 0x86, 0xa9, 0xea, 0x07, 0xf8, 0xf0, 0x7e, 0x40, + 0x85, 0x5a, 0x51, 0x55, 0xad, 0x67, 0xc7, 0x32, 0x3d, 0xfb, 0x3b, 0x06, 0x2c, 0x26, 0xcd, 0xd0, + 0x18, 0xf8, 0x17, 0xb0, 0x21, 0x67, 0x61, 0xca, 0xc5, 0x3b, 0xf6, 0xc0, 0x8b, 0x13, 0x99, 0x77, + 0xdc, 0x52, 0x41, 0x95, 0x4d, 0xfd, 0x9f, 0x0d, 0xc6, 0x4c, 0x62, 0x9b, 0xce, 0x8c, 0x42, 0x25, + 0x61, 0x01, 0xc6, 0x49, 0x8f, 0x72, 0x79, 0xb1, 0x6c, 0x59, 0x82, 0x72, 0x7f, 0x27, 0xe8, 0xf5, + 0x6c, 0xdf, 0x6d, 0xd5, 0xd9, 0x36, 0x2d, 0x93, 0x94, 0x86, 0x1d, 0x76, 0xa3, 0xd6, 0x18, 0x17, + 0x68, 0xe8, 0x37, 0xdd, 0x91, 0x0f, 0x82, 0x70, 0x8f, 0xf8, 0xdd, 0x36, 0x09, 0xd9, 0x96, 0xdb, + 0xb4, 0x14, 0x08, 0xfa, 0x2a, 0x8c, 0xf7, 0x83, 0x30, 0x8e, 0x5a, 0x13, 0xac, 0xe1, 0x2f, 0x94, + 0x4c, 0x36, 0x5e, 0xcb, 0x4e, 0x10, 0xc6, 0x16, 0x2f, 0x8f, 0x5e, 0x81, 0x3a, 0xf6, 0xf7, 0x5b, + 0x0d, 0x86, 0xb6, 0x54, 0x84, 0xb6, 0xee, 0xef, 0x7f, 0x6c, 0x87, 0x16, 0x2d, 0x46, 0x37, 0x7e, + 0xa9, 0x7d, 0x47, 0xad, 0xc9, 0xf2, 0x21, 0xb3, 0x44, 0x21, 0x0b, 0x7f, 0x3a, 0x20, 0x21, 0xee, + 0x61, 0x3f, 0x8e, 0xac, 0x14, 0x15, 0xad, 0x49, 0xa9, 0xf0, 0x4e, 0x30, 0xf0, 0xe3, 0xa8, 0xd5, + 0x64, 0xbf, 0x2f, 0x54, 0xf8, 0x3e, 0x4e, 0xcb, 0x59, 0x1a, 0x12, 0xba, 0x0e, 0x33, 0x1e, 0xd9, + 0xc7, 0x3e, 0x8e, 0xa2, 0x4e, 0x18, 0x6c, 0xe3, 0x16, 0xb0, 0x0a, 0x3d, 0x5d, 0xac, 0x4d, 0x05, + 0xdb, 0xd8, 0xd2, 0xcb, 0xa3, 0x55, 0x98, 0xa5, 0x82, 0x00, 0x49, 0x29, 0x4c, 0x1d, 0x45, 0x21, + 0x83, 0x80, 0xae, 0x42, 0xd3, 0x23, 0x3b, 0xd8, 0x39, 0x74, 0x3c, 0xdc, 0x9a, 0x66, 0xd8, 0x85, + 0x93, 0xee, 0xb6, 0x2c, 0x64, 0xa5, 0xe5, 0xd1, 0x15, 0x38, 0x13, 0xe3, 0xb0, 0x47, 0x7c, 0x9b, + 0xce, 0xa5, 0x3b, 0x7c, 0xf3, 0x67, 0x9a, 0xe4, 0x0c, 0x1b, 0xe0, 0x92, 0x5c, 0xaa, 0xbc, 0xb0, + 0x39, 0xd4, 0x19, 0x78, 0x5e, 0x27, 0xf0, 0x88, 0x73, 0xd8, 0x9a, 0xe5, 0xca, 0x4b, 0x06, 0x4c, + 0x95, 0xe2, 0x08, 0x3b, 0x83, 0x90, 0xc4, 0x87, 0x74, 0xf4, 0xf1, 0xc3, 0xb8, 0x35, 0x57, 0x29, + 0x88, 0xaa, 0x45, 0xad, 0x2c, 0x2e, 0x9d, 0xc9, 0x51, 0xec, 0x12, 0xbf, 0x35, 0xcf, 0x16, 0x01, + 0x4f, 0x30, 0x45, 0x87, 0x7e, 0xdc, 0xa3, 0xeb, 0xf8, 0x14, 0xcb, 0x49, 0x01, 0x94, 0x25, 0xc7, + 0xf1, 0x61, 0x0b, 0x31, 0x38, 0xfd, 0x44, 0x57, 0xa1, 0x81, 0xfd, 0xfd, 0x8d, 0x30, 0xe8, 0xb5, + 0x4e, 0x97, 0xcf, 0xd6, 0x75, 0x5e, 0x84, 0xb3, 0x0d, 0x4b, 0x62, 0xa0, 0x77, 0xa0, 0x55, 0xd0, + 0x2b, 0xbc, 0x13, 0x16, 0x58, 0x27, 0x94, 0xe6, 0xa3, 0x0d, 0x98, 0xe1, 0x13, 0xa8, 0xcd, 0x34, + 0xf4, 0xa8, 0xb5, 0xc8, 0x7e, 0x7f, 0xb6, 0x7c, 0xda, 0xf1, 0x82, 0x96, 0x8e, 0x66, 0xb6, 0x61, + 0x36, 0x59, 0x4b, 0x9b, 0x3d, 0x21, 0xb0, 
0xd1, 0xa5, 0x2e, 0x25, 0x6e, 0x9e, 0x60, 0x1d, 0x43, + 0xbe, 0x8d, 0x6f, 0x1c, 0xc6, 0x98, 0xcb, 0x7e, 0x75, 0x2b, 0x05, 0x98, 0x7f, 0x91, 0xef, 0x3e, + 0xe9, 0x92, 0x2c, 0x64, 0x1e, 0x4b, 0x30, 0xb9, 0x1b, 0x44, 0x31, 0xcd, 0x67, 0x24, 0xc6, 0xad, + 0x24, 0x8d, 0xce, 0xc1, 0x8c, 0xa3, 0x12, 0x10, 0xac, 0x4b, 0x07, 0x52, 0x0a, 0xcc, 0x94, 0xe4, + 0x04, 0x9e, 0x90, 0x27, 0x93, 0x34, 0x15, 0x4d, 0x29, 0xb5, 0xcd, 0x8e, 0x60, 0x29, 0x22, 0x45, + 0xf9, 0x77, 0xda, 0x44, 0x2a, 0x3c, 0x61, 0xb4, 0x06, 0x8d, 0x03, 0x9b, 0xc4, 0xc4, 0xef, 0x0a, + 0x5e, 0xfd, 0xe5, 0x4a, 0x1e, 0xc3, 0x90, 0x1e, 0x70, 0x04, 0x4b, 0x62, 0x52, 0x22, 0xe1, 0xc0, + 0xf7, 0x29, 0x91, 0xda, 0xb0, 0x44, 0x2c, 0x8e, 0x60, 0x49, 0x4c, 0x74, 0x1b, 0x40, 0x0e, 0x31, + 0x76, 0x85, 0x91, 0xe7, 0x95, 0xa3, 0xe9, 0xdc, 0x4f, 0x70, 0x2c, 0x05, 0xdf, 0xb4, 0xd9, 0x4e, + 0x95, 0xff, 0x1f, 0xba, 0x45, 0xa7, 0xb5, 0x1d, 0xc6, 0xd8, 0x5d, 0x8d, 0x45, 0x93, 0x5f, 0x1e, + 0x4e, 0x18, 0xb8, 0x4f, 0x7a, 0x54, 0xcd, 0x91, 0xc8, 0xe6, 0x6f, 0xd4, 0xa0, 0x55, 0x56, 0x17, + 0x3a, 0x3c, 0xf8, 0x21, 0x89, 0xd7, 0xe8, 0xd6, 0xc3, 0x35, 0x8d, 0x24, 0xcd, 0x34, 0x07, 0xd2, + 0x95, 0xf2, 0xd5, 0xb8, 0x25, 0x52, 0x14, 0x1e, 0x62, 0x3b, 0x12, 0x86, 0xbb, 0xa6, 0x25, 0x52, + 0xaa, 0x46, 0x31, 0xa6, 0x6b, 0x14, 0x5a, 0x63, 0xc6, 0x1f, 0xa1, 0x31, 0xe8, 0x7d, 0x80, 0x1d, + 0xe2, 0x93, 0x68, 0x97, 0x91, 0x9a, 0x38, 0x36, 0x29, 0x05, 0x9b, 0x59, 0x9b, 0x92, 0x85, 0xd4, + 0x6e, 0x35, 0x84, 0xb5, 0x29, 0x05, 0x99, 0x9b, 0xd9, 0xd1, 0x11, 0x53, 0x4a, 0xe9, 0x02, 0xa3, + 0xac, 0x0b, 0x6a, 0x5a, 0x17, 0x98, 0xbf, 0x5e, 0xa3, 0xea, 0x93, 0x42, 0x6b, 0x10, 0x15, 0xae, + 0xb8, 0xb7, 0x28, 0x93, 0xb3, 0x63, 0x2c, 0x66, 0xa8, 0x39, 0xc4, 0x0c, 0xe5, 0x08, 0xe8, 0x3d, + 0x68, 0x7a, 0x76, 0xc4, 0x94, 0x0d, 0x2c, 0xe6, 0xe5, 0x30, 0xd8, 0x29, 0x12, 0xe5, 0x23, 0x74, + 0x83, 0x91, 0x36, 0x57, 0x9e, 0x40, 0x26, 0x4c, 0x87, 0x98, 0x8d, 0xc0, 0x1a, 0xdd, 0xf9, 0xd8, + 0xf8, 0x8d, 0x5b, 0x1a, 0x2c, 0x15, 0x32, 0x26, 0x32, 0x42, 0x06, 0xfb, 0x48, 0x3a, 0x57, 0x26, + 0xb3, 0x5d, 0x3f, 0x99, 0xef, 0xfa, 0x73, 0x30, 0xdb, 0xb6, 0x71, 0x2f, 0xf0, 0xd7, 0x7d, 0xb7, + 0x1f, 0x10, 0x9f, 0xf1, 0x27, 0xc6, 0x66, 0xf8, 0x34, 0x65, 0xdf, 0xe6, 0x67, 0x06, 0xcc, 0xb4, + 0xb1, 0x87, 0x63, 0x7c, 0x8f, 0x49, 0x44, 0x11, 0x5a, 0x01, 0xd4, 0x0d, 0x6d, 0x07, 0x77, 0x70, + 0x48, 0x02, 0x57, 0x55, 0xa2, 0xeb, 0x56, 0x41, 0x0e, 0xba, 0x09, 0x33, 0xfd, 0x10, 0x6b, 0x7a, + 0xa7, 0x51, 0xb6, 0x29, 0x74, 0xd4, 0x82, 0x96, 0x8e, 0x87, 0x5e, 0x86, 0xf9, 0x20, 0xec, 0xef, + 0xda, 0x7e, 0x1b, 0xf7, 0xb1, 0xef, 0x52, 0x99, 0x43, 0x88, 0xfc, 0x39, 0x38, 0x7a, 0x05, 0x4e, + 0xf5, 0xc3, 0xa0, 0x6f, 0x77, 0xd9, 0x36, 0x21, 0xf6, 0x0f, 0xbe, 0x66, 0xf2, 0x19, 0xe6, 0xd7, + 0x61, 0xb1, 0x1d, 0x1c, 0xf8, 0x07, 0x76, 0xe8, 0xae, 0x76, 0x36, 0x15, 0xb1, 0xfc, 0xba, 0x94, + 0x37, 0xb9, 0x21, 0xb7, 0x90, 0x9b, 0x29, 0x98, 0x7c, 0x53, 0xd9, 0x20, 0x1e, 0x96, 0xf2, 0xfe, + 0xbf, 0x37, 0x34, 0xd2, 0x69, 0x81, 0xc4, 0x56, 0x63, 0x28, 0xb6, 0x9a, 0x35, 0x98, 0xdc, 0x21, + 0xd8, 0x73, 0x2d, 0xbc, 0x23, 0x7a, 0xe9, 0xa5, 0x72, 0x03, 0xde, 0x06, 0x2d, 0x29, 0xf5, 0x2e, + 0x2b, 0x41, 0x44, 0x1f, 0xc1, 0xbc, 0x14, 0xc4, 0x36, 0x24, 0xb1, 0x7a, 0x39, 0x33, 0xb6, 0xd4, + 0xb2, 0x09, 0xb9, 0x1c, 0x09, 0x5a, 0xdf, 0x1e, 0xe5, 0x61, 0x63, 0x7c, 0x72, 0xd0, 0x6f, 0xf3, + 0xe7, 0xe0, 0xa9, 0x5c, 0xe3, 0x84, 0x1e, 0xf0, 0xa8, 0x3d, 0x97, 0x95, 0xda, 0x6b, 0x39, 0xa9, + 0xdd, 0xfc, 0x39, 0x58, 0x58, 0xef, 0xf5, 0xe3, 0xc3, 0x36, 0x09, 0xb3, 0xa6, 0xca, 0x1e, 0x76, + 0xc9, 0xa0, 0x27, 0x59, 0x07, 0x4f, 0xa1, 0xdb, 0x7c, 0xbb, 0xbe, 
0x4d, 0x7a, 0x44, 0xda, 0x47, + 0x2b, 0xb5, 0xbf, 0x15, 0xd9, 0x07, 0x2b, 0x1f, 0x0e, 0x6c, 0x3f, 0x26, 0xf1, 0xa1, 0x95, 0x12, + 0x30, 0x7f, 0x64, 0xc0, 0x9c, 0x5c, 0x39, 0xab, 0xae, 0x1b, 0xe2, 0x28, 0x42, 0xb3, 0x50, 0x23, + 0x7d, 0xf1, 0xd7, 0x1a, 0xe9, 0xa3, 0x55, 0x68, 0x72, 0xdb, 0x6a, 0x3a, 0xa0, 0x43, 0x59, 0x64, + 0x53, 0x2c, 0x29, 0x1f, 0x30, 0x2e, 0xc6, 0x37, 0x83, 0x24, 0x4d, 0xf3, 0xfc, 0xc0, 0xe5, 0x06, + 0x68, 0xb1, 0xf3, 0xcb, 0xb4, 0x69, 0xc1, 0xb4, 0xac, 0x5d, 0xa9, 0xec, 0x41, 0xa7, 0x60, 0x2a, + 0x77, 0xb0, 0x6f, 0x4d, 0x9a, 0xa8, 0xeb, 0xd2, 0x84, 0xf9, 0x53, 0x03, 0x66, 0x25, 0xd1, 0xad, + 0xc1, 0x76, 0x84, 0x63, 0xda, 0x42, 0x9b, 0x37, 0x1e, 0xcb, 0xa1, 0x7e, 0xb1, 0x58, 0xda, 0xd3, + 0x7a, 0xca, 0x4a, 0xb1, 0xd0, 0x87, 0x70, 0xca, 0x0f, 0x62, 0x8b, 0x72, 0xc2, 0xd5, 0x84, 0x54, + 0x6d, 0x78, 0x52, 0x79, 0x6c, 0x74, 0x45, 0x6a, 0x4b, 0xf5, 0x72, 0x01, 0x50, 0xed, 0x1d, 0xa1, + 0x2c, 0x99, 0x3f, 0x30, 0xa0, 0x29, 0xe1, 0xa3, 0xb6, 0xbd, 0x7d, 0x0d, 0x1a, 0x11, 0xeb, 0x33, + 0xd9, 0x38, 0xb3, 0xaa, 0x56, 0xbc, 0x7b, 0x2d, 0x89, 0xc2, 0x4c, 0x19, 0x49, 0xcd, 0x3e, 0x17, + 0x53, 0x46, 0xf2, 0x77, 0xc9, 0xda, 0xfe, 0x3e, 0xab, 0x92, 0x22, 0xc4, 0xd3, 0x85, 0xd7, 0x0f, + 0xf1, 0x0e, 0x79, 0x28, 0x17, 0x1e, 0x4f, 0xa1, 0xf7, 0x61, 0xda, 0x49, 0x0c, 0x21, 0xc9, 0x4a, + 0x38, 0x5f, 0x69, 0x30, 0x49, 0x8c, 0x5c, 0x96, 0x86, 0xab, 0x1b, 0xdd, 0xeb, 0x47, 0x19, 0xdd, + 0x53, 0x2a, 0x29, 0x96, 0x19, 0xc2, 0x04, 0xd7, 0x79, 0xcb, 0xb4, 0xf9, 0xbc, 0x11, 0x0e, 0x5d, + 0x83, 0x26, 0xfb, 0x60, 0x5a, 0x4d, 0xbd, 0xfc, 0x0c, 0x94, 0x13, 0x96, 0xff, 0x4c, 0x50, 0xcc, + 0xdf, 0xac, 0xd1, 0xf5, 0x98, 0xe6, 0x69, 0xac, 0xde, 0x18, 0x25, 0xab, 0xaf, 0x3d, 0x3a, 0xab, + 0xb7, 0x60, 0xce, 0x51, 0x6c, 0x7b, 0x69, 0x4f, 0x5f, 0xa8, 0x1c, 0x32, 0xc5, 0x0c, 0x68, 0x65, + 0x09, 0xa0, 0x4d, 0x98, 0xe6, 0x23, 0x20, 0x08, 0x8e, 0x31, 0x82, 0x5f, 0x2a, 0x1f, 0x3a, 0x95, + 0x9a, 0x86, 0x6a, 0xfe, 0x70, 0x02, 0xc6, 0xd7, 0xf7, 0xb1, 0x1f, 0x8f, 0x78, 0x85, 0x7e, 0x00, + 0xb3, 0xc4, 0xdf, 0x0f, 0xbc, 0x7d, 0xec, 0xf2, 0xfc, 0xe3, 0xb0, 0xec, 0x0c, 0xea, 0x09, 0x44, + 0xf8, 0xaf, 0xc2, 0x04, 0x1f, 0x07, 0x21, 0xbf, 0x17, 0x5a, 0x4b, 0x58, 0xbb, 0xc5, 0xf4, 0x12, + 0xc5, 0x91, 0x05, 0xb3, 0x3b, 0x24, 0x8c, 0x62, 0x2a, 0x7e, 0x47, 0xb1, 0xdd, 0xeb, 0x9f, 0x40, + 0x6a, 0xcf, 0x50, 0x40, 0x1d, 0x98, 0xa1, 0x52, 0x6b, 0x4a, 0xb2, 0x71, 0x6c, 0x92, 0x3a, 0x01, + 0xba, 0xae, 0x1c, 0x26, 0xdd, 0x4e, 0xb2, 0xdd, 0x86, 0x27, 0x92, 0xf3, 0x94, 0xa6, 0x72, 0x9e, + 0x72, 0x07, 0x9a, 0x98, 0x36, 0x93, 0xe2, 0x0a, 0x9b, 0xcf, 0xc5, 0xe1, 0xfe, 0x7b, 0x87, 0x38, + 0x61, 0xc0, 0x15, 0x9a, 0x84, 0x02, 0xeb, 0x57, 0x1c, 0x12, 0x1c, 0x09, 0xeb, 0x4f, 0x45, 0xbf, + 0xb2, 0x62, 0x96, 0x28, 0x4e, 0x87, 0xd0, 0x66, 0x62, 0x20, 0x33, 0xfc, 0x34, 0x2d, 0x91, 0x42, + 0xef, 0x42, 0x23, 0xc4, 0x1e, 0x53, 0x4e, 0x67, 0x86, 0x9f, 0x20, 0x12, 0x87, 0xca, 0xcf, 0x21, + 0xa6, 0xfb, 0x0d, 0xf1, 0xbb, 0xc9, 0x01, 0x86, 0x30, 0xf0, 0x14, 0xe4, 0x50, 0x51, 0x36, 0x81, + 0x6e, 0xfa, 0x51, 0x6c, 0xfb, 0x0e, 0x66, 0x56, 0x9e, 0xa6, 0x95, 0xcf, 0x30, 0xbf, 0x43, 0xb7, + 0x30, 0xda, 0x98, 0x91, 0x6f, 0x12, 0x17, 0xf5, 0x4d, 0xe2, 0xe9, 0xd2, 0x6e, 0x94, 0x1b, 0xc4, + 0xf7, 0x0d, 0x98, 0x52, 0xfa, 0x35, 0x9d, 0x01, 0x86, 0x3a, 0x03, 0xbe, 0x01, 0xf3, 0x74, 0xa2, + 0xdc, 0xdb, 0x66, 0x9e, 0x44, 0x2e, 0x1b, 0xf4, 0xda, 0xc9, 0x06, 0x3d, 0x47, 0x88, 0x1b, 0xb4, + 0xa4, 0xb6, 0xd6, 0x14, 0x7a, 0x9c, 0x79, 0x5d, 0xd6, 0x8b, 0xaf, 0x9f, 0x67, 0xa1, 0xe9, 0x24, + 0xe3, 0xc0, 0xb7, 0x82, 0x14, 0x40, 0x67, 0x28, 0x15, 0xb8, 0xe4, 0x99, 0x2a, 0xfd, 0x36, 
0xcf, + 0x03, 0xac, 0x3f, 0xc4, 0xce, 0x2a, 0x9f, 0x0f, 0x8a, 0xa5, 0xd7, 0xd0, 0x2c, 0xbd, 0xe6, 0x2f, + 0x1a, 0x30, 0xbb, 0xb1, 0x96, 0xf5, 0x2a, 0xe0, 0xc2, 0xdd, 0x83, 0x07, 0x77, 0xa5, 0x39, 0x49, + 0x81, 0xa0, 0x79, 0xa8, 0x7b, 0x03, 0x5f, 0x88, 0x64, 0xf4, 0x53, 0x39, 0xf6, 0xac, 0x97, 0x1e, + 0x7b, 0x66, 0x5c, 0x78, 0x68, 0xbb, 0x0f, 0x0e, 0x88, 0x1b, 0xb5, 0xc6, 0xb9, 0xbd, 0x8a, 0x25, + 0xcc, 0xbf, 0x5d, 0x83, 0xf9, 0x0d, 0x0f, 0x3f, 0x1c, 0xea, 0x60, 0xbf, 0xec, 0xb4, 0x75, 0x23, + 0xbf, 0x01, 0x3f, 0xf2, 0xd9, 0x70, 0xb6, 0xfa, 0x1f, 0x40, 0x83, 0xdb, 0xdf, 0x79, 0x03, 0xa6, + 0x2e, 0xbd, 0x5e, 0xf4, 0x87, 0x6c, 0x53, 0x56, 0x84, 0x86, 0xca, 0x4f, 0xdc, 0x24, 0x85, 0xa5, + 0x77, 0x60, 0x5a, 0xcd, 0x38, 0xd6, 0xb9, 0xdb, 0x27, 0x70, 0x7a, 0xc3, 0x0b, 0x9c, 0xbd, 0xcc, + 0x29, 0x35, 0xd5, 0x4d, 0xec, 0xd8, 0x8e, 0x34, 0xdf, 0x10, 0x15, 0xa4, 0x94, 0xf8, 0xe8, 0xa3, + 0xcd, 0xb6, 0x20, 0xac, 0x82, 0xcc, 0x5f, 0x30, 0xe0, 0xb9, 0x9b, 0x6b, 0xeb, 0xa9, 0xff, 0x42, + 0xce, 0xc3, 0x87, 0x8a, 0x53, 0xae, 0xf2, 0x03, 0x91, 0x7a, 0x0c, 0x1e, 0x5f, 0x01, 0x9c, 0xbe, + 0x49, 0x62, 0x0b, 0xf7, 0x83, 0xec, 0x5c, 0xa5, 0x0c, 0x26, 0x22, 0x71, 0x10, 0xca, 0x0e, 0x53, + 0x20, 0x9c, 0xe4, 0x3e, 0x89, 0xe8, 0xff, 0x78, 0x55, 0x92, 0x34, 0xad, 0x8c, 0x4b, 0x42, 0xb6, + 0x7d, 0x1f, 0x8a, 0x89, 0x9b, 0x02, 0x4c, 0x0c, 0x8b, 0x37, 0xbd, 0x41, 0x14, 0xe3, 0x70, 0x27, + 0xd2, 0x7e, 0xf9, 0x2c, 0x34, 0xb1, 0x14, 0x34, 0xe5, 0x5a, 0x4c, 0x00, 0x85, 0xfe, 0x0d, 0x55, + 0xa7, 0xff, 0x3f, 0x31, 0x60, 0xe6, 0xd6, 0xfd, 0xfb, 0x9d, 0x9b, 0x38, 0x16, 0x6b, 0xb5, 0x48, + 0xeb, 0x6e, 0x2b, 0x6a, 0x50, 0x95, 0x04, 0x31, 0x88, 0x89, 0xb7, 0xc2, 0xdd, 0xf8, 0x56, 0x36, + 0xfd, 0xf8, 0x5e, 0xb8, 0x15, 0x87, 0xc4, 0xef, 0x0a, 0xc5, 0x49, 0xf2, 0x89, 0x7a, 0xca, 0x27, + 0x98, 0x7d, 0xcf, 0xd9, 0xc5, 0x89, 0x7a, 0x26, 0x52, 0xe8, 0x3d, 0x98, 0xda, 0x8d, 0xe3, 0xfe, + 0x2d, 0x6c, 0xbb, 0x38, 0x94, 0x73, 0x7c, 0xb9, 0x68, 0x8e, 0xd3, 0xda, 0xf3, 0x62, 0x96, 0x8a, + 0x62, 0x5e, 0x01, 0x48, 0xb3, 0x86, 0x97, 0x63, 0xcd, 0x7f, 0x68, 0x40, 0x83, 0x7b, 0xe5, 0x84, + 0xe8, 0x12, 0x8c, 0xe1, 0x87, 0xd8, 0x11, 0x1b, 0x43, 0xe1, 0xef, 0x53, 0x2e, 0x67, 0xb1, 0xb2, + 0xe8, 0x2a, 0x34, 0x68, 0x35, 0x6e, 0x26, 0x1e, 0x46, 0x2f, 0x94, 0xd5, 0x3a, 0xe9, 0x73, 0x4b, + 0x62, 0x30, 0x75, 0xd8, 0xe9, 0x6f, 0xd1, 0xf5, 0x14, 0x57, 0xc9, 0xee, 0xf7, 0xd7, 0x3a, 0xbc, + 0x90, 0x20, 0x90, 0x62, 0x99, 0x6f, 0x43, 0xf3, 0x56, 0x10, 0xc5, 0xab, 0x1e, 0xb1, 0xf3, 0xea, + 0xf6, 0xb3, 0xd0, 0x94, 0xba, 0x71, 0x24, 0xdc, 0x85, 0x52, 0x80, 0x79, 0x0d, 0x16, 0x28, 0x6a, + 0xc7, 0x8e, 0x77, 0xb5, 0x29, 0x57, 0x34, 0x25, 0xa4, 0x58, 0x52, 0x4b, 0xc5, 0x12, 0xf3, 0x47, + 0x75, 0x78, 0x66, 0x73, 0xab, 0xdc, 0x43, 0xca, 0x84, 0x69, 0xce, 0xc7, 0xa9, 0x46, 0x69, 0x7b, + 0x82, 0x9e, 0x06, 0xa3, 0xbc, 0x87, 0x7c, 0x2a, 0x17, 0x0b, 0xfd, 0x94, 0xfc, 0xbe, 0x9e, 0xf2, + 0xfb, 0xf3, 0x30, 0x4b, 0x22, 0x27, 0x22, 0x9b, 0x3e, 0x5d, 0x1e, 0xa9, 0xe7, 0x5a, 0x06, 0xaa, + 0xb0, 0x81, 0xf1, 0xd2, 0x7d, 0x21, 0xe3, 0x74, 0x43, 0xb7, 0xaa, 0x3e, 0xab, 0x49, 0xc4, 0x4e, + 0x04, 0x9b, 0x96, 0x4c, 0x52, 0x29, 0xc3, 0xd9, 0xb5, 0xfb, 0xab, 0x83, 0x78, 0xb7, 0x4d, 0x22, + 0x27, 0xd8, 0xc7, 0xe1, 0x21, 0x13, 0xd5, 0x26, 0xad, 0x7c, 0x86, 0xae, 0x85, 0xc1, 0x89, 0x5c, + 0x9f, 0x2e, 0xc0, 0x9c, 0xa4, 0xbb, 0x85, 0x23, 0xc6, 0x43, 0xa6, 0xd8, 0xef, 0xb2, 0x60, 0x74, + 0x0e, 0x66, 0x88, 0x4f, 0x62, 0x62, 0xc7, 0x41, 0xc8, 0xd8, 0x21, 0x17, 0xc7, 0x74, 0xa0, 0xf9, + 0x83, 0x3a, 0x9c, 0x62, 0xc3, 0xf3, 0xfb, 0x76, 0x50, 0x36, 0xf2, 0x83, 0x72, 0xa2, 0x9d, 0x79, + 0xd4, 0x23, 0xb3, 
0x0e, 0xcd, 0xe4, 0x4c, 0xbe, 0x60, 0xf7, 0x2d, 0x71, 0x5a, 0xeb, 0xa5, 0x67, + 0xf5, 0xdc, 0xd8, 0xf8, 0xc7, 0xa0, 0x99, 0x9c, 0xb2, 0xa2, 0xb7, 0xa1, 0xd9, 0x0f, 0x98, 0x55, + 0x3d, 0x94, 0x87, 0x37, 0xcf, 0x14, 0x72, 0x22, 0xce, 0xeb, 0xac, 0xb4, 0x34, 0x7a, 0x13, 0x1a, + 0xfd, 0x10, 0x6f, 0xc5, 0xcc, 0xa5, 0xf0, 0x48, 0x44, 0x59, 0xd6, 0xfc, 0xf3, 0x06, 0x00, 0xb3, + 0xfc, 0x59, 0xb6, 0xdf, 0xc5, 0x23, 0x56, 0x3d, 0xaf, 0xc0, 0x58, 0xd4, 0xc7, 0x4e, 0xd5, 0x91, + 0x44, 0xfa, 0xef, 0xad, 0x3e, 0x76, 0x2c, 0x56, 0xde, 0xfc, 0xef, 0x0d, 0x98, 0x4d, 0x33, 0x36, + 0x63, 0xdc, 0x2b, 0xf4, 0x50, 0x7b, 0x17, 0xea, 0x3d, 0xfb, 0xa1, 0x10, 0xdc, 0xbf, 0x52, 0x4d, + 0x9d, 0x12, 0x59, 0xb9, 0x63, 0x3f, 0xe4, 0x52, 0x14, 0xc5, 0x63, 0xe8, 0xc4, 0x17, 0xc6, 0xb4, + 0xa1, 0xd0, 0x89, 0x2f, 0xd1, 0x89, 0x8f, 0x36, 0xa1, 0x21, 0xcc, 0xb6, 0xcc, 0xe5, 0x41, 0x17, + 0xec, 0xcb, 0x48, 0xb4, 0x39, 0x86, 0x90, 0xe5, 0x04, 0x3e, 0xfa, 0x59, 0x98, 0x15, 0x9f, 0x16, + 0xfe, 0x74, 0x80, 0xa3, 0x58, 0xec, 0x9d, 0x57, 0x86, 0xa7, 0x28, 0x10, 0x39, 0xe1, 0x0c, 0x35, + 0xd4, 0x87, 0x85, 0x9e, 0xfd, 0x90, 0x23, 0x72, 0x90, 0x65, 0xc7, 0x24, 0x10, 0x5e, 0x17, 0x5f, + 0x1b, 0xae, 0xe7, 0x72, 0xe8, 0xfc, 0x5f, 0x85, 0x94, 0x97, 0x76, 0x60, 0x52, 0x76, 0x76, 0xc1, + 0xda, 0x68, 0xab, 0x9b, 0xf8, 0xf1, 0xcd, 0xd5, 0xa9, 0x24, 0xcb, 0xfe, 0x23, 0x46, 0xe5, 0xb1, + 0xfe, 0xe7, 0x5b, 0x30, 0xad, 0x0e, 0xdd, 0x63, 0xfd, 0xd7, 0xa7, 0x70, 0xba, 0x60, 0x50, 0x1f, + 0xeb, 0x2f, 0x0f, 0xe0, 0xe9, 0xd2, 0x11, 0x7e, 0x9c, 0x3f, 0xa6, 0xec, 0x47, 0x59, 0xe9, 0x23, + 0x57, 0xee, 0x2f, 0xeb, 0xca, 0xfd, 0x72, 0xf5, 0x4c, 0x97, 0x1a, 0xfe, 0x6d, 0xb5, 0x4e, 0x94, + 0x2d, 0xa1, 0x77, 0x60, 0xc2, 0xa3, 0x10, 0x79, 0x18, 0x60, 0x1e, 0xbd, 0x64, 0x2c, 0x81, 0x61, + 0xfe, 0xc0, 0x80, 0xb1, 0x91, 0x37, 0x6c, 0x4d, 0x6f, 0xd8, 0xab, 0xa5, 0x84, 0xc4, 0xe5, 0x9e, + 0x15, 0xcb, 0x3e, 0x58, 0x97, 0x17, 0x98, 0x64, 0x3b, 0x7f, 0xcf, 0x80, 0x29, 0x4a, 0x5b, 0x1e, + 0x81, 0x9e, 0x83, 0x19, 0xcf, 0xde, 0xc6, 0x9e, 0x34, 0x50, 0x8a, 0x21, 0xd7, 0x81, 0xb4, 0xd4, + 0x8e, 0x6a, 0x69, 0x15, 0x3b, 0x9c, 0x0e, 0x64, 0xaa, 0xba, 0x1d, 0x3b, 0xbb, 0x42, 0x79, 0xe1, + 0x09, 0xba, 0x07, 0xcb, 0xe9, 0xf0, 0x31, 0x15, 0x37, 0x03, 0x5f, 0xde, 0x5f, 0xc8, 0x80, 0x0b, + 0xfc, 0x99, 0xc7, 0xd9, 0x51, 0x6c, 0x06, 0x8a, 0x2e, 0xc1, 0x02, 0xf1, 0x1d, 0x6f, 0xe0, 0xe2, + 0x8f, 0x7c, 0xbe, 0x3f, 0x7b, 0xe4, 0xdb, 0xd8, 0x15, 0xd2, 0x48, 0x61, 0x9e, 0xb9, 0x0a, 0xa7, + 0x6f, 0x07, 0xb6, 0x7b, 0xc3, 0xf6, 0x6c, 0xdf, 0xc1, 0xe1, 0xa6, 0xdf, 0x2d, 0x3c, 0xe6, 0x52, + 0xcf, 0xa8, 0x6a, 0xfa, 0x19, 0x95, 0xf9, 0x00, 0x90, 0x4a, 0x42, 0x9c, 0xcb, 0xaf, 0x42, 0x83, + 0x70, 0x62, 0x62, 0x9e, 0xbc, 0x54, 0x2c, 0xa8, 0xe4, 0xfe, 0x6d, 0x49, 0x3c, 0xf3, 0x65, 0x58, + 0x28, 0x12, 0x64, 0x8a, 0x74, 0x21, 0xf3, 0x25, 0x38, 0xc5, 0xca, 0x1e, 0x25, 0xf7, 0x9b, 0x9f, + 0xc0, 0xdc, 0xdd, 0x8c, 0xbf, 0xfd, 0x19, 0x66, 0x3e, 0x54, 0xec, 0x23, 0x3c, 0x75, 0x6c, 0x5d, + 0xf4, 0x5f, 0x19, 0xd0, 0x4c, 0x6e, 0x9d, 0x8c, 0x58, 0x7c, 0x78, 0x53, 0x13, 0x1f, 0x0a, 0x55, + 0xb2, 0xe4, 0xd7, 0xa9, 0xf4, 0x80, 0xae, 0x26, 0x8e, 0xeb, 0x15, 0xca, 0x58, 0x8a, 0xc8, 0x7d, + 0xac, 0x05, 0x0a, 0x3b, 0x91, 0x4a, 0xf2, 0x3e, 0x97, 0x13, 0xa9, 0xe4, 0xef, 0x72, 0x99, 0x5e, + 0x54, 0x6a, 0xc4, 0xb8, 0xd1, 0x32, 0xf3, 0x65, 0x61, 0x93, 0x39, 0xb9, 0x2d, 0xa1, 0x40, 0xcc, + 0x97, 0x60, 0x2e, 0xd3, 0x3c, 0xba, 0x1c, 0xfb, 0xbb, 0x76, 0x24, 0xe7, 0x0f, 0x4f, 0x98, 0xff, + 0xc8, 0x80, 0xb1, 0xbb, 0x81, 0x3b, 0xea, 0x71, 0x7b, 0x4d, 0x1b, 0xb7, 0x67, 0xcb, 0x2e, 0xd5, + 0x29, 0x43, 0x76, 0x25, 0x33, 0x64, 0xcb, 
0xa5, 0x38, 0xfa, 0x68, 0x5d, 0x85, 0x29, 0x76, 0x3d, + 0x4f, 0x1c, 0x54, 0x17, 0x09, 0x89, 0x2d, 0x68, 0x88, 0x43, 0x59, 0xe9, 0x59, 0x23, 0x92, 0xe6, + 0xdf, 0xad, 0xc1, 0xb4, 0x7a, 0xb9, 0x0f, 0x7d, 0xc7, 0x80, 0x95, 0x90, 0xbb, 0x7e, 0xba, 0xed, + 0x41, 0x48, 0xfc, 0xee, 0x96, 0xb3, 0x8b, 0xdd, 0x81, 0x47, 0xfc, 0xee, 0x66, 0xd7, 0x0f, 0x12, + 0xf0, 0xfa, 0x43, 0xec, 0x0c, 0x98, 0x3d, 0xea, 0x88, 0x7b, 0x83, 0xc9, 0x51, 0xcf, 0x31, 0xe9, + 0xa2, 0xbf, 0x60, 0xc0, 0x45, 0x7e, 0x69, 0x6e, 0xf8, 0xba, 0x54, 0xc8, 0xc1, 0x1d, 0x49, 0x2a, + 0x25, 0x72, 0x1f, 0x87, 0x3d, 0xeb, 0xb8, 0xff, 0x30, 0x7f, 0xb5, 0x06, 0x33, 0xb4, 0x61, 0x27, + 0xbb, 0x3a, 0xf2, 0x75, 0x38, 0xe5, 0xd9, 0x51, 0x7c, 0x0b, 0xdb, 0x61, 0xbc, 0x8d, 0x6d, 0x7e, + 0x14, 0x52, 0x3f, 0xf6, 0x11, 0x4c, 0x9e, 0x08, 0xfa, 0x19, 0x40, 0xec, 0x5c, 0x26, 0xb4, 0xfd, + 0x88, 0xd5, 0x8b, 0x91, 0x1e, 0x3b, 0x36, 0xe9, 0x02, 0x2a, 0xca, 0x99, 0xd7, 0x78, 0xd9, 0x99, + 0xd7, 0x84, 0xee, 0xb3, 0xf5, 0x0d, 0x98, 0x17, 0x9d, 0xb4, 0x43, 0xba, 0x82, 0xe1, 0xde, 0xcc, + 0x9c, 0x16, 0x1b, 0xc3, 0x9f, 0xb1, 0x68, 0x88, 0xa6, 0x03, 0xa7, 0x29, 0x71, 0xdd, 0xc9, 0x29, + 0x42, 0xb7, 0x61, 0x6e, 0x6f, 0xb0, 0x8d, 0x3d, 0x1c, 0x4b, 0x98, 0xf8, 0x45, 0xa1, 0xac, 0xa2, + 0x63, 0x5b, 0x59, 0x54, 0xf3, 0xe7, 0x0d, 0x98, 0xa4, 0x7f, 0x19, 0x39, 0x07, 0x5c, 0xd1, 0x39, + 0x60, 0xab, 0x6c, 0xe5, 0x48, 0xe6, 0x77, 0x9e, 0x77, 0x65, 0x27, 0x0c, 0x1e, 0x1e, 0x4a, 0x39, + 0xa5, 0x68, 0x8b, 0xfb, 0x4d, 0x83, 0x4f, 0x4c, 0x2b, 0x71, 0xd6, 0xfe, 0x00, 0x26, 0x1d, 0xbb, + 0x6f, 0x3b, 0xfc, 0x7a, 0x6f, 0xa9, 0x82, 0xa6, 0x21, 0xad, 0xac, 0x09, 0x0c, 0xae, 0xdb, 0x24, + 0x04, 0x96, 0xf6, 0x60, 0x46, 0xcb, 0x7a, 0xac, 0x42, 0xf1, 0x36, 0xe7, 0x4b, 0x89, 0x2c, 0x65, + 0xc1, 0x29, 0x5f, 0x49, 0xd3, 0x95, 0x2b, 0x05, 0x8c, 0x73, 0x47, 0x71, 0x1e, 0xb6, 0xcc, 0xf3, + 0xe8, 0xe6, 0x37, 0xe1, 0x29, 0x8d, 0x41, 0xa5, 0x1e, 0xef, 0x05, 0x4d, 0x63, 0x17, 0x09, 0x70, + 0x68, 0xa7, 0xd2, 0x5e, 0x92, 0xa6, 0xab, 0x83, 0xd5, 0x3c, 0x12, 0xf7, 0x01, 0x44, 0xca, 0xdc, + 0xe3, 0x03, 0xa7, 0xfe, 0x15, 0x3d, 0x80, 0xf9, 0x1e, 0x95, 0x03, 0xd7, 0x1f, 0xf6, 0x43, 0x6e, + 0x6b, 0x91, 0xed, 0xf8, 0xca, 0x91, 0x1c, 0x34, 0xad, 0xa0, 0x95, 0x23, 0x62, 0xfe, 0xe9, 0x1a, + 0x9f, 0xae, 0x6c, 0x7b, 0x64, 0x86, 0x27, 0x77, 0x6d, 0xb3, 0x6d, 0x89, 0x36, 0xc8, 0x24, 0xdd, + 0x38, 0xf1, 0xc3, 0x18, 0x87, 0xbe, 0xed, 0x25, 0xa7, 0x17, 0x0a, 0x84, 0xe6, 0xf7, 0xc3, 0x60, + 0x9f, 0xb8, 0xcc, 0xb9, 0x90, 0x9b, 0xbc, 0x15, 0x08, 0x15, 0x7d, 0x07, 0x7e, 0xc4, 0x79, 0xa4, + 0xbd, 0x2d, 0xee, 0x18, 0x4e, 0x5a, 0x3a, 0x10, 0xbd, 0x0e, 0x13, 0xb1, 0xcd, 0x2c, 0xfd, 0xe3, + 0xe5, 0x47, 0x8a, 0xf7, 0x69, 0x09, 0x4b, 0x14, 0x44, 0xb7, 0x24, 0x73, 0xe0, 0xcc, 0x42, 0x9c, + 0x74, 0x97, 0x0e, 0xae, 0xca, 0x58, 0x2c, 0x0d, 0xd3, 0xfc, 0x4f, 0x13, 0x00, 0xe9, 0x46, 0x89, + 0x6e, 0xe5, 0x16, 0xc1, 0x2b, 0xd5, 0x5b, 0x6b, 0xd9, 0x0a, 0x40, 0x1f, 0xc2, 0x94, 0xed, 0x79, + 0x81, 0x63, 0xc7, 0xac, 0xe5, 0xb5, 0xea, 0x15, 0x25, 0x88, 0xad, 0xa6, 0x18, 0x9c, 0x9e, 0x4a, + 0x23, 0x15, 0x4a, 0xea, 0x8a, 0x50, 0x82, 0x56, 0xb5, 0xdb, 0x7c, 0x63, 0xe5, 0xae, 0xf6, 0xda, + 0x3e, 0xa4, 0x5e, 0xe4, 0x43, 0xef, 0xaa, 0xee, 0x5b, 0xe3, 0xe5, 0x97, 0x34, 0x14, 0xd9, 0x41, + 0x77, 0xdd, 0x9a, 0x73, 0x75, 0xee, 0x2a, 0x06, 0xe4, 0xa5, 0x32, 0x22, 0x19, 0x66, 0x6c, 0x65, + 0xf1, 0xd1, 0x35, 0xee, 0xd3, 0xb6, 0xe9, 0xef, 0x04, 0xc2, 0xe7, 0xc0, 0x2c, 0xed, 0xba, 0xc3, + 0x28, 0xc6, 0x3d, 0x5a, 0xd2, 0x4a, 0x70, 0xa8, 0x02, 0xca, 0x5c, 0x60, 0xa3, 0xd6, 0x64, 0xb9, + 0x02, 0xaa, 0x7b, 0xf7, 0x5b, 0x02, 0x23, 0xbd, 0xcb, 0x1c, 0x6d, 
0xfa, 0x1f, 0x45, 0x98, 0xdd, + 0x5a, 0x49, 0xee, 0x32, 0x73, 0x18, 0xdd, 0x3d, 0x44, 0x5a, 0x86, 0x0f, 0x68, 0x41, 0xf9, 0x8f, + 0xf4, 0x10, 0x03, 0x56, 0x16, 0xf5, 0x89, 0x72, 0xcb, 0x25, 0x1f, 0xe6, 0xb3, 0xd3, 0xec, 0xb1, + 0x72, 0xe7, 0x3f, 0x59, 0x87, 0x59, 0x7d, 0x9c, 0xd0, 0xb3, 0xd0, 0x14, 0x44, 0x92, 0xfb, 0xbd, + 0x29, 0x80, 0x5d, 0x4b, 0x66, 0x65, 0x95, 0x33, 0x53, 0x05, 0x42, 0x39, 0xe8, 0x76, 0x10, 0xc4, + 0x09, 0xc7, 0x11, 0x29, 0xca, 0x6d, 0xf6, 0x70, 0xe8, 0x63, 0x4f, 0x57, 0x95, 0x75, 0x20, 0xe5, + 0x76, 0x41, 0xc4, 0x06, 0x5c, 0x88, 0x27, 0x32, 0x89, 0xde, 0x82, 0xa7, 0x12, 0xc7, 0x68, 0x8b, + 0x9b, 0x01, 0x24, 0x25, 0x2e, 0xaf, 0x94, 0x65, 0x53, 0xe5, 0x5b, 0x08, 0x04, 0x12, 0x81, 0xbb, + 0x61, 0x67, 0xa0, 0xe8, 0x65, 0x98, 0xa7, 0x10, 0xb6, 0x39, 0xcb, 0x92, 0xdc, 0x25, 0x3b, 0x07, + 0xa7, 0xaa, 0x3f, 0xdf, 0x33, 0xa8, 0x98, 0xc9, 0x1a, 0x2f, 0xbc, 0x63, 0xb2, 0x60, 0x3a, 0x5f, + 0xed, 0xd0, 0xd9, 0x25, 0x31, 0x76, 0xe2, 0x41, 0xc8, 0x7d, 0x65, 0x9a, 0x96, 0x06, 0x33, 0xb7, + 0xe0, 0x74, 0x81, 0x0f, 0x19, 0xed, 0x6a, 0xbb, 0x4f, 0x64, 0x55, 0xc4, 0xd1, 0x6e, 0x0a, 0xa1, + 0x03, 0xc5, 0xcc, 0x14, 0x4a, 0xdc, 0x8b, 0x14, 0x60, 0xfe, 0x76, 0x03, 0x20, 0x55, 0x68, 0x0a, + 0x8f, 0x1f, 0x4d, 0x98, 0x96, 0xa1, 0x4e, 0x94, 0x00, 0x09, 0x1a, 0x8c, 0xfe, 0xc4, 0x4f, 0xc2, + 0x33, 0x88, 0x33, 0xe2, 0x04, 0x40, 0xf7, 0xd2, 0x08, 0x7b, 0x3b, 0xb7, 0x89, 0xbf, 0x27, 0xbd, + 0x5b, 0x65, 0x9a, 0x4e, 0xdb, 0x01, 0x71, 0xc5, 0x38, 0xd2, 0xcf, 0x22, 0x83, 0xc9, 0x44, 0xb1, + 0xc1, 0x64, 0x19, 0x40, 0xd4, 0x42, 0x8e, 0x57, 0xdd, 0x52, 0x20, 0x54, 0xf6, 0x76, 0x42, 0x6c, + 0x4b, 0xa9, 0x96, 0xbb, 0x3f, 0x4d, 0x1e, 0x5f, 0xf6, 0xce, 0x11, 0xa1, 0x94, 0x5d, 0x3a, 0x2b, + 0x34, 0xca, 0xcd, 0xe3, 0x53, 0xce, 0x11, 0x41, 0xd7, 0x60, 0x49, 0x02, 0x6f, 0xe6, 0x7d, 0xf3, + 0x81, 0xb5, 0xb1, 0xa2, 0x04, 0xba, 0x01, 0x13, 0xcc, 0x76, 0x15, 0xb5, 0xa6, 0x18, 0x33, 0x7b, + 0xb9, 0x5c, 0xda, 0xa6, 0x23, 0xbe, 0x72, 0x9b, 0x15, 0xe6, 0x3b, 0x95, 0xc0, 0x64, 0xfb, 0x9e, + 0xef, 0x07, 0xb1, 0xcd, 0xf7, 0xa3, 0xe9, 0xf2, 0x7d, 0x4f, 0x21, 0xb4, 0x9a, 0x62, 0xc8, 0x7d, + 0x2f, 0x85, 0xa0, 0x9f, 0x85, 0xb9, 0xe0, 0x80, 0xae, 0x3a, 0x29, 0xe1, 0x47, 0xad, 0x19, 0x46, + 0xf6, 0xf2, 0x90, 0x4a, 0xb7, 0x86, 0x6c, 0x65, 0x89, 0x65, 0xec, 0x03, 0xb3, 0x59, 0xfb, 0x00, + 0xbb, 0x44, 0xc1, 0xdd, 0x14, 0xd8, 0x1c, 0x9e, 0x13, 0x97, 0x28, 0x52, 0x10, 0xfa, 0x18, 0xa6, + 0x53, 0x83, 0x59, 0x18, 0xb1, 0x8b, 0x73, 0x53, 0x97, 0x2e, 0x0d, 0x57, 0xbd, 0x4d, 0x05, 0xd3, + 0xd2, 0xe8, 0x2c, 0xbd, 0x0d, 0x53, 0x4a, 0x1f, 0x1f, 0xc7, 0x67, 0x65, 0xe9, 0x1a, 0xcc, 0x67, + 0x7b, 0xf5, 0x58, 0x3e, 0x2f, 0xff, 0xd6, 0x80, 0xb9, 0x02, 0xa3, 0x1a, 0x0b, 0xf0, 0x62, 0xa4, + 0x01, 0x5e, 0xf4, 0xd5, 0x5b, 0xcb, 0xae, 0x5e, 0xc9, 0x13, 0xea, 0x0a, 0x4f, 0x10, 0xab, 0x76, + 0x2c, 0x5d, 0xb5, 0x3a, 0x1b, 0x1a, 0xcf, 0xb1, 0xa1, 0xe1, 0x57, 0xb5, 0xc6, 0xb0, 0x1a, 0x59, + 0x86, 0xf5, 0x99, 0x01, 0xf3, 0xd9, 0x63, 0xfb, 0x91, 0xfb, 0x77, 0xab, 0xb6, 0x9c, 0xe2, 0x90, + 0x3f, 0x59, 0xc7, 0x81, 0xd4, 0xae, 0x73, 0x23, 0x63, 0xd7, 0x79, 0x79, 0x28, 0x7c, 0xdd, 0xc6, + 0xf3, 0xbb, 0x06, 0x2c, 0x66, 0x8b, 0xac, 0x79, 0x36, 0xe9, 0x8d, 0xb8, 0xa5, 0xab, 0x5a, 0x4b, + 0x5f, 0x1d, 0xa6, 0xa6, 0xac, 0x1a, 0x4a, 0x73, 0x6f, 0x66, 0x9a, 0x7b, 0x71, 0x78, 0x22, 0x7a, + 0x9b, 0xff, 0x66, 0x0d, 0x96, 0x0b, 0xcb, 0x9d, 0xcc, 0xee, 0x22, 0xdc, 0x5e, 0xd9, 0xdd, 0xdf, + 0x13, 0xda, 0x5c, 0x74, 0x02, 0x5f, 0x30, 0x7b, 0xcb, 0xdf, 0x30, 0xe0, 0xe9, 0xc2, 0xee, 0x1a, + 0xb9, 0xf9, 0xe2, 0xba, 0x6e, 0xbe, 0xf8, 0xf2, 0xd0, 0x03, 0x2c, 0xed, 0x19, 0xff, 0xb4, 
0x56, + 0x52, 0x55, 0xa6, 0xba, 0x9e, 0x85, 0x29, 0xdb, 0x71, 0x70, 0x14, 0xdd, 0x09, 0xdc, 0xe4, 0x5a, + 0xae, 0x0a, 0xd2, 0xaf, 0xb2, 0xd7, 0x4e, 0x7e, 0x95, 0x7d, 0x19, 0x80, 0x4b, 0xed, 0x77, 0x53, + 0x76, 0xa6, 0x40, 0xd0, 0x3d, 0x26, 0xa6, 0xf0, 0x03, 0x1e, 0x3e, 0xac, 0x6f, 0x0c, 0xd9, 0x69, + 0xea, 0x61, 0x91, 0x95, 0x10, 0xa1, 0xb2, 0x62, 0x14, 0x07, 0xa1, 0xdd, 0xa5, 0xcd, 0x8d, 0x22, + 0xf6, 0x5b, 0x3e, 0xbe, 0x39, 0x78, 0x5a, 0x39, 0x76, 0x47, 0x6a, 0x42, 0xad, 0x1c, 0xbb, 0x22, + 0xf5, 0xbb, 0x35, 0x78, 0xa6, 0x62, 0x19, 0x15, 0x5b, 0xbb, 0xb3, 0x9d, 0x5b, 0xcb, 0x77, 0xee, + 0x27, 0x8a, 0xb6, 0xcc, 0xdd, 0x02, 0xde, 0x3d, 0xe6, 0x0a, 0x2e, 0x55, 0x9f, 0xad, 0x02, 0xad, + 0xf6, 0xd2, 0xd0, 0xc4, 0x0b, 0xd5, 0xdc, 0x27, 0x6b, 0x94, 0xfa, 0x23, 0xf0, 0x42, 0x61, 0xd5, + 0xb2, 0x7e, 0x8e, 0x0e, 0x05, 0x2a, 0xee, 0x9d, 0x29, 0x40, 0x3b, 0x47, 0xaa, 0x65, 0xce, 0x91, + 0xfe, 0x92, 0x01, 0x0b, 0x59, 0xfa, 0x23, 0x5f, 0xbd, 0xef, 0xe8, 0xab, 0xf7, 0xdc, 0x30, 0xfd, + 0x2f, 0x17, 0xee, 0x77, 0x67, 0xe0, 0x4c, 0x89, 0x8b, 0xdc, 0x37, 0xe1, 0x54, 0xd7, 0xc1, 0xba, + 0xcb, 0xab, 0xa8, 0x6b, 0xa1, 0x87, 0x6f, 0xa5, 0x7f, 0xac, 0x95, 0xa7, 0x85, 0x76, 0x61, 0xc1, + 0x3e, 0x88, 0x72, 0xc1, 0x14, 0xc5, 0xa0, 0x5e, 0x2e, 0x54, 0xd1, 0x8f, 0x08, 0xbe, 0x68, 0x15, + 0x52, 0x44, 0x6d, 0x71, 0x6f, 0x9f, 0x8a, 0x1b, 0x15, 0x5e, 0xd0, 0x45, 0x1e, 0x87, 0x56, 0x82, + 0x89, 0x6e, 0x42, 0xb3, 0x2b, 0xfd, 0x60, 0x05, 0xf7, 0x28, 0xe4, 0x94, 0x85, 0xce, 0xb2, 0x56, + 0x8a, 0x8b, 0xde, 0x84, 0xba, 0xbf, 0x13, 0x55, 0x45, 0x21, 0xcb, 0x9c, 0x6b, 0x5a, 0xb4, 0x3c, + 0xba, 0x0e, 0xf5, 0x70, 0xdb, 0x15, 0x46, 0x9b, 0xc2, 0x9d, 0xdc, 0xba, 0xd1, 0x2e, 0x1e, 0x4c, + 0x8b, 0x62, 0xa2, 0x75, 0x18, 0x67, 0x1e, 0x70, 0xc2, 0x56, 0x53, 0xb8, 0x8f, 0x57, 0x38, 0x4d, + 0x5a, 0x1c, 0x1b, 0x5d, 0x83, 0x09, 0x87, 0x05, 0x03, 0x13, 0x8a, 0x56, 0xf1, 0xdd, 0xb0, 0x5c, + 0xb8, 0x30, 0x4b, 0x60, 0xa1, 0x5b, 0x30, 0xe1, 0xe0, 0xfe, 0xee, 0x4e, 0x24, 0xd4, 0xa9, 0xd7, + 0x0a, 0xf1, 0x2b, 0x02, 0xc6, 0x59, 0x02, 0x1f, 0x5d, 0x82, 0xda, 0x8e, 0x23, 0xbc, 0xe7, 0x0a, + 0x4d, 0x3a, 0xba, 0x3f, 0xbf, 0x55, 0xdb, 0x71, 0xd0, 0x2a, 0x34, 0x76, 0xb8, 0x97, 0xb8, 0xb8, + 0x62, 0xf2, 0x52, 0xb1, 0xbb, 0x7a, 0xce, 0x91, 0xdc, 0x92, 0x78, 0xa8, 0x0d, 0xb0, 0x93, 0xb8, + 0xb3, 0x8b, 0x40, 0x23, 0xe7, 0x86, 0x71, 0x7a, 0xb7, 0x14, 0x3c, 0xf4, 0x21, 0x34, 0x6d, 0x19, + 0xcb, 0x50, 0xdc, 0x4d, 0x79, 0xa3, 0x70, 0xce, 0x57, 0xc7, 0x65, 0xb4, 0x52, 0x2a, 0xe8, 0xeb, + 0x30, 0xb3, 0x1f, 0xf5, 0x77, 0xb1, 0x5c, 0x14, 0xec, 0xa2, 0x4a, 0x09, 0x47, 0xfe, 0x58, 0x14, + 0x24, 0x61, 0x3c, 0xb0, 0xbd, 0xdc, 0x7a, 0xd5, 0x09, 0xd1, 0x5e, 0xfb, 0x74, 0x10, 0x6c, 0x1f, + 0xc6, 0x58, 0xc4, 0x2c, 0x29, 0xec, 0xb5, 0x0f, 0x79, 0x11, 0xbd, 0xd7, 0x04, 0x1e, 0x5d, 0x3e, + 0xb6, 0x0c, 0x8e, 0x29, 0x54, 0xaf, 0x2f, 0x97, 0xb6, 0x37, 0x57, 0x9f, 0x14, 0x97, 0xf2, 0x8d, + 0xfe, 0x6e, 0x10, 0x07, 0x7e, 0x86, 0x37, 0x9d, 0x2a, 0xe7, 0x1b, 0x9d, 0x82, 0xf2, 0x3a, 0xdf, + 0x28, 0xa2, 0x88, 0x3a, 0x30, 0xdb, 0x0f, 0xc2, 0xf8, 0x20, 0x08, 0xe5, 0x60, 0xa3, 0x0a, 0x85, + 0x41, 0x2b, 0x29, 0xe8, 0x66, 0xf0, 0xd1, 0x07, 0xd0, 0x88, 0x1c, 0xdb, 0xc3, 0x9b, 0xf7, 0x5a, + 0xa7, 0xcb, 0x59, 0xe9, 0x16, 0x2f, 0x52, 0x32, 0xe0, 0x92, 0x02, 0xba, 0x0a, 0xe3, 0x2c, 0xf0, + 0x12, 0x8b, 0xb5, 0x52, 0x72, 0x3f, 0x2f, 0xe7, 0x4a, 0x61, 0x71, 0x1c, 0x3a, 0xfd, 0x84, 0x84, + 0x12, 0x44, 0xad, 0xc5, 0xf2, 0xe9, 0xb7, 0xc5, 0x0b, 0xdd, 0x2b, 0x5b, 0x8b, 0x29, 0x15, 0xca, + 0xa0, 0x28, 0x77, 0x39, 0x53, 0xce, 0xa0, 0xca, 0x79, 0x0b, 0xc5, 0x34, 0xff, 0xdd, 0x58, 0x7e, + 0xbb, 0x64, 0x12, 
0xa4, 0x95, 0x33, 0xf8, 0x5f, 0x19, 0x56, 0x67, 0x2b, 0x95, 0x5d, 0xb6, 0xe1, + 0x4c, 0xbf, 0xb0, 0x2e, 0x62, 0x03, 0x1a, 0x4e, 0xab, 0xe3, 0xb5, 0x2f, 0xa1, 0x94, 0x15, 0xce, + 0xea, 0x79, 0xe1, 0xec, 0x3a, 0x4c, 0x32, 0x51, 0x22, 0xbd, 0x66, 0x39, 0xd4, 0xe1, 0x69, 0x82, + 0x84, 0xda, 0xf0, 0x5c, 0xf6, 0xe7, 0x16, 0x66, 0xb9, 0x22, 0x90, 0x02, 0x17, 0x47, 0xab, 0x0b, + 0x15, 0xca, 0xb1, 0x13, 0x25, 0x72, 0xac, 0x09, 0xd3, 0xbd, 0x60, 0xe0, 0x4b, 0x07, 0x2b, 0xe1, + 0x07, 0xad, 0xc1, 0x32, 0xb2, 0xee, 0x64, 0x56, 0xd6, 0x7d, 0xb2, 0x42, 0xde, 0x1f, 0x2d, 0x90, + 0x71, 0xaa, 0x44, 0xea, 0xd2, 0xb0, 0x25, 0x65, 0x17, 0x45, 0xcd, 0xbb, 0x70, 0xf6, 0x28, 0x16, + 0xc3, 0xce, 0x77, 0xdd, 0xc4, 0x92, 0xce, 0xbe, 0xcb, 0x6e, 0x07, 0x99, 0xff, 0xc0, 0x80, 0x7a, + 0x27, 0x70, 0x47, 0x6c, 0x0b, 0xb8, 0xa8, 0xd9, 0x02, 0x9e, 0x29, 0x09, 0xc5, 0xac, 0x68, 0xfe, + 0x6f, 0x66, 0x34, 0xff, 0xe7, 0xca, 0x50, 0x74, 0x3d, 0xff, 0xef, 0xd5, 0x60, 0x4a, 0x09, 0x0d, + 0x8d, 0xbe, 0x7b, 0x12, 0x0f, 0x94, 0x7a, 0x55, 0xb4, 0x68, 0x41, 0x99, 0x1d, 0x03, 0x7f, 0xce, + 0x4e, 0x28, 0x0f, 0x30, 0xe9, 0xee, 0xc6, 0xd8, 0xcd, 0x56, 0xeb, 0xd8, 0x4e, 0x28, 0x7f, 0xcb, + 0x80, 0xb9, 0x0c, 0x11, 0xf4, 0x49, 0x91, 0xef, 0xe2, 0x09, 0x95, 0xd6, 0x8c, 0xc3, 0xe3, 0x32, + 0x40, 0x62, 0x00, 0x94, 0x6a, 0xa3, 0x02, 0xa1, 0xac, 0x2b, 0x0e, 0xfa, 0x81, 0x17, 0x74, 0x0f, + 0x3f, 0xc0, 0xf2, 0x56, 0x98, 0x0a, 0x32, 0x7f, 0x5c, 0xe3, 0x15, 0x56, 0x22, 0x75, 0xff, 0xc1, + 0x50, 0x0f, 0x35, 0xd4, 0xdf, 0x31, 0x60, 0x9e, 0x12, 0x61, 0x27, 0x8b, 0x92, 0x65, 0x26, 0xf1, + 0xdc, 0x0c, 0x35, 0x9e, 0x1b, 0x33, 0x7e, 0xb9, 0xc1, 0x20, 0x16, 0xba, 0xa5, 0x48, 0x09, 0x38, + 0x0e, 0x43, 0xe1, 0xbb, 0x28, 0x52, 0x32, 0xc2, 0xdb, 0x58, 0x1a, 0xe1, 0x8d, 0xdd, 0x98, 0x15, + 0x27, 0x62, 0x62, 0x33, 0x48, 0x01, 0xe6, 0x0f, 0x6b, 0x30, 0xdd, 0x09, 0xdc, 0x3f, 0xb0, 0xc0, + 0x15, 0x59, 0xe0, 0x7e, 0xc9, 0x60, 0x9d, 0xd3, 0xbe, 0xbb, 0x25, 0xe2, 0x11, 0x9f, 0x85, 0x29, + 0xb6, 0x44, 0x98, 0x57, 0x69, 0x62, 0xc9, 0x52, 0x40, 0xfc, 0x20, 0xcc, 0x0e, 0x9d, 0xdd, 0x64, + 0x51, 0x25, 0x69, 0xf4, 0x5e, 0x7a, 0x53, 0xb6, 0x5e, 0x1e, 0xce, 0x57, 0xfd, 0x21, 0x9f, 0x1b, + 0xc9, 0xf5, 0x58, 0xf3, 0x1a, 0xa0, 0x7c, 0xf6, 0x31, 0x6e, 0x14, 0xfe, 0x15, 0x03, 0x66, 0x3b, + 0x81, 0x4b, 0x67, 0xe2, 0xe7, 0x3a, 0xed, 0xd4, 0x6b, 0xd8, 0x13, 0xfa, 0x35, 0xec, 0x3f, 0x65, + 0x40, 0xa3, 0x13, 0xb8, 0x23, 0xb7, 0x92, 0xbc, 0xaa, 0x5b, 0x49, 0x9e, 0x2a, 0xe9, 0x7a, 0x69, + 0x18, 0xf9, 0xf5, 0x1a, 0xcc, 0xd0, 0x6a, 0x04, 0x5d, 0xd9, 0x51, 0x5a, 0x83, 0x8c, 0x6c, 0x83, + 0xe8, 0x4e, 0x1e, 0x78, 0x5e, 0x70, 0x20, 0x3b, 0x8c, 0xa7, 0x78, 0x88, 0x1e, 0xbc, 0x4f, 0x82, + 0x81, 0x8c, 0x7f, 0x95, 0xa4, 0xa9, 0x20, 0x15, 0x11, 0xdf, 0xc1, 0xf2, 0xe8, 0x6f, 0x8c, 0x1d, + 0xfd, 0x69, 0x30, 0x16, 0x2b, 0x8e, 0xa6, 0xd9, 0x3a, 0x38, 0x49, 0xac, 0x38, 0x89, 0xcc, 0x2e, + 0xb3, 0xcb, 0x33, 0xc8, 0x48, 0x78, 0x92, 0x2b, 0x10, 0xda, 0xbe, 0xd8, 0x26, 0xde, 0x6d, 0xe2, + 0xe3, 0x48, 0x9c, 0xb4, 0xa6, 0x00, 0x8a, 0xcd, 0x3c, 0xff, 0x79, 0xfc, 0xc4, 0x49, 0x7e, 0x10, + 0x9b, 0x42, 0xcc, 0x57, 0x61, 0xb1, 0x13, 0xb8, 0x54, 0x07, 0xda, 0x08, 0xc2, 0x03, 0x3b, 0x74, + 0x95, 0xf9, 0xc5, 0xc3, 0xfb, 0xd0, 0xc5, 0x32, 0x2e, 0x83, 0xf7, 0x7c, 0x89, 0x6d, 0x1d, 0x47, + 0xfa, 0xbf, 0xfd, 0x0f, 0x83, 0x4d, 0xf8, 0x4c, 0x2c, 0x4c, 0xf4, 0x3e, 0xcc, 0x46, 0xf8, 0x36, + 0xf1, 0x07, 0x0f, 0xa5, 0x0c, 0x5a, 0xe1, 0x14, 0xb8, 0xb5, 0xae, 0x96, 0xb4, 0x32, 0x98, 0xb4, + 0xd9, 0xe1, 0xc0, 0x5f, 0x8d, 0x3e, 0x8a, 0x70, 0x28, 0xe3, 0x42, 0x26, 0x00, 0x16, 0xed, 0x8d, + 0x26, 0xee, 0x06, 0xbe, 0x15, 0x04, 0xb1, 
0x18, 0x42, 0x0d, 0x86, 0x56, 0x00, 0x45, 0x83, 0x7e, + 0xdf, 0x63, 0xe6, 0x68, 0xdb, 0xbb, 0x19, 0x06, 0x83, 0x3e, 0x37, 0x86, 0xd6, 0xad, 0x82, 0x1c, + 0x3a, 0xf7, 0x77, 0x22, 0xf6, 0x2d, 0xbc, 0xff, 0x65, 0xd2, 0xfc, 0x16, 0x63, 0x37, 0x5b, 0xa4, + 0xeb, 0xdb, 0xf1, 0x20, 0xa4, 0xdc, 0x70, 0xa6, 0xcf, 0x78, 0x73, 0x1c, 0x06, 0x9e, 0x87, 0xe5, + 0xf6, 0x7f, 0xb2, 0x03, 0x55, 0x9d, 0x94, 0xf9, 0xbf, 0x81, 0xad, 0x33, 0xa6, 0x5e, 0x5d, 0x86, + 0x86, 0x70, 0x76, 0x11, 0xbb, 0xf1, 0x52, 0x79, 0x14, 0x4e, 0x4b, 0x16, 0x45, 0xef, 0x32, 0xe3, + 0x2f, 0x9f, 0xff, 0x47, 0x05, 0x56, 0x16, 0x0e, 0x19, 0x0a, 0x02, 0x3a, 0x07, 0x33, 0x22, 0x68, + 0x9e, 0x50, 0x54, 0xb8, 0x88, 0xa1, 0x03, 0xa9, 0x7a, 0xa3, 0x84, 0x10, 0x2d, 0x38, 0x2f, 0xe7, + 0x8b, 0xa6, 0xba, 0x10, 0xba, 0x0c, 0x8b, 0xb6, 0x13, 0x93, 0x7d, 0xdc, 0xc6, 0xb6, 0xeb, 0x11, + 0x1f, 0xeb, 0xd7, 0x2f, 0x8a, 0x33, 0xd9, 0xb5, 0x78, 0x3f, 0x12, 0xb5, 0x9b, 0x10, 0xd7, 0xe2, + 0x25, 0x00, 0x7d, 0xc8, 0x1f, 0x5b, 0x49, 0x44, 0xb3, 0x46, 0xee, 0xce, 0x4a, 0x56, 0xaa, 0xd6, + 0x5c, 0xff, 0xb8, 0x3a, 0xaa, 0x91, 0x60, 0x33, 0x09, 0x87, 0xfb, 0xc4, 0xc1, 0xab, 0x0e, 0x0b, + 0xb8, 0xc1, 0xf4, 0x30, 0xae, 0x3d, 0x15, 0xe4, 0xa0, 0xf3, 0x74, 0x1d, 0xa8, 0x50, 0xe1, 0x7c, + 0x92, 0x81, 0x6a, 0xb1, 0xc7, 0x40, 0x8f, 0x3d, 0x46, 0xb7, 0xb4, 0xdd, 0x20, 0x8a, 0xef, 0xe2, + 0xf8, 0x20, 0x08, 0xf7, 0xc4, 0xe5, 0x51, 0x15, 0x44, 0xe7, 0x2b, 0xb3, 0x81, 0x6e, 0xb6, 0x99, + 0xad, 0x6b, 0xd2, 0x92, 0x49, 0x99, 0xb3, 0xd9, 0x59, 0x63, 0x06, 0x2c, 0x91, 0xb3, 0xd9, 0x59, + 0x43, 0x9d, 0x7c, 0xac, 0xdb, 0xd9, 0x72, 0x63, 0x61, 0x7e, 0x89, 0xe7, 0xc3, 0xdd, 0xde, 0x87, + 0xf9, 0x24, 0xa0, 0x2e, 0xbf, 0xa9, 0x1c, 0xb5, 0xe6, 0xca, 0x5f, 0x6c, 0x29, 0xbc, 0x37, 0x9b, + 0xa3, 0xa0, 0xdd, 0x86, 0x99, 0xcf, 0x44, 0x6c, 0x7b, 0x16, 0x9a, 0xd1, 0x60, 0xdb, 0x0d, 0x7a, + 0x36, 0xf1, 0x99, 0x71, 0xa9, 0x69, 0xa5, 0x00, 0xf4, 0x16, 0x4c, 0xda, 0xf2, 0x6d, 0x1b, 0x54, + 0x7e, 0x25, 0x20, 0x79, 0xd4, 0x26, 0x29, 0x4d, 0x27, 0xbe, 0x70, 0xac, 0x14, 0xae, 0x0a, 0xa7, + 0xf9, 0xc4, 0xd7, 0x80, 0x68, 0x1d, 0x66, 0x69, 0xf1, 0xb5, 0x74, 0x85, 0x2d, 0x0c, 0xb3, 0xc2, + 0x32, 0x48, 0xe8, 0x06, 0x3c, 0x6b, 0x0f, 0xe2, 0x80, 0x29, 0xe7, 0x5b, 0xda, 0xac, 0xb8, 0x1f, + 0xec, 0x61, 0x9f, 0x59, 0x7e, 0x26, 0xad, 0xca, 0x32, 0xe8, 0x3d, 0xaa, 0x0a, 0x78, 0xc2, 0xe5, + 0x26, 0x6a, 0x9d, 0x29, 0xbf, 0x75, 0x76, 0x3f, 0x29, 0x66, 0xa9, 0x28, 0xe8, 0x3a, 0x9f, 0x64, + 0x2c, 0x12, 0x00, 0x8e, 0x5a, 0x4f, 0x95, 0xb7, 0x24, 0x09, 0x18, 0x60, 0xa9, 0x18, 0x3c, 0x44, + 0x24, 0x09, 0xd8, 0x84, 0x48, 0x0c, 0x14, 0x2d, 0x19, 0x22, 0x32, 0x93, 0xc1, 0x37, 0x5d, 0x0e, + 0x6c, 0x3d, 0xcd, 0xc3, 0xb8, 0xca, 0x34, 0xba, 0xc6, 0x16, 0x35, 0x97, 0x9f, 0x5a, 0x4b, 0xe5, + 0x17, 0x1d, 0x54, 0x39, 0xcb, 0x4a, 0x51, 0x96, 0xae, 0xc3, 0xa9, 0xdc, 0x32, 0x3e, 0x96, 0xcb, + 0xc6, 0x4f, 0xea, 0xd0, 0x4c, 0x74, 0xe6, 0x12, 0x0b, 0xc4, 0x7b, 0x05, 0x6f, 0x3f, 0x94, 0xd5, + 0xb2, 0xd8, 0x59, 0xb4, 0xfc, 0x3d, 0x8b, 0x54, 0x0c, 0x1e, 0xd3, 0xc4, 0xe0, 0x92, 0xf0, 0xc3, + 0x7c, 0x03, 0x77, 0x37, 0x3b, 0x32, 0x98, 0x29, 0x4b, 0x24, 0x31, 0x6c, 0x99, 0x5c, 0xd2, 0x38, + 0x61, 0x0c, 0x5b, 0x26, 0x97, 0x7c, 0x08, 0xa7, 0x1c, 0x3d, 0x12, 0x6c, 0xe2, 0x0f, 0xfa, 0xe2, + 0x91, 0x01, 0x5b, 0x07, 0x91, 0x95, 0xc7, 0xa6, 0xe3, 0xff, 0x69, 0x10, 0xb1, 0xf9, 0x20, 0x38, + 0x62, 0x92, 0x46, 0x9f, 0xc0, 0xa2, 0xb6, 0x44, 0x92, 0x5f, 0xc2, 0xf0, 0xbf, 0x2c, 0xa6, 0x60, + 0x7e, 0x9f, 0x6b, 0xf0, 0xa2, 0x10, 0x8e, 0x06, 0x5e, 0x3c, 0xf2, 0xbb, 0x63, 0xaa, 0x22, 0x36, + 0xb4, 0x41, 0xe6, 0x47, 0x06, 0x33, 0xc8, 0xdc, 0xc7, 0xbd, 0xbe, 
0x67, 0xc7, 0xa3, 0x76, 0xa6, + 0xb9, 0x0e, 0x93, 0xb1, 0xa0, 0x5c, 0x15, 0x84, 0x4d, 0xa9, 0x00, 0x33, 0x31, 0x25, 0x48, 0xe6, + 0x2f, 0xf3, 0x7e, 0x93, 0xb9, 0x23, 0x17, 0xfd, 0xdf, 0xd4, 0x45, 0xff, 0xe7, 0x8f, 0xa8, 0x9d, + 0x54, 0x01, 0xbe, 0xa7, 0x57, 0x8b, 0x49, 0x4a, 0x9f, 0xaf, 0x41, 0xce, 0xdc, 0x81, 0x85, 0xa2, + 0x63, 0x86, 0x91, 0xbf, 0xc4, 0xf3, 0x02, 0xcc, 0x68, 0xd1, 0x7d, 0xa5, 0x37, 0x98, 0x91, 0x78, + 0x83, 0x99, 0x3f, 0x35, 0x60, 0xa1, 0xe8, 0x59, 0x34, 0xd4, 0x86, 0xe9, 0xbe, 0x22, 0xc4, 0x56, + 0x5d, 0x26, 0x53, 0x85, 0x5d, 0x4b, 0xc3, 0x42, 0x77, 0x61, 0x1a, 0xef, 0x13, 0x27, 0x31, 0x01, + 0xd4, 0x8e, 0xcd, 0x62, 0x34, 0xfc, 0xe3, 0x87, 0xf8, 0x33, 0x0f, 0xe0, 0xa9, 0x92, 0x0b, 0x66, + 0x94, 0xd8, 0x01, 0xb3, 0x05, 0x89, 0xe8, 0x68, 0x22, 0x85, 0xda, 0x00, 0xdc, 0x14, 0xc4, 0x5e, + 0xd1, 0xa8, 0x55, 0x5f, 0x77, 0xd0, 0xee, 0xb2, 0x28, 0x78, 0xe6, 0xf7, 0x6b, 0x30, 0xce, 0x1f, + 0x34, 0x78, 0x13, 0x1a, 0xbb, 0x3c, 0xb4, 0xc5, 0x30, 0x61, 0x33, 0x64, 0x59, 0xf4, 0x1a, 0x9c, + 0x16, 0xfe, 0x89, 0x6d, 0xec, 0xd9, 0x87, 0x52, 0xd6, 0xe5, 0x61, 0xca, 0x8a, 0xb2, 0x0a, 0xee, + 0x25, 0xd7, 0x8b, 0xde, 0x59, 0xa2, 0xa2, 0x4b, 0x3f, 0x27, 0x7d, 0x8f, 0x5b, 0x3a, 0x90, 0x1d, + 0x26, 0x0c, 0xd8, 0x19, 0xc7, 0xfd, 0xdd, 0x10, 0x47, 0xbb, 0x81, 0xe7, 0x8a, 0x30, 0xd9, 0x39, + 0x38, 0x2d, 0xbb, 0x63, 0x13, 0x6f, 0x10, 0xe2, 0xb4, 0xec, 0x04, 0x2f, 0x9b, 0x85, 0x9b, 0x87, + 0xb0, 0x28, 0xc2, 0x3d, 0x4b, 0x27, 0x7d, 0x31, 0xfd, 0xaf, 0x41, 0x43, 0x3a, 0x0f, 0x55, 0x5c, + 0x20, 0xe2, 0x28, 0x69, 0xc0, 0x68, 0x4b, 0x22, 0x0d, 0x11, 0xbe, 0xf8, 0xcf, 0x1a, 0x70, 0xba, + 0xe0, 0x74, 0x93, 0x2f, 0xa2, 0x2e, 0x89, 0xe2, 0x24, 0xe6, 0x56, 0x92, 0x66, 0x77, 0x88, 0xf8, + 0xa9, 0xa1, 0x58, 0x78, 0x3c, 0x55, 0xf9, 0x14, 0x9e, 0x7c, 0x00, 0x6c, 0x4c, 0x79, 0x00, 0x6c, + 0x01, 0xc6, 0xbb, 0x89, 0xb6, 0xd8, 0xb4, 0x78, 0xc2, 0xfc, 0xf9, 0x1a, 0x3c, 0x5d, 0x7a, 0xde, + 0x5f, 0xf9, 0xf0, 0x58, 0xf1, 0x13, 0x28, 0x65, 0x91, 0xeb, 0x58, 0xdc, 0xe1, 0xe4, 0xb5, 0x02, + 0xf6, 0x9d, 0xd4, 0x72, 0x5c, 0xa9, 0x65, 0x0b, 0x1a, 0x7b, 0xf8, 0x30, 0x24, 0x7e, 0x57, 0x5a, + 0xd1, 0x44, 0x52, 0x8f, 0x3f, 0xd4, 0x78, 0xe4, 0xa7, 0xd7, 0x26, 0x33, 0xbc, 0xea, 0x4f, 0xd4, + 0x60, 0xce, 0xba, 0xd1, 0xfe, 0xc2, 0x36, 0x7f, 0x23, 0xdf, 0xfc, 0x47, 0x8e, 0xc1, 0x97, 0xed, + 0x83, 0x5f, 0x30, 0x60, 0x8e, 0xc5, 0x68, 0x10, 0x77, 0x46, 0x48, 0xe0, 0x8f, 0x78, 0xab, 0x5a, + 0x80, 0xf1, 0x90, 0xfe, 0x40, 0xf6, 0x1a, 0x4b, 0xb0, 0xe7, 0xfe, 0x28, 0x7d, 0xda, 0x67, 0xd3, + 0xfc, 0xdd, 0x2c, 0xe6, 0xd9, 0x6a, 0xe1, 0xbe, 0x47, 0x78, 0x3d, 0x52, 0x93, 0xc3, 0x93, 0xf7, + 0x6c, 0x2d, 0xac, 0xc6, 0x71, 0x3d, 0x5b, 0x8b, 0x89, 0xe8, 0x02, 0xd6, 0x6f, 0x1b, 0xb0, 0x5c, + 0x58, 0xee, 0x64, 0x76, 0xf5, 0x62, 0x2b, 0x78, 0x7d, 0xc4, 0x56, 0xf0, 0xb1, 0xb2, 0x8d, 0x70, + 0x3c, 0xef, 0x87, 0x5a, 0xd8, 0xb8, 0xcf, 0xc5, 0x0f, 0xb5, 0xb0, 0x26, 0x52, 0x64, 0xfb, 0x8d, + 0x5a, 0x49, 0x55, 0x99, 0xf0, 0xc6, 0x56, 0x10, 0xcb, 0x94, 0xef, 0x08, 0x26, 0x69, 0xf4, 0x40, + 0xf1, 0x0c, 0xe5, 0x7f, 0xbf, 0x7a, 0xac, 0x19, 0xb5, 0xa2, 0xdb, 0x75, 0x52, 0x0f, 0x51, 0x55, + 0x3a, 0xae, 0x9f, 0x40, 0x3a, 0x46, 0x17, 0x60, 0xae, 0x47, 0x7c, 0x16, 0x34, 0x5d, 0xdf, 0x75, + 0xb3, 0xe0, 0xa5, 0xab, 0x30, 0x73, 0x72, 0xb5, 0xf4, 0x9f, 0xd7, 0xe0, 0x99, 0x8a, 0xa9, 0x5e, + 0xd9, 0x79, 0x97, 0x60, 0x61, 0x67, 0xe0, 0x79, 0x87, 0xec, 0xc0, 0x11, 0xbb, 0x96, 0x2c, 0xc7, + 0x37, 0xd2, 0xc2, 0x3c, 0xb4, 0x02, 0x28, 0x10, 0xd1, 0x5f, 0x6f, 0xa6, 0x37, 0x7c, 0xea, 0xfc, + 0x65, 0x8a, 0x7c, 0x0e, 0x37, 0x17, 0xda, 0xee, 0x61, 0x42, 0x5c, 0x88, 0x1e, 0x1a, 0x10, 
0xbd, + 0x02, 0xa7, 0xec, 0x7d, 0x9b, 0xb0, 0x2b, 0xab, 0x49, 0x49, 0x2e, 0x7b, 0xe4, 0x33, 0x32, 0xee, + 0xab, 0x13, 0xe5, 0xee, 0xab, 0xd5, 0x6b, 0x5b, 0x7b, 0x6e, 0xf1, 0x57, 0x19, 0xfb, 0x2b, 0x88, + 0xdd, 0xad, 0xbd, 0x10, 0xa4, 0xb8, 0x92, 0xea, 0x40, 0xde, 0xcf, 0x51, 0xea, 0x88, 0xc2, 0x24, + 0x0a, 0x11, 0x3c, 0xfa, 0x16, 0x34, 0x5c, 0xb2, 0x4f, 0xa2, 0x20, 0x14, 0x53, 0xe9, 0xb8, 0x4e, + 0x11, 0x12, 0xdd, 0xfc, 0x2d, 0x03, 0x66, 0x64, 0x2d, 0x3f, 0x1c, 0x04, 0xb1, 0x3d, 0x62, 0xe6, + 0xfc, 0xb6, 0xc6, 0x9c, 0xbf, 0x54, 0xe5, 0xcb, 0xcd, 0x7e, 0xaf, 0x30, 0xe5, 0xeb, 0x19, 0xa6, + 0xfc, 0xd2, 0xd1, 0xc8, 0x3a, 0x33, 0xfe, 0x15, 0x03, 0x4e, 0x69, 0xf9, 0x23, 0xe7, 0x53, 0x5f, + 0xd5, 0xf9, 0xd4, 0x0b, 0x47, 0xd6, 0x50, 0xf2, 0xa7, 0xdf, 0xca, 0x56, 0x8d, 0xf1, 0xa5, 0x35, + 0x18, 0xdb, 0xb5, 0x43, 0xb7, 0xea, 0x3e, 0x7f, 0x0e, 0x69, 0xe5, 0x96, 0x1d, 0xba, 0xe2, 0xb9, + 0x4a, 0x8a, 0xcc, 0xc3, 0x97, 0x06, 0xfd, 0xe4, 0xd8, 0x51, 0xa4, 0x96, 0xba, 0xd0, 0x4c, 0x8a, + 0x3e, 0x56, 0x2f, 0x9b, 0xdf, 0xab, 0xc1, 0xe9, 0x82, 0x61, 0x41, 0xeb, 0x5a, 0xeb, 0x5e, 0x1f, + 0x72, 0x34, 0x73, 0xed, 0x5b, 0x67, 0x42, 0x97, 0x2b, 0xba, 0x7c, 0x68, 0x32, 0x1f, 0x45, 0x58, + 0x92, 0xa1, 0xe8, 0x4f, 0xac, 0x3b, 0xe8, 0x8f, 0x92, 0x7f, 0x3f, 0xd6, 0x7e, 0xff, 0x5e, 0x1d, + 0x16, 0x8a, 0xee, 0x45, 0xa0, 0xdb, 0x99, 0xf0, 0x5e, 0x97, 0x87, 0xbd, 0x51, 0xc1, 0x63, 0x7e, + 0x25, 0x37, 0x06, 0x59, 0x02, 0x59, 0x94, 0x2f, 0xb1, 0x20, 0x6a, 0x72, 0xda, 0x5f, 0x19, 0x9a, + 0x9e, 0x88, 0xbe, 0x26, 0x28, 0x26, 0x74, 0x96, 0x08, 0x4c, 0x29, 0xbf, 0x7a, 0xac, 0xc3, 0xb1, + 0x47, 0xf9, 0x9d, 0x52, 0x8b, 0xc7, 0x1c, 0xea, 0x62, 0x56, 0x3f, 0x75, 0x4c, 0x54, 0x06, 0x43, + 0x51, 0x19, 0x10, 0x8c, 0x85, 0x41, 0xf2, 0x0a, 0x37, 0xfb, 0x4e, 0xa4, 0xc4, 0xba, 0x22, 0x25, + 0x2e, 0xc0, 0xb8, 0x87, 0xf7, 0xb1, 0xd4, 0x41, 0x78, 0xc2, 0xfc, 0x3f, 0x35, 0x78, 0xae, 0xd2, + 0x4f, 0x94, 0x4a, 0x74, 0x5d, 0x3b, 0xc6, 0x07, 0xb6, 0x6c, 0xa5, 0x4c, 0x32, 0x5e, 0xc1, 0x2f, + 0x2b, 0x4b, 0xb9, 0x93, 0xdf, 0x51, 0x3e, 0xde, 0x7b, 0x1d, 0x85, 0x5a, 0xca, 0x32, 0x40, 0x14, + 0x79, 0xeb, 0x3e, 0xdd, 0x67, 0x5d, 0xe1, 0x0f, 0xa0, 0x40, 0xa8, 0xbe, 0xdf, 0x0f, 0x83, 0x98, + 0x6b, 0xe0, 0x6d, 0x7e, 0xb6, 0x22, 0x2e, 0xcc, 0x64, 0xe1, 0x54, 0x2d, 0x17, 0xce, 0x87, 0x1d, + 0xaa, 0x82, 0x71, 0xbd, 0x4a, 0x05, 0x29, 0x25, 0x98, 0xe2, 0xde, 0xd0, 0x4a, 0xb0, 0xd7, 0x42, + 0xf5, 0x1b, 0x41, 0x93, 0xb9, 0x1b, 0x41, 0xa9, 0xde, 0xd7, 0x2c, 0xb5, 0x8e, 0x41, 0x46, 0xdb, + 0xfa, 0x5f, 0x35, 0x38, 0x2d, 0xba, 0xfe, 0x11, 0x3b, 0x7c, 0x54, 0xf1, 0xd9, 0x7f, 0x3f, 0xf4, + 0xfa, 0xef, 0xd4, 0x60, 0x82, 0x4f, 0xbc, 0x11, 0xcb, 0x2a, 0x6f, 0x69, 0xaf, 0x44, 0x9f, 0x2b, + 0x9f, 0xf0, 0xd9, 0x27, 0xa2, 0x0b, 0x57, 0xeb, 0xfb, 0x00, 0x11, 0x0b, 0x35, 0x4e, 0x0b, 0x8b, + 0x2b, 0x51, 0x2f, 0x57, 0xd0, 0xdc, 0x4a, 0x0a, 0x73, 0xca, 0x0a, 0xf6, 0xb1, 0x9e, 0xa0, 0x9e, + 0x56, 0xb9, 0xdd, 0xbb, 0x30, 0x97, 0xa1, 0x7b, 0x2c, 0x5d, 0xe0, 0xbb, 0x06, 0xcc, 0x65, 0xde, + 0xe4, 0xf9, 0x1c, 0x9f, 0xaf, 0xfe, 0xcb, 0x06, 0x9c, 0xca, 0x3d, 0x33, 0xf3, 0x85, 0x7a, 0xbb, + 0xfa, 0xcf, 0x18, 0x00, 0xbc, 0x86, 0x23, 0x17, 0x35, 0x5f, 0xd3, 0x45, 0xcd, 0xa5, 0x0a, 0x76, + 0x2c, 0x64, 0xcc, 0x7f, 0x62, 0xc0, 0x3c, 0x87, 0xfc, 0xff, 0xfb, 0x64, 0xf5, 0x9a, 0x9c, 0x7f, + 0x95, 0xa1, 0x22, 0xab, 0x6f, 0xb5, 0x9b, 0xbf, 0x66, 0x00, 0xe2, 0x54, 0xb2, 0x0f, 0x25, 0x70, + 0xc6, 0xa9, 0xe8, 0x60, 0x0a, 0xe4, 0xf3, 0x78, 0xb6, 0xfa, 0xff, 0xd6, 0x58, 0x7b, 0x35, 0x7f, + 0x87, 0x36, 0x4c, 0x3b, 0x76, 0xdf, 0xde, 0x26, 0x1e, 0x89, 0x09, 0x8e, 0xaa, 0x8e, 0x51, 0xd6, + 0x94, 0x72, 0x96, 
0x86, 0xc5, 0x63, 0x3f, 0x91, 0x7d, 0xe2, 0xe1, 0x2e, 0x93, 0x9d, 0xd9, 0xc6, + 0x90, 0x42, 0x0a, 0x3c, 0xa9, 0xea, 0xa3, 0xf1, 0xa4, 0x1a, 0x3b, 0xca, 0x93, 0x6a, 0xbc, 0xc0, + 0x93, 0xea, 0x0a, 0x9c, 0x91, 0xec, 0x9e, 0xa6, 0x37, 0x88, 0x87, 0xc5, 0xb6, 0xc9, 0xdd, 0xd5, + 0x4a, 0x72, 0xd1, 0x3b, 0xd0, 0xb2, 0x3d, 0x2f, 0x38, 0xe8, 0xc8, 0x86, 0xad, 0x47, 0x8e, 0xed, + 0xa5, 0x31, 0x43, 0x26, 0xad, 0xd2, 0x7c, 0xf3, 0xeb, 0x70, 0x7a, 0x0b, 0x87, 0x22, 0x88, 0x6a, + 0x3a, 0xdd, 0x56, 0xa1, 0x19, 0x66, 0x56, 0xc9, 0x70, 0xaf, 0x01, 0xa6, 0xe7, 0x38, 0xff, 0xc2, + 0x80, 0x86, 0x70, 0xa7, 0x18, 0xf1, 0x8e, 0xf5, 0x86, 0xa6, 0x5d, 0x3f, 0x5f, 0xcc, 0x13, 0xd8, + 0x8f, 0x15, 0xbd, 0xfa, 0xed, 0x8c, 0x5e, 0xfd, 0x42, 0x15, 0x9a, 0xae, 0x51, 0xff, 0xb8, 0x06, + 0xb3, 0xba, 0x63, 0xc8, 0x88, 0x1b, 0xf4, 0x2e, 0x34, 0x22, 0xe1, 0xd3, 0x53, 0xf1, 0x98, 0x60, + 0xee, 0x95, 0x26, 0x81, 0x53, 0xe8, 0x1b, 0x54, 0x7f, 0x64, 0xdf, 0xa0, 0xa3, 0x5c, 0x67, 0xc6, + 0x8e, 0x76, 0x9d, 0x31, 0x7f, 0xc8, 0x78, 0x90, 0x0a, 0x1f, 0xf9, 0x0e, 0xf1, 0x96, 0xce, 0xaf, + 0xcc, 0x8a, 0x61, 0x15, 0x55, 0x90, 0x3b, 0xc5, 0x2f, 0x1a, 0x30, 0x25, 0x72, 0x46, 0x5e, 0xab, + 0xd7, 0xf5, 0x5a, 0x3d, 0x53, 0x51, 0x2b, 0x59, 0x9d, 0x7f, 0x9c, 0x56, 0xa7, 0xea, 0x05, 0xee, + 0xe4, 0xc5, 0xcb, 0x5a, 0xe6, 0xfd, 0x6c, 0xf9, 0x42, 0x66, 0x5d, 0x79, 0x21, 0xb3, 0x23, 0x5f, + 0x70, 0x62, 0x6f, 0xe5, 0x8e, 0x9d, 0xf0, 0xd1, 0x18, 0x85, 0x86, 0xf4, 0xa5, 0x63, 0xf4, 0xb8, + 0x61, 0x31, 0x49, 0x9b, 0x5f, 0x66, 0xbc, 0x84, 0x35, 0xe0, 0x28, 0xcf, 0xd6, 0x9f, 0x8c, 0x27, + 0x8d, 0xdd, 0xe2, 0x57, 0x6d, 0x14, 0x37, 0xd9, 0xea, 0x35, 0xad, 0x3c, 0x82, 0x89, 0x36, 0x73, + 0x66, 0xeb, 0x57, 0x8f, 0xe0, 0x06, 0xa5, 0x86, 0x6a, 0x76, 0xcb, 0x9d, 0x5d, 0x51, 0xde, 0xec, + 0xc8, 0x00, 0x4f, 0x09, 0x20, 0x11, 0x75, 0xc7, 0x14, 0x51, 0xf7, 0x2c, 0x4c, 0x25, 0x61, 0x06, + 0x3b, 0xf2, 0xf9, 0x2a, 0x15, 0x84, 0x2e, 0xc0, 0x5c, 0xc4, 0xa3, 0x19, 0x4a, 0x3f, 0x38, 0xa1, + 0x2d, 0x64, 0xc1, 0xe8, 0x3c, 0xcc, 0x7a, 0x6a, 0x04, 0xe9, 0x8e, 0xd0, 0x1a, 0x32, 0x50, 0xca, + 0xea, 0x55, 0x88, 0xb8, 0x7f, 0x67, 0xfb, 0x5d, 0x1c, 0x89, 0x10, 0x70, 0xa5, 0xf9, 0x74, 0x0b, + 0x92, 0x95, 0x53, 0x5c, 0x21, 0x35, 0x18, 0xba, 0x0c, 0x8b, 0x32, 0x7d, 0x3f, 0xb4, 0x77, 0x76, + 0x88, 0x23, 0xfc, 0x3f, 0xa7, 0x58, 0xe1, 0xe2, 0x4c, 0xf4, 0x1a, 0x9c, 0xde, 0xc5, 0xb6, 0x17, + 0xef, 0xae, 0xed, 0x62, 0x67, 0xef, 0xae, 0x9c, 0x1f, 0xd3, 0xfc, 0xc4, 0xbd, 0x20, 0x8b, 0xb6, + 0xa3, 0x3f, 0xd8, 0xf6, 0x48, 0xb4, 0x7b, 0x37, 0xf7, 0x9e, 0x2a, 0xf7, 0xa5, 0x2c, 0xcd, 0x47, + 0xdf, 0x84, 0xc5, 0x4c, 0xf7, 0x09, 0x77, 0xb6, 0xd9, 0xf2, 0x5b, 0xb5, 0x5b, 0x45, 0x08, 0x56, + 0x31, 0x9d, 0x47, 0x3b, 0x48, 0xf8, 0x06, 0x45, 0x56, 0x76, 0x11, 0xf4, 0x3e, 0x4c, 0xab, 0x43, + 0x22, 0x58, 0xcb, 0xf9, 0xa3, 0x82, 0x88, 0x8b, 0x3d, 0x48, 0xc3, 0x35, 0x1f, 0xc0, 0x62, 0x61, + 0x4b, 0xd0, 0x35, 0x98, 0x74, 0x3c, 0x82, 0xfd, 0x78, 0xb3, 0x53, 0xe5, 0x0c, 0xbe, 0x26, 0xca, + 0x88, 0xf6, 0x27, 0x38, 0xe6, 0x67, 0x06, 0x3c, 0x7f, 0xc4, 0x55, 0xd7, 0x8c, 0x56, 0x6b, 0xe4, + 0xb4, 0xda, 0x0b, 0x32, 0xdc, 0xe0, 0xdd, 0x8c, 0x50, 0x9a, 0x05, 0x9f, 0xe8, 0x99, 0x38, 0xcd, + 0x38, 0x33, 0x7e, 0x0c, 0x89, 0x24, 0x7d, 0x4c, 0xf5, 0x3f, 0x1a, 0xb0, 0x98, 0x34, 0xf2, 0x0b, + 0xd4, 0xb4, 0x8d, 0x7c, 0xd3, 0x4e, 0x62, 0x06, 0x31, 0x2f, 0xc1, 0xc4, 0xd6, 0x61, 0xe4, 0xc4, + 0xde, 0x31, 0xae, 0xc4, 0xec, 0xc1, 0x5c, 0xe6, 0x09, 0xab, 0xe4, 0x8d, 0x31, 0x63, 0x24, 0x6f, + 0x8c, 0xa9, 0x6f, 0x11, 0xfe, 0x39, 0x03, 0xc6, 0x59, 0x8c, 0xd4, 0x61, 0x57, 0x14, 0xed, 0x4e, + 0xbc, 0xb3, 0x83, 0x1d, 0xf9, 0x56, 0x99, 
0x48, 0xa1, 0x5b, 0xd0, 0x8c, 0x49, 0x0f, 0xaf, 0xba, + 0xae, 0x30, 0xf8, 0x1c, 0xd3, 0xff, 0x32, 0x41, 0x36, 0xbf, 0x6f, 0x00, 0xa4, 0x7e, 0xbb, 0xc7, + 0x8c, 0x93, 0x9b, 0x54, 0xba, 0x5e, 0x5c, 0xe9, 0x31, 0xad, 0xd2, 0xaf, 0xc0, 0xa9, 0xd4, 0x2b, + 0x58, 0x77, 0xc1, 0xcf, 0x67, 0x98, 0xdb, 0x30, 0x21, 0x6e, 0xcd, 0x17, 0x8d, 0x66, 0x5b, 0xc6, + 0xff, 0xd4, 0x2e, 0x6d, 0x9f, 0x2d, 0x77, 0xfc, 0x91, 0x81, 0x65, 0x55, 0x2c, 0xf3, 0x06, 0x4c, + 0xf3, 0xdc, 0x36, 0x66, 0x72, 0x7a, 0xd1, 0x9f, 0x96, 0x01, 0x5c, 0x96, 0xab, 0xc4, 0x57, 0x54, + 0x20, 0xe6, 0x5f, 0x35, 0x60, 0xea, 0x63, 0x71, 0xb5, 0x59, 0x3c, 0x93, 0x5a, 0x24, 0xb7, 0x94, + 0x05, 0x89, 0x61, 0x71, 0x36, 0x29, 0x62, 0x12, 0x9e, 0xa4, 0x69, 0xa5, 0x00, 0xd4, 0x62, 0x4f, + 0x51, 0xb3, 0x3c, 0xe1, 0xa0, 0x26, 0x92, 0xe8, 0x65, 0x98, 0xe7, 0xc5, 0xd2, 0x27, 0xf2, 0xa5, + 0x51, 0x2f, 0x0b, 0x37, 0xff, 0x9b, 0x01, 0xf3, 0x59, 0xff, 0x27, 0xf4, 0x35, 0x98, 0xe0, 0x6b, + 0x47, 0xcc, 0xf4, 0x0a, 0xd3, 0x97, 0xe2, 0x35, 0x25, 0x70, 0xd0, 0x07, 0x30, 0xe5, 0xa6, 0x6f, + 0xc2, 0x57, 0x3d, 0x65, 0x5c, 0xf8, 0x5c, 0xbf, 0xa5, 0x62, 0xa3, 0x75, 0x76, 0xb9, 0x8a, 0x87, + 0x03, 0xaf, 0x3a, 0xf7, 0x4b, 0xde, 0x2f, 0x56, 0x08, 0xa5, 0x98, 0xe6, 0x4f, 0xe7, 0xe4, 0x78, + 0x0a, 0xbe, 0xa6, 0x46, 0x7e, 0x31, 0x4e, 0x1c, 0xf9, 0xa5, 0x0d, 0x93, 0x58, 0x3c, 0x5e, 0x5f, + 0x15, 0x9d, 0xaa, 0xe8, 0x81, 0x7b, 0x2b, 0xc1, 0x2c, 0x0e, 0xa8, 0x53, 0x7f, 0x02, 0x01, 0x75, + 0xc6, 0x46, 0x1e, 0x50, 0x67, 0x15, 0x1a, 0x5d, 0xfe, 0x06, 0xa5, 0x60, 0xd7, 0x85, 0x83, 0x55, + 0xf0, 0x4c, 0xa5, 0x25, 0xf1, 0xd0, 0xb5, 0x64, 0xf2, 0x4d, 0x94, 0xcb, 0x03, 0x79, 0xf3, 0x4d, + 0x32, 0xfd, 0x44, 0x10, 0x9d, 0xc6, 0x31, 0x83, 0xe8, 0x5c, 0x95, 0x31, 0x70, 0x26, 0xcb, 0x4f, + 0xa6, 0x73, 0x2f, 0xd3, 0xc9, 0xc8, 0x37, 0x5a, 0x04, 0xa0, 0xe6, 0x23, 0x44, 0x00, 0xda, 0x83, + 0xc5, 0x7e, 0x51, 0xd8, 0x29, 0x11, 0xcb, 0xe6, 0xcd, 0xa1, 0x43, 0x68, 0x69, 0x3f, 0x28, 0xa6, + 0x49, 0x7b, 0x2a, 0xdc, 0x76, 0x45, 0xb4, 0x9b, 0x17, 0x4b, 0xe2, 0x06, 0xe5, 0xa3, 0x05, 0x8d, + 0x26, 0xca, 0x4d, 0x1a, 0x2c, 0x68, 0xe6, 0x44, 0xc1, 0x82, 0xae, 0x25, 0xc1, 0x82, 0x2a, 0xee, + 0x0f, 0xf1, 0x60, 0x41, 0x85, 0x21, 0x82, 0x94, 0x70, 0x3f, 0x73, 0x27, 0x0c, 0xf7, 0x73, 0x47, + 0x67, 0x74, 0x3c, 0x74, 0xcd, 0x57, 0x8e, 0x60, 0x74, 0x1a, 0x29, 0x8d, 0xd5, 0xf1, 0xa0, 0x45, + 0xa7, 0x8e, 0x15, 0xb4, 0xe8, 0xa6, 0x1a, 0x2b, 0x08, 0x1d, 0x11, 0x3b, 0x87, 0x16, 0x2a, 0x8b, + 0x10, 0x74, 0x53, 0xe5, 0xb3, 0xa7, 0xcb, 0x09, 0x25, 0x7c, 0x56, 0x27, 0x94, 0xe0, 0xe6, 0x43, + 0x0d, 0x2d, 0x3c, 0x86, 0x50, 0x43, 0x8b, 0xa3, 0x08, 0x35, 0x74, 0xe6, 0x31, 0x84, 0x1a, 0x7a, + 0xea, 0x09, 0x84, 0x1a, 0x6a, 0x3d, 0x62, 0xa8, 0xa1, 0xd5, 0x34, 0xd4, 0xd0, 0xd3, 0xe5, 0xfd, + 0x58, 0x70, 0x8e, 0x99, 0x06, 0x18, 0xba, 0x09, 0xcd, 0xbe, 0x74, 0xb8, 0x16, 0x77, 0xa5, 0x8a, + 0x63, 0x43, 0x16, 0x79, 0x65, 0x5b, 0x29, 0x2e, 0x25, 0x94, 0x06, 0x1b, 0x7a, 0xa6, 0x42, 0x4b, + 0x2d, 0x52, 0x4e, 0x94, 0x10, 0x43, 0xe6, 0xdf, 0x31, 0x60, 0xb9, 0x7a, 0x3a, 0xa5, 0xaa, 0x4c, + 0x27, 0xb5, 0xb9, 0x28, 0x90, 0xd2, 0xfb, 0x10, 0xaf, 0xc0, 0xa9, 0xe4, 0x90, 0x93, 0x2a, 0xf5, + 0x4a, 0x08, 0xc9, 0x7c, 0x06, 0xb3, 0x6c, 0xa8, 0xc0, 0xcd, 0xb6, 0x7c, 0xf3, 0x2b, 0x03, 0x36, + 0xff, 0xb8, 0x01, 0x4f, 0x95, 0x44, 0x8e, 0x28, 0xbd, 0x48, 0x70, 0x07, 0xe6, 0xfa, 0x7a, 0xd1, + 0x23, 0x6e, 0xd6, 0x68, 0xf1, 0x28, 0xb2, 0xb8, 0x37, 0x16, 0xfe, 0xe5, 0x67, 0xcb, 0xc6, 0xbf, + 0xf9, 0x6c, 0xd9, 0xf8, 0xcf, 0x9f, 0x2d, 0x1b, 0xbf, 0xf2, 0x5f, 0x97, 0xff, 0xd0, 0xcf, 0xd4, + 0xf6, 0x5f, 0xff, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x32, 
0x34, 0xa9, 0x5f, 0xa2, 0x00, + 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/core/v1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/core/v1/register.go new file mode 100644 index 00000000..dcf8f46b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/core/v1/register.go @@ -0,0 +1,35 @@ +package v1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("", "v1", "componentstatuses", false, &ComponentStatus{}) + k8s.Register("", "v1", "configmaps", true, &ConfigMap{}) + k8s.Register("", "v1", "endpoints", true, &Endpoints{}) + k8s.Register("", "v1", "limitranges", true, &LimitRange{}) + k8s.Register("", "v1", "namespaces", false, &Namespace{}) + k8s.Register("", "v1", "nodes", false, &Node{}) + k8s.Register("", "v1", "persistentvolumeclaims", true, &PersistentVolumeClaim{}) + k8s.Register("", "v1", "persistentvolumes", false, &PersistentVolume{}) + k8s.Register("", "v1", "pods", true, &Pod{}) + k8s.Register("", "v1", "replicationcontrollers", true, &ReplicationController{}) + k8s.Register("", "v1", "resourcequotas", true, &ResourceQuota{}) + k8s.Register("", "v1", "secrets", true, &Secret{}) + k8s.Register("", "v1", "services", true, &Service{}) + k8s.Register("", "v1", "serviceaccounts", true, &ServiceAccount{}) + + k8s.RegisterList("", "v1", "componentstatuses", false, &ComponentStatusList{}) + k8s.RegisterList("", "v1", "configmaps", true, &ConfigMapList{}) + k8s.RegisterList("", "v1", "endpoints", true, &EndpointsList{}) + k8s.RegisterList("", "v1", "limitranges", true, &LimitRangeList{}) + k8s.RegisterList("", "v1", "namespaces", false, &NamespaceList{}) + k8s.RegisterList("", "v1", "nodes", false, &NodeList{}) + k8s.RegisterList("", "v1", "persistentvolumeclaims", true, &PersistentVolumeClaimList{}) + k8s.RegisterList("", "v1", "persistentvolumes", false, &PersistentVolumeList{}) + k8s.RegisterList("", "v1", "pods", true, &PodList{}) + k8s.RegisterList("", "v1", "replicationcontrollers", true, &ReplicationControllerList{}) + k8s.RegisterList("", "v1", "resourcequotas", true, &ResourceQuotaList{}) + k8s.RegisterList("", "v1", "secrets", true, &SecretList{}) + k8s.RegisterList("", "v1", "services", true, &ServiceList{}) + k8s.RegisterList("", "v1", "serviceaccounts", true, &ServiceAccountList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/extensions/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/extensions/v1beta1/generated.pb.go index bc84f3b5..b1fe94e7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/extensions/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/extensions/v1beta1/generated.pb.go @@ -1,20 +1,21 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/extensions/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto + k8s.io/api/extensions/v1beta1/generated.proto It has these top-level messages: - APIVersion + AllowedFlexVolume + AllowedHostPath CustomMetricCurrentStatus CustomMetricCurrentStatusList CustomMetricTarget CustomMetricTargetList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -31,6 +32,7 @@ HTTPIngressRuleValue HostPortRange IDRange + IPBlock Ingress IngressBackend IngressList @@ -40,6 +42,7 @@ IngressStatus IngressTLS NetworkPolicy + NetworkPolicyEgressRule NetworkPolicyIngressRule NetworkPolicyList NetworkPolicyPeer @@ -63,22 +66,19 @@ ScaleSpec ScaleStatus SupplementalGroupsStrategyOptions - ThirdPartyResource - ThirdPartyResourceData - ThirdPartyResourceDataList - ThirdPartyResourceList */ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_resource "github.com/ericchiang/k8s/api/resource" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_api_core_v1 "github.com/ericchiang/k8s/apis/core/v1" +import _ "github.com/ericchiang/k8s/apis/policy/v1beta1" +import k8s_io_apimachinery_pkg_api_resource "github.com/ericchiang/k8s/apis/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" -import k8s_io_kubernetes_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" +import k8s_io_apimachinery_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" import io "io" @@ -93,22 +93,47 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// An APIVersion represents a single concrete version of an object model. -type APIVersion struct { - // Name of this version (e.g. 'v1'). - // +optional - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. + Driver *string `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *APIVersion) Reset() { *m = APIVersion{} } -func (m *APIVersion) String() string { return proto.CompactTextString(m) } -func (*APIVersion) ProtoMessage() {} -func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (m *AllowedFlexVolume) String() string { return proto.CompactTextString(m) } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *APIVersion) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *AllowedFlexVolume) GetDriver() string { + if m != nil && m.Driver != nil { + return *m.Driver + } + return "" +} + +// defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +type AllowedHostPath struct { + // is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. 
+ // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + PathPrefix *string `protobuf:"bytes,1,opt,name=pathPrefix" json:"pathPrefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (m *AllowedHostPath) String() string { return proto.CompactTextString(m) } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *AllowedHostPath) GetPathPrefix() string { + if m != nil && m.PathPrefix != nil { + return *m.PathPrefix } return "" } @@ -117,15 +142,15 @@ type CustomMetricCurrentStatus struct { // Custom Metric name. Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Custom Metric value (average). - Value *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } func (m *CustomMetricCurrentStatus) String() string { return proto.CompactTextString(m) } func (*CustomMetricCurrentStatus) ProtoMessage() {} func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptorGenerated, []int{2} } func (m *CustomMetricCurrentStatus) GetName() string { @@ -135,7 +160,7 @@ func (m *CustomMetricCurrentStatus) GetName() string { return "" } -func (m *CustomMetricCurrentStatus) GetValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *CustomMetricCurrentStatus) GetValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Value } @@ -151,7 +176,7 @@ func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurre func (m *CustomMetricCurrentStatusList) String() string { return proto.CompactTextString(m) } func (*CustomMetricCurrentStatusList) ProtoMessage() {} func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{3} } func (m *CustomMetricCurrentStatusList) GetItems() []*CustomMetricCurrentStatus { @@ -166,14 +191,14 @@ type CustomMetricTarget struct { // Custom Metric name. Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Custom Metric value (average). 
- Value *k8s_io_kubernetes_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *k8s_io_apimachinery_pkg_api_resource.Quantity `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } func (m *CustomMetricTarget) String() string { return proto.CompactTextString(m) } func (*CustomMetricTarget) ProtoMessage() {} -func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *CustomMetricTarget) GetName() string { if m != nil && m.Name != nil { @@ -182,7 +207,7 @@ func (m *CustomMetricTarget) GetName() string { return "" } -func (m *CustomMetricTarget) GetValue() *k8s_io_kubernetes_pkg_api_resource.Quantity { +func (m *CustomMetricTarget) GetValue() *k8s_io_apimachinery_pkg_api_resource.Quantity { if m != nil { return m.Value } @@ -197,7 +222,7 @@ type CustomMetricTargetList struct { func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } func (m *CustomMetricTargetList) String() string { return proto.CompactTextString(m) } func (*CustomMetricTargetList) ProtoMessage() {} -func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *CustomMetricTargetList) GetItems() []*CustomMetricTarget { if m != nil { @@ -206,21 +231,23 @@ func (m *CustomMetricTargetList) GetItems() []*CustomMetricTarget { return nil } +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for +// more information. // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // The desired behavior of this daemon set. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *DaemonSetSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // The current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *DaemonSetStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -229,9 +256,9 @@ type DaemonSet struct { func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (m *DaemonSet) String() string { return proto.CompactTextString(m) } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *DaemonSet) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *DaemonSet) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -252,12 +279,70 @@ func (m *DaemonSet) GetStatus() *DaemonSetStatus { return nil } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // Status of the condition, one of True, False, Unknown. + Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + // The reason for the condition's last transition. + // +optional + Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message *string `protobuf:"bytes,5,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (m *DaemonSetCondition) String() string { return proto.CompactTextString(m) } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *DaemonSetCondition) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *DaemonSetCondition) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +func (m *DaemonSetCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { + if m != nil { + return m.LastTransitionTime + } + return nil +} + +func (m *DaemonSetCondition) GetReason() string { + if m != nil && m.Reason != nil { + return *m.Reason + } + return "" +} + +func (m *DaemonSetCondition) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // A list of daemon sets. 
Items []*DaemonSet `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -266,9 +351,9 @@ type DaemonSetList struct { func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (m *DaemonSetList) String() string { return proto.CompactTextString(m) } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *DaemonSetList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *DaemonSetList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -287,15 +372,15 @@ type DaemonSetSpec struct { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template - Template *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec `protobuf:"bytes,2,opt,name=template" json:"template,omitempty"` + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template *k8s_io_api_core_v1.PodTemplateSpec `protobuf:"bytes,2,opt,name=template" json:"template,omitempty"` // An update strategy to replace existing DaemonSet pods with new pods. // +optional UpdateStrategy *DaemonSetUpdateStrategy `protobuf:"bytes,3,opt,name=updateStrategy" json:"updateStrategy,omitempty"` @@ -305,26 +390,32 @@ type DaemonSetSpec struct { // is ready). // +optional MinReadySeconds *int32 `protobuf:"varint,4,opt,name=minReadySeconds" json:"minReadySeconds,omitempty"` + // DEPRECATED. // A sequence number representing a specific generation of the template. // Populated by the system. It can be set only during the creation. // +optional TemplateGeneration *int64 `protobuf:"varint,5,opt,name=templateGeneration" json:"templateGeneration,omitempty"` - XXX_unrecognized []byte `json:"-"` + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. 
+ // +optional + RevisionHistoryLimit *int32 `protobuf:"varint,6,opt,name=revisionHistoryLimit" json:"revisionHistoryLimit,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (m *DaemonSetSpec) String() string { return proto.CompactTextString(m) } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *DaemonSetSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *DaemonSetSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } return nil } -func (m *DaemonSetSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { +func (m *DaemonSetSpec) GetTemplate() *k8s_io_api_core_v1.PodTemplateSpec { if m != nil { return m.Template } @@ -352,19 +443,26 @@ func (m *DaemonSetSpec) GetTemplateGeneration() int64 { return 0 } +func (m *DaemonSetSpec) GetRevisionHistoryLimit() int32 { + if m != nil && m.RevisionHistoryLimit != nil { + return *m.RevisionHistoryLimit + } + return 0 +} + // DaemonSetStatus represents the current status of a daemon set. type DaemonSetStatus struct { // The number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ CurrentNumberScheduled *int32 `protobuf:"varint,1,opt,name=currentNumberScheduled" json:"currentNumberScheduled,omitempty"` // The number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ NumberMisscheduled *int32 `protobuf:"varint,2,opt,name=numberMisscheduled" json:"numberMisscheduled,omitempty"` // The total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ DesiredNumberScheduled *int32 `protobuf:"varint,3,opt,name=desiredNumberScheduled" json:"desiredNumberScheduled,omitempty"` // The number of nodes that should be running the daemon pod and have one // or more of the daemon pod running and ready. @@ -385,13 +483,23 @@ type DaemonSetStatus struct { // (ready for at least spec.minReadySeconds) // +optional NumberUnavailable *int32 `protobuf:"varint,8,opt,name=numberUnavailable" json:"numberUnavailable,omitempty"` - XXX_unrecognized []byte `json:"-"` + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `protobuf:"varint,9,opt,name=collisionCount" json:"collisionCount,omitempty"` + // Represents the latest available observations of a DaemonSet's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []*DaemonSetCondition `protobuf:"bytes,10,rep,name=conditions" json:"conditions,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (m *DaemonSetStatus) String() string { return proto.CompactTextString(m) } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DaemonSetStatus) GetCurrentNumberScheduled() int32 { if m != nil && m.CurrentNumberScheduled != nil { @@ -449,6 +557,20 @@ func (m *DaemonSetStatus) GetNumberUnavailable() int32 { return 0 } +func (m *DaemonSetStatus) GetCollisionCount() int32 { + if m != nil && m.CollisionCount != nil { + return *m.CollisionCount + } + return 0 +} + +func (m *DaemonSetStatus) GetConditions() []*DaemonSetCondition { + if m != nil { + return m.Conditions + } + return nil +} + type DaemonSetUpdateStrategy struct { // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". // Default is OnDelete. @@ -457,17 +579,19 @@ type DaemonSetUpdateStrategy struct { // Rolling update config params. Present only if type = "RollingUpdate". // --- // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as DeploymentStrategy.RollingUpdate. + // to be. Same as Deployment `strategy.rollingUpdate`. // See https://github.com/kubernetes/kubernetes/issues/35345 // +optional RollingUpdate *RollingUpdateDaemonSet `protobuf:"bytes,2,opt,name=rollingUpdate" json:"rollingUpdate,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (m *DaemonSetUpdateStrategy) String() string { return proto.CompactTextString(m) } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (m *DaemonSetUpdateStrategy) String() string { return proto.CompactTextString(m) } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} func (m *DaemonSetUpdateStrategy) GetType() string { if m != nil && m.Type != nil { @@ -483,11 +607,13 @@ func (m *DaemonSetUpdateStrategy) GetRollingUpdate() *RollingUpdateDaemonSet { return nil } +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { // Standard object metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Specification of the desired behavior of the Deployment. 
// +optional Spec *DeploymentSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -500,9 +626,9 @@ type Deployment struct { func (m *Deployment) Reset() { *m = Deployment{} } func (m *Deployment) String() string { return proto.CompactTextString(m) } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -func (m *Deployment) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Deployment) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -530,9 +656,9 @@ type DeploymentCondition struct { // Status of the condition, one of True, False, Unknown. Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // The last time this condition was updated. - LastUpdateTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=lastUpdateTime" json:"lastUpdateTime,omitempty"` + LastUpdateTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,6,opt,name=lastUpdateTime" json:"lastUpdateTime,omitempty"` // Last time the condition transitioned from one status to another. - LastTransitionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,7,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` // The reason for the condition's last transition. Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` // A human readable message indicating details about the transition. @@ -543,7 +669,7 @@ type DeploymentCondition struct { func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (m *DeploymentCondition) String() string { return proto.CompactTextString(m) } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentCondition) GetType() string { if m != nil && m.Type != nil { @@ -559,14 +685,14 @@ func (m *DeploymentCondition) GetStatus() string { return "" } -func (m *DeploymentCondition) GetLastUpdateTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *DeploymentCondition) GetLastUpdateTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastUpdateTime } return nil } -func (m *DeploymentCondition) GetLastTransitionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *DeploymentCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTransitionTime } @@ -591,7 +717,7 @@ func (m *DeploymentCondition) GetMessage() string { type DeploymentList struct { // Standard list metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is the list of Deployments. 
Items []*Deployment `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -600,9 +726,9 @@ type DeploymentList struct { func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (m *DeploymentList) String() string { return proto.CompactTextString(m) } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -func (m *DeploymentList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *DeploymentList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -616,6 +742,7 @@ func (m *DeploymentList) GetItems() []*Deployment { return nil } +// DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { // Required: This must match the Name of a deployment. @@ -631,7 +758,7 @@ type DeploymentRollback struct { func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (m *DeploymentRollback) String() string { return proto.CompactTextString(m) } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *DeploymentRollback) GetName() string { if m != nil && m.Name != nil { @@ -663,11 +790,12 @@ type DeploymentSpec struct { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` // Template describes the pods that will be created. - Template *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` + Template *k8s_io_api_core_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys Strategy *DeploymentStrategy `protobuf:"bytes,4,opt,name=strategy" json:"strategy,omitempty"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -682,16 +810,17 @@ type DeploymentSpec struct { // deployment controller. // +optional Paused *bool `protobuf:"varint,7,opt,name=paused" json:"paused,omitempty"` + // DEPRECATED. // The config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `protobuf:"bytes,8,opt,name=rollbackTo" json:"rollbackTo,omitempty"` // The maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Once autoRollback is - // implemented, the deployment controller will automatically rollback failed - // deployments. Note that progress will not be estimated during the time a - // deployment is paused. This is not set by default. 
+ // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. This is not set + // by default. + // +optional ProgressDeadlineSeconds *int32 `protobuf:"varint,9,opt,name=progressDeadlineSeconds" json:"progressDeadlineSeconds,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -699,7 +828,7 @@ type DeploymentSpec struct { func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (m *DeploymentSpec) String() string { return proto.CompactTextString(m) } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *DeploymentSpec) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -708,14 +837,14 @@ func (m *DeploymentSpec) GetReplicas() int32 { return 0 } -func (m *DeploymentSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *DeploymentSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } return nil } -func (m *DeploymentSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { +func (m *DeploymentSpec) GetTemplate() *k8s_io_api_core_v1.PodTemplateSpec { if m != nil { return m.Template } @@ -781,18 +910,27 @@ type DeploymentStatus struct { // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. // +optional AvailableReplicas *int32 `protobuf:"varint,4,opt,name=availableReplicas" json:"availableReplicas,omitempty"` - // Total number of unavailable pods targeted by this deployment. + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas *int32 `protobuf:"varint,5,opt,name=unavailableReplicas" json:"unavailableReplicas,omitempty"` // Represents the latest available observations of a deployment's current state. - Conditions []*DeploymentCondition `protobuf:"bytes,6,rep,name=conditions" json:"conditions,omitempty"` - XXX_unrecognized []byte `json:"-"` + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []*DeploymentCondition `protobuf:"bytes,6,rep,name=conditions" json:"conditions,omitempty"` + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. 
+ // +optional + CollisionCount *int32 `protobuf:"varint,8,opt,name=collisionCount" json:"collisionCount,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (m *DeploymentStatus) String() string { return proto.CompactTextString(m) } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *DeploymentStatus) GetObservedGeneration() int64 { if m != nil && m.ObservedGeneration != nil { @@ -843,6 +981,13 @@ func (m *DeploymentStatus) GetConditions() []*DeploymentCondition { return nil } +func (m *DeploymentStatus) GetCollisionCount() int32 { + if m != nil && m.CollisionCount != nil { + return *m.CollisionCount + } + return 0 +} + // DeploymentStrategy describes how to replace existing pods with new ones. type DeploymentStrategy struct { // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. @@ -861,7 +1006,7 @@ type DeploymentStrategy struct { func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (m *DeploymentStrategy) String() string { return proto.CompactTextString(m) } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *DeploymentStrategy) GetType() string { if m != nil && m.Type != nil { @@ -892,7 +1037,7 @@ type FSGroupStrategyOptions struct { func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (m *FSGroupStrategyOptions) String() string { return proto.CompactTextString(m) } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *FSGroupStrategyOptions) GetRule() string { if m != nil && m.Rule != nil { @@ -929,7 +1074,7 @@ type HTTPIngressPath struct { func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (m *HTTPIngressPath) String() string { return proto.CompactTextString(m) } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *HTTPIngressPath) GetPath() string { if m != nil && m.Path != nil { @@ -959,7 +1104,7 @@ type HTTPIngressRuleValue struct { func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (m *HTTPIngressRuleValue) String() string { return proto.CompactTextString(m) } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *HTTPIngressRuleValue) GetPaths() []*HTTPIngressPath { if m != nil { @@ -981,7 +1126,7 @@ type HostPortRange struct { func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (m *HostPortRange) String() string { return proto.CompactTextString(m) } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{20} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *HostPortRange) GetMin() int32 { if m != nil && m.Min != nil { @@ -1009,7 +1154,7 @@ type IDRange struct { func (m *IDRange) Reset() { *m = IDRange{} } func (m *IDRange) String() string { return proto.CompactTextString(m) } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IDRange) GetMin() int64 { if m != nil && m.Min != nil { @@ -1025,21 +1170,56 @@ func (m *IDRange) GetMax() int64 { return 0 } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. +type IPBlock struct { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" + Cidr *string `protobuf:"bytes,1,opt,name=cidr" json:"cidr,omitempty"` + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" + // Except values will be rejected if they are outside the CIDR range + // +optional + Except []string `protobuf:"bytes,2,rep,name=except" json:"except,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (m *IPBlock) String() string { return proto.CompactTextString(m) } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *IPBlock) GetCidr() string { + if m != nil && m.Cidr != nil { + return *m.Cidr + } + return "" +} + +func (m *IPBlock) GetExcept() []string { + if m != nil { + return m.Except + } + return nil +} + // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. type Ingress struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *IngressSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is the current state of the Ingress. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *IngressStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1048,9 +1228,9 @@ type Ingress struct { func (m *Ingress) Reset() { *m = Ingress{} } func (m *Ingress) String() string { return proto.CompactTextString(m) } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } -func (m *Ingress) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Ingress) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -1076,14 +1256,14 @@ type IngressBackend struct { // Specifies the name of the referenced service. ServiceName *string `protobuf:"bytes,1,opt,name=serviceName" json:"serviceName,omitempty"` // Specifies the port of the referenced service. - ServicePort *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=servicePort" json:"servicePort,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServicePort *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=servicePort" json:"servicePort,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (m *IngressBackend) String() string { return proto.CompactTextString(m) } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressBackend) GetServiceName() string { if m != nil && m.ServiceName != nil { @@ -1092,7 +1272,7 @@ func (m *IngressBackend) GetServiceName() string { return "" } -func (m *IngressBackend) GetServicePort() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *IngressBackend) GetServicePort() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.ServicePort } @@ -1102,9 +1282,9 @@ func (m *IngressBackend) GetServicePort() *k8s_io_kubernetes_pkg_util_intstr.Int // IngressList is a collection of Ingress. type IngressList struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is the list of Ingress. 
Items []*Ingress `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1113,9 +1293,9 @@ type IngressList struct { func (m *IngressList) Reset() { *m = IngressList{} } func (m *IngressList) String() string { return proto.CompactTextString(m) } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } -func (m *IngressList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *IngressList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -1160,7 +1340,7 @@ type IngressRule struct { func (m *IngressRule) Reset() { *m = IngressRule{} } func (m *IngressRule) String() string { return proto.CompactTextString(m) } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressRule) GetHost() string { if m != nil && m.Host != nil { @@ -1189,7 +1369,7 @@ type IngressRuleValue struct { func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (m *IngressRuleValue) String() string { return proto.CompactTextString(m) } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *IngressRuleValue) GetHttp() *HTTPIngressRuleValue { if m != nil { @@ -1223,7 +1403,7 @@ type IngressSpec struct { func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (m *IngressSpec) String() string { return proto.CompactTextString(m) } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *IngressSpec) GetBackend() *IngressBackend { if m != nil { @@ -1250,16 +1430,16 @@ func (m *IngressSpec) GetRules() []*IngressRule { type IngressStatus struct { // LoadBalancer contains the current status of the load-balancer. 
// +optional - LoadBalancer *k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus `protobuf:"bytes,1,opt,name=loadBalancer" json:"loadBalancer,omitempty"` - XXX_unrecognized []byte `json:"-"` + LoadBalancer *k8s_io_api_core_v1.LoadBalancerStatus `protobuf:"bytes,1,opt,name=loadBalancer" json:"loadBalancer,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (m *IngressStatus) String() string { return proto.CompactTextString(m) } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } -func (m *IngressStatus) GetLoadBalancer() *k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus { +func (m *IngressStatus) GetLoadBalancer() *k8s_io_api_core_v1.LoadBalancerStatus { if m != nil { return m.LoadBalancer } @@ -1287,7 +1467,7 @@ type IngressTLS struct { func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (m *IngressTLS) String() string { return proto.CompactTextString(m) } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *IngressTLS) GetHosts() []string { if m != nil { @@ -1303,11 +1483,13 @@ func (m *IngressTLS) GetSecretName() string { return "" } +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. +// NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Specification of the desired behavior for this NetworkPolicy. // +optional Spec *NetworkPolicySpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -1317,9 +1499,9 @@ type NetworkPolicy struct { func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (m *NetworkPolicy) String() string { return proto.CompactTextString(m) } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } -func (m *NetworkPolicy) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *NetworkPolicy) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -1333,24 +1515,64 @@ func (m *NetworkPolicy) GetSpec() *NetworkPolicySpec { return nil } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +type NetworkPolicyEgressRule struct { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. 
If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []*NetworkPolicyPort `protobuf:"bytes,1,rep,name=ports" json:"ports,omitempty"` + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + To []*NetworkPolicyPeer `protobuf:"bytes,2,rep,name=to" json:"to,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (m *NetworkPolicyEgressRule) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{34} +} + +func (m *NetworkPolicyEgressRule) GetPorts() []*NetworkPolicyPort { + if m != nil { + return m.Ports + } + return nil +} + +func (m *NetworkPolicyEgressRule) GetTo() []*NetworkPolicyPeer { + if m != nil { + return m.To + } + return nil +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. type NetworkPolicyIngressRule struct { // List of ports which should be made accessible on the pods selected for this rule. // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). - // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows traffic // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. // +optional Ports []*NetworkPolicyPort `protobuf:"bytes,1,rep,name=ports" json:"ports,omitempty"` // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). // If this field is present and contains at least on item, this rule allows traffic only if the // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. 
// +optional From []*NetworkPolicyPeer `protobuf:"bytes,2,rep,name=from" json:"from,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1360,7 +1582,7 @@ func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRu func (m *NetworkPolicyIngressRule) String() string { return proto.CompactTextString(m) } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptorGenerated, []int{35} } func (m *NetworkPolicyIngressRule) GetPorts() []*NetworkPolicyPort { @@ -1377,12 +1599,13 @@ func (m *NetworkPolicyIngressRule) GetFrom() []*NetworkPolicyPeer { return nil } +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. type NetworkPolicyList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of schema objects. Items []*NetworkPolicy `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1391,9 +1614,9 @@ type NetworkPolicyList struct { func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (m *NetworkPolicyList) String() string { return proto.CompactTextString(m) } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } -func (m *NetworkPolicyList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *NetworkPolicyList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -1407,42 +1630,52 @@ func (m *NetworkPolicyList) GetItems() []*NetworkPolicy { return nil } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. type NetworkPolicyPeer struct { // This is a label selector which selects Pods in this namespace. // This field follows standard label selector semantics. - // If not provided, this selector selects no pods. // If present but empty, this selector selects all pods in this namespace. // +optional - PodSelector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=podSelector" json:"podSelector,omitempty"` + PodSelector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=podSelector" json:"podSelector,omitempty"` // Selects Namespaces using cluster scoped-labels. This // matches all pods in all namespaces selected by this label selector. // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. // If present but empty, this selector selects all namespaces. 
// +optional - NamespaceSelector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=namespaceSelector" json:"namespaceSelector,omitempty"` - XXX_unrecognized []byte `json:"-"` + NamespaceSelector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=namespaceSelector" json:"namespaceSelector,omitempty"` + // IPBlock defines policy on a particular IPBlock + // +optional + IpBlock *IPBlock `protobuf:"bytes,3,opt,name=ipBlock" json:"ipBlock,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (m *NetworkPolicyPeer) String() string { return proto.CompactTextString(m) } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } -func (m *NetworkPolicyPeer) GetPodSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *NetworkPolicyPeer) GetPodSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.PodSelector } return nil } -func (m *NetworkPolicyPeer) GetNamespaceSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *NetworkPolicyPeer) GetNamespaceSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.NamespaceSelector } return nil } +func (m *NetworkPolicyPeer) GetIpBlock() *IPBlock { + if m != nil { + return m.IpBlock + } + return nil +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. type NetworkPolicyPort struct { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -1454,14 +1687,14 @@ type NetworkPolicyPort struct { // If present, only traffic on the specified protocol AND port // will be matched. // +optional - Port *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=port" json:"port,omitempty"` - XXX_unrecognized []byte `json:"-"` + Port *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=port" json:"port,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (m *NetworkPolicyPort) String() string { return proto.CompactTextString(m) } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *NetworkPolicyPort) GetProtocol() string { if m != nil && m.Protocol != nil { @@ -1470,39 +1703,60 @@ func (m *NetworkPolicyPort) GetProtocol() string { return "" } -func (m *NetworkPolicyPort) GetPort() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *NetworkPolicyPort) GetPort() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.Port } return nil } +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. type NetworkPolicySpec struct { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. Multiple network policies can select the // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. 
// An empty podSelector matches all pods in this namespace. - PodSelector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=podSelector" json:"podSelector,omitempty"` + PodSelector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=podSelector" json:"podSelector,omitempty"` // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // OR if the traffic source is the pod's local node, // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. + // If this field is empty then this NetworkPolicy does not allow any traffic + // (and serves solely to ensure that the pods it selects are isolated by default). + // +optional + Ingress []*NetworkPolicyIngressRule `protobuf:"bytes,2,rep,name=ingress" json:"ingress,omitempty"` + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + Egress []*NetworkPolicyEgressRule `protobuf:"bytes,3,rep,name=egress" json:"egress,omitempty"` + // List of rule types that the NetworkPolicy relates to. + // Valid options are Ingress, Egress, or Ingress,Egress. + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that includes "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). 
+ // This field is beta-level in 1.8 // +optional - Ingress []*NetworkPolicyIngressRule `protobuf:"bytes,2,rep,name=ingress" json:"ingress,omitempty"` - XXX_unrecognized []byte `json:"-"` + PolicyTypes []string `protobuf:"bytes,4,rep,name=policyTypes" json:"policyTypes,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (m *NetworkPolicySpec) String() string { return proto.CompactTextString(m) } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } -func (m *NetworkPolicySpec) GetPodSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *NetworkPolicySpec) GetPodSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.PodSelector } @@ -1516,13 +1770,27 @@ func (m *NetworkPolicySpec) GetIngress() []*NetworkPolicyIngressRule { return nil } +func (m *NetworkPolicySpec) GetEgress() []*NetworkPolicyEgressRule { + if m != nil { + return m.Egress + } + return nil +} + +func (m *NetworkPolicySpec) GetPolicyTypes() []string { + if m != nil { + return m.PolicyTypes + } + return nil +} + // Pod Security Policy governs the ability to make requests that affect the Security Context // that will be applied to a pod and container. type PodSecurityPolicy struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // spec defines the policy enforced. // +optional Spec *PodSecurityPolicySpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` @@ -1532,9 +1800,9 @@ type PodSecurityPolicy struct { func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (m *PodSecurityPolicy) String() string { return proto.CompactTextString(m) } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } -func (m *PodSecurityPolicy) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PodSecurityPolicy) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -1551,9 +1819,9 @@ func (m *PodSecurityPolicy) GetSpec() *PodSecurityPolicySpec { // Pod Security Policy List is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of schema objects. 
Items []*PodSecurityPolicy `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1562,9 +1830,9 @@ type PodSecurityPolicyList struct { func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (m *PodSecurityPolicyList) String() string { return proto.CompactTextString(m) } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } -func (m *PodSecurityPolicyList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *PodSecurityPolicyList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -1584,8 +1852,9 @@ type PodSecurityPolicySpec struct { // +optional Privileged *bool `protobuf:"varint,1,opt,name=privileged" json:"privileged,omitempty"` // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional DefaultAddCapabilities []string `protobuf:"bytes,2,rep,name=defaultAddCapabilities" json:"defaultAddCapabilities,omitempty"` // RequiredDropCapabilities are the capabilities that will be dropped from the container. These @@ -1627,14 +1896,30 @@ type PodSecurityPolicySpec struct { // If set to false the container may run with a read only root file system if it wishes but it // will not be forced to. // +optional - ReadOnlyRootFilesystem *bool `protobuf:"varint,14,opt,name=readOnlyRootFilesystem" json:"readOnlyRootFilesystem,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReadOnlyRootFilesystem *bool `protobuf:"varint,14,opt,name=readOnlyRootFilesystem" json:"readOnlyRootFilesystem,omitempty"` + // DefaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + DefaultAllowPrivilegeEscalation *bool `protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation" json:"defaultAllowPrivilegeEscalation,omitempty"` + // AllowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + AllowPrivilegeEscalation *bool `protobuf:"varint,16,opt,name=allowPrivilegeEscalation" json:"allowPrivilegeEscalation,omitempty"` + // is a white list of allowed host paths. Empty indicates that all host paths may be used. + // +optional + AllowedHostPaths []*AllowedHostPath `protobuf:"bytes,17,rep,name=allowedHostPaths" json:"allowedHostPaths,omitempty"` + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. 
+ // +optional + AllowedFlexVolumes []*AllowedFlexVolume `protobuf:"bytes,18,rep,name=allowedFlexVolumes" json:"allowedFlexVolumes,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (m *PodSecurityPolicySpec) String() string { return proto.CompactTextString(m) } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *PodSecurityPolicySpec) GetPrivileged() bool { if m != nil && m.Privileged != nil { @@ -1734,22 +2019,52 @@ func (m *PodSecurityPolicySpec) GetReadOnlyRootFilesystem() bool { return false } -// ReplicaSet represents the configuration of a ReplicaSet. +func (m *PodSecurityPolicySpec) GetDefaultAllowPrivilegeEscalation() bool { + if m != nil && m.DefaultAllowPrivilegeEscalation != nil { + return *m.DefaultAllowPrivilegeEscalation + } + return false +} + +func (m *PodSecurityPolicySpec) GetAllowPrivilegeEscalation() bool { + if m != nil && m.AllowPrivilegeEscalation != nil { + return *m.AllowPrivilegeEscalation + } + return false +} + +func (m *PodSecurityPolicySpec) GetAllowedHostPaths() []*AllowedHostPath { + if m != nil { + return m.AllowedHostPaths + } + return nil +} + +func (m *PodSecurityPolicySpec) GetAllowedFlexVolumes() []*AllowedFlexVolume { + if m != nil { + return m.AllowedFlexVolumes + } + return nil +} + +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec *ReplicaSetSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is the most recently observed status of the ReplicaSet. // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *ReplicaSetStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1758,9 +2073,9 @@ type ReplicaSet struct { func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (m *ReplicaSet) String() string { return proto.CompactTextString(m) } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } -func (m *ReplicaSet) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ReplicaSet) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -1789,7 +2104,7 @@ type ReplicaSetCondition struct { Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // The last time the condition transitioned from one status to another. // +optional - LastTransitionTime *k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` + LastTransitionTime *k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,3,opt,name=lastTransitionTime" json:"lastTransitionTime,omitempty"` // The reason for the condition's last transition. // +optional Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` @@ -1802,7 +2117,7 @@ type ReplicaSetCondition struct { func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (m *ReplicaSetCondition) String() string { return proto.CompactTextString(m) } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *ReplicaSetCondition) GetType() string { if m != nil && m.Type != nil { @@ -1818,7 +2133,7 @@ func (m *ReplicaSetCondition) GetStatus() string { return "" } -func (m *ReplicaSetCondition) GetLastTransitionTime() *k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *ReplicaSetCondition) GetLastTransitionTime() *k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.LastTransitionTime } @@ -1842,11 +2157,11 @@ func (m *ReplicaSetCondition) GetMessage() string { // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // List of ReplicaSets. 
- // More info: http://kubernetes.io/docs/user-guide/replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller Items []*ReplicaSet `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -1854,9 +2169,9 @@ type ReplicaSetList struct { func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (m *ReplicaSetList) String() string { return proto.CompactTextString(m) } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } -func (m *ReplicaSetList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ReplicaSetList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -1875,7 +2190,7 @@ type ReplicaSetSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller // +optional Replicas *int32 `protobuf:"varint,1,opt,name=replicas" json:"replicas,omitempty"` // Minimum number of seconds for which a newly created pod should be ready @@ -1886,21 +2201,21 @@ type ReplicaSetSpec struct { // Selector is a label query over pods that should match the replica count. // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. 
- // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - Template *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` - XXX_unrecognized []byte `json:"-"` + Template *k8s_io_api_core_v1.PodTemplateSpec `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (m *ReplicaSetSpec) String() string { return proto.CompactTextString(m) } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *ReplicaSetSpec) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -1916,14 +2231,14 @@ func (m *ReplicaSetSpec) GetMinReadySeconds() int32 { return 0 } -func (m *ReplicaSetSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *ReplicaSetSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } return nil } -func (m *ReplicaSetSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec { +func (m *ReplicaSetSpec) GetTemplate() *k8s_io_api_core_v1.PodTemplateSpec { if m != nil { return m.Template } @@ -1933,7 +2248,7 @@ func (m *ReplicaSetSpec) GetTemplate() *k8s_io_kubernetes_pkg_api_v1.PodTemplate // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { // Replicas is the most recently observed number of replicas. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas *int32 `protobuf:"varint,1,opt,name=replicas" json:"replicas,omitempty"` // The number of pods that have labels matching the labels of the pod template of the replicaset. // +optional @@ -1949,6 +2264,8 @@ type ReplicaSetStatus struct { ObservedGeneration *int64 `protobuf:"varint,3,opt,name=observedGeneration" json:"observedGeneration,omitempty"` // Represents the latest available observations of a replica set's current state. 
// +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []*ReplicaSetCondition `protobuf:"bytes,6,rep,name=conditions" json:"conditions,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -1956,7 +2273,7 @@ type ReplicaSetStatus struct { func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (m *ReplicaSetStatus) String() string { return proto.CompactTextString(m) } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *ReplicaSetStatus) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -2009,11 +2326,12 @@ func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControlle func (m *ReplicationControllerDummy) String() string { return proto.CompactTextString(m) } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{44} + return fileDescriptorGenerated, []int{48} } +// DEPRECATED. type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollbck to the last revision. + // The revision to rollback to. If set to 0, rollback to the last revision. // +optional Revision *int64 `protobuf:"varint,1,opt,name=revision" json:"revision,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -2022,7 +2340,7 @@ type RollbackConfig struct { func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (m *RollbackConfig) String() string { return proto.CompactTextString(m) } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *RollbackConfig) GetRevision() int64 { if m != nil && m.Revision != nil { @@ -2048,16 +2366,16 @@ type RollingUpdateDaemonSet struct { // that at least 70% of original number of DaemonSet pods are available at // all times during the update. // +optional - MaxUnavailable *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=maxUnavailable" json:"maxUnavailable,omitempty"` - XXX_unrecognized []byte `json:"-"` + MaxUnavailable *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=maxUnavailable" json:"maxUnavailable,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (m *RollingUpdateDaemonSet) String() string { return proto.CompactTextString(m) } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } -func (m *RollingUpdateDaemonSet) GetMaxUnavailable() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *RollingUpdateDaemonSet) GetMaxUnavailable() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.MaxUnavailable } @@ -2077,7 +2395,7 @@ type RollingUpdateDeployment struct { // that the total number of pods available at all times during the update is at // least 70% of desired pods. 
// +optional - MaxUnavailable *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=maxUnavailable" json:"maxUnavailable,omitempty"` + MaxUnavailable *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=maxUnavailable" json:"maxUnavailable,omitempty"` // The maximum number of pods that can be scheduled above the desired number of // pods. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). @@ -2090,25 +2408,25 @@ // new RC can be scaled up further, ensuring that the total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - MaxSurge *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=maxSurge" json:"maxSurge,omitempty"` - XXX_unrecognized []byte `json:"-"` + MaxSurge *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,2,opt,name=maxSurge" json:"maxSurge,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (m *RollingUpdateDeployment) String() string { return proto.CompactTextString(m) } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{47} + return fileDescriptorGenerated, []int{51} } -func (m *RollingUpdateDeployment) GetMaxUnavailable() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *RollingUpdateDeployment) GetMaxUnavailable() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.MaxUnavailable } return nil } -func (m *RollingUpdateDeployment) GetMaxSurge() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *RollingUpdateDeployment) GetMaxSurge() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.MaxSurge } @@ -2129,7 +2447,7 @@ func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptio func (m *RunAsUserStrategyOptions) String() string { return proto.CompactTextString(m) } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptorGenerated, []int{52} } func (m *RunAsUserStrategyOptions) GetRule() string { @@ -2151,16 +2469,16 @@ type SELinuxStrategyOptions struct { // type is the strategy that will dictate the allowable labels that may be set. 
Rule *string `protobuf:"bytes,1,opt,name=rule" json:"rule,omitempty"` // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional - SeLinuxOptions *k8s_io_kubernetes_pkg_api_v1.SELinuxOptions `protobuf:"bytes,2,opt,name=seLinuxOptions" json:"seLinuxOptions,omitempty"` - XXX_unrecognized []byte `json:"-"` + SeLinuxOptions *k8s_io_api_core_v1.SELinuxOptions `protobuf:"bytes,2,opt,name=seLinuxOptions" json:"seLinuxOptions,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (m *SELinuxStrategyOptions) String() string { return proto.CompactTextString(m) } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *SELinuxStrategyOptions) GetRule() string { if m != nil && m.Rule != nil { @@ -2169,7 +2487,7 @@ func (m *SELinuxStrategyOptions) GetRule() string { return "" } -func (m *SELinuxStrategyOptions) GetSeLinuxOptions() *k8s_io_kubernetes_pkg_api_v1.SELinuxOptions { +func (m *SELinuxStrategyOptions) GetSeLinuxOptions() *k8s_io_api_core_v1.SELinuxOptions { if m != nil { return m.SeLinuxOptions } @@ -2178,13 +2496,13 @@ func (m *SELinuxStrategyOptions) GetSeLinuxOptions() *k8s_io_kubernetes_pkg_api_ // represents a scaling request for a resource. type Scale struct { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec *ScaleSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. 
// +optional Status *ScaleStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -2193,9 +2511,9 @@ type Scale struct { func (m *Scale) Reset() { *m = Scale{} } func (m *Scale) String() string { return proto.CompactTextString(m) } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } -func (m *Scale) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Scale) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -2227,7 +2545,7 @@ type ScaleSpec struct { func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *ScaleSpec) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -2248,7 +2566,7 @@ type ScaleStatus struct { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional TargetSelector *string `protobuf:"bytes,3,opt,name=targetSelector" json:"targetSelector,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -2257,7 +2575,7 @@ type ScaleStatus struct { func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *ScaleStatus) GetReplicas() int32 { if m != nil && m.Replicas != nil { @@ -2296,7 +2614,7 @@ func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalG func (m *SupplementalGroupsStrategyOptions) String() string { return proto.CompactTextString(m) } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{53} + return fileDescriptorGenerated, []int{57} } func (m *SupplementalGroupsStrategyOptions) GetRule() string { @@ -2313,199 +2631,67 @@ func (m *SupplementalGroupsStrategyOptions) GetRanges() []*IDRange { return nil } -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - // Standard object metadata - // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Description is the description of this object. 
- // +optional - Description *string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - // Versions are versions for this third party object - // +optional - Versions []*APIVersion `protobuf:"bytes,3,rep,name=versions" json:"versions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } -func (m *ThirdPartyResource) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResource) ProtoMessage() {} -func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } - -func (m *ThirdPartyResource) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *ThirdPartyResource) GetDescription() string { - if m != nil && m.Description != nil { - return *m.Description - } - return "" -} - -func (m *ThirdPartyResource) GetVersions() []*APIVersion { - if m != nil { - return m.Versions - } - return nil -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - // Standard object metadata. - // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Data is the raw JSON data for this data. - // +optional - Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } -func (m *ThirdPartyResourceData) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResourceData) ProtoMessage() {} -func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } - -func (m *ThirdPartyResourceData) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *ThirdPartyResourceData) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -type ThirdPartyResourceDataList struct { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Items is the list of ThirdpartyResourceData. - Items []*ThirdPartyResourceData `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } -func (m *ThirdPartyResourceDataList) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResourceDataList) ProtoMessage() {} -func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} -} - -func (m *ThirdPartyResourceDataList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *ThirdPartyResourceDataList) GetItems() []*ThirdPartyResourceData { - if m != nil { - return m.Items - } - return nil -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -type ThirdPartyResourceList struct { - // Standard list metadata. 
- // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Items is the list of ThirdPartyResources. - Items []*ThirdPartyResource `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } -func (m *ThirdPartyResourceList) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResourceList) ProtoMessage() {} -func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } - -func (m *ThirdPartyResourceList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *ThirdPartyResourceList) GetItems() []*ThirdPartyResource { - if m != nil { - return m.Items - } - return nil -} - func init() { - proto.RegisterType((*APIVersion)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.APIVersion") - proto.RegisterType((*CustomMetricCurrentStatus)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.CustomMetricCurrentStatus") - proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.CustomMetricCurrentStatusList") - proto.RegisterType((*CustomMetricTarget)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.CustomMetricTarget") - proto.RegisterType((*CustomMetricTargetList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.CustomMetricTargetList") - proto.RegisterType((*DaemonSet)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DaemonSet") - proto.RegisterType((*DaemonSetList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DaemonSetList") - proto.RegisterType((*DaemonSetSpec)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DaemonSetSpec") - proto.RegisterType((*DaemonSetStatus)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DaemonSetStatus") - proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DaemonSetUpdateStrategy") - proto.RegisterType((*Deployment)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HTTPIngressPath)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.HTTPIngressPath") - proto.RegisterType((*HTTPIngressRuleValue)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IDRange") - 
proto.RegisterType((*Ingress)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.Ingress") - proto.RegisterType((*IngressBackend)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IngressBackend") - proto.RegisterType((*IngressList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IngressList") - proto.RegisterType((*IngressRule)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IngressRule") - proto.RegisterType((*IngressRuleValue)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IngressRuleValue") - proto.RegisterType((*IngressSpec)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IngressSpec") - proto.RegisterType((*IngressStatus)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IngressStatus") - proto.RegisterType((*IngressTLS)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.IngressTLS") - proto.RegisterType((*NetworkPolicy)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.NetworkPolicy") - proto.RegisterType((*NetworkPolicyIngressRule)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.NetworkPolicyIngressRule") - proto.RegisterType((*NetworkPolicyList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.NetworkPolicyList") - proto.RegisterType((*NetworkPolicyPeer)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.NetworkPolicyPeer") - proto.RegisterType((*NetworkPolicyPort)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.NetworkPolicyPort") - proto.RegisterType((*NetworkPolicySpec)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*PodSecurityPolicy)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*ReplicaSet)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ReplicaSet") - proto.RegisterType((*ReplicaSetCondition)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ReplicaSetCondition") - proto.RegisterType((*ReplicaSetList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ReplicaSetList") - proto.RegisterType((*ReplicaSetSpec)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ReplicaSetStatus") - proto.RegisterType((*ReplicationControllerDummy)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ReplicationControllerDummy") - proto.RegisterType((*RollbackConfig)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.RollbackConfig") - proto.RegisterType((*RollingUpdateDaemonSet)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.RollingUpdateDaemonSet") - proto.RegisterType((*RollingUpdateDeployment)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*Scale)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), 
"github.com/ericchiang.k8s.apis.extensions.v1beta1.ScaleStatus") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") - proto.RegisterType((*ThirdPartyResource)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ThirdPartyResource") - proto.RegisterType((*ThirdPartyResourceData)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ThirdPartyResourceData") - proto.RegisterType((*ThirdPartyResourceDataList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ThirdPartyResourceDataList") - proto.RegisterType((*ThirdPartyResourceList)(nil), "github.com/ericchiang.k8s.apis.extensions.v1beta1.ThirdPartyResourceList") -} -func (m *APIVersion) Marshal() (dAtA []byte, err error) { + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") + proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") + proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") + proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") + proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") + proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.extensions.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") + proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") + proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), 
"k8s.io.api.extensions.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.extensions.v1beta1.IngressTLS") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyEgressRule") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") +} +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2515,16 +2701,16 @@ func (m *APIVersion) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *APIVersion) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Name != nil { + if m.Driver != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Driver))) + i += 
copy(dAtA[i:], *m.Driver) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -2532,7 +2718,7 @@ func (m *APIVersion) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2542,15 +2728,42 @@ func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Name != nil { + if m.PathPrefix != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathPrefix))) + i += copy(dAtA[i:], *m.PathPrefix) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) i += copy(dAtA[i:], *m.Name) } if m.Value != nil { @@ -2723,6 +2936,61 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Status))) + i += copy(dAtA[i:], *m.Status) + } + if m.LastTransitionTime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Reason != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i += copy(dAtA[i:], *m.Reason) + } + if m.Message != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2742,11 +3010,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n6, err := m.Metadata.MarshalTo(dAtA[i:]) + n7, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -2785,31 +3053,31 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + n8, err := 
m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } if m.Template != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) + n9, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 } if m.UpdateStrategy != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n9, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } if m.MinReadySeconds != nil { dAtA[i] = 0x20 @@ -2821,6 +3089,11 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TemplateGeneration)) } + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -2882,6 +3155,23 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.NumberUnavailable)) } + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -2913,11 +3203,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n10, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -2944,31 +3234,31 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n11, err := m.Metadata.MarshalTo(dAtA[i:]) + n12, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) + n13, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n13, err := m.Status.MarshalTo(dAtA[i:]) + n14, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3019,21 +3309,21 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n14, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n15 } if m.LastTransitionTime != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n15, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n16 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], 
m.XXX_unrecognized) @@ -3060,11 +3350,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n16, err := m.Metadata.MarshalTo(dAtA[i:]) + n17, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -3126,11 +3416,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n17, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3162,31 +3452,31 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } if m.Template != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n20 } if m.Strategy != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(dAtA[i:]) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n21 } if m.MinReadySeconds != nil { dAtA[i] = 0x28 @@ -3212,11 +3502,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n21, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -3286,6 +3576,11 @@ func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadyReplicas)) } + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -3317,11 +3612,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3393,11 +3688,11 @@ func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n23, err := m.Backend.MarshalTo(dAtA[i:]) + n24, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3500,6 +3795,48 @@ func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *IPBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cidr 
!= nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Cidr))) + i += copy(dAtA[i:], *m.Cidr) + } + if len(m.Except) > 0 { + for _, s := range m.Except { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3519,31 +3856,31 @@ func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n24, err := m.Metadata.MarshalTo(dAtA[i:]) + n25, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n25, err := m.Spec.MarshalTo(dAtA[i:]) + n26, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n26, err := m.Status.MarshalTo(dAtA[i:]) + n27, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3576,11 +3913,11 @@ func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n27, err := m.ServicePort.MarshalTo(dAtA[i:]) + n28, err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3607,11 +3944,11 @@ func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n28, err := m.Metadata.MarshalTo(dAtA[i:]) + n29, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -3656,11 +3993,11 @@ func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n29, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3687,11 +4024,11 @@ func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Http.Size())) - n30, err := m.Http.MarshalTo(dAtA[i:]) + n31, err := m.Http.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3718,11 +4055,11 @@ func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n31, err := m.Backend.MarshalTo(dAtA[i:]) + n32, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } if len(m.Tls) > 0 { for _, msg := range m.Tls { @@ -3773,11 +4110,11 @@ func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n32, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n33, err := 
m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3846,21 +4183,66 @@ func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n33, err := m.Metadata.MarshalTo(dAtA[i:]) + n34, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) + n35, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.To) > 0 { + for _, msg := range m.To { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3932,11 +4314,11 @@ func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n35, err := m.Metadata.MarshalTo(dAtA[i:]) + n36, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -3975,21 +4357,31 @@ func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n36, err := m.PodSelector.MarshalTo(dAtA[i:]) + n37, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 } if m.NamespaceSelector != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n37, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 + } + if m.IpBlock != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IpBlock.Size())) + n39, err := m.IpBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4022,11 +4414,11 @@ func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n38, err := m.Port.MarshalTo(dAtA[i:]) + n40, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n40 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4053,11 +4445,11 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n39, err := m.PodSelector.MarshalTo(dAtA[i:]) + n41, err := 
m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n41 } if len(m.Ingress) > 0 { for _, msg := range m.Ingress { @@ -4071,11 +4463,38 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += n } } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} + if len(m.Egress) > 0 { + for _, msg := range m.Egress { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -4096,21 +4515,21 @@ func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n40, err := m.Metadata.MarshalTo(dAtA[i:]) + n42, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n42 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n41, err := m.Spec.MarshalTo(dAtA[i:]) + n43, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n43 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4137,11 +4556,11 @@ func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n42, err := m.Metadata.MarshalTo(dAtA[i:]) + n44, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n44 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -4292,41 +4711,41 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SeLinux.Size())) - n43, err := m.SeLinux.MarshalTo(dAtA[i:]) + n45, err := m.SeLinux.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n45 } if m.RunAsUser != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n44, err := m.RunAsUser.MarshalTo(dAtA[i:]) + n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n46 } if m.SupplementalGroups != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n45, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n47 } if m.FsGroup != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FsGroup.Size())) - n46, err := m.FsGroup.MarshalTo(dAtA[i:]) + n48, err := m.FsGroup.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n48 } if m.ReadOnlyRootFilesystem != nil { dAtA[i] = 0x70 @@ -4338,6 +4757,56 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.DefaultAllowPrivilegeEscalation != nil { + dAtA[i] = 0x78 + i++ + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i++ + } + if len(m.AllowedHostPaths) > 0 { + for _, msg := range m.AllowedHostPaths { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -4363,31 +4832,31 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n47, err := m.Metadata.MarshalTo(dAtA[i:]) + n49, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n49 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n48, err := m.Spec.MarshalTo(dAtA[i:]) + n50, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n50 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n49, err := m.Status.MarshalTo(dAtA[i:]) + n51, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n51 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4426,11 +4895,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n50, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n52 } if m.Reason != nil { dAtA[i] = 0x22 @@ -4469,11 +4938,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n51, err := m.Metadata.MarshalTo(dAtA[i:]) + n53, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n53 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -4517,21 +4986,21 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n52, err := m.Selector.MarshalTo(dAtA[i:]) + n54, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n54 } if m.Template != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n53, err := m.Template.MarshalTo(dAtA[i:]) + n55, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n55 } if m.MinReadySeconds != nil { dAtA[i] = 0x20 @@ -4668,11 +5137,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n54, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n56 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4699,21 +5168,21 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n57, err := 
m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n57 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n56, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n58 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4785,11 +5254,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SeLinuxOptions.Size())) - n57, err := m.SeLinuxOptions.MarshalTo(dAtA[i:]) + n59, err := m.SeLinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n59 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4816,31 +5285,31 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n58, err := m.Metadata.MarshalTo(dAtA[i:]) + n60, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n60 } if m.Spec != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n59, err := m.Spec.MarshalTo(dAtA[i:]) + n61, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n61 } if m.Status != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n60, err := m.Status.MarshalTo(dAtA[i:]) + n62, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n62 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -4962,227 +5431,118 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func (m *ThirdPartyResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return offset + 1 } - -func (m *ThirdPartyResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i +func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n61, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n61 - } - if m.Description != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description))) - i += copy(dAtA[i:], *m.Description) - } - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.Driver != nil { + l = len(*m.Driver) + n += 1 + l + sovGenerated(uint64(l)) } if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + n += len(m.XXX_unrecognized) } - return i, nil + return n } -func (m *ThirdPartyResourceData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *AllowedHostPath) Size() (n int) { + var l int + _ = l + if m.PathPrefix != nil { + l = len(*m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n } -func (m *ThirdPartyResourceData) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i +func (m *CustomMetricCurrentStatus) Size() (n int) { var l int _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n62, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) } - if m.Data != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) } if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + n += len(m.XXX_unrecognized) } - return i, nil + return n } -func (m *ThirdPartyResourceDataList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *CustomMetricCurrentStatusList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n } -func (m *ThirdPartyResourceDataList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i +func (m *CustomMetricTarget) Size() (n int) { var l int _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n63, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) } if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + n += len(m.XXX_unrecognized) } - return i, nil + return n } -func (m *ThirdPartyResourceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *CustomMetricTargetList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n } -func (m *ThirdPartyResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i +func (m *DaemonSet) Size() (n int) { var l int _ = l if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n64, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - 
dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *APIVersion) Size() (n int) { - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) + l = m.Metadata.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CustomMetricCurrentStatus) Size() (n int) { - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) + if m.Spec != nil { + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Value != nil { - l = m.Value.Size() + if m.Status != nil { + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) } if m.XXX_unrecognized != nil { @@ -5191,66 +5551,27 @@ func (m *CustomMetricCurrentStatus) Size() (n int) { return n } -func (m *CustomMetricCurrentStatusList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CustomMetricTarget) Size() (n int) { +func (m *DaemonSetCondition) Size() (n int) { var l int _ = l - if m.Name != nil { - l = len(*m.Name) + if m.Type != nil { + l = len(*m.Type) n += 1 + l + sovGenerated(uint64(l)) } - if m.Value != nil { - l = m.Value.Size() + if m.Status != nil { + l = len(*m.Status) n += 1 + l + sovGenerated(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CustomMetricTargetList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DaemonSet) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Spec != nil { - l = m.Spec.Size() + if m.Reason != nil { + l = len(*m.Reason) n += 1 + l + sovGenerated(uint64(l)) } - if m.Status != nil { - l = m.Status.Size() + if m.Message != nil { + l = len(*m.Message) n += 1 + l + sovGenerated(uint64(l)) } if m.XXX_unrecognized != nil { @@ -5299,6 +5620,9 @@ func (m *DaemonSetSpec) Size() (n int) { if m.TemplateGeneration != nil { n += 1 + sovGenerated(uint64(*m.TemplateGeneration)) } + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5332,6 +5656,15 @@ func (m *DaemonSetStatus) Size() (n int) { if m.NumberUnavailable != nil { n += 1 + sovGenerated(uint64(*m.NumberUnavailable)) } + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5520,6 +5853,9 @@ func (m 
*DeploymentStatus) Size() (n int) { if m.ReadyReplicas != nil { n += 1 + sovGenerated(uint64(*m.ReadyReplicas)) } + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5624,6 +5960,25 @@ func (m *IDRange) Size() (n int) { return n } +func (m *IPBlock) Size() (n int) { + var l int + _ = l + if m.Cidr != nil { + l = len(*m.Cidr) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Ingress) Size() (n int) { var l int _ = l @@ -5785,6 +6140,27 @@ func (m *NetworkPolicy) Size() (n int) { return n } +func (m *NetworkPolicyEgressRule) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *NetworkPolicyIngressRule) Size() (n int) { var l int _ = l @@ -5836,6 +6212,10 @@ func (m *NetworkPolicyPeer) Size() (n int) { l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.IpBlock != nil { + l = m.IpBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5872,6 +6252,18 @@ func (m *NetworkPolicySpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5978,6 +6370,24 @@ func (m *PodSecurityPolicySpec) Size() (n int) { if m.ReadOnlyRootFilesystem != nil { n += 2 } + if m.DefaultAllowPrivilegeEscalation != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedHostPaths) > 0 { + for _, e := range m.AllowedHostPaths { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6269,84 +6679,6 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { return n } -func (m *ThirdPartyResource) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Description != nil { - l = len(*m.Description) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ThirdPartyResourceData) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ThirdPartyResourceDataList) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ThirdPartyResourceList) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -6360,7 +6692,7 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *APIVersion) Unmarshal(dAtA []byte) error { +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6383,15 +6715,15 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6417,7 +6749,7 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + m.Driver = &s iNdEx = postIndex default: iNdEx = preIndex @@ -6441,7 +6773,7 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { +func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6464,15 +6796,15 @@ func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6498,9 +6830,90 @@ func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + m.PathPrefix = &s iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } @@ -6527,7 +6940,7 @@ func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6723,7 +7136,7 @@ func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_kubernetes_pkg_api_resource.Quantity{} + m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6889,7 +7302,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6983,7 +7396,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7006,17 +7419,17 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int 
+ var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7026,28 +7439,55 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.Status = &s iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7071,11 +7511,73 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LastTransitionTime == nil { + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7098,7 +7600,7 @@ func (m 
*DaemonSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7121,15 +7623,15 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7153,16 +7655,16 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7186,50 +7688,165 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Template == nil { - m.Template = &k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec{} - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.UpdateStrategy == nil { - m.UpdateStrategy = &DaemonSetUpdateStrategy{} - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateStrategy == nil { + m.UpdateStrategy = &DaemonSetUpdateStrategy{} + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -7266,6 +7883,26 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } } m.TemplateGeneration = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7477,6 +8114,57 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.NumberUnavailable = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, &DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7669,7 +8357,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7939,7 +8627,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastUpdateTime == nil { - m.LastUpdateTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastUpdateTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7972,7 +8660,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastTransitionTime == nil { - m.LastTransitionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8056,7 +8744,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8200,51 +8888,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := 
int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8254,41 +8905,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -8421,7 +9111,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8454,7 +9144,7 
@@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Template == nil { - m.Template = &k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec{} + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} } if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8809,6 +9499,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } } m.ReadyReplicas = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9435,6 +10145,116 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } return nil } +func (m *IPBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cidr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Cidr = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -9491,7 +10311,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9671,7 +10491,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ServicePort == nil { - m.ServicePort = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.ServicePort = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9755,7 +10575,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -10214,7 +11034,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LoadBalancer == nil { - m.LoadBalancer = &k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus{} + m.LoadBalancer = &k8s_io_api_core_v1.LoadBalancerStatus{} } if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -10408,7 +11228,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -10469,7 +11289,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10492,10 +11312,10 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10531,7 +11351,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10555,8 +11375,8 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, &NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.To = append(m.To, &NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10582,7 +11402,7 @@ func (m 
*NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10605,15 +11425,15 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10637,16 +11457,14 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, &NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10670,8 +11488,8 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.From = append(m.From, &NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10697,7 +11515,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10720,15 +11538,15 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10752,16 +11570,16 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } - if err := 
m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10785,10 +11603,8 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10814,7 +11630,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10837,17 +11653,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10857,25 +11673,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Protocol = &s + if m.PodSelector == nil { + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10899,10 +11718,43 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field IpBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IpBlock == nil { + m.IpBlock = &IPBlock{} + } + if err := m.IpBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10928,7 +11780,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10951,17 +11803,17 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10971,28 +11823,25 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Protocol = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11016,8 +11865,10 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, &NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Port == nil { + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11043,6 +11894,181 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, &NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, &NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
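The NetworkPolicy changes above add IPBlock (Cidr plus an Except list), NetworkPolicyEgressRule, a third IpBlock field on NetworkPolicyPeer, and Egress (field 3) and PolicyTypes (field 4) on NetworkPolicySpec. As a rough illustration of what the new IPBlock fields express, a CIDR allow range minus excepted sub-ranges, here is a small standalone check; matchesIPBlock and its semantics are an assumption inferred from the field names, not code from this package.

package main

import (
	"fmt"
	"net"
)

// matchesIPBlock reports whether ip falls inside cidr but outside every
// entry in except. Illustrative sketch only, based on the IPBlock message
// (Cidr, Except) decoded above.
func matchesIPBlock(ipStr, cidr string, except []string) (bool, error) {
	ip := net.ParseIP(ipStr)
	if ip == nil {
		return false, fmt.Errorf("invalid IP %q", ipStr)
	}
	_, allowed, err := net.ParseCIDR(cidr)
	if err != nil {
		return false, err
	}
	if !allowed.Contains(ip) {
		return false, nil
	}
	for _, e := range except {
		_, excluded, err := net.ParseCIDR(e)
		if err != nil {
			return false, err
		}
		if excluded.Contains(ip) {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ok, _ := matchesIPBlock("10.0.1.5", "10.0.0.0/16", []string{"10.0.1.0/24"})
	fmt.Println(ok) // false: inside an excepted range
	ok, _ = matchesIPBlock("10.0.2.5", "10.0.0.0/16", []string{"10.0.1.0/24"})
	fmt.Println(ok) // true
}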
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -11099,7 +12125,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11216,7 +12242,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11688,60 +12714,51 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.ReadOnlyRootFilesystem = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11765,16 +12782,14 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedHostPaths = append(m.AllowedHostPaths, 
&AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11798,7 +12813,122 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Spec == nil { + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, &AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { m.Spec = &ReplicaSetSpec{} } if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { @@ -11976,7 +13106,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.LastTransitionTime == nil { - m.LastTransitionTime = &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} + m.LastTransitionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := 
m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -12120,7 +13250,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -12255,7 +13385,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -12288,7 +13418,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Template == nil { - m.Template = &k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec{} + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} } if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -12696,7 +13826,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -12780,7 +13910,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -12813,7 +13943,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13039,7 +14169,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SeLinuxOptions == nil { - m.SeLinuxOptions = &k8s_io_kubernetes_pkg_api_v1.SELinuxOptions{} + m.SeLinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} } if err := m.SeLinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13123,7 +14253,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13363,51 +14493,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13417,41 +14510,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -13617,496 +14749,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *ThirdPartyResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Description = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, &APIVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceDataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &ThirdPartyResourceData{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &ThirdPartyResource{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -14213,170 +14855,179 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/extensions/v1beta1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 2571 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x5a, 0xcb, 0x6f, 0x24, 0x49, - 0xd1, 0xff, 0xaa, 0xdb, 0x1e, 0xb7, 0xc3, 0x6b, 0xcf, 0x4c, 0xee, 0x7c, 0x9e, 0x5e, 0x0b, 0x2c, - 0x53, 0x42, 0xe0, 0xc3, 0x4c, 0x7b, 0x6d, 0xd0, 0x6a, 0x18, 0x76, 0xd8, 0xf5, 0x63, 0x1e, 0x06, - 0x8f, 0xa7, 0x37, 0xbb, 0xc7, 0xb3, 0x2c, 0x5a, 0x56, 0xe9, 0xae, 0x9c, 0x76, 0xe1, 0xea, 0xaa, - 0xda, 0xcc, 0xac, 0x5e, 0xb7, 0x84, 0x96, 0x87, 0x16, 0x21, 0x0e, 0x70, 0x41, 0x42, 0x88, 0x0b, - 0x27, 0x0e, 0x88, 0x13, 0x12, 0x97, 0x15, 0xe2, 0xc4, 0x65, 0x01, 0x21, 0x71, 0x85, 0x13, 0x1a, - 0xfe, 0x01, 0xc4, 0xe3, 0xc4, 0x05, 0x65, 0x56, 0x56, 0x75, 0xbd, 0xda, 0x4c, 0x57, 0xf7, 0xac, - 0xe0, 0x56, 0x95, 0x99, 0xf1, 0x8b, 0x47, 0x46, 0x46, 0x44, 0x46, 0x15, 0xdc, 0x3a, 0xbd, 0xc1, - 0x1b, 0xb6, 0xb7, 0x71, 0x1a, 0x1c, 0x53, 0xe6, 0x52, 0x41, 0xf9, 0x86, 0x7f, 0xda, 0xdd, 0x20, - 0xbe, 0xcd, 0x37, 0xe8, 0x99, 0xa0, 0x2e, 0xb7, 0x3d, 0x97, 0x6f, 0xf4, 0x37, 0x8f, 0xa9, 0x20, - 0x9b, 0x1b, 0x5d, 0xea, 0x52, 0x46, 0x04, 0xb5, 0x1a, 0x3e, 0xf3, 0x84, 0x87, 0xae, 0x87, 0xe4, - 0x8d, 0x21, 0x79, 0xc3, 0x3f, 0xed, 0x36, 0x24, 0x79, 0x63, 0x48, 0xde, 0xd0, 0xe4, 0x2b, 0x5b, - 0x23, 0xb9, 0x6d, 0x30, 0xca, 0xbd, 0x80, 0x75, 0x68, 0x96, 0xc5, 0x39, 0x34, 0x7c, 0xa3, 0x47, - 0x05, 0xd9, 0xe8, 0xe7, 0xc4, 0x5a, 0xb9, 0x5e, 0x4c, 0xc3, 0x02, 0x57, 0xd8, 0xbd, 0x3c, 0x8b, - 0x4f, 0x9f, 0xbf, 0x9c, 0x77, 0x4e, 0x68, 0x8f, 0xe4, 0xa8, 0x36, 0x8b, 0xa9, 0x02, 0x61, 0x3b, - 0x1b, 0xb6, 0x2b, 0xb8, 0x60, 0x39, 0x92, 0x6b, 0xa3, 0xf5, 0xcf, 0x6b, 0x61, 0xae, 0x01, 0x6c, - 0x37, 0xf7, 0x8f, 0x28, 0x93, 0x46, 0x44, 0x08, 0x66, 0x5c, 0xd2, 0xa3, 0x75, 0x63, 0xcd, 0x58, - 0x9f, 0xc7, 0xea, 0xd9, 0xe4, 0xf0, 0xc2, 0x6e, 0xc0, 0x85, 0xd7, 0xbb, 0x4f, 0x05, 0xb3, 0x3b, - 0xbb, 0x01, 0x63, 0xd4, 0x15, 0x2d, 0x41, 0x44, 0xc0, 0x8b, 0x08, 0xd0, 0x0e, 0xcc, 0xf6, 0x89, - 0x13, 0xd0, 0x7a, 0x65, 0xcd, 0x58, 0x5f, 0xd8, 0xba, 0xd6, 0x18, 0xb9, 0x7f, 0x8d, 0x68, 0x43, - 0x1a, 0xaf, 0x05, 0xc4, 0x15, 0xb6, 0x18, 0xe0, 0x90, 0xd4, 0xfc, 0x1a, 0x7c, 0x74, 0x24, 0xd3, - 0x03, 0x9b, 0x0b, 0xf4, 0x65, 0x98, 0xb5, 0x05, 0xed, 0xf1, 0xba, 0xb1, 0x56, 0x5d, 0x5f, 0xd8, - 0xba, 0xd7, 0x18, 0xcb, 0x49, 0x1a, 0x23, 0xc1, 0x71, 0x08, 0x6b, 0x3a, 0x80, 0x92, 0x6b, 0xda, - 0x84, 0x75, 0xa9, 0x78, 0x66, 0xea, 0xbe, 0x0d, 0xcb, 0x79, 0x6e, 0x4a, 0xcf, 0x47, 0x69, 0x3d, - 0xb7, 0x27, 0xd0, 0x33, 0x44, 0x8d, 0x14, 0xfc, 0x46, 0x05, 0xe6, 0xf7, 0x08, 0xed, 0x79, 0x6e, - 0x8b, 0x0a, 0xf4, 0x79, 0xa8, 0x49, 0x3f, 0xb7, 0x88, 0x20, 0x4a, 0xb9, 0x85, 0xad, 0xc6, 0x79, - 0x9c, 0xe4, 0xda, 0x46, 0x7f, 0xb3, 0xf1, 0xe0, 0xf8, 0x2b, 0xb4, 0x23, 0xee, 0x53, 0x41, 0x70, - 0x4c, 0x8f, 0x9a, 0x30, 0xc3, 0x7d, 0xda, 0xd1, 0xf6, 0x78, 0x79, 0x4c, 0x89, 0x63, 0x99, 0x5a, - 0x3e, 0xed, 0x60, 0x85, 0x84, 0x8e, 0xe0, 0x02, 0x57, 0xbb, 0x53, 0xaf, 0x2a, 0xcc, 0xcf, 0x95, - 0xc6, 0x0c, 0xf7, 0x58, 0xa3, 0x99, 0x3f, 0x35, 0x60, 0x31, 0x9e, 0x53, 0xe6, 0xbe, 0x97, 0xb3, - 0xc3, 0xb5, 0xa7, 0xb1, 0x83, 0xa4, 0xcd, 0x58, 0xe1, 0x30, 0xda, 0xb8, 0x8a, 0xda, 0xb8, 
0x1b, - 0x65, 0x45, 0x8e, 0xf6, 0xeb, 0xef, 0x95, 0x84, 0xac, 0xd2, 0x36, 0xe8, 0x3e, 0xd4, 0x38, 0x75, - 0x68, 0x47, 0x78, 0x4c, 0xcb, 0xba, 0xf9, 0x54, 0xb2, 0x92, 0x63, 0xea, 0xb4, 0x34, 0x21, 0x8e, - 0x21, 0xd0, 0x3e, 0xd4, 0x04, 0xed, 0xf9, 0x0e, 0x11, 0x91, 0x2b, 0x5f, 0x3f, 0xc7, 0x95, 0xfb, - 0x9b, 0x8d, 0xa6, 0x67, 0xb5, 0x35, 0x81, 0xda, 0xab, 0x98, 0x1c, 0xb9, 0xb0, 0x14, 0xf8, 0x96, - 0x1c, 0x17, 0x32, 0xd6, 0x74, 0x07, 0x7a, 0xdf, 0xee, 0x94, 0x35, 0xc2, 0xc3, 0x14, 0x1a, 0xce, - 0xa0, 0xa3, 0x75, 0xb8, 0xd8, 0xb3, 0x5d, 0x4c, 0x89, 0x35, 0x68, 0xd1, 0x8e, 0xe7, 0x5a, 0xbc, - 0x3e, 0xb3, 0x66, 0xac, 0xcf, 0xe2, 0xec, 0x30, 0x6a, 0x00, 0x8a, 0xa4, 0xbc, 0x1b, 0x46, 0x42, - 0xdb, 0x73, 0xeb, 0xb3, 0x6b, 0xc6, 0x7a, 0x15, 0x17, 0xcc, 0x98, 0xdf, 0xaf, 0xc2, 0xc5, 0x8c, - 0xf7, 0xa0, 0x97, 0x60, 0xb9, 0x13, 0x86, 0x8c, 0xc3, 0xa0, 0x77, 0x4c, 0x59, 0xab, 0x73, 0x42, - 0xad, 0xc0, 0xa1, 0x96, 0xda, 0x85, 0x59, 0x3c, 0x62, 0x56, 0xf2, 0x76, 0xd5, 0xd0, 0x7d, 0x9b, - 0xf3, 0x98, 0xa6, 0xa2, 0x68, 0x0a, 0x66, 0x24, 0x1f, 0x8b, 0x72, 0x9b, 0x51, 0x2b, 0xcb, 0xa7, - 0x1a, 0xf2, 0x29, 0x9e, 0x45, 0x6b, 0xb0, 0x10, 0xa2, 0x29, 0xcd, 0xb5, 0x25, 0x92, 0x43, 0x52, - 0x12, 0xef, 0x98, 0x53, 0xd6, 0xa7, 0x56, 0xde, 0x0a, 0xf9, 0x19, 0x29, 0x49, 0x68, 0xf1, 0x9c, - 0x24, 0x17, 0x42, 0x49, 0x8a, 0x67, 0xe5, 0xbe, 0x84, 0x6c, 0xb7, 0xfb, 0xc4, 0x76, 0xc8, 0xb1, - 0x43, 0xeb, 0x73, 0xe1, 0xbe, 0x64, 0x86, 0xd1, 0x35, 0xb8, 0x1c, 0x0e, 0x3d, 0x74, 0x49, 0xbc, - 0xb6, 0xa6, 0xd6, 0xe6, 0x27, 0xcc, 0x1f, 0x19, 0x70, 0x75, 0x84, 0x6f, 0xc8, 0x10, 0x2d, 0x06, - 0x7e, 0x1c, 0xa2, 0xe5, 0x33, 0x3a, 0x85, 0x45, 0xe6, 0x39, 0x8e, 0xed, 0x76, 0xc3, 0xc5, 0xda, - 0xbf, 0x6f, 0x8f, 0xe9, 0x8e, 0x38, 0x89, 0x31, 0x3c, 0xa0, 0x69, 0x6c, 0xf3, 0xbd, 0x0a, 0xc0, - 0x1e, 0xf5, 0x1d, 0x6f, 0xd0, 0xa3, 0xee, 0x74, 0x23, 0xeb, 0x6b, 0xa9, 0xc8, 0x7a, 0x6b, 0xdc, - 0xd3, 0x14, 0x0b, 0x95, 0x08, 0xad, 0x8f, 0x32, 0xa1, 0xf5, 0x95, 0xf2, 0xa0, 0xe9, 0xd8, 0xfa, - 0xe3, 0x0a, 0x3c, 0x3f, 0x9c, 0xdc, 0xf5, 0x5c, 0xcb, 0x16, 0xba, 0xc4, 0xc8, 0xed, 0xcf, 0x72, - 0x2c, 0x44, 0x45, 0x8d, 0xea, 0x37, 0x39, 0xce, 0x28, 0xe1, 0x9e, 0xab, 0x9c, 0x78, 0x1e, 0xeb, - 0x37, 0x54, 0x87, 0xb9, 0x1e, 0xe5, 0x9c, 0x74, 0xa9, 0x72, 0xda, 0x79, 0x1c, 0xbd, 0xa2, 0x26, - 0x2c, 0x39, 0x84, 0x6b, 0x9f, 0x68, 0xdb, 0x3d, 0xaa, 0x3c, 0x74, 0x61, 0x6b, 0xfd, 0x69, 0x6c, - 0x2e, 0xd7, 0xe3, 0x0c, 0x3d, 0x7a, 0x1d, 0x90, 0x1c, 0x69, 0x33, 0xe2, 0x72, 0xa5, 0x81, 0x42, - 0x9d, 0x1b, 0x13, 0xb5, 0x00, 0xc3, 0xfc, 0x99, 0x01, 0x4b, 0x43, 0x0b, 0x4d, 0x39, 0xfd, 0x3c, - 0x48, 0xa7, 0x9f, 0xcf, 0x94, 0xde, 0xd6, 0x28, 0xff, 0xfc, 0xa9, 0x02, 0x28, 0x31, 0xea, 0x39, - 0xce, 0x31, 0xe9, 0x9c, 0x16, 0x56, 0x44, 0xdf, 0x31, 0x00, 0xe9, 0x88, 0xb0, 0xed, 0xba, 0x9e, - 0x50, 0x41, 0x24, 0x92, 0xe4, 0x8b, 0xe5, 0x25, 0xd1, 0x3c, 0x1b, 0x0f, 0x73, 0xd8, 0xb7, 0x5d, - 0xc1, 0x06, 0xb8, 0x80, 0x29, 0x7a, 0x13, 0x80, 0x69, 0xba, 0xb6, 0xa7, 0x7d, 0xfc, 0x56, 0x89, - 0x73, 0x2f, 0x01, 0x76, 0x3d, 0xf7, 0xb1, 0xdd, 0xc5, 0x09, 0xc0, 0x95, 0xdb, 0x70, 0x75, 0x84, - 0x34, 0xe8, 0x12, 0x54, 0x4f, 0xe9, 0x40, 0x1b, 0x46, 0x3e, 0xa2, 0x2b, 0xc9, 0x4a, 0x71, 0x5e, - 0xd7, 0x7e, 0x37, 0x2b, 0x37, 0x0c, 0xf3, 0x17, 0x33, 0x49, 0x57, 0x50, 0xd9, 0x7d, 0x05, 0x6a, - 0x8c, 0xfa, 0x8e, 0xdd, 0x21, 0x5c, 0xe7, 0x95, 0xf8, 0x3d, 0x95, 0xf9, 0x2b, 0xd3, 0xcd, 0xfc, - 0xd5, 0xc9, 0x32, 0xff, 0x9b, 0x50, 0xe3, 0x51, 0xce, 0x9f, 0x51, 0x50, 0xdb, 0x13, 0x04, 0x14, - 0x9d, 0xee, 0x63, 0xc8, 0xa2, 0x44, 0x3f, 0x5b, 0x9c, 0xe8, 0xb7, 0xe0, 0x0a, 0xa3, 0x7d, 0x5b, - 0x62, 0xdf, 0xb3, 
0xb9, 0xf0, 0xd8, 0xe0, 0xc0, 0xee, 0xd9, 0x42, 0x27, 0xac, 0xc2, 0x39, 0x19, - 0x6e, 0x7c, 0x12, 0x70, 0x6a, 0xa9, 0xe3, 0x5d, 0xc3, 0xfa, 0x2d, 0xe3, 0x43, 0xb5, 0x29, 0xfb, - 0x10, 0xba, 0x01, 0x57, 0x7d, 0xe6, 0x75, 0x19, 0xe5, 0x7c, 0x8f, 0x12, 0xcb, 0xb1, 0x5d, 0x1a, - 0x29, 0x37, 0xaf, 0xa4, 0x1d, 0x35, 0x6d, 0xfe, 0xad, 0x02, 0x97, 0xb2, 0x01, 0x78, 0x44, 0x72, - 0x37, 0x46, 0x26, 0xf7, 0xa4, 0xa3, 0x55, 0x32, 0x8e, 0xb6, 0x0e, 0x17, 0xf5, 0x99, 0xc2, 0xd1, - 0x92, 0xb0, 0xf6, 0xc8, 0x0e, 0xcb, 0x04, 0x1e, 0xe7, 0xe7, 0x78, 0x6d, 0x58, 0x7a, 0xe4, 0x27, - 0xd0, 0x8b, 0xf0, 0x7c, 0xe0, 0xe6, 0xd7, 0x87, 0x7b, 0x59, 0x34, 0x85, 0x8e, 0x01, 0x3a, 0x51, - 0x0e, 0xe1, 0xf5, 0x0b, 0x2a, 0x94, 0xec, 0x94, 0x76, 0xad, 0x38, 0x1d, 0xe1, 0x04, 0x2a, 0xfa, - 0x38, 0x2c, 0x32, 0xe9, 0x43, 0xb1, 0x3c, 0x61, 0xb1, 0x92, 0x1e, 0x34, 0x7f, 0x60, 0x24, 0x03, - 0xe1, 0xb9, 0x75, 0x87, 0x53, 0x5c, 0x77, 0xdc, 0x99, 0xa8, 0xee, 0x18, 0xc6, 0xc3, 0x4c, 0xe1, - 0xf1, 0x55, 0x58, 0xbe, 0xd3, 0xba, 0xcb, 0xbc, 0xc0, 0x8f, 0x84, 0x7a, 0xe0, 0x87, 0x8a, 0x21, - 0x98, 0x61, 0x81, 0x13, 0xcb, 0x26, 0x9f, 0xd1, 0x21, 0x5c, 0x60, 0xc4, 0xed, 0xd2, 0x28, 0x2e, - 0xbf, 0x34, 0xa6, 0x50, 0xfb, 0x7b, 0x58, 0x92, 0x63, 0x8d, 0x62, 0xbe, 0x0b, 0x17, 0xef, 0xb5, - 0xdb, 0xcd, 0x7d, 0x57, 0x79, 0x6a, 0x93, 0x88, 0x13, 0xc9, 0xd6, 0x27, 0xe2, 0x24, 0x62, 0x2b, - 0x9f, 0xd1, 0x23, 0x98, 0x93, 0x6e, 0x4f, 0x5d, 0xab, 0x64, 0x15, 0xa3, 0x19, 0xec, 0x84, 0x20, - 0x38, 0x42, 0x33, 0x1d, 0xb8, 0x92, 0xe0, 0x8f, 0x03, 0x87, 0x1e, 0xc9, 0xf0, 0x8a, 0xda, 0x30, - 0x2b, 0x19, 0x47, 0x17, 0xe8, 0x71, 0xaf, 0x8e, 0x19, 0x9d, 0x70, 0x08, 0x66, 0x7e, 0x0a, 0x16, - 0xef, 0x79, 0x5c, 0x34, 0x3d, 0x26, 0x94, 0x19, 0x64, 0xb4, 0xef, 0xd9, 0xae, 0x8e, 0xd4, 0xf2, - 0x51, 0x8d, 0x90, 0x33, 0x7d, 0xa4, 0xe4, 0xa3, 0x79, 0x1d, 0xe6, 0xb4, 0xd5, 0x92, 0xcb, 0xab, - 0xb9, 0xe5, 0xd5, 0x70, 0xf9, 0xbf, 0x0c, 0x98, 0xd3, 0xac, 0xa7, 0x5a, 0x45, 0x1e, 0xa6, 0xaa, - 0xc8, 0x9b, 0xe5, 0xec, 0x9f, 0x28, 0x21, 0xdb, 0x99, 0x12, 0xf2, 0xe5, 0x92, 0x88, 0xe9, 0xfa, - 0xf1, 0x3d, 0x03, 0x96, 0xd2, 0x7b, 0x2d, 0x2f, 0x36, 0x32, 0x7a, 0xd9, 0x1d, 0x7a, 0x38, 0x2c, - 0x39, 0x92, 0x43, 0xa8, 0x19, 0xaf, 0x90, 0x3b, 0xa3, 0x35, 0x1c, 0x65, 0xa9, 0x40, 0xd8, 0x4e, - 0x23, 0x6c, 0xa2, 0x35, 0xf6, 0x5d, 0xf1, 0x80, 0xb5, 0x04, 0xb3, 0xdd, 0x2e, 0x4e, 0x42, 0x98, - 0x3f, 0x31, 0x60, 0x41, 0x8b, 0x31, 0xe5, 0x0a, 0xed, 0x20, 0x5d, 0xa1, 0xbd, 0x54, 0xce, 0x6a, - 0x51, 0x79, 0xf6, 0xbd, 0xa1, 0x9c, 0xd2, 0xf7, 0xe5, 0xd9, 0x3b, 0xf1, 0xb8, 0x88, 0xce, 0x9e, - 0x7c, 0x46, 0xa7, 0x70, 0xc9, 0xce, 0x1c, 0x0f, 0x6d, 0xa2, 0x57, 0x4a, 0x32, 0x8f, 0x60, 0x70, - 0x0e, 0xd8, 0x3c, 0x85, 0x4b, 0xb9, 0xb3, 0xf8, 0x08, 0x66, 0x4e, 0x84, 0xf0, 0xb5, 0xe1, 0x76, - 0xcb, 0x1f, 0xc5, 0x21, 0x63, 0x05, 0x68, 0x7e, 0xab, 0x12, 0x6b, 0xdf, 0x0a, 0x6f, 0x35, 0x71, - 0x94, 0x31, 0xa6, 0x19, 0x65, 0xd0, 0x17, 0xa0, 0x2a, 0x9c, 0xb2, 0x45, 0xb5, 0x06, 0x6d, 0x1f, - 0xb4, 0xb0, 0x44, 0x41, 0x4d, 0x98, 0x95, 0xa1, 0x58, 0x9e, 0x9b, 0x6a, 0xf9, 0x93, 0x28, 0x6d, - 0x81, 0x43, 0x20, 0x93, 0xc2, 0x62, 0xea, 0x34, 0xa1, 0x36, 0x3c, 0xe7, 0x78, 0xc4, 0xda, 0x21, - 0x0e, 0x71, 0x3b, 0x34, 0xea, 0x13, 0xbd, 0x78, 0x7e, 0x79, 0x77, 0x90, 0xa0, 0xd0, 0xa7, 0x32, - 0x85, 0x62, 0xee, 0x00, 0x0c, 0x75, 0x91, 0x65, 0xad, 0x74, 0xaf, 0x30, 0xc2, 0xce, 0xe3, 0xf0, - 0x05, 0xad, 0x02, 0x70, 0xda, 0x61, 0x54, 0xa8, 0xb3, 0x1a, 0x56, 0xbc, 0x89, 0x11, 0xf3, 0xe7, - 0x06, 0x2c, 0x1e, 0x52, 0xf1, 0x8e, 0xc7, 0x4e, 0x9b, 0x9e, 0x63, 0x77, 0x06, 0x53, 0x8d, 0x71, - 0xed, 0x54, 0x8c, 0x7b, 0x75, 0x4c, 0xcb, 
0xa6, 0xe4, 0x1a, 0x46, 0x3a, 0xf3, 0x03, 0x03, 0xea, - 0xa9, 0xb9, 0xe4, 0x89, 0x3b, 0x82, 0x59, 0xdf, 0x63, 0x22, 0x4a, 0x34, 0x13, 0xf1, 0x54, 0x79, - 0x25, 0x84, 0x93, 0xaa, 0x3c, 0x66, 0x5e, 0x4f, 0xfb, 0xdc, 0x64, 0xb0, 0x94, 0x32, 0xac, 0xd0, - 0xa4, 0xf9, 0x2f, 0xa7, 0xe6, 0xa6, 0x1c, 0xdd, 0x70, 0x3a, 0xba, 0xbd, 0x3c, 0x89, 0xd8, 0x51, - 0x8c, 0xfb, 0x4d, 0x56, 0x66, 0xa9, 0x0f, 0x6a, 0xc1, 0x82, 0xef, 0x59, 0xad, 0x89, 0x3b, 0xa1, - 0x49, 0x14, 0xf4, 0x16, 0x5c, 0x96, 0x57, 0x59, 0xee, 0x93, 0x0e, 0x6d, 0x4d, 0x7c, 0xd5, 0xca, - 0x63, 0x99, 0x3c, 0xab, 0x8a, 0xc7, 0x84, 0x2c, 0xc5, 0xd5, 0x57, 0x99, 0x8e, 0xe7, 0xe8, 0xc0, - 0x1d, 0xbf, 0xa3, 0x1d, 0x98, 0xf1, 0xcb, 0xe7, 0x34, 0x45, 0x6b, 0xfe, 0x2e, 0x6b, 0x40, 0x15, - 0x2c, 0x9f, 0x89, 0x01, 0x09, 0xcc, 0xe9, 0x94, 0xa0, 0x3d, 0xe0, 0xee, 0x24, 0x1e, 0x90, 0x0c, - 0x75, 0x11, 0xae, 0xf9, 0xbe, 0x01, 0x97, 0x9b, 0x92, 0x65, 0x27, 0x60, 0xb6, 0x18, 0x3c, 0x83, - 0x28, 0xf2, 0x7a, 0x2a, 0x8a, 0xec, 0x8d, 0xa9, 0x41, 0x4e, 0xb6, 0x44, 0x24, 0x79, 0xdf, 0x80, - 0xff, 0xcf, 0xcd, 0x4f, 0xf9, 0x08, 0x1e, 0xa5, 0x8f, 0xe0, 0xab, 0x93, 0x8a, 0x1f, 0x1d, 0xc3, - 0xef, 0xce, 0x15, 0xc8, 0xae, 0x3c, 0x69, 0x15, 0xc0, 0x67, 0x76, 0xdf, 0x76, 0x68, 0x57, 0x77, - 0xc3, 0x6b, 0x38, 0x31, 0x12, 0x76, 0xb4, 0x1f, 0x93, 0xc0, 0x11, 0xdb, 0x96, 0xb5, 0x4b, 0x7c, - 0x72, 0x6c, 0x3b, 0xb6, 0xb0, 0xf5, 0x1d, 0x64, 0x1e, 0x8f, 0x98, 0x45, 0x37, 0xa1, 0xce, 0xe8, - 0xdb, 0x81, 0xcd, 0xa8, 0xb5, 0xc7, 0x3c, 0x3f, 0x45, 0x59, 0x55, 0x94, 0x23, 0xe7, 0xe5, 0x55, - 0x93, 0x38, 0x8e, 0xf7, 0x0e, 0x4d, 0x33, 0x9c, 0x51, 0x64, 0x45, 0x53, 0xa8, 0x0e, 0x73, 0x7d, - 0xcf, 0x09, 0x7a, 0x54, 0x5e, 0x48, 0xe5, 0xaa, 0xe8, 0x55, 0x16, 0xa0, 0x32, 0xb9, 0x69, 0xd7, - 0x54, 0xbd, 0x84, 0x1a, 0x4e, 0x0e, 0xa1, 0x37, 0x60, 0xfe, 0x44, 0xdf, 0x0b, 0xe4, 0xf5, 0xb1, - 0x4c, 0xe8, 0x4b, 0xdd, 0x2b, 0xf0, 0x10, 0x4e, 0xca, 0xa5, 0x5e, 0xf6, 0xf7, 0x54, 0x0f, 0xa2, - 0x86, 0xa3, 0xd7, 0x68, 0x66, 0xbf, 0xb9, 0xab, 0x3a, 0x06, 0x7a, 0x66, 0xbf, 0xb9, 0x8b, 0xde, - 0x82, 0x39, 0x4e, 0x0f, 0x6c, 0x37, 0x38, 0xab, 0x43, 0xa9, 0x9e, 0x77, 0xeb, 0xb6, 0xa2, 0xce, - 0xdc, 0x28, 0x71, 0x84, 0x8a, 0x28, 0xcc, 0xb3, 0xc0, 0xdd, 0xe6, 0x0f, 0x39, 0x65, 0xf5, 0x05, - 0xc5, 0x62, 0xdc, 0x93, 0x8e, 0x23, 0xfa, 0x2c, 0x93, 0x21, 0x32, 0xfa, 0xba, 0x01, 0x88, 0x07, - 0xbe, 0xef, 0x50, 0x79, 0xf3, 0x25, 0x8e, 0xba, 0xe6, 0xf2, 0xfa, 0x73, 0x8a, 0x61, 0x73, 0x5c, - 0x9d, 0x72, 0x40, 0x59, 0xce, 0x05, 0xbc, 0xa4, 0x29, 0x1f, 0x73, 0xf5, 0x5c, 0x5f, 0x2c, 0x65, - 0xca, 0xe2, 0xcb, 0x39, 0x8e, 0x50, 0xe5, 0xe9, 0x60, 0x94, 0x58, 0x0f, 0x5c, 0x67, 0x80, 0x3d, - 0x4f, 0xdc, 0xb1, 0x1d, 0xca, 0x07, 0x5c, 0xd0, 0x5e, 0x7d, 0x49, 0x6d, 0xea, 0x88, 0x59, 0xf5, - 0xc1, 0x41, 0x77, 0x27, 0xa6, 0xfd, 0x29, 0x77, 0xb2, 0x0f, 0x0e, 0x43, 0xa1, 0xa6, 0xf8, 0xc1, - 0x21, 0x01, 0x9a, 0xbe, 0x30, 0xfe, 0xde, 0x80, 0xe7, 0x87, 0x93, 0xe5, 0x3e, 0x38, 0x14, 0x37, - 0xfb, 0xab, 0x93, 0x37, 0xfb, 0xc7, 0xff, 0x94, 0xa1, 0x3e, 0x0f, 0x0c, 0xf5, 0xf9, 0xef, 0xfa, - 0x3c, 0x30, 0x94, 0x2b, 0x4a, 0x0a, 0x7f, 0x4d, 0x49, 0xfb, 0x3f, 0xdc, 0xc1, 0x7e, 0xea, 0x6f, - 0xc9, 0xe6, 0x6f, 0x2b, 0x70, 0x29, 0xeb, 0x8d, 0xe7, 0x2a, 0xbd, 0x05, 0x57, 0x1e, 0x07, 0x8e, - 0x33, 0x50, 0x5a, 0x24, 0x5a, 0xaa, 0x61, 0x8b, 0xa8, 0x70, 0x6e, 0x44, 0x37, 0xb7, 0x3a, 0xb2, - 0x9b, 0x9b, 0xeb, 0x61, 0xce, 0x14, 0xf4, 0x30, 0x8b, 0xbb, 0xb5, 0xb3, 0xa3, 0xba, 0xb5, 0xd3, - 0xe8, 0xbd, 0x16, 0x9c, 0xcc, 0x64, 0xef, 0xd5, 0xfc, 0x08, 0xac, 0xe8, 0x25, 0xf2, 0x7d, 0xd7, - 0x73, 0x05, 0xf3, 0x1c, 0x87, 0xb2, 0xbd, 0xa0, 0xd7, 0x1b, 0x98, 
0xd7, 0x60, 0x29, 0xdd, 0x40, - 0x0f, 0xed, 0x1c, 0xf6, 0xf0, 0x75, 0x17, 0x2d, 0x7e, 0x37, 0x7d, 0x58, 0x2e, 0xfe, 0x54, 0x8b, - 0x8e, 0x60, 0xa9, 0x47, 0xce, 0x92, 0xdf, 0x98, 0x8d, 0x52, 0xe5, 0x74, 0x06, 0xc5, 0xfc, 0x95, - 0x01, 0x57, 0x47, 0x74, 0x69, 0x9f, 0x15, 0x4f, 0x15, 0xe7, 0xc9, 0x59, 0x2b, 0x60, 0x5d, 0x5a, - 0xf2, 0x52, 0x10, 0xd3, 0x9b, 0xef, 0x42, 0x7d, 0x54, 0x16, 0xfe, 0x50, 0x9a, 0xc7, 0xdf, 0x34, - 0x60, 0xb9, 0xb8, 0xd2, 0x28, 0x64, 0xdf, 0x86, 0x25, 0x5d, 0x7f, 0xe8, 0x55, 0x4f, 0xf1, 0xef, - 0x55, 0x3f, 0xae, 0x65, 0xa2, 0xc4, 0x9b, 0xc1, 0x30, 0xff, 0x69, 0xc0, 0x6c, 0xab, 0x43, 0xb4, - 0x69, 0xa7, 0x95, 0x42, 0x0f, 0x52, 0x29, 0x74, 0xdc, 0xdf, 0x80, 0x94, 0x3c, 0x89, 0xec, 0x89, - 0x33, 0xd9, 0xf3, 0x66, 0x29, 0xbc, 0x74, 0xe2, 0xfc, 0x24, 0xcc, 0xc7, 0x6c, 0xce, 0x8b, 0x5f, - 0xe6, 0x3f, 0x0c, 0x58, 0x48, 0x00, 0x9c, 0x1b, 0xeb, 0xac, 0x54, 0x80, 0x2f, 0xf3, 0x8b, 0x5e, - 0x82, 0x53, 0x23, 0x0a, 0xf9, 0xe1, 0x77, 0xde, 0x61, 0xdc, 0xff, 0x04, 0x2c, 0x09, 0xf5, 0x57, - 0x5b, 0x7c, 0x7b, 0xad, 0x2a, 0x37, 0xc9, 0x8c, 0xae, 0x7c, 0x16, 0x16, 0x53, 0x10, 0x63, 0x7d, - 0x9c, 0xfd, 0xb6, 0x01, 0x1f, 0xfb, 0x8f, 0x25, 0xe3, 0x87, 0x72, 0x4c, 0xfe, 0x68, 0x00, 0x6a, - 0x9f, 0xd8, 0xcc, 0x6a, 0x12, 0x26, 0x06, 0x58, 0xff, 0x4d, 0x38, 0x55, 0x77, 0x5d, 0x83, 0x05, - 0x8b, 0xf2, 0x0e, 0xb3, 0x95, 0x5a, 0xda, 0x18, 0xc9, 0x21, 0xf4, 0x10, 0x6a, 0xfd, 0xf0, 0x77, - 0xd1, 0xa8, 0x71, 0x39, 0x6e, 0xf5, 0x30, 0xfc, 0xe1, 0x14, 0xc7, 0x50, 0xe6, 0x19, 0x2c, 0xe7, - 0x55, 0xdb, 0x93, 0x22, 0x4d, 0x53, 0x3d, 0x04, 0x33, 0x0a, 0x47, 0xea, 0xf5, 0x1c, 0x56, 0xcf, - 0xe6, 0xaf, 0x0d, 0x58, 0x29, 0x66, 0x3d, 0xe5, 0xa2, 0xeb, 0x4b, 0xe9, 0xa2, 0x6b, 0xdc, 0xfb, - 0x43, 0xb1, 0x8c, 0x51, 0x01, 0xf6, 0x4b, 0xa3, 0xc8, 0x80, 0x53, 0xd6, 0xe0, 0x51, 0x5a, 0x83, - 0xed, 0x89, 0x35, 0xd0, 0xd2, 0xef, 0xbc, 0xf0, 0xc1, 0x93, 0x55, 0xe3, 0x0f, 0x4f, 0x56, 0x8d, - 0x3f, 0x3f, 0x59, 0x35, 0x7e, 0xf8, 0x97, 0xd5, 0xff, 0x7b, 0x63, 0x4e, 0x13, 0xfd, 0x3b, 0x00, - 0x00, 0xff, 0xff, 0x12, 0x7c, 0x23, 0xa4, 0x3e, 0x2e, 0x00, 0x00, + // 2711 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcf, 0x6f, 0xdc, 0xc6, + 0xf5, 0xff, 0x72, 0x57, 0xd2, 0xae, 0x9e, 0x22, 0x59, 0x9e, 0xf8, 0x6b, 0x6f, 0x84, 0x46, 0x75, + 0xd9, 0xc2, 0x75, 0x52, 0x7b, 0x65, 0x3b, 0x89, 0x6b, 0x24, 0x41, 0x62, 0x4b, 0x6b, 0xc9, 0x32, + 0x64, 0x4b, 0xe1, 0x4a, 0x41, 0x11, 0x07, 0x45, 0x47, 0xe4, 0x68, 0xc5, 0x8a, 0x4b, 0xb2, 0xc3, + 0xa1, 0xa2, 0xbd, 0x14, 0x3d, 0xe5, 0x10, 0x14, 0xed, 0xb1, 0x05, 0x8a, 0x9c, 0x72, 0x29, 0x90, + 0x43, 0xcf, 0xed, 0xad, 0xb7, 0x9e, 0xda, 0x5e, 0x8a, 0x9e, 0x0a, 0x14, 0x4e, 0x51, 0xf4, 0xcf, + 0x28, 0xe6, 0x07, 0xb9, 0xfc, 0xa9, 0xe5, 0xca, 0x2b, 0x14, 0xed, 0x8d, 0x33, 0xf3, 0xde, 0x67, + 0xde, 0xcc, 0x7b, 0xf3, 0xde, 0x9b, 0x37, 0x84, 0x9b, 0x47, 0xf7, 0x82, 0xb6, 0xed, 0xad, 0x60, + 0xdf, 0x5e, 0x21, 0x27, 0x8c, 0xb8, 0x81, 0xed, 0xb9, 0xc1, 0xca, 0xf1, 0xed, 0x7d, 0xc2, 0xf0, + 0xed, 0x95, 0x1e, 0x71, 0x09, 0xc5, 0x8c, 0x58, 0x6d, 0x9f, 0x7a, 0xcc, 0x43, 0xaf, 0x4a, 0xf2, + 0x36, 0xf6, 0xed, 0xf6, 0x90, 0xbc, 0xad, 0xc8, 0x97, 0xf4, 0x04, 0x9a, 0xe9, 0x51, 0xb2, 0x72, + 0x9c, 0x83, 0x58, 0x7a, 0x2d, 0x41, 0xe3, 0x7b, 0x8e, 0x6d, 0x0e, 0xca, 0x66, 0x5b, 0x7a, 0x73, + 0x48, 0xda, 0xc7, 0xe6, 0xa1, 0xed, 0x12, 0x3a, 0x58, 0xf1, 0x8f, 0x7a, 0x82, 0x97, 0x92, 0xc0, + 0x0b, 0xa9, 0x49, 0xc6, 0xe2, 0x0a, 0x56, 0xfa, 0x84, 0xe1, 0x22, 0xb1, 0x56, 0xca, 0xb8, 0x68, + 0xe8, 0x32, 0xbb, 0x9f, 0x9f, 0xe6, 0xee, 0x28, 0x86, 0xc0, 0x3c, 0x24, 
0x7d, 0x9c, 0xe3, 0x7b, + 0xa3, 0x8c, 0x2f, 0x64, 0xb6, 0xb3, 0x62, 0xbb, 0x2c, 0x60, 0x34, 0xcb, 0xa4, 0x7f, 0x07, 0x2e, + 0x3e, 0x70, 0x1c, 0xef, 0x13, 0x62, 0xad, 0x3b, 0xe4, 0xe4, 0x43, 0xcf, 0x09, 0xfb, 0x04, 0x5d, + 0x86, 0x19, 0x8b, 0xda, 0xc7, 0x84, 0xb6, 0xb4, 0xab, 0xda, 0xf5, 0x59, 0x43, 0xb5, 0xf4, 0xdb, + 0x70, 0x41, 0x11, 0x3f, 0xf2, 0x02, 0xb6, 0x83, 0xd9, 0x21, 0x5a, 0x06, 0xf0, 0x31, 0x3b, 0xdc, + 0xa1, 0xe4, 0xc0, 0x3e, 0x51, 0xe4, 0x89, 0x1e, 0x3d, 0x84, 0x57, 0xd6, 0xc2, 0x80, 0x79, 0xfd, + 0x27, 0x84, 0x51, 0xdb, 0x5c, 0x0b, 0x29, 0x25, 0x2e, 0xeb, 0x32, 0xcc, 0xc2, 0x00, 0x21, 0x98, + 0x72, 0x71, 0x9f, 0x28, 0x36, 0xf1, 0x8d, 0x3a, 0x30, 0x7d, 0x8c, 0x9d, 0x90, 0xb4, 0x6a, 0x57, + 0xb5, 0xeb, 0x73, 0x77, 0xda, 0xed, 0xa1, 0x61, 0xc4, 0xab, 0x6a, 0xfb, 0x47, 0x3d, 0x61, 0x29, + 0x91, 0xaa, 0xda, 0x1f, 0x84, 0xd8, 0x65, 0x36, 0x1b, 0x18, 0x92, 0x59, 0xf7, 0xe0, 0xd5, 0xd2, + 0x69, 0xb7, 0xec, 0x80, 0xa1, 0xa7, 0x30, 0x6d, 0x33, 0xd2, 0x0f, 0x5a, 0xda, 0xd5, 0xfa, 0xf5, + 0xb9, 0x3b, 0xf7, 0xda, 0xa7, 0xda, 0x5f, 0xbb, 0x14, 0xcc, 0x90, 0x30, 0xba, 0x0b, 0x28, 0x49, + 0xb3, 0x8b, 0x69, 0x8f, 0xb0, 0x73, 0x5c, 0x20, 0x86, 0xcb, 0xf9, 0xf9, 0xc4, 0xca, 0x36, 0xd2, + 0x2b, 0xbb, 0x3d, 0xc6, 0xca, 0x24, 0x4a, 0xb4, 0xa4, 0xaf, 0x34, 0x98, 0xed, 0x60, 0xd2, 0xf7, + 0xdc, 0x2e, 0x61, 0x68, 0x0b, 0x9a, 0xdc, 0xc2, 0x2d, 0xcc, 0xb0, 0x58, 0xce, 0xdc, 0x9d, 0x5b, + 0xa7, 0x49, 0x1e, 0xb4, 0x39, 0x75, 0xfb, 0xf8, 0x76, 0x7b, 0x7b, 0xff, 0x87, 0xc4, 0x64, 0x4f, + 0x08, 0xc3, 0x46, 0x8c, 0x80, 0xee, 0xc3, 0x54, 0xe0, 0x13, 0x53, 0xed, 0xc1, 0x8d, 0x11, 0x32, + 0xc6, 0x52, 0x74, 0x7d, 0x62, 0x1a, 0x82, 0x13, 0xad, 0xc3, 0x4c, 0x20, 0x34, 0xd0, 0xaa, 0xe7, + 0xf6, 0xf1, 0x74, 0x0c, 0xa9, 0x37, 0xc5, 0xad, 0xff, 0x51, 0x03, 0x14, 0x8f, 0xad, 0x79, 0xae, + 0x65, 0x33, 0xdb, 0x73, 0xb9, 0xe6, 0xd8, 0xc0, 0x8f, 0x35, 0xc7, 0xbf, 0xf9, 0xb1, 0x50, 0x53, + 0xd6, 0xe4, 0xb1, 0x90, 0x2d, 0xf4, 0x11, 0x20, 0x07, 0x07, 0x6c, 0x97, 0x62, 0x37, 0x10, 0xdc, + 0xbb, 0x76, 0x9f, 0x28, 0xb1, 0x5e, 0xaf, 0xb6, 0x49, 0x9c, 0xc3, 0x28, 0x40, 0xe1, 0x73, 0x52, + 0x82, 0x03, 0xcf, 0x6d, 0x4d, 0xc9, 0x39, 0x65, 0x0b, 0xb5, 0xa0, 0xd1, 0x27, 0x41, 0x80, 0x7b, + 0xa4, 0x35, 0x2d, 0x06, 0xa2, 0xa6, 0xfe, 0x2b, 0x0d, 0xe6, 0xe3, 0x05, 0x09, 0x8b, 0x78, 0x9c, + 0x53, 0x5d, 0xbb, 0x9a, 0x54, 0x9c, 0x3b, 0xa3, 0xb8, 0xf7, 0x22, 0xeb, 0xaa, 0x09, 0xeb, 0xba, + 0x5e, 0x75, 0xd7, 0x23, 0xa3, 0xfa, 0x79, 0x3d, 0x21, 0x1d, 0x57, 0x27, 0xda, 0x86, 0x66, 0x40, + 0x1c, 0x62, 0x32, 0x8f, 0x2a, 0xe9, 0xde, 0xa8, 0x28, 0x1d, 0xde, 0x27, 0x4e, 0x57, 0xb1, 0x1a, + 0x31, 0x08, 0x7a, 0x1f, 0x9a, 0x8c, 0xf4, 0x7d, 0x07, 0xb3, 0xe8, 0x8c, 0x7d, 0x33, 0x29, 0x25, + 0x0f, 0x1f, 0x9c, 0x7d, 0xc7, 0xb3, 0x76, 0x15, 0x99, 0x30, 0xab, 0x98, 0x09, 0x7d, 0x1f, 0x16, + 0x42, 0xdf, 0xe2, 0xfd, 0x8c, 0xbb, 0xca, 0xde, 0x40, 0xe9, 0xf2, 0x6e, 0xd5, 0xc5, 0xee, 0xa5, + 0xb8, 0x8d, 0x0c, 0x1a, 0xba, 0x0e, 0x17, 0xfa, 0xb6, 0x6b, 0x10, 0x6c, 0x0d, 0xba, 0xc4, 0xf4, + 0x5c, 0x2b, 0x10, 0xca, 0x9d, 0x36, 0xb2, 0xdd, 0xa8, 0x0d, 0x28, 0x92, 0x6a, 0x43, 0x3a, 0x6e, + 0xdb, 0x73, 0x85, 0xc2, 0xeb, 0x46, 0xc1, 0x08, 0xba, 0x03, 0x97, 0x28, 0x39, 0xb6, 0xb9, 0x58, + 0x8f, 0xec, 0x80, 0x79, 0x74, 0xb0, 0x65, 0xf7, 0x6d, 0xd6, 0x9a, 0x11, 0xf0, 0x85, 0x63, 0xfa, + 0xcf, 0xa6, 0xe0, 0x42, 0xe6, 0x70, 0xa0, 0xbb, 0x70, 0xd9, 0x94, 0x5e, 0xee, 0x69, 0xd8, 0xdf, + 0x27, 0xb4, 0x6b, 0x1e, 0x12, 0x2b, 0x74, 0x88, 0x25, 0x34, 0x34, 0x6d, 0x94, 0x8c, 0x72, 0x79, + 0x5d, 0xd1, 0xf5, 0xc4, 0x0e, 0x82, 0x98, 0xa7, 0x26, 0x78, 0x0a, 0x46, 0xf8, 0x3c, 0x16, 0x09, + 
0x6c, 0x4a, 0xac, 0xec, 0x3c, 0x75, 0x39, 0x4f, 0xf1, 0x28, 0xba, 0x0a, 0x73, 0x12, 0x4d, 0xec, + 0x96, 0xda, 0xbd, 0x64, 0x17, 0x97, 0xc4, 0xdb, 0x0f, 0x08, 0x3d, 0x26, 0x56, 0x7e, 0xe7, 0xf2, + 0x23, 0x5c, 0x12, 0xa9, 0xa5, 0x9c, 0x24, 0x72, 0xef, 0x4a, 0x46, 0xb9, 0x2e, 0xe5, 0xb4, 0x0f, + 0x8e, 0xb1, 0xed, 0xe0, 0x7d, 0x87, 0xb4, 0x1a, 0x52, 0x97, 0x99, 0x6e, 0x74, 0x03, 0x2e, 0xca, + 0xae, 0x3d, 0x17, 0xc7, 0xb4, 0x4d, 0x41, 0x9b, 0x1f, 0x40, 0xd7, 0x60, 0xc1, 0xf4, 0x1c, 0x47, + 0xa8, 0x6b, 0xcd, 0x0b, 0x5d, 0xd6, 0x9a, 0x15, 0xa4, 0x99, 0x5e, 0xf4, 0x01, 0x80, 0x19, 0x39, + 0xad, 0xa0, 0x05, 0x95, 0x5c, 0x7e, 0xde, 0xdd, 0x19, 0x09, 0x10, 0xfd, 0x33, 0x0d, 0xae, 0x94, + 0x98, 0x72, 0xa1, 0x5b, 0x7c, 0x06, 0xf3, 0x94, 0x0b, 0xe5, 0xf6, 0x24, 0xb1, 0x3a, 0x74, 0x6f, + 0x8d, 0x90, 0xc2, 0x48, 0xf2, 0x0c, 0xfd, 0x44, 0x1a, 0x4b, 0xff, 0xa7, 0x06, 0xd0, 0x21, 0xbe, + 0xe3, 0x0d, 0xfa, 0xc4, 0x9d, 0x74, 0x14, 0x7a, 0x90, 0x8a, 0x42, 0x37, 0x47, 0x6d, 0x5b, 0x2c, + 0x46, 0x22, 0x0c, 0x6d, 0x64, 0xc2, 0xd0, 0x4a, 0x75, 0x90, 0x74, 0x1c, 0xfa, 0xa2, 0x06, 0x2f, + 0x0f, 0x07, 0xcf, 0x16, 0x88, 0xc6, 0x0e, 0x16, 0xc8, 0x80, 0x05, 0x1e, 0x74, 0xe4, 0x66, 0x8b, + 0xb0, 0x35, 0x33, 0x76, 0xd8, 0xca, 0x20, 0x94, 0x84, 0xc3, 0xc6, 0x24, 0xc2, 0xa1, 0xfe, 0xb9, + 0x06, 0x0b, 0xc3, 0x5d, 0x9a, 0x78, 0x74, 0x7b, 0x3f, 0x1d, 0xdd, 0x5e, 0xab, 0xac, 0xcc, 0x28, + 0xbc, 0xfd, 0xb6, 0x06, 0x28, 0xd1, 0xeb, 0x39, 0xce, 0x3e, 0x36, 0x8f, 0x0a, 0xf3, 0xc0, 0x01, + 0x20, 0xe5, 0x53, 0x1e, 0xb8, 0xae, 0xc7, 0xb0, 0x3c, 0xc1, 0x72, 0xe2, 0xcd, 0xea, 0x13, 0xab, + 0x29, 0xda, 0x7b, 0x39, 0xac, 0x87, 0x2e, 0xa3, 0x03, 0xa3, 0x60, 0x12, 0xf4, 0x04, 0x80, 0x2a, + 0xbe, 0x5d, 0x4f, 0x19, 0xee, 0xcd, 0x0a, 0xc7, 0x95, 0x33, 0xac, 0x79, 0xee, 0x81, 0xdd, 0x33, + 0x12, 0x00, 0x4b, 0x0f, 0xe1, 0x4a, 0xc9, 0xec, 0x68, 0x11, 0xea, 0x47, 0x64, 0xa0, 0xd6, 0xcd, + 0x3f, 0xd1, 0xa5, 0x64, 0xfa, 0x3b, 0xab, 0xd2, 0xd9, 0xb7, 0x6b, 0xf7, 0x34, 0xfd, 0xa7, 0x53, + 0x49, 0xdd, 0x8a, 0xdc, 0x60, 0x09, 0x9a, 0x94, 0xf8, 0x8e, 0x6d, 0xe2, 0x40, 0x45, 0x9e, 0xb8, + 0x9d, 0xca, 0x1b, 0x6a, 0x93, 0xce, 0x1b, 0xea, 0x67, 0xc9, 0x1b, 0x9e, 0x40, 0x33, 0x88, 0x32, + 0x86, 0x29, 0x01, 0x70, 0x7b, 0x0c, 0x6f, 0xa0, 0x92, 0x85, 0x18, 0xa2, 0x28, 0x4d, 0x98, 0x2e, + 0x4e, 0x13, 0xce, 0x10, 0xf6, 0xb9, 0xaf, 0xf0, 0x71, 0x18, 0x10, 0x4b, 0x9c, 0xcc, 0xa6, 0xa1, + 0x5a, 0x19, 0xdb, 0x68, 0xbe, 0xa0, 0x6d, 0xa0, 0x7b, 0x70, 0xc5, 0xa7, 0x5e, 0x8f, 0x92, 0x20, + 0xe8, 0x10, 0x6c, 0x39, 0xb6, 0x4b, 0xa2, 0xc5, 0xc8, 0x80, 0x56, 0x36, 0xac, 0x7f, 0x56, 0x87, + 0xc5, 0xac, 0xb7, 0x2c, 0x09, 0xeb, 0x5a, 0x69, 0x58, 0x4f, 0x1a, 0x50, 0x2d, 0x63, 0x40, 0xd7, + 0xe1, 0x82, 0x3a, 0x1b, 0x46, 0x44, 0x22, 0xb3, 0x8e, 0x6c, 0x37, 0x0f, 0xdd, 0x71, 0x64, 0x8e, + 0x69, 0x65, 0xd2, 0x91, 0x1f, 0x40, 0xb7, 0xe0, 0xe5, 0xd0, 0xcd, 0xd3, 0x4b, 0xdd, 0x15, 0x0d, + 0x21, 0x23, 0x15, 0xc4, 0x67, 0x84, 0x0b, 0xb8, 0x53, 0xd9, 0x74, 0x0a, 0xa3, 0x38, 0xfa, 0x16, + 0xcc, 0x53, 0x6e, 0x23, 0xf1, 0xfc, 0x32, 0x2d, 0x49, 0x77, 0x16, 0xa4, 0x19, 0xcd, 0xa2, 0x34, + 0x43, 0xff, 0x54, 0x4b, 0xfa, 0xb5, 0x53, 0xd3, 0x81, 0x8f, 0x8b, 0xd3, 0x81, 0xbb, 0x63, 0xa5, + 0x03, 0x43, 0xff, 0x96, 0xc9, 0x07, 0x1c, 0xb8, 0xbc, 0xde, 0xdd, 0xa0, 0x5e, 0xe8, 0x47, 0x42, + 0x6c, 0xfb, 0x72, 0xc1, 0x08, 0xa6, 0x68, 0xe8, 0xc4, 0xb2, 0xf0, 0x6f, 0xf4, 0x1e, 0xcc, 0x50, + 0xec, 0xf6, 0x48, 0xe4, 0x57, 0xaf, 0x8d, 0x10, 0x62, 0xb3, 0x63, 0x70, 0x72, 0x43, 0x71, 0xe9, + 0x2e, 0x5c, 0x78, 0xb4, 0xbb, 0xbb, 0xb3, 0xe9, 0x0a, 0x0b, 0x15, 0x05, 0x0f, 0x04, 0x53, 0x3e, + 0x66, 0x87, 0xd1, 0x34, 
0xfc, 0x1b, 0x6d, 0x40, 0x83, 0x9b, 0x3b, 0x71, 0xad, 0x8a, 0xa9, 0x84, + 0x02, 0x5c, 0x95, 0x4c, 0x46, 0xc4, 0xad, 0x7f, 0x0c, 0x97, 0x12, 0xf3, 0x19, 0xa1, 0x43, 0x3e, + 0xe4, 0xee, 0x11, 0x75, 0x60, 0x9a, 0x4f, 0x14, 0xdd, 0xe9, 0x47, 0xdd, 0x75, 0x33, 0x32, 0x1b, + 0x92, 0x59, 0x7f, 0x03, 0xe6, 0x45, 0xdd, 0xc6, 0xa3, 0x4c, 0x2c, 0x93, 0x7b, 0xe7, 0xbe, 0xed, + 0x2a, 0xcf, 0xca, 0x3f, 0x45, 0x0f, 0x3e, 0x51, 0x47, 0x85, 0x7f, 0xea, 0x37, 0xa1, 0xa1, 0x76, + 0x25, 0x49, 0x5e, 0xcf, 0x91, 0xd7, 0x25, 0xf9, 0x5b, 0xd0, 0xd8, 0xdc, 0x59, 0x75, 0x3c, 0x19, + 0xf4, 0x4c, 0xdb, 0x8a, 0x6a, 0x48, 0xe2, 0x9b, 0x7b, 0x1d, 0x72, 0x62, 0x12, 0x9f, 0x09, 0x85, + 0xcc, 0x1a, 0xaa, 0xa5, 0xff, 0x4d, 0x83, 0x86, 0x92, 0x78, 0xc2, 0x39, 0xde, 0x7b, 0xa9, 0x1c, + 0xef, 0xf5, 0x6a, 0x8a, 0x49, 0x24, 0x78, 0x9d, 0x4c, 0x82, 0x77, 0xa3, 0x22, 0x42, 0x3a, 0xbb, + 0xfb, 0x54, 0x83, 0x85, 0xb4, 0xd2, 0xf9, 0x1d, 0x86, 0xbb, 0x2b, 0xdb, 0x24, 0x4f, 0x87, 0xa9, + 0x41, 0xb2, 0x0b, 0x19, 0x31, 0x05, 0x57, 0x99, 0x5a, 0x41, 0xf9, 0x5e, 0x84, 0xcc, 0x76, 0xda, + 0xb2, 0xcc, 0xd7, 0xde, 0x74, 0xd9, 0x36, 0xed, 0x32, 0x6a, 0xbb, 0x3d, 0x23, 0x09, 0xa2, 0xff, + 0x42, 0x83, 0x39, 0x25, 0xc8, 0xc4, 0xb3, 0xa7, 0x77, 0xd3, 0xd9, 0xd3, 0xb5, 0x6a, 0x3b, 0x15, + 0xa5, 0x4e, 0x3f, 0x8e, 0x05, 0xe3, 0x76, 0xcf, 0xad, 0xe7, 0xd0, 0x0b, 0x58, 0x64, 0x3d, 0xfc, + 0x1b, 0x3d, 0x83, 0x45, 0x3b, 0x73, 0x34, 0xd4, 0xae, 0xac, 0x54, 0x9c, 0x2b, 0x62, 0x33, 0x72, + 0x40, 0xfa, 0x33, 0x58, 0xcc, 0x9d, 0xbb, 0x0d, 0x98, 0x3a, 0x64, 0xcc, 0x2f, 0xa8, 0x4b, 0x8c, + 0x38, 0x76, 0xc3, 0x89, 0x04, 0x80, 0xfe, 0x97, 0xe1, 0xb6, 0x77, 0xe5, 0xb5, 0x21, 0xf6, 0x18, + 0xda, 0x8b, 0x78, 0x0c, 0xf4, 0x0e, 0xd4, 0x99, 0x53, 0x35, 0x5f, 0x55, 0x20, 0xbb, 0x5b, 0x5d, + 0x83, 0x73, 0xa1, 0xfb, 0x30, 0xcd, 0xdd, 0x24, 0x37, 0xed, 0x7a, 0xf5, 0xc3, 0xc1, 0xd7, 0x66, + 0x48, 0x46, 0xfd, 0x19, 0xcc, 0xa7, 0x0c, 0x1e, 0x3d, 0x86, 0x97, 0x1c, 0x0f, 0x5b, 0xab, 0xd8, + 0xc1, 0xae, 0x49, 0xa2, 0x8a, 0xce, 0xb5, 0xa2, 0x44, 0x6a, 0x2b, 0x41, 0xa7, 0x8e, 0x4b, 0x8a, + 0x57, 0x5f, 0x05, 0x18, 0x4a, 0xcc, 0x13, 0x47, 0x6e, 0x04, 0xd2, 0x07, 0xce, 0x1a, 0xb2, 0x81, + 0x96, 0x01, 0x02, 0x62, 0x52, 0xc2, 0xc4, 0x21, 0x92, 0x39, 0x65, 0xa2, 0x47, 0xff, 0x42, 0x83, + 0xf9, 0xa7, 0x84, 0x7d, 0xe2, 0xd1, 0xa3, 0x1d, 0xf1, 0x26, 0x30, 0x61, 0xf7, 0xd2, 0x49, 0xb9, + 0x97, 0x5b, 0x23, 0x76, 0x30, 0x25, 0xc9, 0xd0, 0xc9, 0x70, 0x29, 0xaf, 0xa4, 0xc6, 0x1e, 0x0e, + 0x0f, 0xc2, 0x3a, 0x4c, 0xfb, 0x1e, 0x65, 0x91, 0xef, 0x1f, 0x6b, 0x0a, 0xe1, 0xea, 0x25, 0x3b, + 0xba, 0x0f, 0x35, 0xe6, 0x29, 0x43, 0x19, 0x0f, 0x84, 0x10, 0x6a, 0xd4, 0x98, 0xa7, 0xff, 0x5a, + 0x83, 0x56, 0x6a, 0x24, 0x79, 0x5e, 0x27, 0x25, 0x66, 0x07, 0xa6, 0x0e, 0xa8, 0xd7, 0x3f, 0xb3, + 0xa0, 0x82, 0x9b, 0x6f, 0xe8, 0xc5, 0xd4, 0xd8, 0xc4, 0x9d, 0xdd, 0x6a, 0xda, 0xd9, 0xdd, 0x18, + 0x47, 0xd0, 0xb8, 0x18, 0x5a, 0xcb, 0x48, 0xc9, 0x57, 0x80, 0xf6, 0x60, 0xce, 0xf7, 0xac, 0xee, + 0x04, 0x6a, 0xa2, 0x49, 0x1c, 0x84, 0xe1, 0x22, 0xbf, 0x77, 0x06, 0x3e, 0x36, 0x49, 0x77, 0x02, + 0x17, 0xa7, 0x3c, 0x1a, 0xba, 0x0f, 0x0d, 0xdb, 0x17, 0xc1, 0x5f, 0x05, 0xcb, 0x91, 0x21, 0x40, + 0xa6, 0x0a, 0x46, 0xc4, 0xa6, 0x87, 0xd9, 0x0d, 0xf1, 0x28, 0xe3, 0x49, 0xbc, 0x78, 0xac, 0x32, + 0x3d, 0x47, 0x85, 0x83, 0xb8, 0xcd, 0xcd, 0xc5, 0x7f, 0x91, 0xe0, 0x28, 0xb8, 0xf5, 0xdf, 0x64, + 0x15, 0x21, 0x9c, 0xf4, 0x39, 0x29, 0xe2, 0x03, 0x68, 0xa8, 0xe0, 0xa3, 0x6c, 0xe7, 0xbb, 0xe3, + 0xd8, 0x4e, 0xd2, 0x09, 0x47, 0x38, 0xe8, 0x29, 0xcc, 0x10, 0x89, 0x28, 0x3d, 0xf9, 0xdd, 0x71, + 0x10, 0x87, 0xbe, 0xc6, 0x50, 0x28, 0x3c, 0x37, 
0x91, 0x2f, 0xa8, 0xbb, 0x03, 0x9f, 0xf0, 0xab, + 0x0e, 0xf7, 0xb8, 0xc9, 0x2e, 0xfd, 0x4b, 0x0d, 0x2e, 0xee, 0xf0, 0x45, 0x99, 0x21, 0xb5, 0xd9, + 0xe0, 0x5c, 0x7c, 0xeb, 0xa3, 0x94, 0x6f, 0x7d, 0x73, 0xc4, 0x9a, 0x72, 0xd2, 0x24, 0xfc, 0xeb, + 0x97, 0x1a, 0xfc, 0x7f, 0x6e, 0x7c, 0xe2, 0x2e, 0x61, 0x3d, 0xed, 0x12, 0x6e, 0x8d, 0x2b, 0x70, + 0xe4, 0x16, 0x7e, 0x32, 0x5b, 0x20, 0xad, 0xb0, 0xc8, 0x65, 0x00, 0x9f, 0xda, 0xc7, 0xb6, 0x43, + 0x7a, 0xaa, 0x16, 0xdf, 0x34, 0x12, 0x3d, 0xb2, 0x9e, 0x7e, 0x80, 0x43, 0x87, 0x3d, 0xb0, 0xac, + 0x35, 0xec, 0xe3, 0x7d, 0xdb, 0xb1, 0x99, 0xad, 0xee, 0x3f, 0xb3, 0x46, 0xc9, 0x28, 0x7a, 0x1b, + 0x5a, 0x94, 0xfc, 0x28, 0xb4, 0x29, 0xb1, 0x3a, 0xd4, 0xf3, 0x53, 0x9c, 0x75, 0xc1, 0x59, 0x3a, + 0xce, 0xaf, 0xbb, 0x58, 0x3e, 0x0a, 0xa7, 0xd8, 0xa4, 0xcd, 0x14, 0x0d, 0xa1, 0x16, 0x34, 0x8e, + 0xc5, 0x43, 0x33, 0xbf, 0x14, 0x73, 0xaa, 0xa8, 0xc9, 0xed, 0x8e, 0x87, 0x75, 0x65, 0x9e, 0xa2, + 0x7e, 0xd1, 0x34, 0x92, 0x5d, 0xe8, 0x31, 0xcc, 0x1e, 0xaa, 0x3b, 0x0c, 0xbf, 0xd2, 0x56, 0x71, + 0xbd, 0xa9, 0x3b, 0x8f, 0x31, 0x64, 0xe7, 0x72, 0x88, 0xc6, 0x66, 0x47, 0xdc, 0x7a, 0x9b, 0x46, + 0xd4, 0x8c, 0x46, 0x36, 0x77, 0xd6, 0x44, 0x95, 0x42, 0x8d, 0x6c, 0xee, 0xac, 0xa1, 0x6d, 0x68, + 0x04, 0x64, 0xcb, 0x76, 0xc3, 0x93, 0x16, 0x54, 0x2a, 0x73, 0x77, 0x1f, 0x0a, 0xea, 0xcc, 0x6d, + 0xd5, 0x88, 0x50, 0xd0, 0x1e, 0xcc, 0xd2, 0xd0, 0x7d, 0x10, 0xec, 0x05, 0x84, 0xb6, 0xe6, 0x04, + 0xe4, 0x28, 0x7f, 0x60, 0x44, 0xf4, 0x59, 0xd0, 0x21, 0x12, 0xf2, 0x01, 0x05, 0xa1, 0xef, 0x3b, + 0x84, 0x5f, 0xa2, 0xb1, 0x23, 0x6e, 0xcc, 0x41, 0xeb, 0x25, 0x81, 0x7f, 0x7f, 0x94, 0xc8, 0x39, + 0xc6, 0xec, 0x44, 0x05, 0xd8, 0x7c, 0x67, 0x0e, 0x02, 0xf1, 0xdd, 0x9a, 0xaf, 0xb4, 0x33, 0xc5, + 0xf7, 0x78, 0x23, 0x42, 0xe1, 0xc6, 0x4c, 0x09, 0xb6, 0xb6, 0x5d, 0x67, 0x60, 0x78, 0x1e, 0x5b, + 0xb7, 0x1d, 0x12, 0x0c, 0x02, 0x46, 0xfa, 0xad, 0x05, 0xa1, 0x93, 0x92, 0x51, 0xf4, 0x08, 0xbe, + 0x1e, 0x99, 0x39, 0x37, 0xbe, 0x9d, 0xe8, 0x78, 0x3c, 0x0c, 0x4c, 0xec, 0xc8, 0x82, 0xd1, 0x05, + 0x01, 0x30, 0x8a, 0x8c, 0x1f, 0x0b, 0x5c, 0x06, 0xb1, 0x28, 0x20, 0x4a, 0xc7, 0xd1, 0x47, 0xb0, + 0x88, 0xd3, 0xff, 0x4a, 0x04, 0xad, 0x8b, 0x95, 0x6e, 0xef, 0x99, 0x5f, 0x2c, 0x8c, 0x1c, 0x0e, + 0xfa, 0x01, 0x20, 0x9c, 0xfd, 0x69, 0x23, 0x68, 0xa1, 0x4a, 0x5e, 0x27, 0xf7, 0xb7, 0x87, 0x51, + 0x80, 0x25, 0x9e, 0x5d, 0x54, 0x91, 0x68, 0xf2, 0x8f, 0xff, 0xe3, 0x3d, 0xbb, 0x0c, 0xc5, 0x78, + 0x81, 0x67, 0x97, 0x04, 0x48, 0xfa, 0x62, 0xfe, 0x27, 0x0d, 0x5e, 0x1e, 0x0e, 0xfe, 0x2f, 0xbc, + 0xff, 0x7f, 0xae, 0xc1, 0xc2, 0x70, 0x45, 0xff, 0xe9, 0x27, 0x92, 0xa1, 0x24, 0x51, 0x74, 0xfb, + 0x57, 0x4a, 0xbe, 0xff, 0xc2, 0x32, 0x7f, 0xe5, 0xe7, 0x7b, 0xfd, 0x77, 0x35, 0x58, 0xcc, 0x5a, + 0xde, 0xa9, 0x8b, 0xbd, 0x03, 0x97, 0x0e, 0x42, 0xc7, 0x19, 0x08, 0xd9, 0x13, 0x75, 0x69, 0x59, + 0x8f, 0x2b, 0x1c, 0x2b, 0x29, 0x89, 0xd7, 0x4b, 0x4b, 0xe2, 0xb9, 0xc2, 0xf0, 0x54, 0x51, 0x61, + 0xb8, 0xb0, 0xe4, 0x3d, 0x5d, 0x56, 0xf2, 0x3e, 0x4b, 0x01, 0xbb, 0xe0, 0xd4, 0xa5, 0x9e, 0xa1, + 0xbf, 0x06, 0x4b, 0x8a, 0x84, 0x89, 0x32, 0xb4, 0xcb, 0xa8, 0xe7, 0x38, 0x84, 0x76, 0xc2, 0x7e, + 0x7f, 0xa0, 0xdf, 0x80, 0x85, 0xf4, 0xab, 0x83, 0xdc, 0x57, 0xf9, 0xd0, 0xa1, 0x4a, 0x94, 0x71, + 0x5b, 0xa7, 0x70, 0xb9, 0xf8, 0xb9, 0x19, 0x7d, 0x0f, 0x16, 0xfa, 0xf8, 0x24, 0xf9, 0x24, 0xaf, + 0x9d, 0xf1, 0x26, 0x91, 0xc1, 0xd1, 0x7f, 0xaf, 0xc1, 0x95, 0x92, 0xa2, 0xf6, 0xf9, 0xcd, 0x2a, + 0x3c, 0x35, 0x3e, 0xe9, 0x86, 0xb4, 0x47, 0xce, 0x7c, 0x27, 0x8a, 0x11, 0x74, 0x17, 0x5a, 0x65, + 0xc9, 0xc6, 0xb9, 0xd4, 0xdb, 0x4f, 0xe0, 0x72, 0x71, 0xbe, 0x54, 0x38, 
0xdb, 0x63, 0x58, 0x50, + 0x59, 0x94, 0xa2, 0x52, 0x2b, 0xd6, 0x8b, 0xce, 0xb3, 0xc2, 0x8d, 0xb2, 0x8c, 0x0c, 0xa7, 0xfe, + 0x57, 0x0d, 0xa6, 0xbb, 0x26, 0x56, 0x3b, 0x38, 0xb9, 0x58, 0xf7, 0x6e, 0x2a, 0xd6, 0x8d, 0xfa, + 0x5d, 0x4a, 0x48, 0x90, 0x08, 0x73, 0xab, 0x99, 0x30, 0xf7, 0x7a, 0x25, 0xfe, 0x74, 0x84, 0xfb, + 0x36, 0xcc, 0xc6, 0xb0, 0xa7, 0x39, 0x1f, 0xfd, 0x1f, 0x1a, 0xcc, 0x25, 0x00, 0x4e, 0x75, 0x54, + 0xbb, 0x29, 0xaf, 0x5c, 0xe5, 0x0f, 0xca, 0x04, 0x72, 0x3b, 0xf2, 0xca, 0xf2, 0x85, 0x7a, 0xe8, + 0x9a, 0xaf, 0xc1, 0x02, 0x13, 0xbf, 0x20, 0xc6, 0x77, 0xee, 0xba, 0x50, 0x77, 0xa6, 0x77, 0xe9, + 0x1d, 0x98, 0x4f, 0x41, 0x8c, 0xf5, 0xcc, 0xfc, 0x09, 0x7c, 0x63, 0x64, 0x82, 0x7b, 0x1e, 0xc6, + 0xbd, 0xfa, 0xca, 0x1f, 0x9e, 0x2f, 0x6b, 0x7f, 0x7e, 0xbe, 0xac, 0xfd, 0xfd, 0xf9, 0xb2, 0xf6, + 0xcb, 0xaf, 0x96, 0xff, 0xef, 0xa3, 0x86, 0x22, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x08, + 0x49, 0xd4, 0xa8, 0x2d, 0x2d, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/extensions/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/extensions/v1beta1/register.go new file mode 100644 index 00000000..8f98d5d1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/extensions/v1beta1/register.go @@ -0,0 +1,19 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("extensions", "v1beta1", "daemonsets", true, &DaemonSet{}) + k8s.Register("extensions", "v1beta1", "deployments", true, &Deployment{}) + k8s.Register("extensions", "v1beta1", "ingresses", true, &Ingress{}) + k8s.Register("extensions", "v1beta1", "networkpolicies", true, &NetworkPolicy{}) + k8s.Register("extensions", "v1beta1", "podsecuritypolicies", false, &PodSecurityPolicy{}) + k8s.Register("extensions", "v1beta1", "replicasets", true, &ReplicaSet{}) + + k8s.RegisterList("extensions", "v1beta1", "daemonsets", true, &DaemonSetList{}) + k8s.RegisterList("extensions", "v1beta1", "deployments", true, &DeploymentList{}) + k8s.RegisterList("extensions", "v1beta1", "ingresses", true, &IngressList{}) + k8s.RegisterList("extensions", "v1beta1", "networkpolicies", true, &NetworkPolicyList{}) + k8s.RegisterList("extensions", "v1beta1", "podsecuritypolicies", false, &PodSecurityPolicyList{}) + k8s.RegisterList("extensions", "v1beta1", "replicasets", true, &ReplicaSetList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/imagepolicy/v1alpha1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/imagepolicy/v1alpha1/generated.pb.go index b05c1f85..3e75f4c6 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/imagepolicy/v1alpha1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/imagepolicy/v1alpha1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/imagepolicy/v1alpha1/generated.proto /* Package v1alpha1 is a generated protocol buffer package. 
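The new register.go above wires each extensions/v1beta1 type (and its list counterpart) into the ericchiang/k8s client registry, recording its API group, version, resource name, and whether it is namespaced. A minimal sketch of what that enables, assuming the registry-based Client.List signature used by this version of the library; cluster credentials and error handling are reduced to the essentials:

package main

import (
	"context"
	"fmt"

	"github.com/ericchiang/k8s"
	extensionsv1beta1 "github.com/ericchiang/k8s/apis/extensions/v1beta1"
)

func main() {
	// NewInClusterClient reads the service-account credentials mounted into a
	// pod; outside a cluster a kubeconfig-based client would be needed instead.
	client, err := k8s.NewInClusterClient()
	if err != nil {
		panic(err)
	}

	var daemonSets extensionsv1beta1.DaemonSetList
	// Because init() registered "daemonsets" as a namespaced extensions/v1beta1
	// resource, the client can derive the request path from the type alone.
	if err := client.List(context.Background(), "kube-system", &daemonSets); err != nil {
		panic(err)
	}
	for _, ds := range daemonSets.Items {
		fmt.Println(ds.GetMetadata().GetName())
	}
}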
It is generated from these files: - k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/generated.proto + k8s.io/api/imagepolicy/v1alpha1/generated.proto It has these top-level messages: ImageReview @@ -19,11 +18,10 @@ package v1alpha1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -41,7 +39,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // ImageReview checks if the set of images in a pod are allowed. type ImageReview struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Spec holds information about the pod being evaluated Spec *ImageReviewSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Status is filled in by the backend and indicates whether the pod should be allowed. @@ -55,7 +53,7 @@ func (m *ImageReview) String() string { return proto.CompactTextStrin func (*ImageReview) ProtoMessage() {} func (*ImageReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *ImageReview) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ImageReview) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -172,10 +170,10 @@ func (m *ImageReviewStatus) GetReason() string { } func init() { - proto.RegisterType((*ImageReview)(nil), "github.com/ericchiang.k8s.apis.imagepolicy.v1alpha1.ImageReview") - proto.RegisterType((*ImageReviewContainerSpec)(nil), "github.com/ericchiang.k8s.apis.imagepolicy.v1alpha1.ImageReviewContainerSpec") - proto.RegisterType((*ImageReviewSpec)(nil), "github.com/ericchiang.k8s.apis.imagepolicy.v1alpha1.ImageReviewSpec") - proto.RegisterType((*ImageReviewStatus)(nil), "github.com/ericchiang.k8s.apis.imagepolicy.v1alpha1.ImageReviewStatus") + proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview") + proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec") + proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec") + proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus") } func (m *ImageReview) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -348,24 +346,6 @@ func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, 
offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -521,7 +501,7 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -782,51 +762,14 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -836,41 +779,80 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -1132,38 +1114,37 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/imagepolicy/v1alpha1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 455 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x92, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xb1, 0x03, 0x25, 0xd9, 0x1c, 0x28, 0x2b, 0x84, 0xac, 0x08, 0x45, 0x55, 0x4e, 0x3d, - 0xc0, 0x9a, 0x44, 0x1c, 0x2a, 0x0e, 0xfc, 0x29, 0xea, 0xa1, 0x48, 0x08, 0xb1, 0x70, 0xea, 0x6d, - 0xea, 0x8c, 0xd2, 0xc5, 0xf6, 0xee, 0xca, 0x3b, 0x76, 0x95, 0x67, 0xe0, 0x05, 0x78, 0x24, 0x6e, - 0xf0, 0x08, 0x28, 0xbc, 0x08, 0xf2, 0x26, 0x21, 0xa6, 0x0e, 0x91, 0x50, 0x6e, 0x1e, 0x7b, 0xbe, - 0xdf, 0x37, 0xf3, 0x8d, 0xd9, 0xcb, 0xf4, 0xc4, 0x09, 0x65, 0xe2, 0xb4, 0xbc, 0xc4, 0x42, 0x23, - 0xa1, 0x8b, 0x6d, 0x3a, 0x8b, 0xc1, 0x2a, 0x17, 0xab, 0x1c, 0x66, 0x68, 0x4d, 0xa6, 0x92, 0x79, - 0x5c, 0x8d, 0x21, 0xb3, 0x57, 0x30, 0x8e, 0x67, 0xa8, 0xb1, 0x00, 0xc2, 0xa9, 0xb0, 0x85, 0x21, - 0xc3, 0xe3, 0x25, 0x40, 0x6c, 0x00, 0xc2, 0xa6, 0x33, 0x51, 0x03, 0x44, 0x03, 0x20, 0xd6, 0x80, - 0xc1, 0x64, 0x87, 0x63, 0x8e, 0x04, 0x71, 0xd5, 0x32, 0x19, 0x3c, 0xd9, 0xae, 0x29, 0x4a, 0x4d, - 0x2a, 0xc7, 0x56, 0xfb, 0xb3, 0xdd, 0xed, 0x2e, 0xb9, 0xc2, 0x1c, 0x5a, 0xaa, 0xf1, 0x76, 0x55, - 0x49, 0x2a, 0x8b, 0x95, 0x26, 0x47, 0x45, 0x4b, 0xf2, 0xf8, 0x9f, 0xbb, 0x6c, 0xd9, 0x62, 0xf4, - 0x25, 0x64, 0xfd, 0xf3, 0x3a, 0x12, 0x89, 0x95, 0xc2, 0x6b, 0xfe, 0x96, 0x75, 0xeb, 0x85, 0xa7, - 0x40, 0x10, 0x05, 0x47, 0xc1, 0x71, 0x7f, 0x22, 0xc4, 0x8e, 0x34, 0xeb, 0x5e, 0x51, 0x8d, 0xc5, - 0xfb, 0xcb, 0xcf, 0x98, 0xd0, 0x3b, 0x24, 0x90, 0x7f, 0xf4, 0xfc, 0x13, 0xbb, 0xed, 0x2c, 0x26, - 0x51, 0xe8, 0x39, 0xaf, 0xc4, 0x7f, 0x5e, 0x45, 0x34, 0xe6, 0xfa, 0x68, 0x31, 0x91, 0x9e, 0xc6, - 0x2f, 0xd8, 0x81, 0x23, 0xa0, 0xd2, 0x45, 0x1d, 0xcf, 0x3d, 0xdd, 0x8b, 0xeb, 0x49, 0x72, 0x45, - 0x1c, 0x3d, 0x65, 0x51, 0xe3, 0xe3, 0x1b, 0xa3, 0x09, 0x94, 0xc6, 0xa2, 0x76, 0xe7, 0x0f, 0xd8, - 0x1d, 0x4f, 0xf3, 0xb1, 0xf4, 0xe4, 0xb2, 0x18, 0x7d, 0x0f, 0xd9, 0xbd, 0x1b, 0x73, 0x72, 0xc5, - 0x58, 0xb2, 0x96, 0xba, 0x28, 0x38, 0xea, 0x1c, 0xf7, 0x27, 0xe7, 0xfb, 0x4c, 0xf9, 0xd7, 0x20, - 0xb2, 0x01, 0xe7, 0x8e, 0xf5, 0x41, 0x6b, 0x43, 0x40, 0xca, 0x68, 0x17, 0x85, 0xde, 0xeb, 0xc3, - 0xbe, 0x49, 0x8b, 0xd7, 0x1b, 0xe6, 0x99, 0xa6, 0x62, 0x2e, 0x9b, 0x2e, 0xfc, 0x11, 0xeb, 0x69, - 0xc8, 0xd1, 0x59, 0x48, 0xd0, 0x1f, 0xa1, 0x27, 0x37, 0x2f, 0x06, 0x2f, 0xd8, 0xe1, 0x4d, 0x39, - 0x3f, 0x64, 0x9d, 0x14, 0xe7, 0xab, 0xe4, 0xea, 0xc7, 0x3a, 0xcd, 0x0a, 0xb2, 0x12, 0xfd, 0xcf, - 0xd1, 0x93, 0xcb, 0xe2, 0x79, 0x78, 0x12, 0x8c, 0xce, 0xd8, 0xfd, 0xd6, 0x81, 0x78, 0xc4, 0xee, - 0x42, 0x96, 0x99, 
0x6b, 0x9c, 0x7a, 0x48, 0x57, 0xae, 0x4b, 0xfe, 0x90, 0x1d, 0x14, 0x08, 0xce, - 0xe8, 0x15, 0x69, 0x55, 0x9d, 0x0e, 0xbe, 0x2d, 0x86, 0xc1, 0x8f, 0xc5, 0x30, 0xf8, 0xb9, 0x18, - 0x06, 0x5f, 0x7f, 0x0d, 0x6f, 0x5d, 0x74, 0xd7, 0xeb, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x9d, - 0xd8, 0x94, 0x73, 0x61, 0x04, 0x00, 0x00, + // 446 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x86, 0x49, 0x0b, 0x4b, 0xeb, 0x1e, 0x58, 0x2c, 0x84, 0xa2, 0x0a, 0x95, 0x55, 0x4f, 0x7b, + 0xb2, 0xb7, 0x05, 0xa1, 0x85, 0x03, 0xd2, 0x02, 0x7b, 0x00, 0x81, 0x90, 0xcc, 0x09, 0x6e, 0x83, + 0x3b, 0x6a, 0x4d, 0x13, 0xdb, 0x8a, 0xdd, 0xac, 0xfa, 0x26, 0xbc, 0x05, 0xaf, 0xc1, 0x91, 0x47, + 0x40, 0xe5, 0xcc, 0x3b, 0x20, 0xbb, 0xed, 0x26, 0x34, 0x54, 0xa8, 0xb7, 0xcc, 0xc4, 0xdf, 0xe7, + 0x99, 0xdf, 0x84, 0xcf, 0xcf, 0x1d, 0x53, 0x86, 0x83, 0x55, 0x5c, 0xe5, 0x30, 0x45, 0x6b, 0x32, + 0x25, 0x97, 0xbc, 0x1c, 0x41, 0x66, 0x67, 0x30, 0xe2, 0x53, 0xd4, 0x58, 0x80, 0xc7, 0x09, 0xb3, + 0x85, 0xf1, 0x86, 0x3e, 0x5c, 0x03, 0x0c, 0xac, 0x62, 0x35, 0x80, 0x6d, 0x81, 0xfe, 0xe3, 0xca, + 0x98, 0x83, 0x9c, 0x29, 0x8d, 0xc5, 0x92, 0xdb, 0xf9, 0x34, 0x34, 0x1c, 0xcf, 0xd1, 0x03, 0x2f, + 0x1b, 0xda, 0x3e, 0xdf, 0x47, 0x15, 0x0b, 0xed, 0x55, 0x8e, 0x0d, 0xe0, 0xc9, 0xff, 0x00, 0x27, + 0x67, 0x98, 0x43, 0x83, 0x7b, 0xb4, 0x8f, 0x5b, 0x78, 0x95, 0x71, 0xa5, 0xbd, 0xf3, 0xc5, 0x2e, + 0x34, 0xfc, 0x9d, 0x90, 0xde, 0xeb, 0xb0, 0xac, 0xc0, 0x52, 0xe1, 0x15, 0x7d, 0x4b, 0x3a, 0x61, + 0x91, 0x09, 0x78, 0x48, 0x93, 0x93, 0xe4, 0xb4, 0x37, 0x3e, 0x63, 0x55, 0x2e, 0xd7, 0x5e, 0x66, + 0xe7, 0xd3, 0xd0, 0x70, 0x2c, 0x9c, 0x66, 0xe5, 0x88, 0xbd, 0xff, 0xfc, 0x05, 0xa5, 0x7f, 0x87, + 0x1e, 0xc4, 0xb5, 0x81, 0xbe, 0x22, 0x37, 0x9d, 0x45, 0x99, 0xb6, 0x1a, 0xa6, 0x7f, 0x26, 0xcc, + 0x6a, 0x93, 0x7c, 0xb0, 0x28, 0x45, 0xa4, 0xe9, 0x1b, 0x72, 0xe4, 0x3c, 0xf8, 0x85, 0x4b, 0xdb, + 0xd1, 0x33, 0x3e, 0xc8, 0x13, 0x49, 0xb1, 0x31, 0x0c, 0xcf, 0x48, 0x5a, 0xfb, 0xf9, 0xd2, 0x68, + 0x0f, 0x61, 0xa1, 0x70, 0x1b, 0xbd, 0x47, 0x6e, 0x45, 0x5b, 0x5c, 0xbc, 0x2b, 0xd6, 0xc5, 0xf0, + 0x5b, 0x8b, 0xdc, 0xd9, 0x99, 0x8b, 0x7e, 0x24, 0x44, 0x6e, 0x51, 0x97, 0x26, 0x27, 0xed, 0xd3, + 0xde, 0xf8, 0xe9, 0x21, 0x53, 0xfd, 0x75, 0xb1, 0xa8, 0xc9, 0xa8, 0x24, 0x3d, 0xd0, 0xda, 0x78, + 0xf0, 0xca, 0x68, 0x97, 0xb6, 0xa2, 0xfb, 0xe2, 0xd0, 0xe4, 0xd8, 0x45, 0xe5, 0xb8, 0xd4, 0xbe, + 0x58, 0x8a, 0xba, 0x95, 0x3e, 0x20, 0x5d, 0x0d, 0x39, 0x3a, 0x0b, 0x12, 0x63, 0xa8, 0x5d, 0x51, + 0x35, 0xfa, 0xcf, 0xc9, 0xf1, 0x2e, 0x4e, 0x8f, 0x49, 0x7b, 0x8e, 0xcb, 0x4d, 0x32, 0xe1, 0x33, + 0xa4, 0x55, 0x42, 0xb6, 0xc0, 0xf8, 0xb8, 0x5d, 0xb1, 0x2e, 0x9e, 0xb5, 0xce, 0x93, 0xe1, 0x25, + 0xb9, 0xdb, 0x78, 0x00, 0x9a, 0x92, 0xdb, 0x90, 0x65, 0xe6, 0x0a, 0x27, 0x51, 0xd2, 0x11, 0xdb, + 0x92, 0xde, 0x27, 0x47, 0x05, 0x82, 0x33, 0x7a, 0x63, 0xda, 0x54, 0x2f, 0xfa, 0xdf, 0x57, 0x83, + 0xe4, 0xc7, 0x6a, 0x90, 0xfc, 0x5c, 0x0d, 0x92, 0xaf, 0xbf, 0x06, 0x37, 0x3e, 0x75, 0xb6, 0xeb, + 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x40, 0x56, 0x67, 0xe0, 0xdd, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go index 80b896bf..f13ba207 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go @@ -1,12 +1,11 @@ 
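Before the meta/v1 changes begin: the regenerated Unmarshal code for ImageReviewSpec in the file above replaces the old key-then-value reader for the Annotations map with a loop that treats each map entry as a small nested message, where field 1 is the key, field 2 is the value, either may be absent, and unknown fields are skipped. A rough hand-rolled sketch of that entry format (illustrative only, not the generated code), decoding a single entry payload:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeStringMapEntry parses the payload of one protobuf map<string,string>
// entry: field 1 (wire type 2) carries the key, field 2 (wire type 2) the
// value. Fields may arrive in either order and either may be missing, which
// is exactly what the rewritten generated loop now tolerates. This simplified
// sketch only accepts length-delimited fields.
func decodeStringMapEntry(b []byte) (key, value string, err error) {
	for len(b) > 0 {
		tag, n := binary.Uvarint(b) // protobuf varints are unsigned LEB128
		if n <= 0 {
			return "", "", errors.New("bad tag varint")
		}
		b = b[n:]
		fieldNum, wireType := tag>>3, tag&0x7
		if wireType != 2 {
			return "", "", fmt.Errorf("unexpected wire type %d", wireType)
		}
		l, n2 := binary.Uvarint(b)
		if n2 <= 0 || uint64(len(b))-uint64(n2) < l {
			return "", "", errors.New("bad length")
		}
		s := string(b[n2 : n2+int(l)])
		b = b[n2+int(l):]
		switch fieldNum {
		case 1:
			key = s
		case 2:
			value = s
		default:
			// unknown length-delimited field: already consumed, just ignored
		}
	}
	return key, value, nil
}

func main() {
	// 0x0a = field 1, wire type 2; 0x12 = field 2, wire type 2.
	entry := []byte{0x0a, 0x03, 'f', 'o', 'o', 0x12, 0x03, 'b', 'a', 'r'}
	k, v, _ := decodeStringMapEntry(entry)
	fmt.Printf("%s=%s\n", k, v) // foo=bar
}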
-// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/meta/v1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/meta/v1/generated.proto + k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto It has these top-level messages: APIGroup @@ -24,12 +23,17 @@ GroupVersionForDiscovery GroupVersionKind GroupVersionResource + Initializer + Initializers LabelSelector LabelSelectorRequirement + List ListMeta ListOptions + MicroTime ObjectMeta OwnerReference + Patch Preconditions RootPaths ServerAddressByClientCIDR @@ -47,7 +51,7 @@ package v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_runtime "github.com/ericchiang/k8s/runtime" +import k8s_io_apimachinery_pkg_runtime "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" @@ -141,17 +145,29 @@ func (m *APIGroupList) GetGroups() []*APIGroup { // APIResource specifies the name of a resource and whether it is namespaced. type APIResource struct { - // name is the name of the resource. + // name is the plural name of the resource. Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + SingularName *string `protobuf:"bytes,6,opt,name=singularName" json:"singularName,omitempty"` // namespaced indicates if a resource is namespaced or not. Namespaced *bool `protobuf:"varint,2,opt,name=namespaced" json:"namespaced,omitempty"` + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + Group *string `protobuf:"bytes,8,opt,name=group" json:"group,omitempty"` + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + Version *string `protobuf:"bytes,9,opt,name=version" json:"version,omitempty"` // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` // verbs is a list of supported kube verbs (this includes get, list, watch, create, // update, patch, delete, deletecollection, and proxy) Verbs *Verbs `protobuf:"bytes,4,opt,name=verbs" json:"verbs,omitempty"` // shortNames is a list of suggested short names of the resource. - ShortNames []string `protobuf:"bytes,5,rep,name=shortNames" json:"shortNames,omitempty"` + ShortNames []string `protobuf:"bytes,5,rep,name=shortNames" json:"shortNames,omitempty"` + // categories is a list of the grouped resources this resource belongs to (e.g. 
'all') + Categories []string `protobuf:"bytes,7,rep,name=categories" json:"categories,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -167,6 +183,13 @@ func (m *APIResource) GetName() string { return "" } +func (m *APIResource) GetSingularName() string { + if m != nil && m.SingularName != nil { + return *m.SingularName + } + return "" +} + func (m *APIResource) GetNamespaced() bool { if m != nil && m.Namespaced != nil { return *m.Namespaced @@ -174,6 +197,20 @@ func (m *APIResource) GetNamespaced() bool { return false } +func (m *APIResource) GetGroup() string { + if m != nil && m.Group != nil { + return *m.Group + } + return "" +} + +func (m *APIResource) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + func (m *APIResource) GetKind() string { if m != nil && m.Kind != nil { return *m.Kind @@ -195,6 +232,13 @@ func (m *APIResource) GetShortNames() []string { return nil } +func (m *APIResource) GetCategories() []string { + if m != nil { + return m.Categories + } + return nil +} + // APIResourceList is a list of APIResource, it is used to expose the name of the // resources supported in a specific group and version, and if the resource // is namespaced. @@ -229,6 +273,7 @@ func (m *APIResourceList) GetResources() []*APIResource { // discover the API at /api, which is the root path of the legacy v1 API. // // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type APIVersions struct { // versions are the api versions that are available. Versions []string `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` @@ -284,6 +329,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *string `protobuf:"bytes,4,opt,name=propagationPolicy" json:"propagationPolicy,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -376,8 +425,11 @@ type GetOptions struct { // - if unset, then the result is returned from remote storage based on quorum-read flag; // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. - ResourceVersion *string `protobuf:"bytes,1,opt,name=resourceVersion" json:"resourceVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` + ResourceVersion *string `protobuf:"bytes,1,opt,name=resourceVersion" json:"resourceVersion,omitempty"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized *bool `protobuf:"varint,2,opt,name=includeUninitialized" json:"includeUninitialized,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *GetOptions) Reset() { *m = GetOptions{} } @@ -392,6 +444,13 @@ func (m *GetOptions) GetResourceVersion() string { return "" } +func (m *GetOptions) GetIncludeUninitialized() bool { + if m != nil && m.IncludeUninitialized != nil { + return *m.IncludeUninitialized + } + return false +} + // GroupKind specifies a Group and a Kind, but does not force a version. 
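APIResource picks up several discovery-oriented fields in this update (SingularName, Group, Version, Categories) next to the existing Name, Namespaced, Kind, Verbs, and ShortNames. A small sketch that walks an APIResourceList using only the getters generated in this file; how the list is obtained from the discovery endpoint is left out:

package sketch

import (
	"fmt"

	metav1 "github.com/ericchiang/k8s/apis/meta/v1"
)

// printResources summarizes a discovery response using the generated getters.
func printResources(list *metav1.APIResourceList) {
	for _, r := range list.GetResources() {
		scope := "cluster-scoped"
		if r.GetNamespaced() {
			scope = "namespaced"
		}
		fmt.Printf("%s/%s %s (kind %s, singular %q, %s) categories=%v\n",
			r.GetGroup(), r.GetVersion(), r.GetName(), r.GetKind(),
			r.GetSingularName(), scope, r.GetCategories())
	}
}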
This is useful for identifying // concepts during lookup stages without having partially valid types // @@ -584,6 +643,59 @@ func (m *GroupVersionResource) GetResource() string { return "" } +// Initializer is information about an initializer that has not yet completed. +type Initializer struct { + // name of the process that is responsible for initializing this object. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Initializer) Reset() { *m = Initializer{} } +func (m *Initializer) String() string { return proto.CompactTextString(m) } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *Initializer) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// Initializers tracks the progress of initialization. +type Initializers struct { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + // +patchMergeKey=name + // +patchStrategy=merge + Pending []*Initializer `protobuf:"bytes,1,rep,name=pending" json:"pending,omitempty"` + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + Result *Status `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Initializers) Reset() { *m = Initializers{} } +func (m *Initializers) String() string { return proto.CompactTextString(m) } +func (*Initializers) ProtoMessage() {} +func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *Initializers) GetPending() []*Initializer { + if m != nil { + return m.Pending + } + return nil +} + +func (m *Initializers) GetResult() *Status { + if m != nil { + return m.Result + } + return nil +} + // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. @@ -602,7 +714,7 @@ type LabelSelector struct { func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (m *LabelSelector) String() string { return proto.CompactTextString(m) } func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *LabelSelector) GetMatchLabels() map[string]string { if m != nil { @@ -622,9 +734,11 @@ func (m *LabelSelector) GetMatchExpressions() []*LabelSelectorRequirement { // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. + // Valid operators are In, NotIn, Exists and DoesNotExist. Operator *string `protobuf:"bytes,2,opt,name=operator" json:"operator,omitempty"` // values is an array of string values. 
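LabelSelectorRequirement keeps its Key, Operator, and Values shape, and the comment fix above clarifies that the valid operators are In, NotIn, Exists, and DoesNotExist. A sketch of building a selector that combines matchLabels with one matchExpressions clause, assuming the generated field names implied by the getters in this file; the strPtr helper is local to the example:

package sketch

import metav1 "github.com/ericchiang/k8s/apis/meta/v1"

func strPtr(s string) *string { return &s }

// beatsSelector matches objects labeled app=filebeat whose "tier" label is
// one of the listed values. Both clauses are ANDed, as documented above.
func beatsSelector() *metav1.LabelSelector {
	return &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "filebeat"},
		MatchExpressions: []*metav1.LabelSelectorRequirement{{
			Key:      strPtr("tier"),
			Operator: strPtr("In"),
			Values:   []string{"frontend", "backend"},
		}},
	}
}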
If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, @@ -639,7 +753,7 @@ func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequireme func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{16} + return fileDescriptorGenerated, []int{18} } func (m *LabelSelectorRequirement) GetKey() string { @@ -663,10 +777,40 @@ func (m *LabelSelectorRequirement) GetValues() []string { return nil } +// List holds a list of objects, which may not be known by the server. +type List struct { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + Metadata *ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // List of objects + Items []*k8s_io_apimachinery_pkg_runtime.RawExtension `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *List) Reset() { *m = List{} } +func (m *List) String() string { return proto.CompactTextString(m) } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *List) GetMetadata() *ListMeta { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *List) GetItems() []*k8s_io_apimachinery_pkg_runtime.RawExtension { + if m != nil { + return m.Items + } + return nil +} + // ListMeta describes metadata that synthetic resources must have, including lists and // various status objects. A resource may have only one of {ObjectMeta, ListMeta}. type ListMeta struct { - // SelfLink is a URL representing this object. + // selfLink is a URL representing this object. // Populated by the system. // Read-only. // +optional @@ -676,16 +820,23 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional - ResourceVersion *string `protobuf:"bytes,2,opt,name=resourceVersion" json:"resourceVersion,omitempty"` + ResourceVersion *string `protobuf:"bytes,2,opt,name=resourceVersion" json:"resourceVersion,omitempty"` + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // list may not be possible if the server configuration has changed or more than a few minutes have + // passed. The resourceVersion field returned when using this continue value will be identical to + // the value in the first response. 
+ Continue *string `protobuf:"bytes,3,opt,name=continue" json:"continue,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *ListMeta) Reset() { *m = ListMeta{} } func (m *ListMeta) String() string { return proto.CompactTextString(m) } func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ListMeta) GetSelfLink() string { if m != nil && m.SelfLink != nil { @@ -701,6 +852,13 @@ func (m *ListMeta) GetResourceVersion() string { return "" } +func (m *ListMeta) GetContinue() string { + if m != nil && m.Continue != nil { + return *m.Continue + } + return "" +} + // ListOptions is the query options to a standard REST list call. type ListOptions struct { // A selector to restrict the list of returned objects by their labels. @@ -711,6 +869,9 @@ type ListOptions struct { // Defaults to everything. // +optional FieldSelector *string `protobuf:"bytes,2,opt,name=fieldSelector" json:"fieldSelector,omitempty"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized *bool `protobuf:"varint,6,opt,name=includeUninitialized" json:"includeUninitialized,omitempty"` // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional @@ -725,14 +886,40 @@ type ListOptions struct { ResourceVersion *string `protobuf:"bytes,4,opt,name=resourceVersion" json:"resourceVersion,omitempty"` // Timeout for the list/watch call. // +optional - TimeoutSeconds *int64 `protobuf:"varint,5,opt,name=timeoutSeconds" json:"timeoutSeconds,omitempty"` - XXX_unrecognized []byte `json:"-"` + TimeoutSeconds *int64 `protobuf:"varint,5,opt,name=timeoutSeconds" json:"timeoutSeconds,omitempty"` + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + Limit *int64 `protobuf:"varint,7,opt,name=limit" json:"limit,omitempty"` + // The continue option should be set when retrieving more results from the server. 
Since this value + // is server defined, clients may only use the continue value from a previous query result with + // identical query parameters (except for the value of continue) and the server may reject a continue + // value it does not recognize. If the specified continue value is no longer valid whether due to + // expiration (generally five to fifteen minutes) or a configuration change on the server the server + // will respond with a 410 ResourceExpired error indicating the client must restart their list without + // the continue field. This field is not supported when watch is true. Clients may start a watch from + // the last resourceVersion value returned by the server and not miss any modifications. + Continue *string `protobuf:"bytes,8,opt,name=continue" json:"continue,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ListOptions) Reset() { *m = ListOptions{} } func (m *ListOptions) String() string { return proto.CompactTextString(m) } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ListOptions) GetLabelSelector() string { if m != nil && m.LabelSelector != nil { @@ -748,6 +935,13 @@ func (m *ListOptions) GetFieldSelector() string { return "" } +func (m *ListOptions) GetIncludeUninitialized() bool { + if m != nil && m.IncludeUninitialized != nil { + return *m.IncludeUninitialized + } + return false +} + func (m *ListOptions) GetWatch() bool { if m != nil && m.Watch != nil { return *m.Watch @@ -769,6 +963,57 @@ func (m *ListOptions) GetTimeoutSeconds() int64 { return 0 } +func (m *ListOptions) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *ListOptions) GetContinue() string { + if m != nil && m.Continue != nil { + return *m.Continue + } + return "" +} + +// MicroTime is version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type MicroTime struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + Nanos *int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (m *MicroTime) String() string { return proto.CompactTextString(m) } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *MicroTime) GetSeconds() int64 { + if m != nil && m.Seconds != nil { + return *m.Seconds + } + return 0 +} + +func (m *MicroTime) GetNanos() int32 { + if m != nil && m.Nanos != nil { + return *m.Nanos + } + return 0 +} + // ObjectMeta is metadata that all persisted resources must have, which includes all objects // users must create. 
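ListMeta.Continue together with ListOptions.Limit and ListOptions.Continue implements the chunked-list contract described above: request at most limit items, then pass the opaque continue token back until the server stops returning one. A minimal sketch of that loop; the fetch callback is a hypothetical stand-in for however the caller actually issues the list request:

package sketch

import metav1 "github.com/ericchiang/k8s/apis/meta/v1"

// listInChunks keeps requesting pages of at most `limit` items until the
// server stops returning a continue token. fetch performs the real list call
// and returns the ListMeta of each response.
func listInChunks(limit int64, fetch func(*metav1.ListOptions) (*metav1.ListMeta, error)) error {
	opts := &metav1.ListOptions{Limit: &limit}
	for {
		meta, err := fetch(opts)
		if err != nil {
			return err
		}
		token := meta.GetContinue()
		if token == "" {
			return nil // no more chunks
		}
		opts.Continue = &token
	}
}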
type ObjectMeta struct { @@ -794,7 +1039,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional GenerateName *string `protobuf:"bytes,2,opt,name=generateName" json:"generateName,omitempty"` // Namespace defines the space within each name must be unique. An empty namespace is @@ -830,7 +1075,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion *string `protobuf:"bytes,6,opt,name=resourceVersion" json:"resourceVersion,omitempty"` // A sequence number representing a specific generation of the desired state. @@ -844,26 +1089,27 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional CreationTimestamp *Time `protobuf:"bytes,8,opt,name=creationTimestamp" json:"creationTimestamp,omitempty"` // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. 
// // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional DeletionTimestamp *Time `protobuf:"bytes,9,opt,name=deletionTimestamp" json:"deletionTimestamp,omitempty"` // Number of seconds allowed for this object to gracefully terminate before @@ -889,12 +1135,25 @@ type ObjectMeta struct { // then an entry in this list will point to this controller, with the controller field set to true. // There cannot be more than one managing controller. // +optional + // +patchMergeKey=uid + // +patchStrategy=merge OwnerReferences []*OwnerReference `protobuf:"bytes,13,rep,name=ownerReferences" json:"ownerReferences,omitempty"` + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *Initializers `protobuf:"bytes,16,opt,name=initializers" json:"initializers,omitempty"` // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. // +optional + // +patchStrategy=merge Finalizers []string `protobuf:"bytes,14,rep,name=finalizers" json:"finalizers,omitempty"` // The name of the cluster which the object belongs to. // This is used to distinguish resources with same name and namespace in different clusters. @@ -907,7 +1166,7 @@ type ObjectMeta struct { func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (m *ObjectMeta) String() string { return proto.CompactTextString(m) } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ObjectMeta) GetName() string { if m != nil && m.Name != nil { @@ -1000,6 +1259,13 @@ func (m *ObjectMeta) GetOwnerReferences() []*OwnerReference { return nil } +func (m *ObjectMeta) GetInitializers() *Initializers { + if m != nil { + return m.Initializers + } + return nil +} + func (m *ObjectMeta) GetFinalizers() []string { if m != nil { return m.Finalizers @@ -1021,7 +1287,7 @@ type OwnerReference struct { // API version of the referent. ApiVersion *string `protobuf:"bytes,5,opt,name=apiVersion" json:"apiVersion,omitempty"` // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` // Name of the referent. 
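ObjectMeta now carries an Initializers field alongside OwnerReferences and Finalizers, and an object with pending initializers stays hidden from ordinary get/list/watch calls unless includeUninitialized is set. A tiny check for that state, using only getters generated in this file:

package sketch

import metav1 "github.com/ericchiang/k8s/apis/meta/v1"

// isInitialized reports whether an object has no pending initializers left,
// i.e. it is visible to clients that did not ask for uninitialized objects.
func isInitialized(m *metav1.ObjectMeta) bool {
	init := m.GetInitializers()
	return init == nil || len(init.GetPending()) == 0
}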
// More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -1046,7 +1312,7 @@ type OwnerReference struct { func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (m *OwnerReference) String() string { return proto.CompactTextString(m) } func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *OwnerReference) GetApiVersion() string { if m != nil && m.ApiVersion != nil { @@ -1090,6 +1356,16 @@ func (m *OwnerReference) GetBlockOwnerDeletion() bool { return false } +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +type Patch struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Patch) Reset() { *m = Patch{} } +func (m *Patch) String() string { return proto.CompactTextString(m) } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. type Preconditions struct { // Specifies the target UID. @@ -1101,7 +1377,7 @@ type Preconditions struct { func (m *Preconditions) Reset() { *m = Preconditions{} } func (m *Preconditions) String() string { return proto.CompactTextString(m) } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *Preconditions) GetUid() string { if m != nil && m.Uid != nil { @@ -1121,7 +1397,7 @@ type RootPaths struct { func (m *RootPaths) Reset() { *m = RootPaths{} } func (m *RootPaths) String() string { return proto.CompactTextString(m) } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *RootPaths) GetPaths() []string { if m != nil { @@ -1144,7 +1420,7 @@ func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClient func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{23} + return fileDescriptorGenerated, []int{28} } func (m *ServerAddressByClientCIDR) GetClientCIDR() string { @@ -1164,12 +1440,12 @@ func (m *ServerAddressByClientCIDR) GetServerAddress() string { // Status is a return value for calls that don't return other objects. type Status struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Metadata *ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Status of the operation. // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` // A human-readable description of the status of this operation. 
@@ -1196,7 +1472,7 @@ type Status struct { func (m *Status) Reset() { *m = Status{} } func (m *Status) String() string { return proto.CompactTextString(m) } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *Status) GetMetadata() *ListMeta { if m != nil { @@ -1268,7 +1544,7 @@ type StatusCause struct { func (m *StatusCause) Reset() { *m = StatusCause{} } func (m *StatusCause) String() string { return proto.CompactTextString(m) } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *StatusCause) GetReason() string { if m != nil && m.Reason != nil { @@ -1307,14 +1583,21 @@ type StatusDetails struct { Group *string `protobuf:"bytes,2,opt,name=group" json:"group,omitempty"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + Uid *string `protobuf:"bytes,6,opt,name=uid" json:"uid,omitempty"` // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional Causes []*StatusCause `protobuf:"bytes,4,rep,name=causes" json:"causes,omitempty"` - // If specified, the time in seconds before the operation should be retried. + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. // +optional RetryAfterSeconds *int32 `protobuf:"varint,5,opt,name=retryAfterSeconds" json:"retryAfterSeconds,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1323,7 +1606,7 @@ type StatusDetails struct { func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (m *StatusDetails) String() string { return proto.CompactTextString(m) } func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *StatusDetails) GetName() string { if m != nil && m.Name != nil { @@ -1346,6 +1629,13 @@ func (m *StatusDetails) GetKind() string { return "" } +func (m *StatusDetails) GetUid() string { + if m != nil && m.Uid != nil { + return *m.Uid + } + return "" +} + func (m *StatusDetails) GetCauses() []*StatusCause { if m != nil { return m.Causes @@ -1369,7 +1659,7 @@ func (m *StatusDetails) GetRetryAfterSeconds() int32 { // +protobuf.options.(gogoproto.goproto_stringer)=false type Time struct { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. 
Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative @@ -1383,7 +1673,7 @@ type Time struct { func (m *Time) Reset() { *m = Time{} } func (m *Time) String() string { return proto.CompactTextString(m) } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *Time) GetSeconds() int64 { if m != nil && m.Seconds != nil { @@ -1404,7 +1694,7 @@ func (m *Time) GetNanos() int32 { // that matches Time. Do not use in Go structs. type Timestamp struct { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative @@ -1418,7 +1708,7 @@ type Timestamp struct { func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *Timestamp) GetSeconds() int64 { if m != nil && m.Seconds != nil { @@ -1437,18 +1727,20 @@ func (m *Timestamp) GetNanos() int32 { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false type TypeMeta struct { // Kind is a string value representing the REST resource this object represents. // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources // +optional ApiVersion *string `protobuf:"bytes,2,opt,name=apiVersion" json:"apiVersion,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1457,7 +1749,7 @@ type TypeMeta struct { func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (m *TypeMeta) String() string { return proto.CompactTextString(m) } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *TypeMeta) GetKind() string { if m != nil && m.Kind != nil { @@ -1485,7 +1777,7 @@ type Verbs struct { func (m *Verbs) Reset() { *m = Verbs{} } func (m *Verbs) String() string { return proto.CompactTextString(m) } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *Verbs) GetItems() []string { if m != nil { @@ -1497,6 +1789,8 @@ func (m *Verbs) GetItems() []string { // Event represents a single event to a watched resource. // // +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type WatchEvent struct { Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` // Object is: @@ -1504,14 +1798,14 @@ type WatchEvent struct { // * If Type is Deleted: the state of the object immediately before deletion. // * If Type is Error: *Status is recommended; other types may make sense // depending on context. - Object *k8s_io_kubernetes_pkg_runtime.RawExtension `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` - XXX_unrecognized []byte `json:"-"` + Object *k8s_io_apimachinery_pkg_runtime.RawExtension `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (m *WatchEvent) String() string { return proto.CompactTextString(m) } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *WatchEvent) GetType() string { if m != nil && m.Type != nil { @@ -1520,7 +1814,7 @@ func (m *WatchEvent) GetType() string { return "" } -func (m *WatchEvent) GetObject() *k8s_io_kubernetes_pkg_runtime.RawExtension { +func (m *WatchEvent) GetObject() *k8s_io_apimachinery_pkg_runtime.RawExtension { if m != nil { return m.Object } @@ -1528,38 +1822,43 @@ func (m *WatchEvent) GetObject() *k8s_io_kubernetes_pkg_runtime.RawExtension { } func init() { - proto.RegisterType((*APIGroup)(nil), "github.com/ericchiang.k8s.apis.meta.v1.APIGroup") - proto.RegisterType((*APIGroupList)(nil), "github.com/ericchiang.k8s.apis.meta.v1.APIGroupList") - proto.RegisterType((*APIResource)(nil), "github.com/ericchiang.k8s.apis.meta.v1.APIResource") - proto.RegisterType((*APIResourceList)(nil), "github.com/ericchiang.k8s.apis.meta.v1.APIResourceList") - proto.RegisterType((*APIVersions)(nil), "github.com/ericchiang.k8s.apis.meta.v1.APIVersions") - proto.RegisterType((*DeleteOptions)(nil), "github.com/ericchiang.k8s.apis.meta.v1.DeleteOptions") - proto.RegisterType((*Duration)(nil), "github.com/ericchiang.k8s.apis.meta.v1.Duration") - 
proto.RegisterType((*ExportOptions)(nil), "github.com/ericchiang.k8s.apis.meta.v1.ExportOptions") - proto.RegisterType((*GetOptions)(nil), "github.com/ericchiang.k8s.apis.meta.v1.GetOptions") - proto.RegisterType((*GroupKind)(nil), "github.com/ericchiang.k8s.apis.meta.v1.GroupKind") - proto.RegisterType((*GroupResource)(nil), "github.com/ericchiang.k8s.apis.meta.v1.GroupResource") - proto.RegisterType((*GroupVersion)(nil), "github.com/ericchiang.k8s.apis.meta.v1.GroupVersion") - proto.RegisterType((*GroupVersionForDiscovery)(nil), "github.com/ericchiang.k8s.apis.meta.v1.GroupVersionForDiscovery") - proto.RegisterType((*GroupVersionKind)(nil), "github.com/ericchiang.k8s.apis.meta.v1.GroupVersionKind") - proto.RegisterType((*GroupVersionResource)(nil), "github.com/ericchiang.k8s.apis.meta.v1.GroupVersionResource") - proto.RegisterType((*LabelSelector)(nil), "github.com/ericchiang.k8s.apis.meta.v1.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "github.com/ericchiang.k8s.apis.meta.v1.LabelSelectorRequirement") - proto.RegisterType((*ListMeta)(nil), "github.com/ericchiang.k8s.apis.meta.v1.ListMeta") - proto.RegisterType((*ListOptions)(nil), "github.com/ericchiang.k8s.apis.meta.v1.ListOptions") - proto.RegisterType((*ObjectMeta)(nil), "github.com/ericchiang.k8s.apis.meta.v1.ObjectMeta") - proto.RegisterType((*OwnerReference)(nil), "github.com/ericchiang.k8s.apis.meta.v1.OwnerReference") - proto.RegisterType((*Preconditions)(nil), "github.com/ericchiang.k8s.apis.meta.v1.Preconditions") - proto.RegisterType((*RootPaths)(nil), "github.com/ericchiang.k8s.apis.meta.v1.RootPaths") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "github.com/ericchiang.k8s.apis.meta.v1.ServerAddressByClientCIDR") - proto.RegisterType((*Status)(nil), "github.com/ericchiang.k8s.apis.meta.v1.Status") - proto.RegisterType((*StatusCause)(nil), "github.com/ericchiang.k8s.apis.meta.v1.StatusCause") - proto.RegisterType((*StatusDetails)(nil), "github.com/ericchiang.k8s.apis.meta.v1.StatusDetails") - proto.RegisterType((*Time)(nil), "github.com/ericchiang.k8s.apis.meta.v1.Time") - proto.RegisterType((*Timestamp)(nil), "github.com/ericchiang.k8s.apis.meta.v1.Timestamp") - proto.RegisterType((*TypeMeta)(nil), "github.com/ericchiang.k8s.apis.meta.v1.TypeMeta") - proto.RegisterType((*Verbs)(nil), "github.com/ericchiang.k8s.apis.meta.v1.Verbs") - proto.RegisterType((*WatchEvent)(nil), "github.com/ericchiang.k8s.apis.meta.v1.WatchEvent") + proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") + proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList") + proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource") + proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList") + proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") + proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") + proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") + proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") + proto.RegisterType((*GroupVersion)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion") + proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") + proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") + proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") + proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") + proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") + proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") + proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") + proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") + proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") + proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") + proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") + proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -1717,6 +2016,39 @@ func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.SingularName != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SingularName))) + i += copy(dAtA[i:], *m.SingularName) + } + if len(m.Categories) > 0 { + for _, s := range m.Categories { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Group != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) + i += copy(dAtA[i:], *m.Group) + } + if m.Version != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) + i += copy(dAtA[i:], *m.Version) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -1950,6 +2282,16 @@ func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) i += 
copy(dAtA[i:], *m.ResourceVersion) } + if m.IncludeUninitialized != nil { + dAtA[i] = 0x10 + i++ + if *m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -2166,6 +2508,76 @@ func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Initializer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Initializers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Pending) > 0 { + for _, msg := range m.Pending { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Result != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) + n4, err := m.Result.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *LabelSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2264,7 +2676,7 @@ func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *ListMeta) Marshal() (dAtA []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2274,29 +2686,78 @@ func (m *ListMeta) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { +func (m *List) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.SelfLink != nil { + if m.Metadata != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelfLink))) - i += copy(dAtA[i:], *m.SelfLink) - } - if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n5, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } - return i, nil -} - + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ListMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMeta) MarshalTo(dAtA []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + if m.SelfLink != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelfLink))) + i += copy(dAtA[i:], *m.SelfLink) + } + if m.ResourceVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) + i += copy(dAtA[i:], *m.ResourceVersion) + } + if m.Continue != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Continue))) + i += copy(dAtA[i:], *m.Continue) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *ListOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2345,6 +2806,58 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } + if m.IncludeUninitialized != nil { + dAtA[i] = 0x30 + i++ + if *m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Limit != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Limit)) + } + if m.Continue != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Continue))) + i += copy(dAtA[i:], *m.Continue) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *MicroTime) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MicroTime) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Seconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Seconds)) + } + if m.Nanos != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Nanos)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -2411,21 +2924,21 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n4, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n6 } if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n5, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n7 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -2499,6 +3012,18 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterName))) i += copy(dAtA[i:], *m.ClusterName) } + if m.Initializers != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) + n8, err := m.Initializers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -2570,6 +3095,27 @@ func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Patch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != 
nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2685,11 +3231,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n6, err := m.Metadata.MarshalTo(dAtA[i:]) + n9, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n9 } if m.Status != nil { dAtA[i] = 0x12 @@ -2713,11 +3259,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n7, err := m.Details.MarshalTo(dAtA[i:]) + n10, err := m.Details.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n10 } if m.Code != nil { dAtA[i] = 0x30 @@ -2819,6 +3365,12 @@ func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.RetryAfterSeconds)) } + if m.Uid != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Uid))) + i += copy(dAtA[i:], *m.Uid) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -2981,11 +3533,11 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n8, err := m.Object.MarshalTo(dAtA[i:]) + n11, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n11 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -2993,24 +3545,6 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -3088,6 +3622,24 @@ func (m *APIResource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.SingularName != nil { + l = len(*m.SingularName) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Group != nil { + l = len(*m.Group) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Version != nil { + l = len(*m.Version) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3191,6 +3743,9 @@ func (m *GetOptions) Size() (n int) { l = len(*m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) } + if m.IncludeUninitialized != nil { + n += 2 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3307,6 +3862,38 @@ func (m *GroupVersionResource) Size() (n int) { return n } +func (m *Initializer) Size() (n int) { + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Initializers) Size() (n int) { + var l int + _ = l + 
if len(m.Pending) > 0 { + for _, e := range m.Pending { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *LabelSelector) Size() (n int) { var l int _ = l @@ -3353,6 +3940,25 @@ func (m *LabelSelectorRequirement) Size() (n int) { return n } +func (m *List) Size() (n int) { + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ListMeta) Size() (n int) { var l int _ = l @@ -3364,6 +3970,10 @@ func (m *ListMeta) Size() (n int) { l = len(*m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) } + if m.Continue != nil { + l = len(*m.Continue) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3391,6 +4001,31 @@ func (m *ListOptions) Size() (n int) { if m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } + if m.IncludeUninitialized != nil { + n += 2 + } + if m.Limit != nil { + n += 1 + sovGenerated(uint64(*m.Limit)) + } + if m.Continue != nil { + l = len(*m.Continue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MicroTime) Size() (n int) { + var l int + _ = l + if m.Seconds != nil { + n += 1 + sovGenerated(uint64(*m.Seconds)) + } + if m.Nanos != nil { + n += 1 + sovGenerated(uint64(*m.Nanos)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3470,6 +4105,10 @@ func (m *ObjectMeta) Size() (n int) { l = len(*m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) } + if m.Initializers != nil { + l = m.Initializers.Size() + n += 2 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3507,6 +4146,15 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *Patch) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Preconditions) Size() (n int) { var l int _ = l @@ -3629,6 +4277,10 @@ func (m *StatusDetails) Size() (n int) { if m.RetryAfterSeconds != nil { n += 1 + sovGenerated(uint64(*m.RetryAfterSeconds)) } + if m.Uid != nil { + l = len(*m.Uid) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -4157,13 +4809,132 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularName", wireType) } - if skippy < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) 
+ m.SingularName = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Group = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Version = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4780,6 +5551,27 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.ResourceVersion = &s iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IncludeUninitialized = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5528,7 +6320,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelSelector) Unmarshal(dAtA []byte) error { +func (m *Initializer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5551,17 +6343,17 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Initializer: illegal tag 
%d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5571,34 +6363,78 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Initializers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) } - var stringLenmapkey uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5608,74 +6444,26 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.MatchLabels[mapkey] = mapvalue - } else { - var mapvalue string - m.MatchLabels[mapkey] = mapvalue + m.Pending = append(m.Pending, &Initializer{}) + if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5699,8 +6487,10 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchExpressions = append(m.MatchExpressions, &LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Result == nil { + m.Result = &Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5726,7 +6516,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { +func (m *LabelSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5749,17 +6539,17 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5769,13 +6559,213 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, &LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen @@ -5866,6 +6856,121 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } return nil } +func (m *List) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &ListMeta{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + 
if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ListMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5955,6 +7060,36 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.ResourceVersion = &s iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Continue = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6085,13 +7220,185 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { break } } - b := bool(v != 0) - m.Watch = &b - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + b := bool(v != 0) + m.Watch = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IncludeUninitialized = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Limit = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Continue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MicroTime) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var stringLen uint64 + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MicroTime: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MicroTime: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6101,27 +7408,17 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceVersion = &s - iNdEx = postIndex - case 5: + m.Seconds = &v + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) } - var v int64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6131,12 +7428,12 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v + m.Nanos = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6500,51 +7797,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6554,41 +7814,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 12: if wireType != 2 { @@ -6616,51 +7915,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if 
b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6670,41 +7932,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 13: if wireType != 2 { @@ -6796,6 +8097,39 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error 
{ s := string(dAtA[iNdEx:postIndex]) m.ClusterName = &s iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Initializers == nil { + m.Initializers = &Initializers{} + } + if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7031,6 +8365,57 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Preconditions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7841,6 +9226,36 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } } m.RetryAfterSeconds = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Uid = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8322,7 +9737,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Object == nil { - m.Object = &k8s_io_kubernetes_pkg_runtime.RawExtension{} + m.Object = &k8s_io_apimachinery_pkg_runtime.RawExtension{} } if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8456,107 +9871,120 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/meta/v1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 1554 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0x67, 0xed, 0x38, 0x59, 0x3f, 0xc7, 0x4d, 0xba, 0x8a, 0x90, 0xb1, 0x20, 0x4a, 0x57, 0xa8, - 0x0a, 0x50, 0x6c, 0xa5, 0x82, 0xaa, 0x14, 0x9a, 0x2a, 0x8d, 0x43, 0xa8, 0xda, 0xd2, 0x68, 0x52, - 0xd2, 0x0a, 0x71, 0x60, 0xb2, 0xfb, 0xe2, 0x2c, 0x5e, 0xef, 0x2e, 0x33, 0x63, 0x37, 0x46, 0xe2, - 0xc6, 0x87, 0xe0, 0x80, 0xc4, 0x95, 0x4f, 0xc0, 0x81, 0x1b, 0x12, 0x07, 0x8e, 0x7c, 0x04, 0x28, - 0xdf, 0x80, 0x3b, 0x12, 0x9a, 0xd9, 0xd9, 0xf5, 0xae, 0xbd, 0x4e, 0x1d, 0x10, 0xa7, 0xcc, 0x7b, - 0x9e, 0xf7, 0x7b, 0x7f, 0xe6, 0xfd, 0xdb, 0xc0, 0xf5, 0xde, 0x4d, 0xde, 0xf2, 0xc2, 0x76, 0x6f, - 0x70, 0x8c, 0x2c, 0x40, 0x81, 0xbc, 0x1d, 0xf5, 0xba, 0x6d, 0x1a, 0x79, 0xbc, 0xdd, 0x47, 0x41, - 0xdb, 0xc3, 0xad, 0x76, 0x17, 0x03, 0x64, 0x54, 0xa0, 0xdb, 0x8a, 0x58, 0x28, 0x42, 0xcb, 0x8e, - 0x65, 0x5a, 0x63, 0x99, 0x56, 0xd4, 0xeb, 0xb6, 0xa4, 0x4c, 0x4b, 0xca, 0xb4, 0x86, 0x5b, 0xcd, - 0xb7, 0x8b, 0x71, 0xd9, 0x20, 0x10, 0x5e, 0x1f, 0x27, 0x21, 0x9b, 0xef, 0x9c, 0x7f, 0x9d, 0x3b, - 0xa7, 0xd8, 0xa7, 0x53, 0x52, 0x5b, 0xc5, 0x52, 0x03, 0xe1, 0xf9, 0x6d, 0x2f, 0x10, 0x5c, 0xb0, - 0x49, 0x11, 0xfb, 0x8f, 0x12, 0x98, 0x3b, 0x07, 0xf7, 0xf6, 0x59, 0x38, 0x88, 0x2c, 0x0b, 0x16, - 0x02, 0xda, 0xc7, 0x86, 0xb1, 0x61, 0x6c, 0x56, 0x89, 0x3a, 0x5b, 0x4f, 0xc1, 0x1c, 0x22, 0xe3, - 0x5e, 0x18, 0xf0, 0x46, 0x69, 0xa3, 0xbc, 0x59, 0xbb, 0xfe, 0x41, 0xeb, 0xc5, 0xfe, 0xb6, 0x14, - 0xe0, 0x51, 0x2c, 0xf8, 0x61, 0xc8, 0x3a, 0x1e, 0x77, 0xc2, 0x21, 0xb2, 0x11, 0x49, 0xd1, 0xac, - 0x53, 0x58, 0x8d, 0x18, 0x9e, 0x20, 0x63, 0xe8, 0xea, 0x9b, 0x8d, 0xf2, 0x86, 0xf1, 0x9f, 0x35, - 0x4c, 0xa1, 0x5a, 0x5f, 0x43, 0x93, 0x23, 0x1b, 0x22, 0xdb, 0x71, 0x5d, 0x86, 0x9c, 0xdf, 0x1d, - 0xed, 0xfa, 0x1e, 0x06, 0x62, 0xf7, 0x5e, 0x87, 0xf0, 0xc6, 0x82, 0xf2, 0xea, 0xf6, 0x3c, 0x3a, - 0x0f, 0x67, 0xa1, 0x90, 0x73, 0x14, 
0xd8, 0x8f, 0x61, 0x39, 0x09, 0xf1, 0x03, 0x8f, 0x0b, 0xab, - 0x03, 0x8b, 0x5d, 0x49, 0xf0, 0x86, 0xa1, 0x54, 0x5f, 0x9b, 0x47, 0x75, 0x82, 0x40, 0xb4, 0xac, - 0xfd, 0xa3, 0x01, 0xb5, 0x9d, 0x83, 0x7b, 0x04, 0x79, 0x38, 0x60, 0x0e, 0x16, 0x3e, 0xde, 0x3a, - 0x80, 0xfc, 0xcb, 0x23, 0xea, 0xa0, 0xdb, 0x28, 0x6d, 0x18, 0x9b, 0x26, 0xc9, 0x70, 0xa4, 0x4c, - 0xcf, 0x0b, 0x5c, 0x15, 0xf6, 0x2a, 0x51, 0x67, 0xeb, 0x0e, 0x54, 0x86, 0xc8, 0x8e, 0x65, 0x5c, - 0xe4, 0x5b, 0xbc, 0x31, 0x8f, 0x71, 0x47, 0x52, 0x80, 0xc4, 0x72, 0x52, 0x29, 0x3f, 0x0d, 0x99, - 0xf8, 0x58, 0xea, 0x69, 0x54, 0x36, 0xca, 0x9b, 0x55, 0x92, 0xe1, 0xd8, 0xdf, 0x18, 0xb0, 0x92, - 0x31, 0x5c, 0x85, 0xc4, 0x86, 0xe5, 0x6e, 0xe6, 0x3d, 0xb5, 0x13, 0x39, 0x9e, 0xf5, 0x10, 0xaa, - 0x4c, 0xcb, 0x24, 0xa9, 0xd8, 0x9e, 0x33, 0x72, 0x89, 0x2e, 0x32, 0x46, 0xb0, 0x7f, 0x88, 0xe3, - 0x77, 0x94, 0xa4, 0x63, 0x33, 0x93, 0xe8, 0x86, 0x32, 0x7a, 0x9c, 0xaa, 0xe7, 0x27, 0x50, 0xe9, - 0xff, 0x4e, 0xa0, 0xbf, 0x0c, 0xa8, 0x77, 0xd0, 0x47, 0x81, 0x8f, 0x22, 0xa1, 0x0c, 0x6a, 0x81, - 0xd5, 0x65, 0xd4, 0xc1, 0x03, 0x64, 0x5e, 0xe8, 0x1e, 0xa2, 0x13, 0x06, 0x2e, 0x57, 0x51, 0x2b, - 0x93, 0x82, 0x5f, 0xac, 0x27, 0x50, 0x8f, 0x98, 0x3a, 0x7b, 0x42, 0x97, 0xb2, 0x7c, 0xdc, 0xad, - 0x79, 0x6c, 0x3e, 0xc8, 0x0a, 0x92, 0x3c, 0x8e, 0xf5, 0x26, 0xac, 0x86, 0x2c, 0x3a, 0xa5, 0x41, - 0x07, 0x23, 0x0c, 0x5c, 0x0c, 0x04, 0x57, 0xd9, 0x64, 0x92, 0x29, 0xbe, 0x75, 0x0d, 0x2e, 0x47, - 0x2c, 0x8c, 0x68, 0x97, 0x4a, 0xd9, 0x83, 0xd0, 0xf7, 0x9c, 0x91, 0xca, 0xb2, 0x2a, 0x99, 0xfe, - 0xc1, 0xbe, 0x0a, 0x66, 0x67, 0xc0, 0x14, 0x47, 0xbe, 0x8d, 0xab, 0xcf, 0xda, 0xc9, 0x94, 0xb6, - 0x6f, 0x43, 0x7d, 0xef, 0x2c, 0x0a, 0x99, 0x48, 0x62, 0xf3, 0x32, 0x2c, 0xa2, 0x62, 0xa8, 0xab, - 0x26, 0xd1, 0x94, 0xb5, 0x06, 0x15, 0x3c, 0xa3, 0x8e, 0xd0, 0x75, 0x10, 0x13, 0xf6, 0x0d, 0x80, - 0x7d, 0x4c, 0x65, 0x37, 0x61, 0x25, 0xc9, 0x90, 0x7c, 0x2a, 0x4e, 0xb2, 0xed, 0x77, 0xa1, 0xaa, - 0xea, 0xf1, 0xbe, 0xac, 0x99, 0x35, 0xa8, 0xa8, 0x54, 0xd5, 0x97, 0x63, 0x22, 0xad, 0xae, 0xd2, - 0xb8, 0xba, 0xec, 0x1d, 0xa8, 0xc7, 0x65, 0x9c, 0x94, 0x6d, 0xb1, 0x68, 0x13, 0xcc, 0x44, 0xa1, - 0x16, 0x4f, 0x69, 0x7b, 0x1b, 0x96, 0xb3, 0xbd, 0x6f, 0x06, 0x42, 0x03, 0x96, 0x74, 0xfa, 0x6a, - 0x80, 0x84, 0xb4, 0x9f, 0x42, 0x63, 0x56, 0xef, 0x9c, 0xab, 0x0e, 0x67, 0x23, 0x1f, 0xc1, 0x6a, - 0x16, 0xf9, 0x9c, 0xd0, 0xcc, 0xc4, 0x28, 0x6a, 0x49, 0xf6, 0x31, 0xac, 0x65, 0x71, 0x5f, 0x10, - 0xbb, 0xd9, 0xd8, 0xd9, 0xa8, 0x96, 0x27, 0xa2, 0xfa, 0x7d, 0x09, 0xea, 0x0f, 0xe8, 0x31, 0xfa, - 0x87, 0xe8, 0xa3, 0x23, 0x42, 0x66, 0xb9, 0x50, 0xeb, 0x53, 0xe1, 0x9c, 0x2a, 0x6e, 0xd2, 0xab, - 0xef, 0xce, 0x53, 0x31, 0x39, 0x9c, 0xd6, 0xc3, 0x31, 0xc8, 0x5e, 0x20, 0xd8, 0x88, 0x64, 0x61, - 0xe5, 0x14, 0x54, 0xe4, 0xde, 0x59, 0x24, 0x0b, 0xff, 0xa2, 0x73, 0x36, 0xa7, 0x8a, 0xe0, 0x97, - 0x03, 0x8f, 0x61, 0x1f, 0x03, 0x41, 0xa6, 0x50, 0x9b, 0xdb, 0xb0, 0x3a, 0x69, 0x8a, 0xb5, 0x0a, - 0xe5, 0x1e, 0x8e, 0x74, 0xfc, 0xe4, 0x51, 0xc6, 0x74, 0x48, 0xfd, 0x41, 0x92, 0x76, 0x31, 0x71, - 0xab, 0x74, 0xd3, 0xb0, 0x3f, 0x87, 0xc6, 0x2c, 0x6d, 0x05, 0x38, 0x4d, 0x30, 0xc3, 0x48, 0xae, - 0x1a, 0x21, 0x4b, 0x32, 0x38, 0xa1, 0x65, 0x85, 0x2a, 0x58, 0xd9, 0x2a, 0x64, 0xa3, 0xd5, 0x94, - 0x7d, 0x00, 0xa6, 0x9c, 0x06, 0x0f, 0x51, 0x50, 0x29, 0xcf, 0xd1, 0x3f, 0x79, 0xe0, 0x05, 0x3d, - 0x0d, 0x9b, 0xd2, 0x45, 0x55, 0x5a, 0x2a, 0xae, 0xd2, 0x9f, 0x0d, 0xa8, 0x49, 0xc8, 0xa4, 0xbe, - 0x5f, 0x87, 0xba, 0x9f, 0xf5, 0x41, 0x43, 0xe7, 0x99, 0xf2, 0xd6, 0x89, 0x87, 0xbe, 0x9b, 0xde, - 0x8a, 0xd1, 0xf3, 0x4c, 0x19, 0xa9, 0x67, 0x32, 0x9e, 0xba, 
0xdf, 0xc5, 0x44, 0x91, 0x6d, 0x0b, - 0x85, 0xb6, 0x59, 0x57, 0xe1, 0x92, 0x5c, 0xe6, 0xc2, 0x81, 0x48, 0xfa, 0x77, 0x45, 0xb5, 0xb6, - 0x09, 0xae, 0xfd, 0xdd, 0x12, 0xc0, 0xa3, 0xe3, 0x2f, 0xd0, 0x89, 0x03, 0x53, 0x34, 0xe7, 0x65, - 0xd9, 0xea, 0xc5, 0x4e, 0xce, 0x58, 0x6d, 0x6f, 0x8e, 0x67, 0xbd, 0x0a, 0xd5, 0x74, 0xf2, 0xeb, - 0xec, 0x1f, 0x33, 0x72, 0xe1, 0x5e, 0x98, 0x08, 0xf7, 0x2a, 0x94, 0x07, 0x9e, 0xab, 0xac, 0xab, - 0x12, 0x79, 0x2c, 0x72, 0x72, 0xb1, 0xd8, 0xc9, 0x75, 0x00, 0x6d, 0x85, 0xbc, 0xb4, 0xa4, 0x1c, - 0xcc, 0x70, 0xac, 0x23, 0xb8, 0xec, 0x30, 0x54, 0xe7, 0xc7, 0x5e, 0x1f, 0xb9, 0xa0, 0xfd, 0xa8, - 0x61, 0xaa, 0xe1, 0xb4, 0x39, 0x4f, 0xfe, 0x4b, 0x21, 0x32, 0x0d, 0x21, 0x71, 0x5d, 0x39, 0x31, - 0x73, 0xb8, 0xd5, 0x8b, 0xe2, 0x4e, 0x41, 0x58, 0xdb, 0xd0, 0x4c, 0x98, 0xfb, 0xd3, 0x03, 0x18, - 0x94, 0x7f, 0xe7, 0xdc, 0xb0, 0x08, 0x2c, 0xfa, 0x71, 0x3f, 0xa9, 0xa9, 0x22, 0xbf, 0x35, 0x8f, - 0x31, 0xe3, 0xd7, 0x6f, 0x65, 0xfb, 0x88, 0x46, 0xb2, 0x28, 0xd4, 0x68, 0x10, 0x84, 0x82, 0xc6, - 0xa3, 0x7d, 0x59, 0x01, 0xdf, 0xb9, 0x20, 0xf0, 0xce, 0x18, 0x41, 0x77, 0xa9, 0x0c, 0xa6, 0xf5, - 0x19, 0xac, 0x84, 0xcf, 0x02, 0x64, 0x44, 0x6e, 0xd6, 0x18, 0xc8, 0x0d, 0xac, 0xae, 0xd4, 0x5c, - 0x9f, 0x4b, 0x4d, 0x4e, 0x94, 0x4c, 0x42, 0xc9, 0x24, 0x39, 0xf1, 0x02, 0xea, 0x7b, 0x5f, 0x21, - 0xe3, 0x8d, 0x4b, 0xf1, 0xc6, 0x38, 0xe6, 0x58, 0x1b, 0x50, 0x73, 0xfc, 0x01, 0x17, 0xc8, 0x54, - 0x76, 0xaf, 0xa8, 0x54, 0xcb, 0xb2, 0x9a, 0xef, 0x41, 0xed, 0x5f, 0xb6, 0x35, 0xd9, 0x16, 0x27, - 0x7d, 0xbf, 0x50, 0x5b, 0xfc, 0xc9, 0x80, 0x4b, 0x79, 0x07, 0xd3, 0x19, 0x66, 0x64, 0xd6, 0xea, - 0xa4, 0x6c, 0xcb, 0x99, 0xb2, 0xd5, 0x85, 0xb5, 0x30, 0x2e, 0xac, 0x75, 0x00, 0x1a, 0x79, 0x49, - 0x4d, 0xc5, 0x15, 0x97, 0xe1, 0xc8, 0xdf, 0x9d, 0x30, 0x10, 0x2c, 0xf4, 0x7d, 0x64, 0xaa, 0xe6, - 0x4c, 0x92, 0xe1, 0xc8, 0xbd, 0xf0, 0xd8, 0x0f, 0x9d, 0x9e, 0x32, 0xa8, 0xa3, 0xd3, 0x50, 0x95, - 0x9d, 0x49, 0x0a, 0x7e, 0xb1, 0xaf, 0x40, 0x3d, 0xb7, 0xde, 0x25, 0x26, 0x19, 0xa9, 0x49, 0xf6, - 0x15, 0xa8, 0x92, 0x30, 0x14, 0x07, 0x54, 0x9c, 0x72, 0x19, 0x86, 0x48, 0x1e, 0xf4, 0x86, 0x1c, - 0x13, 0x36, 0x85, 0x57, 0x66, 0x2e, 0xb6, 0xca, 0xe4, 0x94, 0xd2, 0xc0, 0x19, 0x8e, 0x6c, 0xb6, - 0xb9, 0xd5, 0x37, 0x69, 0xb6, 0x39, 0xa6, 0xfd, 0xb7, 0x01, 0x8b, 0x87, 0x82, 0x8a, 0x01, 0xb7, - 0x3e, 0x02, 0x53, 0x26, 0x96, 0x4b, 0x05, 0x55, 0x70, 0x73, 0x7e, 0x40, 0x25, 0x93, 0x85, 0xa4, - 0xd2, 0x72, 0x0e, 0x71, 0x85, 0xa9, 0x75, 0x6a, 0x4a, 0x6e, 0x10, 0x7d, 0xe4, 0x9c, 0x76, 0x93, - 0xe7, 0x4a, 0x48, 0x29, 0xc1, 0x90, 0xf2, 0xb4, 0xa9, 0x6b, 0xca, 0xba, 0x0f, 0x4b, 0x2e, 0x0a, - 0xea, 0xf9, 0x71, 0x13, 0x9f, 0x73, 0xb3, 0x8e, 0x1d, 0xea, 0xc4, 0x82, 0x24, 0x41, 0x90, 0xa9, - 0xe2, 0x84, 0x2e, 0xaa, 0xe7, 0xad, 0x10, 0x75, 0xb6, 0x3f, 0x81, 0x5a, 0x7c, 0x7b, 0x97, 0x0e, - 0x78, 0xd6, 0x0e, 0x23, 0x67, 0x47, 0xc6, 0xf2, 0x52, 0xde, 0xf2, 0x35, 0xa8, 0xa8, 0xf1, 0xa5, - 0x3d, 0x8a, 0x09, 0xfb, 0x17, 0x03, 0xea, 0x39, 0x2b, 0x0a, 0xc7, 0x4b, 0xba, 0x67, 0x95, 0x8a, - 0xd6, 0xdb, 0xec, 0xc7, 0xe3, 0x3e, 0x2c, 0x3a, 0xd2, 0xc0, 0xe4, 0xab, 0xba, 0x3d, 0x7f, 0x18, - 0x94, 0x63, 0x44, 0x8b, 0xcb, 0x6f, 0x05, 0x86, 0x82, 0x8d, 0x76, 0x4e, 0x04, 0xb2, 0xec, 0x7c, - 0xac, 0x90, 0xe9, 0x1f, 0xec, 0x1b, 0xb0, 0x20, 0x5b, 0xb4, 0x74, 0x9f, 0xe7, 0xbe, 0x85, 0x12, - 0x52, 0xba, 0x10, 0xd0, 0x20, 0x8c, 0x5f, 0xba, 0x42, 0x62, 0xc2, 0x7e, 0x1f, 0xaa, 0xe3, 0xd6, - 0x7e, 0x51, 0xe1, 0x6d, 0x30, 0x1f, 0x8f, 0x22, 0x4c, 0x86, 0xf2, 0x54, 0xc5, 0xe7, 0x6b, 0xb9, - 0x34, 0x59, 0xcb, 0xf6, 0x6b, 0x50, 0x51, 0xdf, 0xcd, 0x12, 0xde, 0x13, 0xd8, 0x4f, 
0x8b, 0x4a, - 0x11, 0x36, 0x02, 0x3c, 0x51, 0x2b, 0xdc, 0x50, 0x2e, 0x58, 0x16, 0x2c, 0x88, 0x51, 0x94, 0x3e, - 0x8b, 0x3c, 0x5b, 0xbb, 0xb0, 0x18, 0xaa, 0x06, 0xae, 0xbf, 0xe6, 0xde, 0x9a, 0x11, 0x6c, 0xfd, - 0x5f, 0xa3, 0x16, 0xa1, 0xcf, 0xf6, 0xce, 0x04, 0x06, 0x6a, 0x87, 0xd6, 0xa2, 0x77, 0xd7, 0x7e, - 0x7d, 0xbe, 0x6e, 0xfc, 0xf6, 0x7c, 0xdd, 0xf8, 0xfd, 0xf9, 0xba, 0xf1, 0xed, 0x9f, 0xeb, 0x2f, - 0x7d, 0x5a, 0x1a, 0x6e, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x8e, 0x23, 0xd2, 0x07, 0x13, - 0x00, 0x00, + // 1775 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0x23, 0x49, + 0x15, 0xa7, 0x3b, 0xb1, 0x63, 0x3f, 0xc7, 0x33, 0xd9, 0x52, 0xb4, 0x6a, 0x2c, 0x88, 0x32, 0xad, + 0xd5, 0x2a, 0x5a, 0x16, 0x47, 0x99, 0x5d, 0x56, 0x0b, 0x0b, 0x41, 0xd9, 0x71, 0x76, 0x14, 0x76, + 0x86, 0x89, 0x6a, 0x66, 0xc3, 0xb2, 0x07, 0x44, 0xa5, 0xbb, 0xe2, 0xd4, 0xa4, 0xdd, 0xdd, 0x54, + 0x95, 0x3d, 0x63, 0x2e, 0x88, 0x0b, 0x5f, 0x01, 0xae, 0x88, 0x03, 0x12, 0xe2, 0x13, 0xf0, 0x09, + 0x38, 0x72, 0xe4, 0x88, 0x86, 0x03, 0x5f, 0x80, 0x33, 0x42, 0x55, 0x5d, 0xd5, 0xae, 0xb6, 0xdb, + 0x59, 0x7b, 0x35, 0x7b, 0x72, 0xbd, 0xd7, 0xf5, 0x7e, 0xef, 0xd5, 0xab, 0xf7, 0xaf, 0x0c, 0xef, + 0xdf, 0x7c, 0x28, 0xfa, 0x2c, 0x3b, 0x24, 0x39, 0x1b, 0x91, 0xe8, 0x9a, 0xa5, 0x94, 0x4f, 0x0f, + 0xf3, 0x9b, 0xa1, 0x62, 0x88, 0xc3, 0x11, 0x95, 0xe4, 0x70, 0x72, 0x74, 0x38, 0xa4, 0x29, 0xe5, + 0x44, 0xd2, 0xb8, 0x9f, 0xf3, 0x4c, 0x66, 0xe8, 0xad, 0x42, 0xaa, 0xef, 0x4a, 0xf5, 0xf3, 0x9b, + 0xa1, 0x62, 0x88, 0xbe, 0x92, 0xea, 0x4f, 0x8e, 0x7a, 0x87, 0xcb, 0xb0, 0xf9, 0x38, 0x95, 0x6c, + 0x44, 0xe7, 0x61, 0x7b, 0x1f, 0x7c, 0x99, 0x80, 0x88, 0xae, 0xe9, 0x88, 0x2c, 0xc8, 0xbd, 0xb7, + 0x4c, 0x6e, 0x2c, 0x59, 0x72, 0xc8, 0x52, 0x29, 0x24, 0x9f, 0x17, 0x0a, 0xff, 0xe3, 0x43, 0xeb, + 0xe4, 0xfc, 0xec, 0x21, 0xcf, 0xc6, 0x39, 0x42, 0xb0, 0x99, 0x92, 0x11, 0x0d, 0xbc, 0x7d, 0xef, + 0xa0, 0x8d, 0xf5, 0x1a, 0x7d, 0x01, 0xad, 0x09, 0xe5, 0x82, 0x65, 0xa9, 0x08, 0xfc, 0xfd, 0x8d, + 0x83, 0xce, 0xfd, 0xe3, 0xfe, 0x2a, 0xe7, 0xee, 0x6b, 0xc8, 0x8b, 0x42, 0xf4, 0x93, 0x8c, 0x0f, + 0x98, 0x88, 0xb2, 0x09, 0xe5, 0x53, 0x5c, 0xe2, 0xa1, 0xe7, 0xb0, 0x93, 0x73, 0x7a, 0x45, 0x39, + 0xa7, 0xb1, 0xd9, 0x19, 0x6c, 0xec, 0x7b, 0xaf, 0x41, 0xc7, 0x02, 0x2e, 0xfa, 0x0d, 0xf4, 0x04, + 0xe5, 0x13, 0xca, 0x4f, 0xe2, 0x98, 0x53, 0x21, 0x3e, 0x9e, 0x3e, 0x48, 0x18, 0x4d, 0xe5, 0x83, + 0xb3, 0x01, 0x16, 0xc1, 0xa6, 0x3e, 0xd9, 0x8f, 0x57, 0xd3, 0xfa, 0x74, 0x19, 0x0e, 0xbe, 0x45, + 0x45, 0x78, 0x01, 0xdb, 0xd6, 0xd1, 0x8f, 0x98, 0x90, 0xe8, 0x13, 0x68, 0x0e, 0x15, 0x21, 0x02, + 0x4f, 0x2b, 0xef, 0xaf, 0xa6, 0xdc, 0x62, 0x60, 0x23, 0x1d, 0xfe, 0xd5, 0x87, 0xce, 0xc9, 0xf9, + 0x19, 0xa6, 0x22, 0x1b, 0xf3, 0x88, 0xd6, 0x5e, 0xe2, 0x1e, 0x80, 0xfa, 0x15, 0x39, 0x89, 0x68, + 0x1c, 0xf8, 0xfb, 0xde, 0x41, 0x0b, 0x3b, 0x1c, 0x25, 0x73, 0xc3, 0xd2, 0x58, 0x3b, 0xbf, 0x8d, + 0xf5, 0x1a, 0x9d, 0x40, 0x63, 0x42, 0xf9, 0xa5, 0xf2, 0x8d, 0xba, 0x91, 0xef, 0xac, 0x66, 0xde, + 0x85, 0x12, 0xc1, 0x85, 0xa4, 0x52, 0x2b, 0xae, 0x33, 0x2e, 0x7f, 0xaa, 0x34, 0x05, 0x8d, 0xfd, + 0x8d, 0x83, 0x36, 0x76, 0x38, 0x28, 0x84, 0x6d, 0xc1, 0xd2, 0xe1, 0x38, 0x21, 0x5c, 0x31, 0x82, + 0xa6, 0x56, 0x5f, 0xe1, 0x29, 0x8c, 0x88, 0x48, 0x3a, 0xcc, 0x38, 0xa3, 0x22, 0xd8, 0x2a, 0x30, + 0x66, 0x1c, 0xb4, 0x0b, 0x0d, 0xed, 0x88, 0xa0, 0xa5, 0x85, 0x0b, 0x02, 0x05, 0xb0, 0x65, 0xa2, + 0x2c, 0x68, 0x6b, 0xbe, 0x25, 0xc3, 0xdf, 0x79, 0x70, 0xd7, 0x71, 0x97, 0xbe, 0x8a, 0x10, 0xb6, + 0x87, 0x4e, 0x24, 0x19, 0xd7, 0x55, 0x78, 0xe8, 
0x09, 0xb4, 0xb9, 0x91, 0xb1, 0x89, 0x70, 0xb4, + 0xf2, 0x8d, 0x59, 0x6d, 0x78, 0x86, 0x11, 0xfe, 0xc5, 0xd3, 0xf7, 0x76, 0x61, 0x93, 0xa1, 0xe7, + 0x24, 0x9a, 0xa7, 0x8f, 0x39, 0x4b, 0x94, 0xdb, 0x83, 0xd7, 0xff, 0xfa, 0x83, 0xf7, 0xbf, 0x1e, + 0x74, 0x07, 0x34, 0xa1, 0x92, 0x3e, 0xc9, 0xa5, 0x36, 0xa9, 0x0f, 0x68, 0xc8, 0x49, 0x44, 0xcf, + 0x29, 0x67, 0x59, 0xfc, 0x94, 0x46, 0x59, 0x1a, 0x0b, 0xed, 0xb9, 0x0d, 0x5c, 0xf3, 0x05, 0xfd, + 0x1c, 0xba, 0x39, 0xd7, 0x6b, 0x26, 0x4d, 0x31, 0x51, 0x61, 0xf5, 0xde, 0x6a, 0x56, 0x9f, 0xbb, + 0xa2, 0xb8, 0x8a, 0x84, 0xde, 0x81, 0x9d, 0x8c, 0xe7, 0xd7, 0x24, 0x1d, 0xd0, 0x9c, 0xa6, 0x31, + 0x4d, 0xa5, 0xd0, 0x91, 0xdc, 0xc2, 0x0b, 0x7c, 0xf4, 0x2e, 0xbc, 0x91, 0xf3, 0x2c, 0x27, 0x43, + 0xa2, 0x64, 0xcf, 0xb3, 0x84, 0x45, 0x53, 0x1d, 0xe1, 0x6d, 0xbc, 0xf8, 0x21, 0x7c, 0x1b, 0x5a, + 0x83, 0x31, 0xd7, 0x1c, 0x75, 0x3f, 0xb1, 0x59, 0x9b, 0x63, 0x96, 0x74, 0xf8, 0x23, 0xe8, 0x9e, + 0xbe, 0xcc, 0x33, 0x2e, 0xad, 0x77, 0xde, 0x84, 0x26, 0xd5, 0x0c, 0xbd, 0xb5, 0x85, 0x0d, 0xa5, + 0xa2, 0x95, 0xbe, 0x24, 0x91, 0x34, 0x39, 0x58, 0x10, 0xe1, 0x73, 0x80, 0x87, 0xb4, 0x94, 0x3d, + 0x80, 0xbb, 0x36, 0x4a, 0xaa, 0x01, 0x39, 0xcf, 0x46, 0xf7, 0x61, 0x97, 0xa5, 0x51, 0x32, 0x8e, + 0xe9, 0x67, 0x29, 0x4b, 0x99, 0x64, 0x24, 0x61, 0xbf, 0x2e, 0x13, 0xbc, 0xf6, 0x5b, 0xf8, 0x3d, + 0x68, 0xeb, 0xfa, 0xf1, 0xa9, 0xca, 0xf1, 0x32, 0x79, 0x3c, 0x37, 0x79, 0x6c, 0x35, 0xf0, 0x67, + 0xd5, 0x20, 0x3c, 0x81, 0x6e, 0x51, 0x76, 0x6c, 0x99, 0xa9, 0x17, 0xed, 0x41, 0xcb, 0x1a, 0x69, + 0xc4, 0x4b, 0x3a, 0x3c, 0x86, 0x6d, 0xb7, 0x5e, 0x2f, 0x41, 0x70, 0x32, 0xd7, 0xaf, 0x66, 0xee, + 0xe7, 0x10, 0x2c, 0xab, 0xf7, 0x2b, 0x65, 0xf0, 0x72, 0xe4, 0x0b, 0xd8, 0x71, 0x91, 0x6f, 0x71, + 0xcd, 0x52, 0x8c, 0xba, 0x12, 0x1a, 0x5e, 0xc2, 0xae, 0x8b, 0xfb, 0x25, 0xbe, 0x5b, 0x8e, 0xed, + 0x7a, 0x75, 0x63, 0xce, 0xab, 0xf7, 0xa0, 0x73, 0x56, 0x5e, 0x2f, 0xaf, 0xab, 0xfe, 0xe1, 0x1f, + 0x3d, 0xd8, 0x76, 0xf6, 0x08, 0xf4, 0x29, 0x6c, 0xa9, 0x84, 0x60, 0xe9, 0xd0, 0xf4, 0x9e, 0x15, + 0x2b, 0x99, 0x03, 0x82, 0x2d, 0x02, 0x1a, 0x40, 0x93, 0x53, 0x31, 0x4e, 0xa4, 0xc9, 0xe8, 0x77, + 0x57, 0xac, 0x43, 0x92, 0xc8, 0xb1, 0xc0, 0x46, 0x36, 0xfc, 0x93, 0x0f, 0xdd, 0x47, 0xe4, 0x92, + 0x26, 0x4f, 0x69, 0x42, 0x23, 0x99, 0x71, 0x74, 0x05, 0x9d, 0x11, 0x91, 0xd1, 0xb5, 0xe6, 0xda, + 0x26, 0x39, 0x58, 0x0d, 0xbc, 0x82, 0xd4, 0x7f, 0x3c, 0x83, 0x39, 0x4d, 0x25, 0x9f, 0x62, 0x17, + 0x58, 0x0d, 0x21, 0x9a, 0x3c, 0x7d, 0x99, 0xab, 0xba, 0xb7, 0xfe, 0xa0, 0x53, 0x51, 0x86, 0xe9, + 0xaf, 0xc6, 0x8c, 0xd3, 0x11, 0x4d, 0x25, 0x5e, 0xc0, 0xed, 0x1d, 0xc3, 0xce, 0xbc, 0x31, 0x68, + 0x07, 0x36, 0x6e, 0xe8, 0xd4, 0x5c, 0x98, 0x5a, 0xaa, 0xf0, 0x98, 0x90, 0x64, 0x6c, 0x33, 0xa8, + 0x20, 0x7e, 0xe0, 0x7f, 0xe8, 0x85, 0xbf, 0x84, 0x60, 0x99, 0xb6, 0x1a, 0x9c, 0x1e, 0xb4, 0xb2, + 0x5c, 0x4d, 0x7b, 0x19, 0xb7, 0xc9, 0x68, 0x69, 0x55, 0xa0, 0x34, 0xac, 0xaa, 0x94, 0xaa, 0xd7, + 0x18, 0x2a, 0xfc, 0xbd, 0x07, 0x9b, 0xba, 0x27, 0xfe, 0x04, 0x5a, 0xea, 0x80, 0x31, 0x91, 0x44, + 0x63, 0xae, 0x3c, 0xa0, 0x28, 0xe9, 0xc7, 0x54, 0x12, 0x5c, 0xca, 0xa3, 0x07, 0xd0, 0x60, 0x92, + 0x8e, 0xac, 0x5f, 0xbf, 0xbb, 0x14, 0xc8, 0x4c, 0xb8, 0x7d, 0x4c, 0x5e, 0x9c, 0xbe, 0x94, 0x34, + 0xd5, 0x59, 0x53, 0xc8, 0x86, 0x09, 0xb4, 0x2c, 0xb4, 0x3a, 0x99, 0xa0, 0xc9, 0xd5, 0x23, 0x96, + 0xde, 0x98, 0x03, 0x97, 0x74, 0x5d, 0xf9, 0xf4, 0xeb, 0xcb, 0x67, 0x0f, 0x5a, 0x51, 0x96, 0x4a, + 0x96, 0x8e, 0xcb, 0xb4, 0xb2, 0x74, 0xf8, 0x67, 0x1f, 0x3a, 0x4a, 0x9d, 0x2d, 0xca, 0x6f, 0x41, + 0x37, 0x71, 0x3d, 0x6f, 0xd4, 0x56, 0x99, 0x6a, 0xd7, 0x15, 0xa3, 0x49, 
0x5c, 0xee, 0x2a, 0x34, + 0x57, 0x99, 0xea, 0x7e, 0x5f, 0xa8, 0x28, 0x30, 0x4d, 0xaa, 0x20, 0xea, 0xec, 0xde, 0xac, 0xb7, + 0xfb, 0x6d, 0xb8, 0xa3, 0xbc, 0x94, 0x8d, 0xa5, 0x6d, 0xbb, 0x0d, 0xdd, 0x8f, 0xe6, 0xb8, 0x4b, + 0xdb, 0x43, 0x73, 0x79, 0x7b, 0x50, 0xb6, 0x25, 0x6c, 0xc4, 0x64, 0xb0, 0xa5, 0x21, 0x0b, 0xa2, + 0xe2, 0xa9, 0xd6, 0x9c, 0xa7, 0x3e, 0x82, 0xf6, 0x63, 0x16, 0xf1, 0xec, 0x19, 0x1b, 0x51, 0x55, + 0xc3, 0x44, 0x65, 0x14, 0xb0, 0xa4, 0x02, 0x4e, 0x49, 0x9a, 0x15, 0x7d, 0xbf, 0x81, 0x0b, 0x22, + 0xfc, 0xdf, 0x16, 0xc0, 0x93, 0xcb, 0xe7, 0x34, 0x2a, 0xee, 0xb5, 0x6e, 0x76, 0x55, 0xa5, 0xdd, + 0x3c, 0x5a, 0xf4, 0x90, 0xe8, 0x9b, 0xd2, 0xee, 0xf0, 0xd0, 0xb7, 0xa0, 0x5d, 0x4e, 0xb3, 0xe6, + 0x2a, 0x67, 0x8c, 0x4a, 0xb4, 0x6c, 0xce, 0x45, 0xcb, 0x0e, 0x6c, 0x8c, 0x59, 0xac, 0x1d, 0xd8, + 0xc6, 0x6a, 0x59, 0x77, 0x0f, 0xcd, 0xfa, 0x7b, 0xd8, 0x03, 0x30, 0x56, 0xa8, 0x4d, 0x85, 0xc3, + 0x1c, 0x0e, 0xfa, 0x1c, 0xde, 0x88, 0x38, 0xd5, 0x6b, 0xe5, 0x1c, 0x21, 0xc9, 0xa8, 0x18, 0x53, + 0x3b, 0xf7, 0xdf, 0x59, 0x2d, 0x97, 0x94, 0x18, 0x5e, 0x04, 0x51, 0xc8, 0xb1, 0x9a, 0xc6, 0x2a, + 0xc8, 0xed, 0xf5, 0x91, 0x17, 0x40, 0xd0, 0x31, 0xf4, 0x2c, 0xf3, 0xe1, 0xe2, 0x78, 0x07, 0xfa, + 0x8c, 0xb7, 0xec, 0x40, 0xcf, 0xa0, 0x99, 0x14, 0x05, 0xbb, 0xa3, 0x73, 0xfd, 0x87, 0xab, 0x99, + 0x33, 0x8b, 0x81, 0xbe, 0x5b, 0xa8, 0x0d, 0x16, 0x8a, 0xa0, 0x43, 0xd2, 0x34, 0x93, 0xa4, 0x18, + 0x1d, 0xb7, 0x35, 0xf4, 0xc9, 0xda, 0xd0, 0x27, 0x33, 0x0c, 0xd3, 0x08, 0x1c, 0x54, 0xf4, 0x0b, + 0xb8, 0x9b, 0xbd, 0x48, 0x29, 0xc7, 0xea, 0xe5, 0x48, 0x53, 0x35, 0xe7, 0x77, 0xb5, 0xa2, 0xf7, + 0x57, 0x54, 0x54, 0x11, 0xc6, 0xf3, 0x60, 0x2a, 0x5c, 0xae, 0x58, 0x6a, 0x7a, 0x70, 0x70, 0xa7, + 0x78, 0xc9, 0xcc, 0x38, 0x68, 0x1f, 0x3a, 0x51, 0x32, 0x16, 0x92, 0x16, 0x8f, 0xa1, 0xbb, 0x3a, + 0xe8, 0x5c, 0x16, 0xba, 0x80, 0x6d, 0xe6, 0xf4, 0xf1, 0x60, 0x47, 0xdf, 0xf8, 0xfd, 0xb5, 0x9b, + 0xb7, 0xc0, 0x15, 0x9c, 0xde, 0xf7, 0xa1, 0xf3, 0x15, 0x3b, 0x92, 0xea, 0x68, 0xf3, 0x5e, 0x5d, + 0xab, 0xa3, 0xfd, 0xcd, 0x83, 0x3b, 0x55, 0xc7, 0x95, 0x93, 0x94, 0xe7, 0x3c, 0x46, 0x6d, 0x61, + 0xd8, 0x70, 0x0a, 0x83, 0x49, 0xdd, 0xcd, 0x59, 0xea, 0xee, 0x01, 0x90, 0x9c, 0xd9, 0xac, 0x2d, + 0x72, 0xda, 0xe1, 0xe8, 0xb7, 0x64, 0x96, 0x4a, 0x9e, 0x25, 0x09, 0xe5, 0xa6, 0x0c, 0x3a, 0x1c, + 0xf5, 0xa6, 0xb9, 0x4c, 0xb2, 0xe8, 0x46, 0x1b, 0x34, 0x30, 0x41, 0xae, 0x13, 0xbb, 0x85, 0x6b, + 0xbe, 0x84, 0x5b, 0xd0, 0x38, 0x57, 0xb5, 0x3b, 0xbc, 0x07, 0xdd, 0xca, 0x0b, 0xc5, 0xda, 0xe6, + 0x95, 0xb6, 0x85, 0xf7, 0xa0, 0x8d, 0xb3, 0x4c, 0x9e, 0x13, 0x79, 0xad, 0x8b, 0x61, 0xae, 0x16, + 0xe6, 0xa1, 0x57, 0x10, 0x21, 0x81, 0x6f, 0x2e, 0x7d, 0x9d, 0x69, 0xdb, 0x4b, 0xca, 0x00, 0x3b, + 0x1c, 0xd5, 0x7a, 0x2a, 0xef, 0x37, 0xdb, 0x7a, 0x2a, 0xcc, 0xf0, 0xb7, 0x3e, 0x34, 0x8b, 0xc9, + 0xeb, 0xb5, 0x36, 0xf8, 0x37, 0xa1, 0x29, 0x34, 0xaa, 0xd1, 0x6a, 0x28, 0xd5, 0x0e, 0x46, 0x54, + 0x08, 0x32, 0xb4, 0x37, 0x67, 0x49, 0x25, 0xc1, 0x29, 0x11, 0x65, 0x93, 0x33, 0x14, 0x7a, 0x0c, + 0x5b, 0x31, 0x95, 0x84, 0x25, 0x45, 0x53, 0x5b, 0xf9, 0x81, 0x58, 0x1c, 0x6a, 0x50, 0x88, 0x62, + 0x8b, 0xa1, 0xe2, 0x26, 0xca, 0xe2, 0xe2, 0x9f, 0x85, 0x06, 0xd6, 0xeb, 0xf0, 0x33, 0xe8, 0x14, + 0xbb, 0x1f, 0x90, 0xb1, 0x70, 0x2d, 0xf1, 0x2a, 0x96, 0x38, 0xb6, 0xfb, 0x55, 0xdb, 0x77, 0xa1, + 0xa1, 0x1b, 0xba, 0x39, 0x53, 0x41, 0x84, 0xff, 0xf4, 0xa0, 0x5b, 0xb1, 0xa2, 0xb6, 0x9b, 0x95, + 0xa3, 0xbf, 0x5f, 0xf7, 0xe2, 0x72, 0xff, 0x7f, 0x39, 0x83, 0x66, 0xa4, 0x0c, 0xb4, 0x7f, 0x4e, + 0x1d, 0xad, 0xe3, 0x08, 0x7d, 0x34, 0x6c, 0x00, 0xd4, 0xa3, 0x97, 0x53, 0xc9, 0xa7, 0x27, 0x57, + 
0x92, 0x72, 0x77, 0x66, 0x68, 0xe0, 0xc5, 0x0f, 0x36, 0x76, 0x9b, 0xb3, 0xd8, 0xfd, 0x00, 0x36, + 0xbf, 0x52, 0x77, 0xff, 0x08, 0xda, 0xb3, 0xce, 0xb2, 0xae, 0xf0, 0x31, 0xb4, 0x9e, 0x4d, 0x73, + 0x6a, 0xe7, 0x82, 0x85, 0x92, 0x50, 0x4d, 0x76, 0x7f, 0x3e, 0xd9, 0xc3, 0x6f, 0x43, 0x43, 0xff, + 0x19, 0xa5, 0xe0, 0x8b, 0xe9, 0xd3, 0x24, 0x5b, 0x31, 0x4e, 0x0e, 0x01, 0x7e, 0xa6, 0xc7, 0xf3, + 0x89, 0x1a, 0x9e, 0x11, 0x6c, 0xca, 0x69, 0x5e, 0x5e, 0x95, 0x5a, 0xa3, 0x53, 0x68, 0x66, 0xba, + 0x77, 0x98, 0x87, 0xcd, 0x9a, 0x63, 0xab, 0x11, 0xfe, 0x78, 0xf7, 0xef, 0xaf, 0xf6, 0xbc, 0x7f, + 0xbc, 0xda, 0xf3, 0xfe, 0xf5, 0x6a, 0xcf, 0xfb, 0xc3, 0xbf, 0xf7, 0xbe, 0xf1, 0x85, 0x3f, 0x39, + 0xfa, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa2, 0xaa, 0xeb, 0xab, 0x72, 0x16, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/time.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/time.go index 91a47b9e..a4ceb6d1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/time.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/meta/v1/time.go @@ -7,7 +7,6 @@ import ( // JSON marshaling logic for the Time type. Need to make // third party resources JSON work. - func (t Time) MarshalJSON() ([]byte, error) { var seconds, nanos int64 if t.Seconds != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1alpha1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1alpha1/generated.pb.go deleted file mode 100644 index 748b923e..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1352 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto - - It has these top-level messages: - Eviction - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus -*/ -package v1alpha1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/ericchiang/k8s/api/resource" -import k8s_io_kubernetes_pkg_api_unversioned "github.com/ericchiang/k8s/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" -import _ "github.com/ericchiang/k8s/runtime" -import k8s_io_kubernetes_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Eviction evicts a pod from its node subject to certain policies and safety constraints. -// This is a subresource of Pod. A request to cause such an eviction is -// created by POSTing to .../pods//evictions. -type Eviction struct { - // ObjectMeta describes the pod that is being evicted. 
- Metadata *k8s_io_kubernetes_pkg_api_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // DeleteOptions may be provided - DeleteOptions *k8s_io_kubernetes_pkg_api_v1.DeleteOptions `protobuf:"bytes,2,opt,name=deleteOptions" json:"deleteOptions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Eviction) Reset() { *m = Eviction{} } -func (m *Eviction) String() string { return proto.CompactTextString(m) } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *Eviction) GetMetadata() *k8s_io_kubernetes_pkg_api_v1.ObjectMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *Eviction) GetDeleteOptions() *k8s_io_kubernetes_pkg_api_v1.DeleteOptions { - if m != nil { - return m.DeleteOptions - } - return nil -} - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -type PodDisruptionBudget struct { - Metadata *k8s_io_kubernetes_pkg_api_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Specification of the desired behavior of the PodDisruptionBudget. - Spec *PodDisruptionBudgetSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` - // Most recently observed status of the PodDisruptionBudget. - Status *PodDisruptionBudgetStatus `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (m *PodDisruptionBudget) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *PodDisruptionBudget) GetMetadata() *k8s_io_kubernetes_pkg_api_v1.ObjectMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *PodDisruptionBudget) GetSpec() *PodDisruptionBudgetSpec { - if m != nil { - return m.Spec - } - return nil -} - -func (m *PodDisruptionBudget) GetStatus() *PodDisruptionBudgetStatus { - if m != nil { - return m.Status - } - return nil -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -type PodDisruptionBudgetList struct { - Metadata *k8s_io_kubernetes_pkg_api_unversioned.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - Items []*PodDisruptionBudget `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (m *PodDisruptionBudgetList) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *PodDisruptionBudgetList) GetMetadata() *k8s_io_kubernetes_pkg_api_unversioned.ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *PodDisruptionBudgetList) GetItems() []*PodDisruptionBudget { - if m != nil { - return m.Items - } - return nil -} - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -type PodDisruptionBudgetSpec struct { - // The minimum number of pods that must be available simultaneously. This - // can be either an integer or a string specifying a percentage, e.g. "28%". 
- MinAvailable *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=minAvailable" json:"minAvailable,omitempty"` - // Label query over pods whose evictions are managed by the disruption - // budget. - Selector *k8s_io_kubernetes_pkg_api_unversioned.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (m *PodDisruptionBudgetSpec) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *PodDisruptionBudgetSpec) GetMinAvailable() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { - if m != nil { - return m.MinAvailable - } - return nil -} - -func (m *PodDisruptionBudgetSpec) GetSelector() *k8s_io_kubernetes_pkg_api_unversioned.LabelSelector { - if m != nil { - return m.Selector - } - return nil -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -type PodDisruptionBudgetStatus struct { - // Whether or not a disruption is currently allowed. - DisruptionAllowed *bool `protobuf:"varint,1,opt,name=disruptionAllowed" json:"disruptionAllowed,omitempty"` - // current number of healthy pods - CurrentHealthy *int32 `protobuf:"varint,2,opt,name=currentHealthy" json:"currentHealthy,omitempty"` - // minimum desired number of healthy pods - DesiredHealthy *int32 `protobuf:"varint,3,opt,name=desiredHealthy" json:"desiredHealthy,omitempty"` - // total number of pods counted by this disruption budget - ExpectedPods *int32 `protobuf:"varint,4,opt,name=expectedPods" json:"expectedPods,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } -func (m *PodDisruptionBudgetStatus) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudgetStatus) ProtoMessage() {} -func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *PodDisruptionBudgetStatus) GetDisruptionAllowed() bool { - if m != nil && m.DisruptionAllowed != nil { - return *m.DisruptionAllowed - } - return false -} - -func (m *PodDisruptionBudgetStatus) GetCurrentHealthy() int32 { - if m != nil && m.CurrentHealthy != nil { - return *m.CurrentHealthy - } - return 0 -} - -func (m *PodDisruptionBudgetStatus) GetDesiredHealthy() int32 { - if m != nil && m.DesiredHealthy != nil { - return *m.DesiredHealthy - } - return 0 -} - -func (m *PodDisruptionBudgetStatus) GetExpectedPods() int32 { - if m != nil && m.ExpectedPods != nil { - return *m.ExpectedPods - } - return 0 -} - -func init() { - proto.RegisterType((*Eviction)(nil), "github.com/ericchiang.k8s.apis.policy.v1alpha1.Eviction") - proto.RegisterType((*PodDisruptionBudget)(nil), "github.com/ericchiang.k8s.apis.policy.v1alpha1.PodDisruptionBudget") - proto.RegisterType((*PodDisruptionBudgetList)(nil), "github.com/ericchiang.k8s.apis.policy.v1alpha1.PodDisruptionBudgetList") - proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "github.com/ericchiang.k8s.apis.policy.v1alpha1.PodDisruptionBudgetSpec") - proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "github.com/ericchiang.k8s.apis.policy.v1alpha1.PodDisruptionBudgetStatus") -} -func (m *Eviction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n1, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.DeleteOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) - n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n3, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.Spec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.Status != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n6, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MinAvailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) - n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, 
err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.DisruptionAllowed != nil { - dAtA[i] = 0x8 - i++ - if *m.DisruptionAllowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.CurrentHealthy != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentHealthy)) - } - if m.DesiredHealthy != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DesiredHealthy)) - } - if m.ExpectedPods != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpectedPods)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Eviction) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DeleteOptions != nil { - l = m.DeleteOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PodDisruptionBudget) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PodDisruptionBudgetList) Size() (n int) { - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PodDisruptionBudgetSpec) Size() (n int) { - var l int - _ = l - if m.MinAvailable != nil { - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - var l int - _ = l - if m.DisruptionAllowed != nil { - n += 2 - } - if m.CurrentHealthy != nil { - n += 1 + sovGenerated(uint64(*m.CurrentHealthy)) - } - if m.DesiredHealthy != nil { - n += 1 + sovGenerated(uint64(*m.DesiredHealthy)) - } - if m.ExpectedPods != nil { - n += 1 + sovGenerated(uint64(*m.ExpectedPods)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - 
return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Eviction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Eviction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_api_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_kubernetes_pkg_api_v1.DeleteOptions{} - } - if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_api_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &PodDisruptionBudgetSpec{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &PodDisruptionBudgetStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_api_unversioned.ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MinAvailable == nil { - m.MinAvailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} - } - if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptionAllowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DisruptionAllowed = &b - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentHealthy = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DesiredHealthy = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ExpectedPods = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/policy/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 520 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x92, 0xcd, 0x8a, 0x53, 0x31, - 0x14, 0x80, 0xbd, 0xf3, 0x23, 0x25, 0x33, 0x0a, 0xc6, 0x85, 0xb5, 0x8b, 0x22, 0x5d, 0x48, 0xd1, - 0x31, 0x97, 0x16, 0x05, 0x71, 0x23, 0x33, 0x76, 0x40, 0x51, 0x69, 0x4d, 0x11, 0x41, 0x70, 0x91, - 0xde, 0x1c, 0x3a, 0xb1, 0x69, 0x12, 0x92, 0x73, 0xaf, 0xce, 0x73, 0xb8, 0x71, 0xef, 0x73, 0x88, - 0x5b, 0x97, 0x3e, 0x82, 0xd4, 0x17, 0x91, 0x7b, 0xfb, 0xe3, 0xf4, 0xe7, 0x0e, 0x03, 0xe3, 0xf6, - 0xe4, 0x7c, 0x5f, 0xce, 0x1f, 0x79, 0x32, 0x7a, 0x1c, 0x98, 0xb2, 0xf1, 0x28, 0x1d, 0x80, 0x37, - 0x80, 0x10, 0x62, 0x37, 0x1a, 0xc6, 0xc2, 0xa9, 0x10, 0x3b, 0xab, 0x55, 0x72, 0x1a, 0x67, 0x2d, - 0xa1, 0xdd, 0x89, 0x68, 0xc5, 0x43, 0x30, 0xe0, 0x05, 0x82, 0x64, 0xce, 0x5b, 0xb4, 0xf4, 0xde, - 0x94, 0x65, 0xff, 0x58, 0xe6, 0x46, 0x43, 0x96, 0xb3, 0x6c, 0xca, 0xb2, 0x39, 0x5b, 0x6b, 0x97, - 0xfe, 0x13, 0x7b, 0x08, 0x36, 0xf5, 0x09, 0xac, 0xfa, 0x6b, 0x8f, 0xca, 0x99, 0xd4, 0x64, 0xe0, - 0x83, 0xb2, 0x06, 0xe4, 0x1a, 0x76, 0x50, 0x8e, 0x65, 0x6b, 0x4d, 0xd4, 0x1e, 0x6c, 0xce, 0xf6, - 0xa9, 0x41, 0x35, 0x5e, 0xaf, 0xa9, 0xb5, 0x39, 0x3d, 0x45, 0xa5, 0x63, 0x65, 0x30, 0xa0, 0x5f, - 0x45, 0x1a, 0xdf, 0x22, 0x52, 0x39, 0xce, 0x54, 0x82, 0xca, 0x1a, 0xda, 0x21, 0x95, 0x31, 0xa0, - 0x90, 
0x02, 0x45, 0x35, 0xba, 0x13, 0x35, 0xf7, 0xda, 0x4d, 0x56, 0x3a, 0x46, 0x96, 0xb5, 0x58, - 0x77, 0xf0, 0x11, 0x12, 0x7c, 0x0d, 0x28, 0xf8, 0x82, 0xa4, 0x6f, 0xc8, 0x35, 0x09, 0x1a, 0x10, - 0xba, 0x2e, 0xb7, 0x86, 0xea, 0x56, 0xa1, 0xba, 0x7f, 0xbe, 0xaa, 0x73, 0x16, 0xe1, 0xcb, 0x86, - 0xc6, 0x97, 0x2d, 0x72, 0xb3, 0x67, 0x65, 0x47, 0x05, 0x9f, 0x16, 0xa1, 0xa3, 0x54, 0x0e, 0x01, - 0xff, 0x53, 0xc1, 0xef, 0xc8, 0x4e, 0x70, 0x90, 0xcc, 0xea, 0x7c, 0xc6, 0x2e, 0x7e, 0x39, 0x6c, - 0x43, 0x51, 0x7d, 0x07, 0x09, 0x2f, 0x84, 0xf4, 0x03, 0xb9, 0x1a, 0x50, 0x60, 0x1a, 0xaa, 0xdb, - 0x85, 0xfa, 0xf8, 0xb2, 0xea, 0x42, 0xc6, 0x67, 0xd2, 0xc6, 0xf7, 0x88, 0xdc, 0xda, 0x90, 0xf5, - 0x4a, 0x05, 0xa4, 0x2f, 0xd7, 0x26, 0x13, 0x9f, 0x33, 0x99, 0x33, 0x17, 0xcb, 0x72, 0x7c, 0x65, - 0x40, 0x6f, 0xc9, 0xae, 0x42, 0x18, 0xe7, 0x9b, 0xdc, 0x6e, 0xee, 0xb5, 0x9f, 0x5e, 0xb2, 0x0d, - 0x3e, 0xb5, 0x35, 0x7e, 0x6c, 0xae, 0x3f, 0x1f, 0x20, 0xe5, 0x64, 0x7f, 0xac, 0xcc, 0x61, 0x26, - 0x94, 0x16, 0x03, 0x0d, 0xb3, 0x1e, 0x58, 0xc9, 0xcf, 0xf9, 0x85, 0xb3, 0xe9, 0x85, 0xb3, 0x17, - 0x06, 0xbb, 0xbe, 0x8f, 0x5e, 0x99, 0x21, 0x5f, 0x72, 0xd0, 0x1e, 0xa9, 0x04, 0xd0, 0x90, 0xa0, - 0xf5, 0xb3, 0x5d, 0x3f, 0xbc, 0xe8, 0x4c, 0xc4, 0x00, 0x74, 0x7f, 0xc6, 0xf2, 0x85, 0x25, 0xdf, - 0xc0, 0xed, 0xd2, 0x3d, 0xd1, 0x03, 0x72, 0x43, 0x2e, 0x5e, 0x0e, 0xb5, 0xb6, 0x9f, 0x40, 0x16, - 0x8d, 0x54, 0xf8, 0xfa, 0x03, 0xbd, 0x4b, 0xae, 0x27, 0xa9, 0xf7, 0x60, 0xf0, 0x39, 0x08, 0x8d, - 0x27, 0xa7, 0x45, 0x8d, 0xbb, 0x7c, 0x25, 0x9a, 0xe7, 0x49, 0x08, 0xca, 0x83, 0x9c, 0xe7, 0x6d, - 0x4f, 0xf3, 0x96, 0xa3, 0xb4, 0x41, 0xf6, 0xe1, 0xb3, 0x83, 0x04, 0x41, 0xf6, 0xac, 0x0c, 0xd5, - 0x9d, 0x22, 0x6b, 0x29, 0x76, 0x54, 0xfb, 0x39, 0xa9, 0x47, 0xbf, 0x26, 0xf5, 0xe8, 0xf7, 0xa4, - 0x1e, 0x7d, 0xfd, 0x53, 0xbf, 0xf2, 0xbe, 0x32, 0x5f, 0xdc, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x0a, 0xc2, 0x95, 0x04, 0x7d, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1beta1/generated.pb.go index 31916d66..6c919d33 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1beta1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/policy/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto + k8s.io/api/policy/v1beta1/generated.proto It has these top-level messages: Eviction @@ -20,11 +19,11 @@ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import _ "github.com/ericchiang/k8s/apis/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" -import k8s_io_kubernetes_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" +import k8s_io_apimachinery_pkg_util_intstr "github.com/ericchiang/k8s/util/intstr" import io "io" @@ -44,10 +43,10 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // created by POSTing to .../pods//evictions. type Eviction struct { // ObjectMeta describes the pod that is being evicted. - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // DeleteOptions may be provided - DeleteOptions *k8s_io_kubernetes_pkg_apis_meta_v1.DeleteOptions `protobuf:"bytes,2,opt,name=deleteOptions" json:"deleteOptions,omitempty"` - XXX_unrecognized []byte `json:"-"` + DeleteOptions *k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions `protobuf:"bytes,2,opt,name=deleteOptions" json:"deleteOptions,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Eviction) Reset() { *m = Eviction{} } @@ -55,14 +54,14 @@ func (m *Eviction) String() string { return proto.CompactTextString(m func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *Eviction) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Eviction) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } return nil } -func (m *Eviction) GetDeleteOptions() *k8s_io_kubernetes_pkg_apis_meta_v1.DeleteOptions { +func (m *Eviction) GetDeleteOptions() *k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions { if m != nil { return m.DeleteOptions } @@ -71,7 +70,7 @@ func (m *Eviction) GetDeleteOptions() *k8s_io_kubernetes_pkg_apis_meta_v1.Delete // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Specification of the desired behavior of the PodDisruptionBudget. Spec *PodDisruptionBudgetSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` // Most recently observed status of the PodDisruptionBudget. 
@@ -84,7 +83,7 @@ func (m *PodDisruptionBudget) String() string { return proto.CompactT func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *PodDisruptionBudget) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PodDisruptionBudget) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -107,9 +106,9 @@ func (m *PodDisruptionBudget) GetStatus() *PodDisruptionBudgetStatus { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - Items []*PodDisruptionBudget `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Items []*PodDisruptionBudget `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } @@ -117,7 +116,7 @@ func (m *PodDisruptionBudgetList) String() string { return proto.Comp func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *PodDisruptionBudgetList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *PodDisruptionBudgetList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -137,11 +136,16 @@ type PodDisruptionBudgetSpec struct { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". - MinAvailable *k8s_io_kubernetes_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=minAvailable" json:"minAvailable,omitempty"` + MinAvailable *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,1,opt,name=minAvailable" json:"minAvailable,omitempty"` // Label query over pods whose evictions are managed by the disruption // budget. - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` - XXX_unrecognized []byte `json:"-"` + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + // An eviction is allowed if at most "maxUnavailable" pods selected by + // "selector" are unavailable after the eviction, i.e. even in absence of + // the evicted pod. For example, one can prevent all voluntary evictions + // by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ MaxUnavailable *k8s_io_apimachinery_pkg_util_intstr.IntOrString `protobuf:"bytes,3,opt,name=maxUnavailable" json:"maxUnavailable,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } @@ -149,20 +153,27 @@ func (m *PodDisruptionBudgetSpec) String() string { return proto.Comp func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *PodDisruptionBudgetSpec) GetMinAvailable() *k8s_io_kubernetes_pkg_util_intstr.IntOrString { +func (m *PodDisruptionBudgetSpec) GetMinAvailable() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { if m != nil { return m.MinAvailable } return nil } -func (m *PodDisruptionBudgetSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *PodDisruptionBudgetSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } return nil } +func (m *PodDisruptionBudgetSpec) GetMaxUnavailable() *k8s_io_apimachinery_pkg_util_intstr.IntOrString { + if m != nil { + return m.MaxUnavailable + } + return nil +} + // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { @@ -181,7 +192,7 @@ type PodDisruptionBudgetStatus struct { // the list automatically by PodDisruptionBudget controller after some time. // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. - DisruptedPods map[string]*k8s_io_kubernetes_pkg_apis_meta_v1.Time `protobuf:"bytes,2,rep,name=disruptedPods" json:"disruptedPods,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + DisruptedPods map[string]*k8s_io_apimachinery_pkg_apis_meta_v1.Time `protobuf:"bytes,2,rep,name=disruptedPods" json:"disruptedPods,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Number of pod disruptions that are currently allowed. 
DisruptionsAllowed *int32 `protobuf:"varint,3,opt,name=disruptionsAllowed" json:"disruptionsAllowed,omitempty"` // current number of healthy pods @@ -207,7 +218,7 @@ func (m *PodDisruptionBudgetStatus) GetObservedGeneration() int64 { return 0 } -func (m *PodDisruptionBudgetStatus) GetDisruptedPods() map[string]*k8s_io_kubernetes_pkg_apis_meta_v1.Time { +func (m *PodDisruptionBudgetStatus) GetDisruptedPods() map[string]*k8s_io_apimachinery_pkg_apis_meta_v1.Time { if m != nil { return m.DisruptedPods } @@ -243,11 +254,11 @@ func (m *PodDisruptionBudgetStatus) GetExpectedPods() int32 { } func init() { - proto.RegisterType((*Eviction)(nil), "github.com/ericchiang.k8s.apis.policy.v1beta1.Eviction") - proto.RegisterType((*PodDisruptionBudget)(nil), "github.com/ericchiang.k8s.apis.policy.v1beta1.PodDisruptionBudget") - proto.RegisterType((*PodDisruptionBudgetList)(nil), "github.com/ericchiang.k8s.apis.policy.v1beta1.PodDisruptionBudgetList") - proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "github.com/ericchiang.k8s.apis.policy.v1beta1.PodDisruptionBudgetSpec") - proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "github.com/ericchiang.k8s.apis.policy.v1beta1.PodDisruptionBudgetStatus") + proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") + proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") + proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") + proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") + proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") } func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -419,6 +430,16 @@ func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + if m.MaxUnavailable != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -465,11 +486,11 @@ func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(v.Size())) - n9, err := v.MarshalTo(dAtA[i:]) + n10, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } } } @@ -499,24 +520,6 @@ func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -594,6 +597,10 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != 
nil { n += len(m.XXX_unrecognized) } @@ -706,7 +713,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -739,7 +746,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_kubernetes_pkg_apis_meta_v1.DeleteOptions{} + m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} } if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -823,7 +830,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -973,7 +980,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1088,7 +1095,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.MinAvailable == nil { - m.MinAvailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + m.MinAvailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1121,12 +1128,45 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1224,51 +1264,14 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]*k8s_io_kubernetes_pkg_apis_meta_v1.Time) + m.DisruptedPods = make(map[string]*k8s_io_apimachinery_pkg_apis_meta_v1.Time) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *k8s_io_apimachinery_pkg_apis_meta_v1.Time + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1278,46 +1281,85 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_apis_meta_v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DisruptedPods[mapkey] = mapvalue - } else { - var mapvalue *k8s_io_kubernetes_pkg_apis_meta_v1.Time - m.DisruptedPods[mapkey] = mapvalue } + m.DisruptedPods[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 0 { @@ -1526,48 +1568,47 @@ 
var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/policy/v1beta1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 596 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x93, 0xcd, 0x4e, 0x14, 0x4f, - 0x14, 0xc5, 0xff, 0xcd, 0x30, 0xfc, 0xb1, 0x00, 0x63, 0xca, 0x85, 0x03, 0x8b, 0x89, 0xe9, 0x85, - 0xc1, 0x04, 0xab, 0x33, 0xc4, 0x05, 0xba, 0x20, 0x81, 0x0c, 0x11, 0x0d, 0x04, 0x52, 0x10, 0x49, - 0x8c, 0x9b, 0xea, 0xae, 0x9b, 0xa1, 0x98, 0xfe, 0x4a, 0xd5, 0xed, 0xd6, 0x59, 0xf8, 0x1c, 0xfa, - 0x16, 0xae, 0x4d, 0x7c, 0x00, 0x97, 0xae, 0x5d, 0x99, 0xf1, 0x45, 0x4c, 0x7f, 0xcc, 0xc8, 0xd0, - 0xcd, 0x38, 0x06, 0x76, 0x9d, 0xea, 0x73, 0x7e, 0x75, 0xef, 0xb9, 0xb7, 0xc8, 0xb3, 0xfe, 0x96, - 0x61, 0x2a, 0x72, 0xfa, 0x89, 0x0b, 0x3a, 0x04, 0x04, 0xe3, 0xc4, 0xfd, 0x9e, 0x23, 0x62, 0x65, - 0x9c, 0x38, 0xf2, 0x95, 0x37, 0x70, 0xd2, 0x8e, 0x0b, 0x28, 0x3a, 0x4e, 0x0f, 0x42, 0xd0, 0x02, - 0x41, 0xb2, 0x58, 0x47, 0x18, 0xd1, 0xc7, 0x85, 0x95, 0xfd, 0xb1, 0xb2, 0xb8, 0xdf, 0x63, 0x99, - 0x95, 0x15, 0x56, 0x56, 0x5a, 0xd7, 0x36, 0xa7, 0xdc, 0x12, 0x00, 0x0a, 0x27, 0xad, 0xe0, 0xd7, - 0x9e, 0xd4, 0x7b, 0x74, 0x12, 0xa2, 0x0a, 0xa0, 0x22, 0x7f, 0x3a, 0x5d, 0x6e, 0xbc, 0x73, 0x08, - 0x44, 0xc5, 0xd5, 0xa9, 0x77, 0x25, 0xa8, 0x7c, 0x47, 0x85, 0x68, 0x50, 0x57, 0x2c, 0x1b, 0xd7, - 0xf6, 0x52, 0xd3, 0x85, 0xfd, 0xd9, 0x22, 0x8b, 0x7b, 0xa9, 0xf2, 0x50, 0x45, 0x21, 0x7d, 0x45, - 0x16, 0xb3, 0x6e, 0xa5, 0x40, 0xd1, 0xb2, 0x1e, 0x5a, 0xeb, 0x4b, 0x9b, 0x8c, 0x4d, 0x09, 0x31, - 0xd3, 0xb2, 0xb4, 0xc3, 0x8e, 0xdc, 0x0b, 0xf0, 0xf0, 0x10, 0x50, 0xf0, 0xb1, 0x9f, 0x9e, 0x91, - 0x15, 0x09, 0x3e, 0x20, 0x1c, 0xc5, 0x19, 0xdb, 0xb4, 0xe6, 0x72, 0x60, 0x67, 0x16, 0x60, 0xf7, - 0xb2, 0x91, 0x4f, 0x72, 0xec, 0x8f, 0x73, 0xe4, 0xfe, 0x71, 0x24, 0xbb, 0xca, 0xe8, 0x24, 0x3f, - 0xda, 0x4d, 0x64, 0x0f, 0xf0, 0x56, 0x8b, 0x7f, 0x4d, 0xe6, 0x4d, 0x0c, 0x5e, 0x59, 0xf3, 0x2e, - 0x9b, 0x79, 0x93, 0x58, 0x4d, 0x65, 0x27, 0x31, 0x78, 0x3c, 0xe7, 0xd1, 0xb7, 0x64, 0xc1, 0xa0, - 0xc0, 0xc4, 0xb4, 0x1a, 0x39, 0xb9, 0x7b, 0x43, 0x72, 0xce, 0xe2, 0x25, 0xd3, 0xfe, 0x62, 0x91, - 0x07, 0x35, 0xaa, 0x03, 0x65, 0x90, 0xee, 0x57, 0xd2, 0xd9, 0x98, 0x25, 0x9d, 0xcc, 0x7b, 0x25, - 0x9b, 0x53, 0xd2, 0x54, 0x08, 0x41, 0x36, 0xd0, 0xc6, 0xfa, 0xd2, 0xe6, 0xf6, 0xcd, 0x5a, 0xe0, - 0x05, 0xcc, 0xfe, 0x5a, 0x5f, 0x7b, 0x96, 0x1d, 0xe5, 0x64, 0x39, 0x50, 0xe1, 0x4e, 0x2a, 0x94, - 0x2f, 0x5c, 0x1f, 0xfe, 0x32, 0xdd, 0xec, 0x6d, 0xb0, 0xe2, 0x6d, 0xb0, 0x97, 0x21, 0x1e, 0xe9, - 0x13, 0xd4, 0x2a, 0xec, 0xf1, 0x09, 0x06, 0x3d, 0x24, 0x8b, 0x06, 0x7c, 0xf0, 0x30, 0xd2, 0xff, - 0xb2, 0x99, 0x07, 0xc2, 0x05, 0xff, 0xa4, 0x34, 0xf2, 0x31, 0xc2, 0xfe, 0xd1, 0x20, 0xab, 0xd7, - 0x0e, 0x88, 0x32, 0x42, 0x23, 0xd7, 0x80, 0x4e, 0x41, 0xbe, 0x28, 0xde, 0x9f, 0x8a, 0xc2, 0xbc, - 0x8d, 0x06, 0xaf, 0xf9, 0x43, 0x3f, 0x90, 0x15, 0x59, 0x90, 0x40, 0x1e, 0x47, 0x72, 0x14, 0xf5, - 0xd9, 0x6d, 0x6c, 0x0b, 0xeb, 0x5e, 0x26, 0xef, 0x85, 0xa8, 0x07, 0x7c, 0xf2, 0xb6, 0xac, 0x5c, - 0x39, 0xf6, 0x9a, 0x1d, 0xdf, 0x8f, 0xde, 0x81, 0xcc, 0x37, 0xb6, 0xc9, 0x6b, 0xfe, 0xd0, 0x47, - 0xe4, 0xae, 0x97, 0x68, 0x0d, 0x21, 0xee, 0x83, 0xf0, 0xf1, 0x7c, 0xd0, 0x9a, 0xcf, 0xb5, 0x57, - 0x4e, 0x33, 0x9d, 0x04, 0xa3, 0x34, 0xc8, 0x91, 0xae, 0x59, 0xe8, 0x26, 0x4f, 0xa9, 0x4d, 0x96, - 0xe1, 0x7d, 0x0c, 0xde, 
0xa8, 0xfb, 0x85, 0x5c, 0x35, 0x71, 0xb6, 0x76, 0x41, 0x68, 0xb5, 0x11, - 0x7a, 0x8f, 0x34, 0xfa, 0x30, 0xc8, 0x93, 0xbd, 0xc3, 0xb3, 0x4f, 0xba, 0x4d, 0x9a, 0xa9, 0xf0, - 0x13, 0x28, 0x87, 0xbc, 0x3e, 0xcb, 0x90, 0x4f, 0x55, 0x00, 0xbc, 0xb0, 0x3d, 0x9f, 0xdb, 0xb2, - 0x76, 0x57, 0xbf, 0x0d, 0xdb, 0xd6, 0xf7, 0x61, 0xdb, 0xfa, 0x39, 0x6c, 0x5b, 0x9f, 0x7e, 0xb5, - 0xff, 0x7b, 0xf3, 0x7f, 0x19, 0xf3, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0xdf, 0xc5, 0xa1, - 0x9f, 0x06, 0x00, 0x00, + // 611 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xc7, 0x7f, 0x6e, 0x9a, 0xfe, 0xca, 0xd2, 0x56, 0x68, 0x39, 0x90, 0xf6, 0x10, 0x21, 0x1f, + 0x10, 0x70, 0x58, 0xd3, 0x3f, 0x42, 0x15, 0x27, 0x5a, 0xa5, 0x14, 0x50, 0x50, 0x2a, 0xb7, 0x48, + 0xc0, 0x6d, 0xe3, 0x1d, 0xa5, 0x4b, 0xec, 0x5d, 0x6b, 0x77, 0x6c, 0x9a, 0x37, 0x41, 0xbc, 0x02, + 0x47, 0x5e, 0x82, 0x03, 0x07, 0x1e, 0x01, 0x95, 0xa7, 0xe0, 0x86, 0xd6, 0x4e, 0xd3, 0xfc, 0x55, + 0xd3, 0x8a, 0x5b, 0x34, 0xfe, 0x7e, 0x3e, 0x9a, 0x99, 0x9d, 0x90, 0x47, 0xdd, 0x5d, 0xcb, 0xa4, + 0x0e, 0x78, 0x2a, 0x83, 0x54, 0xc7, 0x32, 0xea, 0x05, 0xf9, 0x66, 0x1b, 0x90, 0x6f, 0x06, 0x1d, + 0x50, 0x60, 0x38, 0x82, 0x60, 0xa9, 0xd1, 0xa8, 0xe9, 0x7a, 0x19, 0x65, 0x3c, 0x95, 0xac, 0x8c, + 0xb2, 0x7e, 0x74, 0xc3, 0x1f, 0xb2, 0x44, 0xda, 0x40, 0x90, 0x4f, 0xe0, 0x1b, 0x3b, 0x97, 0x99, + 0x84, 0x47, 0xa7, 0x52, 0x81, 0xe9, 0x05, 0x69, 0xb7, 0xe3, 0x0a, 0x36, 0x48, 0x00, 0xf9, 0x34, + 0x2a, 0x98, 0x45, 0x99, 0x4c, 0xa1, 0x4c, 0x60, 0x02, 0x78, 0x7a, 0x15, 0x60, 0xa3, 0x53, 0x48, + 0xf8, 0x04, 0xb7, 0x3d, 0x8b, 0xcb, 0x50, 0xc6, 0x81, 0x54, 0x68, 0xd1, 0x8c, 0x43, 0xfe, 0x37, + 0x8f, 0x2c, 0x1f, 0xe4, 0x32, 0x42, 0xa9, 0x15, 0x6d, 0x92, 0x65, 0x37, 0x85, 0xe0, 0xc8, 0x6b, + 0xde, 0x7d, 0xef, 0xe1, 0xed, 0xad, 0x27, 0xec, 0x72, 0x65, 0x03, 0x29, 0x4b, 0xbb, 0x1d, 0x57, + 0xb0, 0xcc, 0xa5, 0x59, 0xbe, 0xc9, 0x5a, 0xed, 0x8f, 0x10, 0xe1, 0x1b, 0x40, 0x1e, 0x0e, 0x0c, + 0xf4, 0x3d, 0x59, 0x15, 0x10, 0x03, 0x42, 0x2b, 0x75, 0x76, 0x5b, 0x5b, 0x28, 0x94, 0xdb, 0xf3, + 0x29, 0x1b, 0xc3, 0x68, 0x38, 0x6a, 0xf2, 0xff, 0x78, 0xe4, 0xee, 0x91, 0x16, 0x0d, 0x69, 0x4d, + 0x56, 0x94, 0xf6, 0x33, 0xd1, 0x01, 0xfc, 0xc7, 0x03, 0xbc, 0x20, 0x8b, 0x36, 0x85, 0xa8, 0xdf, + 0xf7, 0x16, 0x9b, 0x79, 0x3d, 0x6c, 0x4a, 0x2f, 0xc7, 0x29, 0x44, 0x61, 0xc1, 0xd3, 0x26, 0x59, + 0xb2, 0xc8, 0x31, 0xb3, 0xb5, 0x4a, 0x61, 0xda, 0xb9, 0xa6, 0xa9, 0x60, 0xc3, 0xbe, 0xc3, 0xff, + 0xea, 0x91, 0x7b, 0x53, 0x52, 0x4d, 0x69, 0x91, 0xbe, 0x9e, 0x98, 0x9f, 0xcd, 0x37, 0xbf, 0xa3, + 0xc7, 0xa6, 0x6f, 0x90, 0xaa, 0x44, 0x48, 0xdc, 0xb3, 0x55, 0xc6, 0x44, 0x73, 0x34, 0x1d, 0x96, + 0xb0, 0xff, 0x65, 0x61, 0x6a, 0xb7, 0x6e, 0x3b, 0xf4, 0x84, 0xac, 0x24, 0x52, 0xed, 0xe5, 0x5c, + 0xc6, 0xbc, 0x1d, 0xc3, 0x95, 0x2f, 0xe6, 0xee, 0x98, 0x95, 0x77, 0xcc, 0x5e, 0x29, 0x6c, 0x99, + 0x63, 0x34, 0x52, 0x75, 0xc2, 0x11, 0x0b, 0x6d, 0x91, 0x65, 0x0b, 0x31, 0x44, 0xa8, 0xcd, 0xf5, + 0x2e, 0xae, 0xc9, 0xdb, 0x10, 0x1f, 0xf7, 0xd1, 0x70, 0x20, 0xa1, 0xef, 0xc8, 0x5a, 0xc2, 0xcf, + 0xde, 0x2a, 0x3e, 0x68, 0xb4, 0x72, 0xc3, 0x46, 0xc7, 0x3c, 0xfe, 0x8f, 0x0a, 0x59, 0x9f, 0xf9, + 0xe0, 0x94, 0x11, 0xaa, 0xdb, 0x16, 0x4c, 0x0e, 0xe2, 0xb0, 0xfc, 0xd7, 0x4a, 0xad, 0x8a, 0x25, + 0x55, 0xc2, 0x29, 0x5f, 0x68, 0x42, 0x56, 0x45, 0x69, 0x02, 0x71, 0xa4, 0xc5, 0xc5, 0xc3, 0x1d, + 0xde, 0xe4, 0xda, 0x58, 0x63, 0xd8, 0x74, 0xa0, 0xd0, 0xf4, 0xc2, 0x51, 0xbb, 0x6b, 0x4f, 0x0c, + 0x58, 0xbb, 0x17, 0xc7, 0xfa, 0x13, 0x88, 0x62, 0x35, 0xd5, 0x70, 0xca, 
0x17, 0xfa, 0x80, 0xac, + 0x45, 0x99, 0x31, 0xa0, 0xf0, 0x25, 0xf0, 0x18, 0x4f, 0x7b, 0xb5, 0xc5, 0x22, 0x3b, 0x56, 0x75, + 0x39, 0x01, 0x56, 0x1a, 0x10, 0x17, 0xb9, 0x6a, 0x99, 0x1b, 0xad, 0x52, 0x9f, 0xac, 0xc0, 0x59, + 0x0a, 0xd1, 0xc5, 0xb4, 0x4b, 0x45, 0x6a, 0xa4, 0xb6, 0x11, 0x13, 0x3a, 0x39, 0x08, 0xbd, 0x43, + 0x2a, 0x5d, 0xe8, 0x15, 0x9b, 0xbc, 0x15, 0xba, 0x9f, 0xf4, 0x39, 0xa9, 0xe6, 0x3c, 0xce, 0xa0, + 0x7f, 0x30, 0x8f, 0xe7, 0x3b, 0x98, 0x13, 0x99, 0x40, 0x58, 0x82, 0xcf, 0x16, 0x76, 0xbd, 0xfd, + 0xf5, 0xef, 0xe7, 0x75, 0xef, 0xe7, 0x79, 0xdd, 0xfb, 0x75, 0x5e, 0xf7, 0x3e, 0xff, 0xae, 0xff, + 0xf7, 0xe1, 0xff, 0xfe, 0xa2, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x9a, 0x15, 0x41, 0xa5, + 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1beta1/register.go new file mode 100644 index 00000000..b6918916 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/policy/v1beta1/register.go @@ -0,0 +1,9 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("policy", "v1beta1", "poddisruptionbudgets", true, &PodDisruptionBudget{}) + + k8s.RegisterList("policy", "v1beta1", "poddisruptionbudgets", true, &PodDisruptionBudgetList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1alpha1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1alpha1/generated.pb.go index d9e04b96..cbfe7a82 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1alpha1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1alpha1/generated.pb.go @@ -1,21 +1,19 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/rbac/v1alpha1/generated.proto /* Package v1alpha1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto + k8s.io/api/rbac/v1alpha1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding - ClusterRoleBindingBuilder ClusterRoleBindingList ClusterRoleList PolicyRule - PolicyRuleBuilder Role RoleBinding RoleBindingList @@ -28,11 +26,10 @@ package v1alpha1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -47,22 +44,48 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
+ // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []*k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,rep,name=clusterRoleSelectors" json:"clusterRoleSelectors,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (m *AggregationRule) String() string { return proto.CompactTextString(m) } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *AggregationRule) GetClusterRoleSelectors() []*k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { + if m != nil { + return m.ClusterRoleSelectors + } + return nil +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Rules holds all the PolicyRules for this ClusterRole - Rules []*PolicyRule `protobuf:"bytes,2,rep,name=rules" json:"rules,omitempty"` - XXX_unrecognized []byte `json:"-"` + Rules []*PolicyRule `protobuf:"bytes,2,rep,name=rules" json:"rules,omitempty"` + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `protobuf:"bytes,3,opt,name=aggregationRule" json:"aggregationRule,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (m *ClusterRole) String() string { return proto.CompactTextString(m) } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *ClusterRole) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ClusterRole) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -76,12 +99,19 @@ func (m *ClusterRole) GetRules() []*PolicyRule { return nil } +func (m *ClusterRole) GetAggregationRule() *AggregationRule { + if m != nil { + return m.AggregationRule + } + return nil +} + // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. type ClusterRoleBinding struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Subjects holds references to the objects the role applies to. Subjects []*Subject `protobuf:"bytes,2,rep,name=subjects" json:"subjects,omitempty"` // RoleRef can only reference a ClusterRole in the global namespace. 
@@ -93,9 +123,9 @@ type ClusterRoleBinding struct { func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (m *ClusterRoleBinding) String() string { return proto.CompactTextString(m) } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *ClusterRoleBinding) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ClusterRoleBinding) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -116,34 +146,11 @@ func (m *ClusterRoleBinding) GetRoleRef() *RoleRef { return nil } -// +k8s:deepcopy-gen=false -// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. -// We use it to construct bindings in code. It's more compact than trying to write them -// out in a literal. -type ClusterRoleBindingBuilder struct { - ClusterRoleBinding *ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding" json:"clusterRoleBinding,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } -func (m *ClusterRoleBindingBuilder) String() string { return proto.CompactTextString(m) } -func (*ClusterRoleBindingBuilder) ProtoMessage() {} -func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *ClusterRoleBindingBuilder) GetClusterRoleBinding() *ClusterRoleBinding { - if m != nil { - return m.ClusterRoleBinding - } - return nil -} - // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of ClusterRoleBindings Items []*ClusterRoleBinding `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -154,7 +161,7 @@ func (m *ClusterRoleBindingList) String() string { return proto.Compa func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *ClusterRoleBindingList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ClusterRoleBindingList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -172,7 +179,7 @@ func (m *ClusterRoleBindingList) GetItems() []*ClusterRoleBinding { type ClusterRoleList struct { // Standard object's metadata. 
// +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of ClusterRoles Items []*ClusterRole `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -183,7 +190,7 @@ func (m *ClusterRoleList) String() string { return proto.CompactTextS func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *ClusterRoleList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ClusterRoleList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -261,32 +268,11 @@ func (m *PolicyRule) GetNonResourceURLs() []string { return nil } -// +k8s:deepcopy-gen=false -// PolicyRuleBuilder let's us attach methods. A no-no for API types. -// We use it to construct rules in code. It's more compact than trying to write them -// out in a literal and allows us to perform some basic checking during construction -type PolicyRuleBuilder struct { - PolicyRule *PolicyRule `protobuf:"bytes,1,opt,name=policyRule" json:"policyRule,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } -func (m *PolicyRuleBuilder) String() string { return proto.CompactTextString(m) } -func (*PolicyRuleBuilder) ProtoMessage() {} -func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *PolicyRuleBuilder) GetPolicyRule() *PolicyRule { - if m != nil { - return m.PolicyRule - } - return nil -} - // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Rules holds all the PolicyRules for this Role Rules []*PolicyRule `protobuf:"bytes,2,rep,name=rules" json:"rules,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -295,9 +281,9 @@ type Role struct { func (m *Role) Reset() { *m = Role{} } func (m *Role) String() string { return proto.CompactTextString(m) } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *Role) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Role) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -317,7 +303,7 @@ func (m *Role) GetRules() []*PolicyRule { type RoleBinding struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Subjects holds references to the objects the role applies to. Subjects []*Subject `protobuf:"bytes,2,rep,name=subjects" json:"subjects,omitempty"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
@@ -329,9 +315,9 @@ type RoleBinding struct { func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (m *RoleBinding) String() string { return proto.CompactTextString(m) } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *RoleBinding) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *RoleBinding) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -356,7 +342,7 @@ func (m *RoleBinding) GetRoleRef() *RoleRef { type RoleBindingList struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of RoleBindings Items []*RoleBinding `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -365,9 +351,9 @@ type RoleBindingList struct { func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (m *RoleBindingList) String() string { return proto.CompactTextString(m) } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *RoleBindingList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *RoleBindingList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -385,7 +371,7 @@ func (m *RoleBindingList) GetItems() []*RoleBinding { type RoleList struct { // Standard object's metadata. 
// +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of Roles Items []*Role `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -394,9 +380,9 @@ type RoleList struct { func (m *RoleList) Reset() { *m = RoleList{} } func (m *RoleList) String() string { return proto.CompactTextString(m) } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *RoleList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *RoleList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -424,7 +410,7 @@ type RoleRef struct { func (m *RoleRef) Reset() { *m = RoleRef{} } func (m *RoleRef) String() string { return proto.CompactTextString(m) } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *RoleRef) GetApiGroup() string { if m != nil && m.ApiGroup != nil { @@ -471,7 +457,7 @@ type Subject struct { func (m *Subject) Reset() { *m = Subject{} } func (m *Subject) String() string { return proto.CompactTextString(m) } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *Subject) GetKind() string { if m != nil && m.Kind != nil { @@ -502,21 +488,20 @@ func (m *Subject) GetNamespace() string { } func init() { - proto.RegisterType((*ClusterRole)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.ClusterRoleBinding") - proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.ClusterRoleBindingBuilder") - proto.RegisterType((*ClusterRoleBindingList)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.PolicyRule") - proto.RegisterType((*PolicyRuleBuilder)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.PolicyRuleBuilder") - proto.RegisterType((*Role)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.Role") - proto.RegisterType((*RoleBinding)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.RoleList") - proto.RegisterType((*RoleRef)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.RoleRef") - proto.RegisterType((*Subject)(nil), "github.com/ericchiang.k8s.apis.rbac.v1alpha1.Subject") -} -func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), 
"k8s.io.api.rbac.v1alpha1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1alpha1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1alpha1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") +} +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -526,24 +511,14 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n1, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) @@ -559,7 +534,7 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -569,7 +544,7 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -578,14 +553,14 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n2, err := m.Metadata.MarshalTo(dAtA[i:]) + n1, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n1 } - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { + if len(m.Rules) > 0 { + for _, msg := range m.Rules { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) @@ -596,15 +571,15 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { i += n } } - if m.RoleRef != nil { + if m.AggregationRule != nil { dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n2 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -612,7 +587,7 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *ClusterRoleBindingBuilder) Marshal() (dAtA []byte, err error) { +func (m *ClusterRoleBinding) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -622,16 +597,38 @@ func (m *ClusterRoleBindingBuilder) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterRoleBindingBuilder) MarshalTo(dAtA []byte) (int, error) { +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.ClusterRoleBinding != nil { + if m.Metadata != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClusterRoleBinding.Size())) - n4, err := m.ClusterRoleBinding.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n3, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.RoleRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -825,37 +822,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *PolicyRuleBuilder) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PolicyRuleBuilder) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PolicyRule != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PolicyRule.Size())) - n7, err := m.PolicyRule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -875,11 +841,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n8, err := m.Metadata.MarshalTo(dAtA[i:]) + n7, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n7 } if len(m.Rules) > 0 { for _, msg := range m.Rules { @@ -918,11 +884,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n9, err := m.Metadata.MarshalTo(dAtA[i:]) + n8, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n8 } if len(m.Subjects) > 0 { for _, msg := range m.Subjects { @@ -940,11 +906,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n10, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n9 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -971,11 +937,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n11, err := m.Metadata.MarshalTo(dAtA[i:]) + n10, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n10 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -1014,11 +980,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n12, err := m.Metadata.MarshalTo(dAtA[i:]) + n11, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n11 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -1122,24 +1088,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1149,6 +1097,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -1162,6 +1125,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1191,19 +1158,6 @@ func (m *ClusterRoleBinding) Size() (n int) { return n } -func (m *ClusterRoleBindingBuilder) Size() (n int) { - var l int - _ = l - if m.ClusterRoleBinding != nil { - l = m.ClusterRoleBinding.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - func (m *ClusterRoleBindingList) Size() (n int) { var l int _ = l @@ -1281,19 +1235,6 @@ func (m *PolicyRule) Size() (n int) { return n } -func (m *PolicyRuleBuilder) Size() (n int) { - var l int - _ = l - if m.PolicyRule != nil { - l = m.PolicyRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - func (m *Role) Size() (n int) { var l int _ = l @@ -1433,7 +1374,7 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *ClusterRole) Unmarshal(dAtA []byte) error { +func (m *AggregationRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1456,48 +1397,15 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1521,8 +1429,8 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, &PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1548,7 +1456,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { +func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1571,10 +1479,10 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1604,7 +1512,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1612,7 +1520,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1636,14 +1544,14 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Subjects = append(m.Subjects, &Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, &PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1667,10 +1575,10 @@ func (m 
*ClusterRoleBinding) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RoleRef == nil { - m.RoleRef = &RoleRef{} + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1696,7 +1604,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterRoleBindingBuilder) Unmarshal(dAtA []byte) error { +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1719,15 +1627,15 @@ func (m *ClusterRoleBindingBuilder) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1751,10 +1659,74 @@ func (m *ClusterRoleBindingBuilder) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ClusterRoleBinding == nil { - m.ClusterRoleBinding = &ClusterRoleBinding{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } - if err := m.ClusterRoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, &Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RoleRef == nil { + m.RoleRef = &RoleRef{} + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1836,7 +1808,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} 
+ m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1951,7 +1923,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2206,90 +2178,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *PolicyRuleBuilder) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PolicyRule == nil { - m.PolicyRule = &PolicyRule{} - } - if err := m.PolicyRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Role) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2346,7 +2234,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2461,7 +2349,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2609,7 +2497,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2724,7 +2612,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3200,50 +3088,49 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 637 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x54, 0xcd, 0x8a, 0x13, 0x41, - 0x10, 0xb6, 0x37, 0x89, 0x9b, 0x54, 0x90, 0xc5, 0x46, 0x64, 0x0c, 0x12, 0x96, 0xc1, 0x43, 0x0e, - 0x6b, 0x0f, 0x59, 0x56, 0xd8, 0x83, 0xa7, 0x55, 0x50, 0xf6, 0x47, 0xa5, 0xfd, 0x39, 0x78, 0xeb, - 0x4c, 0xca, 0x6c, 0x9b, 0xc9, 0xcc, 0xd0, 0xdd, 0x13, 0xf0, 0x11, 0x7c, 0x03, 0xf1, 0xe0, 0x4d, - 0xf0, 0xe4, 0xd1, 0x67, 0xf0, 0xe8, 0x23, 0xc8, 0xfa, 0x0c, 0xde, 0x3c, 0xc8, 0xfc, 0x26, 0xbb, - 0x33, 0x2e, 0xd9, 0xb8, 0x0b, 0x82, 0xa7, 0x4c, 0x7f, 0x55, 0xdf, 0xd7, 0x5f, 0x55, 0xa5, 0x0b, - 0xb6, 0xc7, 0xdb, 0x9a, 0xc9, 0xc0, 0x19, 0x47, 0x03, 0x54, 0x3e, 0x1a, 0xd4, 0x4e, 0x38, 0x1e, - 0x39, 0x22, 0x94, 0xda, 0x51, 0x03, 0xe1, 0x3a, 0xd3, 0xbe, 0xf0, 0xc2, 0x43, 0xd1, 0x77, 0x46, - 0xe8, 0xa3, 0x12, 0x06, 0x87, 0x2c, 0x54, 0x81, 0x09, 0x68, 0x2f, 0x65, 0xb2, 0x19, 0x93, 0x85, - 0xe3, 0x11, 0x8b, 0x99, 0x2c, 0x66, 0xb2, 0x9c, 0xd9, 0xd9, 0x3c, 0xe5, 0x8e, 0x09, 0x1a, 0xe1, - 0x4c, 0x4b, 0xea, 0x9d, 0xdb, 0xd5, 0x1c, 0x15, 0xf9, 0x46, 0x4e, 0xb0, 0x94, 0xbe, 0x75, 0x7a, - 0xba, 0x76, 0x0f, 0x71, 0x22, 0x4a, 0xac, 0x7e, 0x35, 0x2b, 0x32, 0xd2, 0x73, 0xa4, 0x6f, 0xb4, - 0x51, 0x25, 0xca, 0xc6, 0x1f, 0x6b, 0xa9, 0xa8, 0xc2, 0xfe, 0x48, 0xa0, 0x7d, 0xcf, 0x8b, 0xb4, - 0x41, 0xc5, 0x03, 0x0f, 0xe9, 0x2e, 0x34, 0xe3, 0x82, 0x87, 0xc2, 0x08, 0x8b, 0xac, 0x93, 0x5e, - 0x7b, 0x93, 0xb1, 0x53, 0xda, 0x18, 0xe7, 0xb2, 0x69, 0x9f, 0x3d, 0x1e, 0xbc, 0x46, 0xd7, 0x1c, - 0xa0, 0x11, 0xbc, 0xe0, 0xd3, 0x5d, 0x68, 0xa8, 0xc8, 0x43, 0x6d, 0xad, 0xac, 0xd7, 0x7a, 0xed, - 0xcd, 0x2d, 0xb6, 0xe8, 0x3c, 0xd8, 0x93, 0xc0, 0x93, 0xee, 0x1b, 
0x1e, 0x79, 0xc8, 0x53, 0x09, - 0xfb, 0x17, 0x01, 0x3a, 0xe7, 0x73, 0x47, 0xfa, 0x43, 0xe9, 0x8f, 0xce, 0xd5, 0xee, 0x01, 0x34, - 0x75, 0x94, 0x04, 0x72, 0xc7, 0xfd, 0xc5, 0x1d, 0x3f, 0x4d, 0x99, 0xbc, 0x90, 0xa0, 0x7b, 0xb0, - 0xaa, 0x02, 0x0f, 0x39, 0xbe, 0xb2, 0x6a, 0x89, 0xb3, 0x33, 0xa8, 0xf1, 0x94, 0xc8, 0x73, 0x05, - 0xfb, 0x2d, 0x81, 0x1b, 0xe5, 0xf2, 0x77, 0x22, 0xe9, 0x0d, 0x51, 0x51, 0x0f, 0xa8, 0x5b, 0x0a, - 0x66, 0xfd, 0xb8, 0xbb, 0xf8, 0xad, 0xe5, 0x0b, 0x78, 0x85, 0xae, 0xfd, 0x85, 0xc0, 0xf5, 0x72, - 0xea, 0xbe, 0xd4, 0x86, 0x3e, 0x2c, 0x8d, 0x63, 0x63, 0x91, 0x71, 0xc4, 0xdc, 0x13, 0xc3, 0xe0, - 0xd0, 0x90, 0x06, 0x27, 0xf9, 0x24, 0xfe, 0xae, 0x8a, 0x54, 0xca, 0xfe, 0x44, 0x60, 0x6d, 0x2e, - 0x7a, 0xce, 0x8e, 0xf7, 0x8e, 0x3b, 0xbe, 0xb3, 0x94, 0xe3, 0xdc, 0xea, 0x67, 0x02, 0x30, 0x7b, - 0x04, 0xf4, 0x1a, 0x34, 0xa6, 0xa8, 0x06, 0xda, 0x22, 0xeb, 0xb5, 0x5e, 0x8b, 0xa7, 0x07, 0x7a, - 0x13, 0x5a, 0x22, 0x94, 0x0f, 0x54, 0x10, 0x85, 0xda, 0xaa, 0x25, 0x91, 0x19, 0x10, 0x47, 0x15, - 0xea, 0x20, 0x52, 0x2e, 0x6a, 0xab, 0x9e, 0x46, 0x0b, 0x80, 0xde, 0x82, 0x2b, 0xf9, 0xe1, 0x91, - 0x98, 0xa0, 0xb6, 0x1a, 0x49, 0xc6, 0x71, 0x90, 0xf6, 0x60, 0xcd, 0x0f, 0x7c, 0x9e, 0x61, 0xcf, - 0xf9, 0xbe, 0xb6, 0x2e, 0x27, 0x79, 0x27, 0x61, 0x5b, 0xc2, 0xd5, 0x99, 0xdf, 0xfc, 0x7f, 0xf9, - 0x0c, 0x20, 0x2c, 0xc0, 0xac, 0xbd, 0xcb, 0x6d, 0x81, 0x39, 0x1d, 0xfb, 0x03, 0x81, 0xfa, 0x3f, - 0xbd, 0xab, 0x7e, 0x12, 0x68, 0xff, 0x8f, 0x4b, 0x2a, 0x7e, 0x5f, 0x17, 0xb7, 0x11, 0x96, 0x7f, - 0x5f, 0x15, 0xab, 0xe0, 0x3d, 0x81, 0xe6, 0x05, 0xec, 0x80, 0xfb, 0xc7, 0x3d, 0xb2, 0x33, 0x36, - 0x33, 0x33, 0x77, 0x00, 0xab, 0x59, 0x6f, 0x69, 0x07, 0x9a, 0xf9, 0x8b, 0x4e, 0xac, 0xb5, 0x78, - 0x71, 0xa6, 0x14, 0xea, 0x63, 0xe9, 0x0f, 0xad, 0x95, 0x04, 0x4f, 0xbe, 0x63, 0xcc, 0x17, 0x13, - 0x4c, 0x86, 0xd9, 0xe2, 0xc9, 0xb7, 0x1d, 0xc0, 0x6a, 0x36, 0xf8, 0x82, 0x42, 0xe6, 0x28, 0x5d, - 0x00, 0x11, 0xca, 0x17, 0xa8, 0xb4, 0x0c, 0xfc, 0x4c, 0x6c, 0x0e, 0xa9, 0x92, 0x8c, 0x77, 0x4b, - 0xfc, 0xab, 0x43, 0xe1, 0xa2, 0x55, 0x4f, 0x02, 0x33, 0x60, 0xa7, 0xf3, 0xf5, 0xa8, 0x4b, 0xbe, - 0x1d, 0x75, 0xc9, 0xf7, 0xa3, 0x2e, 0x79, 0xf7, 0xa3, 0x7b, 0xe9, 0x65, 0x33, 0xaf, 0xf3, 0x77, - 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x85, 0x03, 0x9e, 0xce, 0x09, 0x00, 0x00, + // 645 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcd, 0x8a, 0x13, 0x4d, + 0x14, 0xfd, 0x6a, 0x26, 0xf9, 0x26, 0xb9, 0xe1, 0x23, 0x50, 0x0c, 0x1f, 0x4d, 0x90, 0x30, 0x36, + 0x23, 0x44, 0x90, 0x6a, 0xe7, 0x07, 0x11, 0x07, 0x17, 0x8e, 0x0b, 0x41, 0x32, 0x2a, 0x35, 0xe8, + 0xc2, 0x5d, 0xa5, 0x73, 0xed, 0x29, 0xd3, 0xdd, 0xd5, 0x54, 0x55, 0x07, 0xc6, 0x67, 0x70, 0xe1, + 0x52, 0x7c, 0x02, 0x57, 0x3e, 0x87, 0x4b, 0x77, 0xee, 0x44, 0xc6, 0xad, 0x0f, 0x21, 0xdd, 0x9d, + 0xce, 0xef, 0xc4, 0xc9, 0x62, 0x06, 0xc4, 0x55, 0xba, 0xce, 0xbd, 0xe7, 0xf6, 0x39, 0xb7, 0xeb, + 0x04, 0x3a, 0x83, 0xbb, 0x86, 0x49, 0xe5, 0x89, 0x44, 0x7a, 0xba, 0x27, 0x7c, 0x6f, 0xb8, 0x23, + 0xc2, 0xe4, 0x44, 0xec, 0x78, 0x01, 0xc6, 0xa8, 0x85, 0xc5, 0x3e, 0x4b, 0xb4, 0xb2, 0x8a, 0x3a, + 0x45, 0x27, 0x13, 0x89, 0x64, 0x59, 0x27, 0x2b, 0x3b, 0x5b, 0xfb, 0x93, 0x19, 0x91, 0xf0, 0x4f, + 0x64, 0x8c, 0xfa, 0xd4, 0x4b, 0x06, 0x41, 0x06, 0x18, 0x2f, 0x42, 0x2b, 0xbc, 0xe1, 0xc2, 0xbc, + 0x96, 0xb7, 0x8c, 0xa5, 0xd3, 0xd8, 0xca, 0x08, 0x17, 0x08, 0x77, 0x2e, 0x22, 0x18, 0xff, 0x04, + 0x23, 0xb1, 0xc0, 0xdb, 0x5b, 0xc6, 0x4b, 0xad, 0x0c, 0x3d, 0x19, 0x5b, 0x63, 0xf5, 0x3c, 0xc9, + 0x7d, 0x03, 0xcd, 0x07, 0x41, 0xa0, 0x31, 0x10, 0x56, 0xaa, 
0x98, 0xa7, 0x21, 0xd2, 0x00, 0x36, + 0xfd, 0x30, 0x35, 0x16, 0x35, 0x57, 0x21, 0x1e, 0x63, 0x88, 0xbe, 0x55, 0xda, 0x38, 0x64, 0x6b, + 0xbd, 0xd3, 0xd8, 0xdd, 0x63, 0x93, 0xfd, 0x8c, 0x5f, 0xc3, 0x92, 0x41, 0x90, 0x01, 0x86, 0x65, + 0x5b, 0x60, 0xc3, 0x1d, 0xd6, 0x15, 0x3d, 0x0c, 0x4b, 0x2e, 0x3f, 0x77, 0xa0, 0xfb, 0x93, 0x40, + 0xe3, 0xe1, 0xa4, 0x40, 0xbb, 0x50, 0xcb, 0xe8, 0x7d, 0x61, 0x85, 0x43, 0xb6, 0x48, 0xa7, 0xb1, + 0x7b, 0x7b, 0xb5, 0x97, 0x3d, 0xed, 0xbd, 0x46, 0xdf, 0x1e, 0xa1, 0x15, 0x7c, 0x3c, 0x81, 0xde, + 0x83, 0xaa, 0x4e, 0x43, 0x34, 0xce, 0x5a, 0xae, 0x7b, 0x9b, 0x2d, 0xfb, 0xae, 0xec, 0x99, 0x0a, + 0xa5, 0x7f, 0x9a, 0x79, 0xe7, 0x05, 0x85, 0x1e, 0x43, 0x53, 0xcc, 0x6e, 0xc5, 0x59, 0xcf, 0x05, + 0xdd, 0x5c, 0x3e, 0x65, 0x6e, 0x8d, 0x7c, 0x7e, 0x82, 0xfb, 0x8d, 0x00, 0x9d, 0xb2, 0x7b, 0x28, + 0xe3, 0xbe, 0x8c, 0x83, 0x4b, 0x76, 0x7d, 0x1f, 0x6a, 0x26, 0xcd, 0x0b, 0xa5, 0xf1, 0xeb, 0xcb, + 0x25, 0x1f, 0x17, 0x9d, 0x7c, 0x4c, 0xa1, 0x07, 0xb0, 0xa1, 0x55, 0x88, 0x1c, 0x5f, 0x8d, 0x0c, + 0xff, 0x86, 0xcd, 0x8b, 0x46, 0x5e, 0x32, 0xdc, 0x8f, 0x04, 0xfe, 0x5f, 0x34, 0xd8, 0x95, 0xc6, + 0xd2, 0xc7, 0x0b, 0x26, 0xd9, 0x8a, 0xf7, 0x48, 0x9a, 0x79, 0x8b, 0x87, 0x50, 0x95, 0x16, 0xa3, + 0xd2, 0xdf, 0xad, 0xe5, 0x0a, 0x17, 0xc5, 0xf0, 0x82, 0xea, 0x7e, 0x20, 0xd0, 0x9c, 0xaa, 0x5e, + 0xba, 0xc6, 0x83, 0x59, 0x8d, 0x37, 0x56, 0xd2, 0x58, 0x8a, 0xfb, 0x44, 0x00, 0x26, 0x77, 0x92, + 0x6e, 0x42, 0x75, 0x88, 0xba, 0x57, 0x04, 0xb0, 0xce, 0x8b, 0x03, 0xbd, 0x06, 0x75, 0x91, 0xc8, + 0x47, 0x5a, 0xa5, 0x89, 0x71, 0xd6, 0xf3, 0xca, 0x04, 0xc8, 0xaa, 0x1a, 0x8d, 0x4a, 0xb5, 0x8f, + 0xc6, 0xa9, 0x14, 0xd5, 0x31, 0x40, 0xb7, 0xe1, 0xbf, 0xf2, 0xf0, 0x44, 0x44, 0x68, 0x9c, 0x6a, + 0xde, 0x31, 0x0b, 0xd2, 0x0e, 0x34, 0x63, 0x15, 0xf3, 0x11, 0xf6, 0x9c, 0x77, 0x8d, 0xf3, 0x6f, + 0xde, 0x37, 0x0f, 0xbb, 0xef, 0x08, 0x54, 0xfe, 0xac, 0x04, 0xbb, 0x5f, 0x09, 0x34, 0xfe, 0xce, + 0x94, 0x65, 0x57, 0xf7, 0x2a, 0xe3, 0xb5, 0xfa, 0xd5, 0x3d, 0x27, 0x57, 0x6f, 0x09, 0xd4, 0xae, + 0x24, 0x50, 0xfb, 0xb3, 0xaa, 0xda, 0x17, 0x2c, 0x6c, 0x24, 0xe7, 0x08, 0x36, 0x46, 0xfb, 0xa3, + 0x2d, 0xa8, 0x95, 0xf1, 0xc8, 0xc5, 0xd4, 0xf9, 0xf8, 0x4c, 0x29, 0x54, 0x06, 0x32, 0xee, 0x3b, + 0x6b, 0x39, 0x9e, 0x3f, 0x67, 0x58, 0x2c, 0xa2, 0xe2, 0x7f, 0xbf, 0xce, 0xf3, 0x67, 0x57, 0xc1, + 0xc6, 0xe8, 0x63, 0x8e, 0x29, 0x64, 0x8a, 0xd2, 0x06, 0x10, 0x89, 0x7c, 0x81, 0xda, 0x48, 0x15, + 0x8f, 0x86, 0x4d, 0x21, 0xe7, 0x8d, 0xcc, 0x82, 0x9a, 0xfd, 0x9a, 0x44, 0xf8, 0xe8, 0x54, 0xf2, + 0xc2, 0x04, 0x38, 0x6c, 0x7d, 0x3e, 0x6b, 0x93, 0x2f, 0x67, 0x6d, 0xf2, 0xfd, 0xac, 0x4d, 0xde, + 0xff, 0x68, 0xff, 0xf3, 0xb2, 0x56, 0xfa, 0xfc, 0x15, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xb8, 0xb6, + 0x64, 0xd2, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1alpha1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1alpha1/register.go new file mode 100644 index 00000000..27ad6aee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1alpha1/register.go @@ -0,0 +1,15 @@ +package v1alpha1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("rbac.authorization.k8s.io", "v1alpha1", "clusterroles", false, &ClusterRole{}) + k8s.Register("rbac.authorization.k8s.io", "v1alpha1", "clusterrolebindings", false, &ClusterRoleBinding{}) + k8s.Register("rbac.authorization.k8s.io", "v1alpha1", "roles", true, &Role{}) + k8s.Register("rbac.authorization.k8s.io", "v1alpha1", "rolebindings", true, &RoleBinding{}) + 
+ k8s.RegisterList("rbac.authorization.k8s.io", "v1alpha1", "clusterroles", false, &ClusterRoleList{}) + k8s.RegisterList("rbac.authorization.k8s.io", "v1alpha1", "clusterrolebindings", false, &ClusterRoleBindingList{}) + k8s.RegisterList("rbac.authorization.k8s.io", "v1alpha1", "roles", true, &RoleList{}) + k8s.RegisterList("rbac.authorization.k8s.io", "v1alpha1", "rolebindings", true, &RoleBindingList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1beta1/generated.pb.go index 25a1f66c..6d801253 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1beta1/generated.pb.go @@ -1,21 +1,19 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/rbac/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto + k8s.io/api/rbac/v1beta1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding - ClusterRoleBindingBuilder ClusterRoleBindingList ClusterRoleList PolicyRule - PolicyRuleBuilder Role RoleBinding RoleBindingList @@ -28,11 +26,11 @@ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import _ "github.com/ericchiang/k8s/apis/rbac/v1alpha1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -47,22 +45,48 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []*k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,rep,name=clusterRoleSelectors" json:"clusterRoleSelectors,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (m *AggregationRule) String() string { return proto.CompactTextString(m) } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *AggregationRule) GetClusterRoleSelectors() []*k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { + if m != nil { + return m.ClusterRoleSelectors + } + return nil +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { // Standard object's metadata. 
// +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Rules holds all the PolicyRules for this ClusterRole - Rules []*PolicyRule `protobuf:"bytes,2,rep,name=rules" json:"rules,omitempty"` - XXX_unrecognized []byte `json:"-"` + Rules []*PolicyRule `protobuf:"bytes,2,rep,name=rules" json:"rules,omitempty"` + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `protobuf:"bytes,3,opt,name=aggregationRule" json:"aggregationRule,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (m *ClusterRole) String() string { return proto.CompactTextString(m) } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *ClusterRole) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ClusterRole) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -76,12 +100,19 @@ func (m *ClusterRole) GetRules() []*PolicyRule { return nil } +func (m *ClusterRole) GetAggregationRule() *AggregationRule { + if m != nil { + return m.AggregationRule + } + return nil +} + // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. type ClusterRoleBinding struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Subjects holds references to the objects the role applies to. Subjects []*Subject `protobuf:"bytes,2,rep,name=subjects" json:"subjects,omitempty"` // RoleRef can only reference a ClusterRole in the global namespace. @@ -93,9 +124,9 @@ type ClusterRoleBinding struct { func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (m *ClusterRoleBinding) String() string { return proto.CompactTextString(m) } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *ClusterRoleBinding) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *ClusterRoleBinding) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -116,34 +147,11 @@ func (m *ClusterRoleBinding) GetRoleRef() *RoleRef { return nil } -// +k8s:deepcopy-gen=false -// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. -// We use it to construct bindings in code. It's more compact than trying to write them -// out in a literal. 
-type ClusterRoleBindingBuilder struct { - ClusterRoleBinding *ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding" json:"clusterRoleBinding,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } -func (m *ClusterRoleBindingBuilder) String() string { return proto.CompactTextString(m) } -func (*ClusterRoleBindingBuilder) ProtoMessage() {} -func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *ClusterRoleBindingBuilder) GetClusterRoleBinding() *ClusterRoleBinding { - if m != nil { - return m.ClusterRoleBinding - } - return nil -} - // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of ClusterRoleBindings Items []*ClusterRoleBinding `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -154,7 +162,7 @@ func (m *ClusterRoleBindingList) String() string { return proto.Compa func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *ClusterRoleBindingList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ClusterRoleBindingList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -172,7 +180,7 @@ func (m *ClusterRoleBindingList) GetItems() []*ClusterRoleBinding { type ClusterRoleList struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of ClusterRoles Items []*ClusterRole `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -183,7 +191,7 @@ func (m *ClusterRoleList) String() string { return proto.CompactTextS func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *ClusterRoleList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *ClusterRoleList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -206,7 +214,8 @@ type PolicyRule struct { // the enumerated resources in any API group will be allowed. // +optional ApiGroups []string `protobuf:"bytes,2,rep,name=apiGroups" json:"apiGroups,omitempty"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. // +optional Resources []string `protobuf:"bytes,3,rep,name=resources" json:"resources,omitempty"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. 
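
The hunks above and below this point are mechanical renames in the generated marshaling code, but the PolicyRule type they touch is the one piece of this package that callers typically build by hand. What follows is a minimal, illustrative Go sketch of constructing and wire-encoding a v1beta1 PolicyRule using only what this diff shows (the ApiGroups and Resources fields, and the generated Marshal method). The Verbs field, assumed here to be protobuf field 1 per the standard RBAC schema, does not appear in the excerpt, so treat the snippet as a hedged sketch rather than a verbatim part of the vendored package.

package main

import (
	"fmt"

	rbacv1beta1 "github.com/ericchiang/k8s/apis/rbac/v1beta1"
)

func main() {
	// Build a read-only rule for pods in the core ("") API group.
	// ApiGroups and Resources are taken from the struct shown in this diff;
	// Verbs is assumed and not visible in the excerpt above.
	rule := &rbacv1beta1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		ApiGroups: []string{""},     // "" selects the core API group
		Resources: []string{"pods"}, // "*" would match every resource in the group
	}

	// Marshal is the generated gogo marshaler shown earlier in this file:
	// each field is emitted as a tag byte followed by a varint length prefix
	// and the field bytes (encodeVarintGenerated / sovGenerated).
	wire, err := rule.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("PolicyRule encodes to %d bytes on the wire\n", len(wire))
}

The same construction should apply to the parallel v1alpha1 package in this diff, which differs only in its import path and descriptor indices.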
@@ -260,32 +269,11 @@ func (m *PolicyRule) GetNonResourceURLs() []string { return nil } -// +k8s:deepcopy-gen=false -// PolicyRuleBuilder let's us attach methods. A no-no for API types. -// We use it to construct rules in code. It's more compact than trying to write them -// out in a literal and allows us to perform some basic checking during construction -type PolicyRuleBuilder struct { - PolicyRule *PolicyRule `protobuf:"bytes,1,opt,name=policyRule" json:"policyRule,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } -func (m *PolicyRuleBuilder) String() string { return proto.CompactTextString(m) } -func (*PolicyRuleBuilder) ProtoMessage() {} -func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *PolicyRuleBuilder) GetPolicyRule() *PolicyRule { - if m != nil { - return m.PolicyRule - } - return nil -} - // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Rules holds all the PolicyRules for this Role Rules []*PolicyRule `protobuf:"bytes,2,rep,name=rules" json:"rules,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -294,9 +282,9 @@ type Role struct { func (m *Role) Reset() { *m = Role{} } func (m *Role) String() string { return proto.CompactTextString(m) } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *Role) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *Role) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -316,7 +304,7 @@ func (m *Role) GetRules() []*PolicyRule { type RoleBinding struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Subjects holds references to the objects the role applies to. Subjects []*Subject `protobuf:"bytes,2,rep,name=subjects" json:"subjects,omitempty"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -328,9 +316,9 @@ type RoleBinding struct { func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (m *RoleBinding) String() string { return proto.CompactTextString(m) } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *RoleBinding) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *RoleBinding) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -355,7 +343,7 @@ func (m *RoleBinding) GetRoleRef() *RoleRef { type RoleBindingList struct { // Standard object's metadata. 
// +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of RoleBindings Items []*RoleBinding `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -364,9 +352,9 @@ type RoleBindingList struct { func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (m *RoleBindingList) String() string { return proto.CompactTextString(m) } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *RoleBindingList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *RoleBindingList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -384,7 +372,7 @@ func (m *RoleBindingList) GetItems() []*RoleBinding { type RoleList struct { // Standard object's metadata. // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of Roles Items []*Role `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -393,9 +381,9 @@ type RoleList struct { func (m *RoleList) Reset() { *m = RoleList{} } func (m *RoleList) String() string { return proto.CompactTextString(m) } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *RoleList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *RoleList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -423,7 +411,7 @@ type RoleRef struct { func (m *RoleRef) Reset() { *m = RoleRef{} } func (m *RoleRef) String() string { return proto.CompactTextString(m) } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *RoleRef) GetApiGroup() string { if m != nil && m.ApiGroup != nil { @@ -469,7 +457,7 @@ type Subject struct { func (m *Subject) Reset() { *m = Subject{} } func (m *Subject) String() string { return proto.CompactTextString(m) } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *Subject) GetKind() string { if m != nil && m.Kind != nil { @@ -500,21 +488,20 @@ func (m *Subject) GetNamespace() string { } func init() { - proto.RegisterType((*ClusterRole)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.ClusterRoleBinding") - proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.ClusterRoleBindingBuilder") - proto.RegisterType((*ClusterRoleBindingList)(nil), 
"github.com/ericchiang.k8s.apis.rbac.v1beta1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.PolicyRule") - proto.RegisterType((*PolicyRuleBuilder)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.PolicyRuleBuilder") - proto.RegisterType((*Role)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.Role") - proto.RegisterType((*RoleBinding)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.RoleList") - proto.RegisterType((*RoleRef)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.RoleRef") - proto.RegisterType((*Subject)(nil), "github.com/ericchiang.k8s.apis.rbac.v1beta1.Subject") -} -func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1beta1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1beta1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1beta1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1beta1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") +} +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -524,24 +511,14 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n1, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) @@ -557,7 +534,7 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -567,7 +544,7 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -576,14 +553,14 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { 
dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n2, err := m.Metadata.MarshalTo(dAtA[i:]) + n1, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n1 } - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { + if len(m.Rules) > 0 { + for _, msg := range m.Rules { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) @@ -594,15 +571,15 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { i += n } } - if m.RoleRef != nil { + if m.AggregationRule != nil { dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n2 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -610,7 +587,7 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *ClusterRoleBindingBuilder) Marshal() (dAtA []byte, err error) { +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -620,16 +597,38 @@ func (m *ClusterRoleBindingBuilder) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterRoleBindingBuilder) MarshalTo(dAtA []byte) (int, error) { +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.ClusterRoleBinding != nil { + if m.Metadata != nil { dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClusterRoleBinding.Size())) - n4, err := m.ClusterRoleBinding.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n3, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.RoleRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -823,37 +822,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *PolicyRuleBuilder) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PolicyRuleBuilder) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PolicyRule != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PolicyRule.Size())) - n7, err := m.PolicyRule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -873,11 +841,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n8, err := m.Metadata.MarshalTo(dAtA[i:]) + n7, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n7 } if len(m.Rules) > 0 { for _, msg := range m.Rules { @@ -916,11 +884,11 @@ func (m *RoleBinding) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n9, err := m.Metadata.MarshalTo(dAtA[i:]) + n8, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n8 } if len(m.Subjects) > 0 { for _, msg := range m.Subjects { @@ -938,11 +906,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n10, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n9 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -969,11 +937,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n11, err := m.Metadata.MarshalTo(dAtA[i:]) + n10, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n10 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -1012,11 +980,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n12, err := m.Metadata.MarshalTo(dAtA[i:]) + n11, err := m.Metadata.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n11 } if len(m.Items) > 0 { for _, msg := range m.Items { @@ -1120,24 +1088,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1147,6 +1097,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -1160,6 +1125,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1189,19 +1158,6 @@ func (m *ClusterRoleBinding) Size() (n int) { return n } -func (m *ClusterRoleBindingBuilder) Size() (n int) { - var l int - _ = l - if m.ClusterRoleBinding != nil { - l = m.ClusterRoleBinding.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - func (m *ClusterRoleBindingList) Size() (n int) { var l int _ = l @@ -1279,19 +1235,6 @@ func (m *PolicyRule) Size() (n int) { return n } -func (m *PolicyRuleBuilder) Size() (n int) { - var l int - _ = l - if m.PolicyRule != nil { - l = 
m.PolicyRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - func (m *Role) Size() (n int) { var l int _ = l @@ -1431,7 +1374,7 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *ClusterRole) Unmarshal(dAtA []byte) error { +func (m *AggregationRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1454,48 +1397,15 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1519,8 +1429,8 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, &PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1546,7 +1456,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { +func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1569,10 +1479,10 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1602,7 +1512,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = 
&k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1610,7 +1520,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1634,14 +1544,14 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Subjects = append(m.Subjects, &Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, &PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1665,10 +1575,10 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RoleRef == nil { - m.RoleRef = &RoleRef{} + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1694,7 +1604,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterRoleBindingBuilder) Unmarshal(dAtA []byte) error { +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1717,15 +1627,15 @@ func (m *ClusterRoleBindingBuilder) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1749,10 +1659,74 @@ func (m *ClusterRoleBindingBuilder) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ClusterRoleBinding == nil { - m.ClusterRoleBinding = &ClusterRoleBinding{} + if m.Metadata == nil { + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } - if err := m.ClusterRoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, &Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RoleRef == nil { + m.RoleRef = &RoleRef{} + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1834,7 +1808,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1949,7 +1923,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2204,90 +2178,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *PolicyRuleBuilder) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PolicyRule == nil { - m.PolicyRule = &PolicyRule{} - } - if err := m.PolicyRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Role) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2344,7 +2234,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2459,7 +2349,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2607,7 +2497,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2722,7 +2612,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3198,50 +3088,49 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/rbac/v1beta1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 630 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x54, 0xbf, 0x6e, 0x13, 0x4f, - 0x10, 0xfe, 0x6d, 0x6c, 0xcb, 0xf6, 0x58, 0x3f, 0x45, 0xac, 0x10, 0xba, 0x44, 0xc8, 0x8a, 0x4e, - 0x48, 0xb8, 0x48, 0xf6, 0x70, 0x88, 0x04, 0x12, 0x5d, 0x52, 0x00, 0x51, 0xc2, 0x9f, 0x8d, 0x68, - 0xe8, 0xd6, 0xe7, 0xc1, 0x59, 0xee, 0x7c, 0x77, 0xda, 0xdd, 0xb3, 0xc4, 0x1b, 0xf0, 0x08, 0x50, - 0xd0, 0x21, 0xd1, 0xd1, 0xf1, 0x0e, 0x94, 0x3c, 0x02, 0x0a, 0x8f, 0x40, 0x47, 0x85, 0xee, 0xaf, - 0x9d, 0x9c, 0x89, 0x8c, 0x49, 0x24, 0x24, 0x2a, 0x7b, 0x67, 0xe6, 0xfb, 0xe6, 0x9b, 0x99, 0x9b, - 0x81, 0x3b, 0xde, 0x5d, 0xcd, 0x64, 0xe8, 0x78, 0xf1, 0x00, 0x55, 0x80, 0x06, 0xb5, 0x13, 0x79, - 0x23, 0x47, 0x44, 0x52, 0x3b, 0x6a, 0x20, 0x5c, 0x67, 0xd2, 0x1f, 0xa0, 0x11, 0x7d, 0x67, 0x84, - 0x01, 0x2a, 0x61, 0x70, 0xc8, 0x22, 0x15, 0x9a, 0x90, 0xde, 0xcc, 0x80, 0x6c, 0x0a, 0x64, 0x91, - 0x37, 0x62, 0x09, 0x90, 0x25, 0x40, 0x96, 0x03, 0xd7, 0xb7, 0xcf, 0xc9, 0x30, 0x46, 0x23, 0x9c, - 0x49, 0x85, 0x7c, 0x7d, 0x6b, 0x3e, 0x46, 0xc5, 0x81, 0x91, 0x63, 0xac, 0x84, 0xef, 0x9c, 0x1f, - 0xae, 0xdd, 0x63, 0x1c, 0x8b, 0x0a, 0xaa, 0x3f, 0x1f, 0x15, 0x1b, 0xe9, 0x3b, 0x32, 0x30, 0xda, - 0xa8, 0x0a, 0x64, 0xf3, 0x97, 0xb5, 0xcc, 0xa9, 0xc2, 0x7e, 0x4f, 0xa0, 0xb3, 0xe7, 0xc7, 0xda, - 0xa0, 0xe2, 0xa1, 0x8f, 0x74, 0x1f, 0x5a, 0x49, 0xc1, 0x43, 0x61, 0x84, 0x45, 0x36, 0x48, 0xaf, - 0xb3, 0xcd, 0xd8, 0x39, 0x5d, 0x4c, 0x62, 0xd9, 0xa4, 0xcf, 0x1e, 0x0f, 0x5e, 0xa2, 0x6b, 0x0e, - 0xd1, 0x08, 0x5e, 0xe2, 0xe9, 0x43, 0x68, 0xa8, 0xd8, 0x47, 0x6d, 0xad, 0x6c, 0xd4, 0x7a, 0x9d, - 0xed, 0xdb, 0x6c, 0xc1, 0x71, 0xb0, 0x27, 0xa1, 0x2f, 0xdd, 0x57, 
0x3c, 0xf6, 0x91, 0x67, 0x0c, - 0xf6, 0x0f, 0x02, 0x74, 0x46, 0xe6, 0xae, 0x0c, 0x86, 0x32, 0x18, 0x5d, 0xa8, 0xda, 0x03, 0x68, - 0xe9, 0x38, 0x75, 0x14, 0x82, 0x6f, 0x2d, 0x2c, 0xf8, 0x28, 0x03, 0xf2, 0x92, 0x81, 0xee, 0x43, - 0x53, 0x85, 0x3e, 0x72, 0x7c, 0x61, 0xd5, 0x52, 0x61, 0x8b, 0x93, 0xf1, 0x0c, 0xc7, 0x0b, 0x02, - 0xfb, 0x35, 0x81, 0xb5, 0x6a, 0xf1, 0xbb, 0xb1, 0xf4, 0x87, 0xa8, 0xa8, 0x07, 0xd4, 0xad, 0x38, - 0xf3, 0x6e, 0xdc, 0x5b, 0x38, 0x69, 0x95, 0x9f, 0xcf, 0xa1, 0xb5, 0x3f, 0x11, 0xb8, 0x56, 0x0d, - 0x3d, 0x90, 0xda, 0xd0, 0x07, 0x95, 0x59, 0x6c, 0x2e, 0x32, 0x8b, 0x04, 0x7b, 0x66, 0x12, 0x4f, - 0xa1, 0x21, 0x0d, 0x8e, 0x8b, 0x31, 0xfc, 0x51, 0x11, 0x19, 0x93, 0xfd, 0x81, 0xc0, 0xea, 0x8c, - 0xf7, 0x82, 0x05, 0xef, 0x9f, 0x16, 0xbc, 0xb3, 0x8c, 0xe0, 0x42, 0xe9, 0x47, 0x02, 0x30, 0xfd, - 0xfe, 0xe9, 0x55, 0x68, 0x4c, 0x50, 0x0d, 0xb4, 0x45, 0x36, 0x6a, 0xbd, 0x36, 0xcf, 0x1e, 0xf4, - 0x3a, 0xb4, 0x45, 0x24, 0xef, 0xab, 0x30, 0x8e, 0xb2, 0xa4, 0x6d, 0x3e, 0x35, 0x24, 0x5e, 0x85, - 0x3a, 0x8c, 0x95, 0x8b, 0xda, 0xaa, 0x65, 0xde, 0xd2, 0x40, 0x6f, 0xc0, 0xff, 0xc5, 0xe3, 0x91, - 0x18, 0xa3, 0xb6, 0xea, 0x69, 0xc4, 0x69, 0x23, 0xed, 0xc1, 0x6a, 0x10, 0x06, 0x3c, 0xb7, 0x3d, - 0xe3, 0x07, 0xda, 0x6a, 0xa4, 0x71, 0x67, 0xcd, 0xf6, 0x31, 0x5c, 0x99, 0xea, 0x2d, 0x3e, 0xca, - 0x23, 0x80, 0xa8, 0x34, 0xe6, 0xdd, 0x5d, 0x6a, 0xff, 0x67, 0x68, 0xec, 0x77, 0x04, 0xea, 0x7f, - 0xf3, 0x91, 0xfa, 0x4e, 0xa0, 0xf3, 0xef, 0x5d, 0xa7, 0x64, 0xb5, 0x2e, 0xef, 0x16, 0x2c, 0xbd, - 0x5a, 0x73, 0x8e, 0xc0, 0x5b, 0x02, 0xad, 0x4b, 0xd8, 0xfe, 0xbd, 0xd3, 0x12, 0xb7, 0x7e, 0xaf, - 0x95, 0xb9, 0xb6, 0x43, 0x68, 0xe6, 0x9d, 0xa5, 0xeb, 0xd0, 0x2a, 0x76, 0x39, 0x55, 0xd6, 0xe6, - 0xe5, 0x9b, 0x52, 0xa8, 0x7b, 0x32, 0x18, 0x5a, 0x2b, 0xa9, 0x3d, 0xfd, 0x9f, 0xd8, 0x02, 0x31, - 0xc6, 0x74, 0x92, 0x6d, 0x9e, 0xfe, 0xb7, 0x3d, 0x68, 0xe6, 0x53, 0x2f, 0x21, 0x64, 0x06, 0x32, - 0x9b, 0x62, 0xa5, 0x9a, 0xe2, 0x2c, 0x5d, 0x72, 0x51, 0x92, 0x5f, 0x1d, 0x09, 0x17, 0xad, 0x7a, - 0xea, 0x98, 0x1a, 0x76, 0xd7, 0x3e, 0x9f, 0x74, 0xc9, 0x97, 0x93, 0x2e, 0xf9, 0x7a, 0xd2, 0x25, - 0x6f, 0xbe, 0x75, 0xff, 0x7b, 0xde, 0xcc, 0x4b, 0xfc, 0x19, 0x00, 0x00, 0xff, 0xff, 0xca, 0xc1, - 0xc6, 0xff, 0xbb, 0x09, 0x00, 0x00, + // 643 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6a, 0x14, 0x4f, + 0x10, 0xfe, 0x75, 0xb2, 0xcb, 0xee, 0xd6, 0xf2, 0x63, 0xa1, 0x09, 0x3a, 0x06, 0x5d, 0xc2, 0x18, + 0x70, 0x41, 0xe8, 0x31, 0x89, 0x88, 0x06, 0x2f, 0x89, 0x07, 0x41, 0x36, 0x2a, 0x1d, 0xbc, 0x78, + 0xeb, 0x9d, 0x2d, 0x27, 0xed, 0xce, 0xce, 0x0c, 0xdd, 0x3d, 0x0b, 0xf1, 0x15, 0x3c, 0x78, 0xd5, + 0x27, 0xf0, 0xe6, 0x73, 0x78, 0xf4, 0xe2, 0xcd, 0x83, 0xc4, 0x9b, 0x4f, 0x21, 0xf3, 0x6f, 0xff, + 0x4d, 0x46, 0xf7, 0x90, 0x80, 0x78, 0xca, 0xf4, 0x57, 0xf5, 0x55, 0x7d, 0x5f, 0xa5, 0x6a, 0xe1, + 0xd6, 0xe8, 0xbe, 0x66, 0x32, 0x74, 0x44, 0x24, 0x1d, 0x35, 0x10, 0xae, 0x33, 0xd9, 0x19, 0xa0, + 0x11, 0x3b, 0x8e, 0x87, 0x01, 0x2a, 0x61, 0x70, 0xc8, 0x22, 0x15, 0x9a, 0x90, 0x5e, 0xcd, 0x12, + 0x99, 0x88, 0x24, 0x4b, 0x12, 0x59, 0x9e, 0xb8, 0xd9, 0x2b, 0x57, 0x10, 0x7e, 0x74, 0x52, 0x2e, + 0xb1, 0x79, 0x77, 0x96, 0x39, 0x16, 0xee, 0x89, 0x0c, 0x50, 0x9d, 0x3a, 0xd1, 0xc8, 0x4b, 0x00, + 0xed, 0x8c, 0xd1, 0x08, 0x67, 0x52, 0x66, 0x39, 0x55, 0x2c, 0x15, 0x07, 0x46, 0x8e, 0xb1, 0x44, + 0xb8, 0xf7, 0x27, 0x82, 0x76, 0x4f, 0x70, 0x2c, 0x4a, 0xbc, 0xbd, 0x2a, 0x5e, 0x6c, 0xa4, 0xef, + 0xc8, 0xc0, 0x68, 0xa3, 0x96, 0x49, 0xf6, 0x1b, 0xe8, 0x1c, 0x78, 0x9e, 0x42, 0x4f, 0x18, 0x19, + 0x06, 
0x3c, 0xf6, 0x91, 0x7a, 0xb0, 0xe1, 0xfa, 0xb1, 0x36, 0xa8, 0x78, 0xe8, 0xe3, 0x31, 0xfa, + 0xe8, 0x9a, 0x50, 0x69, 0x8b, 0x6c, 0xad, 0xf7, 0xda, 0xbb, 0x7b, 0x6c, 0x36, 0xc8, 0x69, 0x1b, + 0x16, 0x8d, 0xbc, 0x04, 0xd0, 0x2c, 0x99, 0x02, 0x9b, 0xec, 0xb0, 0xbe, 0x18, 0xa0, 0x5f, 0x70, + 0xf9, 0xb9, 0x05, 0xed, 0x9f, 0x04, 0xda, 0x8f, 0x66, 0x01, 0xda, 0x87, 0x66, 0x42, 0x1f, 0x0a, + 0x23, 0x2c, 0xb2, 0x45, 0x7a, 0xed, 0xdd, 0x3b, 0xab, 0x35, 0x7b, 0x36, 0x78, 0x8d, 0xae, 0x39, + 0x42, 0x23, 0xf8, 0xb4, 0x02, 0x7d, 0x00, 0x75, 0x15, 0xfb, 0xa8, 0xad, 0xb5, 0x54, 0xf7, 0x4d, + 0x56, 0xb1, 0x00, 0xec, 0x79, 0xe8, 0x4b, 0xf7, 0x34, 0xb1, 0xce, 0x33, 0x06, 0xe5, 0xd0, 0x11, + 0x8b, 0x43, 0xb1, 0xd6, 0x53, 0x3d, 0xbd, 0xca, 0x22, 0x4b, 0x43, 0xe4, 0xcb, 0x05, 0xec, 0x6f, + 0x04, 0xe8, 0x9c, 0xd9, 0x43, 0x19, 0x0c, 0x65, 0xe0, 0x5d, 0xb0, 0xe7, 0x87, 0xd0, 0xd4, 0x71, + 0x1a, 0x28, 0x6c, 0x6f, 0x55, 0x2a, 0x3e, 0xce, 0x12, 0xf9, 0x94, 0x41, 0xf7, 0xa1, 0xa1, 0x42, + 0x1f, 0x39, 0xbe, 0xca, 0xed, 0x56, 0x93, 0x79, 0x96, 0xc7, 0x0b, 0x82, 0xfd, 0x91, 0xc0, 0x95, + 0xb2, 0xbd, 0xbe, 0xd4, 0x86, 0x3e, 0x29, 0x59, 0x64, 0x2b, 0xee, 0x90, 0xd4, 0xcb, 0x06, 0x0f, + 0xa0, 0x2e, 0x0d, 0x8e, 0x0b, 0x77, 0xb7, 0x2b, 0x05, 0x96, 0xb5, 0xf0, 0x8c, 0x69, 0x7f, 0x20, + 0xd0, 0x99, 0x8b, 0x5e, 0xb8, 0xc4, 0xfd, 0x45, 0x89, 0xdb, 0xab, 0x48, 0x2c, 0xb4, 0x7d, 0x22, + 0x00, 0xb3, 0x75, 0xa4, 0x1b, 0x50, 0x9f, 0xa0, 0x1a, 0x64, 0xa7, 0xd7, 0xe2, 0xd9, 0x83, 0x5e, + 0x87, 0x96, 0x88, 0xe4, 0x63, 0x15, 0xc6, 0x51, 0xd6, 0xa4, 0xc5, 0x67, 0x40, 0x12, 0x55, 0xa8, + 0xc3, 0x58, 0xb9, 0xa8, 0xad, 0xf5, 0x2c, 0x3a, 0x05, 0xe8, 0x36, 0xfc, 0x5f, 0x3c, 0x9e, 0x8a, + 0x31, 0x6a, 0xab, 0x96, 0x66, 0x2c, 0x82, 0xb4, 0x07, 0x9d, 0x20, 0x0c, 0x78, 0x8e, 0xbd, 0xe0, + 0x7d, 0x6d, 0xd5, 0xd3, 0xbc, 0x65, 0xd8, 0x7e, 0x47, 0xa0, 0xf6, 0x57, 0xdd, 0xae, 0xfd, 0x95, + 0x40, 0xfb, 0x5f, 0x3c, 0xb0, 0x64, 0x6d, 0x2f, 0xf3, 0xb2, 0x56, 0x5e, 0xdb, 0x73, 0x4e, 0xea, + 0x2d, 0x81, 0xe6, 0xa5, 0xdc, 0xd2, 0xde, 0xa2, 0xa8, 0x1b, 0xbf, 0x1f, 0x57, 0xae, 0xe6, 0x08, + 0x1a, 0xf9, 0xf4, 0xe8, 0x26, 0x34, 0x8b, 0xcb, 0x48, 0xb5, 0xb4, 0xf8, 0xf4, 0x4d, 0x29, 0xd4, + 0x46, 0x32, 0x18, 0x5a, 0x6b, 0x29, 0x9e, 0x7e, 0x27, 0x58, 0x20, 0xc6, 0xd9, 0xaf, 0x7d, 0x8b, + 0xa7, 0xdf, 0xf6, 0x08, 0x1a, 0xf9, 0x7f, 0x72, 0x4a, 0x21, 0x73, 0x94, 0xf9, 0x16, 0x6b, 0xe5, + 0x16, 0xcb, 0xe5, 0x92, 0xfb, 0x4c, 0xfe, 0xea, 0x48, 0xb8, 0x68, 0xd5, 0xd2, 0xc0, 0x0c, 0x38, + 0xbc, 0xf6, 0xf9, 0xac, 0x4b, 0xbe, 0x9c, 0x75, 0xc9, 0xf7, 0xb3, 0x2e, 0x79, 0xff, 0xa3, 0xfb, + 0xdf, 0xcb, 0x46, 0x6e, 0xf1, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0xf5, 0xbf, 0x80, 0xea, + 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1beta1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1beta1/register.go new file mode 100644 index 00000000..a9612e38 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/rbac/v1beta1/register.go @@ -0,0 +1,15 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("rbac.authorization.k8s.io", "v1beta1", "clusterroles", false, &ClusterRole{}) + k8s.Register("rbac.authorization.k8s.io", "v1beta1", "clusterrolebindings", false, &ClusterRoleBinding{}) + k8s.Register("rbac.authorization.k8s.io", "v1beta1", "roles", true, &Role{}) + k8s.Register("rbac.authorization.k8s.io", "v1beta1", "rolebindings", true, &RoleBinding{}) + + k8s.RegisterList("rbac.authorization.k8s.io", "v1beta1", 
"clusterroles", false, &ClusterRoleList{}) + k8s.RegisterList("rbac.authorization.k8s.io", "v1beta1", "clusterrolebindings", false, &ClusterRoleBindingList{}) + k8s.RegisterList("rbac.authorization.k8s.io", "v1beta1", "roles", true, &RoleList{}) + k8s.RegisterList("rbac.authorization.k8s.io", "v1beta1", "rolebindings", true, &RoleBindingList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/resource/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/resource/generated.pb.go similarity index 82% rename from vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/resource/generated.pb.go rename to vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/resource/generated.pb.go index 902725bf..09507b4e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/api/resource/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/resource/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/resource/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/apimachinery/pkg/api/resource/generated.proto /* Package resource is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/api/resource/generated.proto + k8s.io/apimachinery/pkg/api/resource/generated.proto It has these top-level messages: Quantity @@ -92,6 +91,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true // +k8s:openapi-gen=true type Quantity struct { String_ *string `protobuf:"bytes,1,opt,name=string" json:"string,omitempty"` @@ -111,7 +111,7 @@ func (m *Quantity) GetString_() string { } func init() { - proto.RegisterType((*Quantity)(nil), "github.com/ericchiang.k8s.api.resource.Quantity") + proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } func (m *Quantity) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -140,24 +140,6 @@ func (m *Quantity) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -380,20 +362,20 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/api/resource/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 166 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0xca, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0xc8, - 0x4e, 0xd7, 0x4f, 
0x2c, 0xc8, 0xd4, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x4f, - 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, - 0x82, 0xe8, 0xd1, 0x43, 0xe8, 0xd1, 0x2b, 0xc8, 0x4e, 0xd7, 0x4b, 0x2c, 0xc8, 0xd4, 0x83, 0xe9, - 0x91, 0x32, 0xc4, 0x6e, 0x6e, 0x69, 0x49, 0x66, 0x8e, 0x7e, 0x66, 0x5e, 0x49, 0x71, 0x49, 0x11, - 0xba, 0xb1, 0x4a, 0x4a, 0x5c, 0x1c, 0x81, 0xa5, 0x89, 0x79, 0x25, 0x99, 0x25, 0x95, 0x42, 0x62, - 0x5c, 0x6c, 0xc5, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x50, - 0x9e, 0x93, 0xd4, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, - 0xe3, 0xb1, 0x1c, 0x43, 0x14, 0x07, 0xcc, 0x4a, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x81, 0x00, - 0xf7, 0xdc, 0xcb, 0x00, 0x00, 0x00, + // 163 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xc9, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0x17, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0xea, 0xa7, + 0xa7, 0xe6, 0xa5, 0x16, 0x25, 0x96, 0xa4, 0xa6, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xa9, + 0x40, 0x74, 0xe9, 0x21, 0xeb, 0xd2, 0x2b, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0xc1, 0x74, 0x49, 0x19, + 0xe3, 0x32, 0xbb, 0xb4, 0x24, 0x33, 0x47, 0x3f, 0x33, 0xaf, 0xa4, 0xb8, 0xa4, 0x08, 0xdd, 0x68, + 0x25, 0x25, 0x2e, 0x8e, 0xc0, 0xd2, 0xc4, 0xbc, 0x92, 0xcc, 0x92, 0x4a, 0x21, 0x31, 0x2e, 0xb6, + 0xe2, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x28, 0xcf, 0x49, + 0xea, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, + 0x8e, 0x21, 0x8a, 0x03, 0x66, 0x29, 0x20, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x28, 0x7b, 0x7c, 0xd1, + 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/settings/v1alpha1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/settings/v1alpha1/generated.pb.go index 676236b0..54dcaa1a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/settings/v1alpha1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/settings/v1alpha1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/settings/v1alpha1/generated.proto /* Package v1alpha1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto + k8s.io/api/settings/v1alpha1/generated.proto It has these top-level messages: PodPreset @@ -18,10 +17,11 @@ package v1alpha1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import k8s_io_api_core_v1 "github.com/ericchiang/k8s/apis/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" -import k8s_io_kubernetes_pkg_api_v1 "github.com/ericchiang/k8s/api/v1" +import _ "github.com/ericchiang/k8s/util/intstr" import io "io" @@ -40,7 +40,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // requirements for a Pod. 
type PodPreset struct { // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // +optional Spec *PodPresetSpec `protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -51,7 +51,7 @@ func (m *PodPreset) String() string { return proto.CompactTextString( func (*PodPreset) ProtoMessage() {} func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *PodPreset) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *PodPreset) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -68,9 +68,9 @@ func (m *PodPreset) GetSpec() *PodPresetSpec { // PodPresetList is a list of PodPreset objects. type PodPresetList struct { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is a list of schema objects. Items []*PodPreset `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -81,7 +81,7 @@ func (m *PodPresetList) String() string { return proto.CompactTextStr func (*PodPresetList) ProtoMessage() {} func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *PodPresetList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *PodPresetList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -95,24 +95,24 @@ func (m *PodPresetList) GetItems() []*PodPreset { return nil } -// PodPresetSpec is a description of a pod injection policy. +// PodPresetSpec is a description of a pod preset. type PodPresetSpec struct { // Selector is a label query over a set of resources, in this case pods. // Required. - Selector *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + Selector *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` // Env defines the collection of EnvVar to inject into containers. // +optional - Env []*k8s_io_kubernetes_pkg_api_v1.EnvVar `protobuf:"bytes,2,rep,name=env" json:"env,omitempty"` + Env []*k8s_io_api_core_v1.EnvVar `protobuf:"bytes,2,rep,name=env" json:"env,omitempty"` // EnvFrom defines the collection of EnvFromSource to inject into containers. // +optional - EnvFrom []*k8s_io_kubernetes_pkg_api_v1.EnvFromSource `protobuf:"bytes,3,rep,name=envFrom" json:"envFrom,omitempty"` + EnvFrom []*k8s_io_api_core_v1.EnvFromSource `protobuf:"bytes,3,rep,name=envFrom" json:"envFrom,omitempty"` // Volumes defines the collection of Volume to inject into the pod. // +optional - Volumes []*k8s_io_kubernetes_pkg_api_v1.Volume `protobuf:"bytes,4,rep,name=volumes" json:"volumes,omitempty"` + Volumes []*k8s_io_api_core_v1.Volume `protobuf:"bytes,4,rep,name=volumes" json:"volumes,omitempty"` // VolumeMounts defines the collection of VolumeMount to inject into containers. 
// +optional - VolumeMounts []*k8s_io_kubernetes_pkg_api_v1.VolumeMount `protobuf:"bytes,5,rep,name=volumeMounts" json:"volumeMounts,omitempty"` - XXX_unrecognized []byte `json:"-"` + VolumeMounts []*k8s_io_api_core_v1.VolumeMount `protobuf:"bytes,5,rep,name=volumeMounts" json:"volumeMounts,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } @@ -120,35 +120,35 @@ func (m *PodPresetSpec) String() string { return proto.CompactTextStr func (*PodPresetSpec) ProtoMessage() {} func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *PodPresetSpec) GetSelector() *k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector { +func (m *PodPresetSpec) GetSelector() *k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector { if m != nil { return m.Selector } return nil } -func (m *PodPresetSpec) GetEnv() []*k8s_io_kubernetes_pkg_api_v1.EnvVar { +func (m *PodPresetSpec) GetEnv() []*k8s_io_api_core_v1.EnvVar { if m != nil { return m.Env } return nil } -func (m *PodPresetSpec) GetEnvFrom() []*k8s_io_kubernetes_pkg_api_v1.EnvFromSource { +func (m *PodPresetSpec) GetEnvFrom() []*k8s_io_api_core_v1.EnvFromSource { if m != nil { return m.EnvFrom } return nil } -func (m *PodPresetSpec) GetVolumes() []*k8s_io_kubernetes_pkg_api_v1.Volume { +func (m *PodPresetSpec) GetVolumes() []*k8s_io_api_core_v1.Volume { if m != nil { return m.Volumes } return nil } -func (m *PodPresetSpec) GetVolumeMounts() []*k8s_io_kubernetes_pkg_api_v1.VolumeMount { +func (m *PodPresetSpec) GetVolumeMounts() []*k8s_io_api_core_v1.VolumeMount { if m != nil { return m.VolumeMounts } @@ -156,9 +156,9 @@ func (m *PodPresetSpec) GetVolumeMounts() []*k8s_io_kubernetes_pkg_api_v1.Volume } func init() { - proto.RegisterType((*PodPreset)(nil), "github.com/ericchiang.k8s.apis.settings.v1alpha1.PodPreset") - proto.RegisterType((*PodPresetList)(nil), "github.com/ericchiang.k8s.apis.settings.v1alpha1.PodPresetList") - proto.RegisterType((*PodPresetSpec)(nil), "github.com/ericchiang.k8s.apis.settings.v1alpha1.PodPresetSpec") + proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") + proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") + proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") } func (m *PodPreset) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -323,24 +323,6 @@ func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -492,7 +474,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err 
@@ -609,7 +591,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -724,7 +706,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -756,7 +738,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, &k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + m.Env = append(m.Env, &k8s_io_api_core_v1.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -787,7 +769,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, &k8s_io_kubernetes_pkg_api_v1.EnvFromSource{}) + m.EnvFrom = append(m.EnvFrom, &k8s_io_api_core_v1.EnvFromSource{}) if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -818,7 +800,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, &k8s_io_kubernetes_pkg_api_v1.Volume{}) + m.Volumes = append(m.Volumes, &k8s_io_api_core_v1.Volume{}) if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -849,7 +831,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, &k8s_io_kubernetes_pkg_api_v1.VolumeMount{}) + m.VolumeMounts = append(m.VolumeMounts, &k8s_io_api_core_v1.VolumeMount{}) if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -982,35 +964,36 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/settings/v1alpha1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 409 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x91, 0x4f, 0xeb, 0xd3, 0x30, - 0x1c, 0x87, 0xed, 0xf6, 0x1b, 0x9b, 0x99, 0x5e, 0x72, 0x2a, 0x3b, 0x94, 0x31, 0x3c, 0x4c, 0x9c, - 0x29, 0x1d, 0xa2, 0x82, 0xe2, 0x41, 0x98, 0x88, 0x58, 0x36, 0x32, 0xd8, 0xc1, 0x5b, 0xd6, 0x7d, - 0xe9, 0x6a, 0xdb, 0xa4, 0x24, 0x69, 0x5f, 0x8b, 0x2f, 0xc1, 0x97, 0xe2, 0xd1, 0x97, 0x20, 0xf3, - 0xea, 0x8b, 0x90, 0x74, 0x6b, 0xfd, 0xd3, 0x6d, 0xd6, 0xdf, 0xad, 0x94, 0xcf, 0xf3, 0xe4, 0x69, - 0x8a, 0x5e, 0xc6, 0xcf, 0x15, 0x89, 0x84, 0x1b, 0xe7, 0x5b, 0x90, 0x1c, 0x34, 0x28, 0x37, 0x8b, - 0x43, 0x97, 0x65, 0x91, 0x72, 0x15, 0x68, 0x1d, 0xf1, 0x50, 0xb9, 0x85, 0xc7, 0x92, 0x6c, 0xcf, - 0x3c, 0x37, 0x04, 0x0e, 0x92, 0x69, 0xd8, 0x91, 0x4c, 0x0a, 0x2d, 0xf0, 0xec, 0x48, 0x93, 0x5f, - 0x34, 0xc9, 0xe2, 0x90, 0x18, 0x9a, 0x54, 0x34, 0xa9, 0xe8, 0xd1, 0xfc, 0xca, 0x59, 0x29, 0x68, - 0xe6, 0x16, 0x8d, 0x13, 0x46, 0x8f, 0xcf, 0x33, 0x32, 0xe7, 0x3a, 0x4a, 0xa1, 0x31, 0x7f, 0x72, - 0x7d, 0xae, 0x82, 0x3d, 0xa4, 0xac, 0x41, 0xcd, 0x2e, 0x86, 0x9d, 0x49, 0x9a, 0x7c, 0xb6, 0xd0, - 0xdd, 
0x95, 0xd8, 0xad, 0x24, 0x28, 0xd0, 0xf8, 0x1d, 0x1a, 0x98, 0xf6, 0x1d, 0xd3, 0xcc, 0xb6, - 0xc6, 0xd6, 0x74, 0x38, 0x27, 0xe4, 0xca, 0xad, 0x98, 0x2d, 0x29, 0x3c, 0xb2, 0xdc, 0x7e, 0x84, - 0x40, 0xfb, 0xa0, 0x19, 0xad, 0x79, 0xbc, 0x44, 0x37, 0x2a, 0x83, 0xc0, 0xee, 0x94, 0x9e, 0x17, - 0xe4, 0x7f, 0x6e, 0x97, 0xd4, 0x49, 0xeb, 0x0c, 0x02, 0x5a, 0x8a, 0x4c, 0xea, 0xfd, 0xfa, 0xfd, - 0xfb, 0x48, 0x69, 0xfc, 0xb6, 0x91, 0x3b, 0x6b, 0x93, 0x6b, 0xd8, 0xbf, 0x62, 0x7d, 0xd4, 0x8b, - 0x34, 0xa4, 0xca, 0xee, 0x8c, 0xbb, 0xd3, 0xe1, 0xfc, 0xd9, 0x2d, 0x6b, 0xe9, 0xd1, 0x32, 0xf9, - 0xd1, 0xf9, 0x2d, 0xd5, 0x7c, 0x02, 0xf6, 0xd1, 0x40, 0x41, 0x02, 0x81, 0x16, 0xf2, 0x94, 0xea, - 0xb5, 0x4a, 0x65, 0x5b, 0x48, 0xd6, 0x27, 0x90, 0xd6, 0x0a, 0xfc, 0x14, 0x75, 0x81, 0x17, 0xa7, - 0xda, 0x07, 0x97, 0x4d, 0xc6, 0xb1, 0xe0, 0xc5, 0x86, 0x49, 0x6a, 0x00, 0xbc, 0x40, 0x7d, 0xe0, - 0xc5, 0x1b, 0x29, 0x52, 0xbb, 0x5b, 0xb2, 0x8f, 0xfe, 0xc9, 0x9a, 0xf1, 0x5a, 0xe4, 0x32, 0x00, - 0x5a, 0xb1, 0xf8, 0x15, 0xea, 0x17, 0x22, 0xc9, 0x53, 0x50, 0xf6, 0x4d, 0x9b, 0x84, 0x4d, 0x39, - 0xa6, 0x15, 0x84, 0x7d, 0x74, 0xef, 0xf8, 0xe8, 0x8b, 0x9c, 0x6b, 0x65, 0xf7, 0x4a, 0xc9, 0xc3, - 0x36, 0x92, 0x92, 0xa0, 0x7f, 0xe0, 0xaf, 0x47, 0x5f, 0x0e, 0x8e, 0xf5, 0xf5, 0xe0, 0x58, 0xdf, - 0x0e, 0x8e, 0xf5, 0xe9, 0xbb, 0x73, 0xe7, 0xc3, 0xa0, 0xfa, 0x37, 0x3f, 0x03, 0x00, 0x00, 0xff, - 0xff, 0x3e, 0x05, 0x30, 0x95, 0x14, 0x04, 0x00, 0x00, + // 429 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x6a, 0x13, 0x41, + 0x1c, 0x87, 0xdd, 0xa4, 0xa5, 0x71, 0xaa, 0x97, 0x39, 0x2d, 0x41, 0x62, 0xcd, 0xc5, 0x82, 0x65, + 0xc6, 0xb4, 0x45, 0x04, 0x11, 0x41, 0xd1, 0x83, 0xb4, 0xb4, 0x4c, 0xa0, 0x07, 0x6f, 0xd3, 0xc9, + 0x9f, 0x64, 0xcc, 0xee, 0xcc, 0x30, 0xf3, 0xdf, 0x05, 0xdf, 0x44, 0x7c, 0x20, 0xf1, 0xe8, 0x23, + 0x48, 0x7c, 0x11, 0x99, 0x4d, 0x76, 0x1b, 0x1b, 0xd7, 0xe6, 0x16, 0x86, 0xef, 0xfb, 0xe5, 0xdb, + 0xd9, 0x25, 0x47, 0xf3, 0x97, 0x81, 0x69, 0xcb, 0xa5, 0xd3, 0x3c, 0x00, 0xa2, 0x36, 0xd3, 0xc0, + 0xcb, 0x91, 0xcc, 0xdc, 0x4c, 0x8e, 0xf8, 0x14, 0x0c, 0x78, 0x89, 0x30, 0x61, 0xce, 0x5b, 0xb4, + 0xf4, 0xd1, 0x92, 0x66, 0xd2, 0x69, 0x56, 0xd3, 0xac, 0xa6, 0xfb, 0xc3, 0xb5, 0x2d, 0x65, 0x3d, + 0xf0, 0x72, 0x63, 0xa1, 0x7f, 0x7a, 0xc3, 0xe4, 0x52, 0xcd, 0xb4, 0x01, 0xff, 0x85, 0xbb, 0xf9, + 0x34, 0x1e, 0x04, 0x9e, 0x03, 0xca, 0x7f, 0x59, 0xbc, 0xcd, 0xf2, 0x85, 0x41, 0x9d, 0xc3, 0x86, + 0xf0, 0xe2, 0x2e, 0x21, 0xa8, 0x19, 0xe4, 0x72, 0xc3, 0x3b, 0x69, 0xf3, 0x0a, 0xd4, 0x19, 0xd7, + 0x06, 0x03, 0xfa, 0xdb, 0xd2, 0xf0, 0x5b, 0x42, 0xee, 0x5f, 0xda, 0xc9, 0xa5, 0x87, 0x00, 0x48, + 0xcf, 0x48, 0x2f, 0x3e, 0xc6, 0x44, 0xa2, 0x4c, 0x93, 0x83, 0xe4, 0x70, 0xff, 0xf8, 0x39, 0xbb, + 0xb9, 0xb6, 0x66, 0x95, 0xb9, 0xf9, 0x34, 0x1e, 0x04, 0x16, 0x69, 0x56, 0x8e, 0xd8, 0xc5, 0xf5, + 0x67, 0x50, 0x78, 0x0e, 0x28, 0x45, 0xb3, 0x40, 0xdf, 0x90, 0x9d, 0xe0, 0x40, 0xa5, 0x9d, 0x6a, + 0xe9, 0x19, 0xfb, 0xdf, 0x0b, 0x60, 0x4d, 0xc4, 0xd8, 0x81, 0x12, 0x95, 0x18, 0xe3, 0x1e, 0x36, + 0xe7, 0x67, 0x3a, 0x20, 0xfd, 0xb8, 0x11, 0xc8, 0xb6, 0x0b, 0x8c, 0xf6, 0xad, 0xbc, 0xd7, 0x64, + 0x57, 0x23, 0xe4, 0x21, 0xed, 0x1c, 0x74, 0x0f, 0xf7, 0x8f, 0x9f, 0x6e, 0xd9, 0x27, 0x96, 0xd6, + 0xf0, 0x7b, 0x67, 0x2d, 0x2e, 0x46, 0xd3, 0x0b, 0xd2, 0x0b, 0x90, 0x81, 0x42, 0xeb, 0x57, 0x71, + 0x27, 0x5b, 0xc6, 0xc9, 0x6b, 0xc8, 0xc6, 0x2b, 0x55, 0x34, 0x23, 0xf4, 0x88, 0x74, 0xc1, 0x94, + 0xab, 0xbe, 0xfe, 0x7a, 0x5f, 0xfc, 0x44, 0xa3, 0xf9, 0xde, 0x94, 0x57, 0xd2, 0x8b, 0x88, 0xd1, + 0x57, 0x64, 0x0f, 0x4c, 
0xf9, 0xc1, 0xdb, 0x3c, 0xed, 0x56, 0xc6, 0x93, 0x16, 0x23, 0x22, 0x63, + 0x5b, 0x78, 0x05, 0xa2, 0x36, 0xe8, 0x29, 0xd9, 0x2b, 0x6d, 0x56, 0xe4, 0x10, 0xd2, 0x9d, 0xf6, + 0xbf, 0xbb, 0xaa, 0x10, 0x51, 0xa3, 0xf4, 0x1d, 0x79, 0xb0, 0xfc, 0x79, 0x6e, 0x0b, 0x83, 0x21, + 0xdd, 0xad, 0xd4, 0xc7, 0xed, 0x6a, 0xc5, 0x89, 0xbf, 0xa4, 0xb7, 0xfd, 0x1f, 0x8b, 0x41, 0xf2, + 0x73, 0x31, 0x48, 0x7e, 0x2d, 0x06, 0xc9, 0xd7, 0xdf, 0x83, 0x7b, 0x9f, 0x7a, 0xf5, 0xad, 0xff, + 0x09, 0x00, 0x00, 0xff, 0xff, 0x2a, 0xe6, 0x36, 0x22, 0xe3, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/settings/v1alpha1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/settings/v1alpha1/register.go new file mode 100644 index 00000000..88e4be90 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/settings/v1alpha1/register.go @@ -0,0 +1,9 @@ +package v1alpha1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("settings.k8s.io", "v1alpha1", "podpresets", true, &PodPreset{}) + + k8s.RegisterList("settings.k8s.io", "v1alpha1", "podpresets", true, &PodPresetList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1/generated.pb.go index 74d60564..45b7e3a4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/storage/v1/generated.proto /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto + k8s.io/api/storage/v1/generated.proto It has these top-level messages: StorageClass @@ -17,7 +16,8 @@ package v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import _ "github.com/ericchiang/k8s/apis/storage/v1beta1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" @@ -42,16 +42,34 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // according to etcd is in ObjectMeta.Name. type StorageClass struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Provisioner indicates the type of the provisioner. Provisioner *string `protobuf:"bytes,2,opt,name=provisioner" json:"provisioner,omitempty"` // Parameters holds the parameters for the provisioner that should // create volumes of this storage class. 
// +optional - Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + ReclaimPolicy *string `protobuf:"bytes,4,opt,name=reclaimPolicy" json:"reclaimPolicy,omitempty"` + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + MountOptions []string `protobuf:"bytes,5,rep,name=mountOptions" json:"mountOptions,omitempty"` + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + AllowVolumeExpansion *bool `protobuf:"varint,6,opt,name=allowVolumeExpansion" json:"allowVolumeExpansion,omitempty"` + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *string `protobuf:"bytes,7,opt,name=volumeBindingMode" json:"volumeBindingMode,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *StorageClass) Reset() { *m = StorageClass{} } @@ -59,7 +77,7 @@ func (m *StorageClass) String() string { return proto.CompactTextStri func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *StorageClass) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *StorageClass) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -80,12 +98,40 @@ func (m *StorageClass) GetParameters() map[string]string { return nil } +func (m *StorageClass) GetReclaimPolicy() string { + if m != nil && m.ReclaimPolicy != nil { + return *m.ReclaimPolicy + } + return "" +} + +func (m *StorageClass) GetMountOptions() []string { + if m != nil { + return m.MountOptions + } + return nil +} + +func (m *StorageClass) GetAllowVolumeExpansion() bool { + if m != nil && m.AllowVolumeExpansion != nil { + return *m.AllowVolumeExpansion + } + return false +} + +func (m *StorageClass) GetVolumeBindingMode() string { + if m != nil && m.VolumeBindingMode != nil { + return *m.VolumeBindingMode + } + return "" +} + // StorageClassList is a collection of storage classes. 
type StorageClassList struct { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is the list of StorageClasses Items []*StorageClass `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -96,7 +142,7 @@ func (m *StorageClassList) String() string { return proto.CompactText func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *StorageClassList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *StorageClassList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -111,8 +157,8 @@ func (m *StorageClassList) GetItems() []*StorageClass { } func init() { - proto.RegisterType((*StorageClass)(nil), "github.com/ericchiang.k8s.apis.storage.v1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "github.com/ericchiang.k8s.apis.storage.v1.StorageClassList") + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -162,6 +208,43 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], v) } } + if m.ReclaimPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i += copy(dAtA[i:], *m.ReclaimPolicy) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.AllowVolumeExpansion != nil { + dAtA[i] = 0x30 + i++ + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -211,24 +294,6 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -257,6 +322,23 @@ func (m *StorageClass) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + 
sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -351,7 +433,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -413,7 +495,103 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -423,12 +601,27 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -438,70 +631,71 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Parameters == nil { - m.Parameters = make(map[string]string) + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s iNdEx = postIndex default: iNdEx = preIndex @@ -581,7 +775,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err 
@@ -745,33 +939,37 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/storage/v1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 361 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x91, 0xcf, 0x4a, 0xeb, 0x40, - 0x14, 0xc6, 0xef, 0xa4, 0x14, 0x6e, 0xa7, 0x17, 0x6e, 0x09, 0x5d, 0x84, 0x2e, 0x42, 0x28, 0x5c, - 0xe8, 0xe2, 0x7a, 0x42, 0xaa, 0x42, 0x11, 0xdc, 0x58, 0x04, 0x15, 0x45, 0x89, 0x3b, 0x77, 0xd3, - 0xf6, 0x10, 0xc7, 0x34, 0x7f, 0x98, 0x39, 0x09, 0xf4, 0x4d, 0x5c, 0xba, 0xf3, 0x55, 0x5c, 0xfa, - 0x08, 0x52, 0x5f, 0x44, 0xd2, 0x94, 0x1a, 0xda, 0x52, 0x8a, 0xbb, 0xf9, 0xf3, 0xfd, 0xbe, 0x39, - 0xdf, 0x37, 0xfc, 0x38, 0x1c, 0x68, 0x90, 0x89, 0x1b, 0x66, 0x23, 0x54, 0x31, 0x12, 0x6a, 0x37, - 0x0d, 0x03, 0x57, 0xa4, 0x52, 0xbb, 0x9a, 0x12, 0x25, 0x02, 0x74, 0x73, 0xcf, 0x0d, 0x30, 0x46, - 0x25, 0x08, 0x27, 0x90, 0xaa, 0x84, 0x12, 0xf3, 0x5f, 0x89, 0xc1, 0x37, 0x06, 0x69, 0x18, 0x40, - 0x81, 0xc1, 0x12, 0x83, 0xdc, 0xeb, 0xf4, 0x77, 0xb8, 0x47, 0x48, 0x62, 0x8b, 0x75, 0xe7, 0x60, - 0x3b, 0xa3, 0xb2, 0x98, 0x64, 0x84, 0x1b, 0xf2, 0xa3, 0xdd, 0x72, 0x3d, 0x7e, 0xc4, 0x48, 0x6c, - 0x50, 0xde, 0x76, 0x2a, 0x23, 0x39, 0x75, 0x65, 0x4c, 0x9a, 0xd4, 0x3a, 0xd2, 0x7d, 0x31, 0xf8, - 0x9f, 0xfb, 0x32, 0xda, 0x70, 0x2a, 0xb4, 0x36, 0xaf, 0xf8, 0xef, 0x22, 0xc3, 0x44, 0x90, 0xb0, - 0x98, 0xc3, 0x7a, 0xcd, 0x3e, 0xc0, 0x8e, 0x5a, 0x0a, 0x2d, 0xe4, 0x1e, 0xdc, 0x8e, 0x9e, 0x70, - 0x4c, 0x37, 0x48, 0xc2, 0x5f, 0xf1, 0xa6, 0xc3, 0x9b, 0xa9, 0x4a, 0x72, 0xa9, 0x65, 0x12, 0xa3, - 0xb2, 0x0c, 0x87, 0xf5, 0x1a, 0x7e, 0xf5, 0xc8, 0x1c, 0x73, 0x9e, 0x0a, 0x25, 0x22, 0x24, 0x54, - 0xda, 0xaa, 0x39, 0xb5, 0x5e, 0xb3, 0x3f, 0x84, 0xbd, 0xbe, 0x01, 0xaa, 0x63, 0xc3, 0xdd, 0xca, - 0xe5, 0x3c, 0x26, 0x35, 0xf3, 0x2b, 0xb6, 0x9d, 0x53, 0xfe, 0x77, 0xed, 0xda, 0x6c, 0xf1, 0x5a, - 0x88, 0xb3, 0x45, 0xc0, 0x86, 0x5f, 0x2c, 0xcd, 0x36, 0xaf, 0xe7, 0x62, 0x9a, 0xe1, 0x72, 0xca, - 0x72, 0x73, 0x62, 0x0c, 0x58, 0xf7, 0x95, 0xf1, 0x56, 0xf5, 0xad, 0x6b, 0xa9, 0xc9, 0xbc, 0xd8, - 0xa8, 0xe9, 0xff, 0x3e, 0x35, 0x15, 0xec, 0x5a, 0x49, 0x97, 0xbc, 0x2e, 0x09, 0x23, 0x6d, 0x19, - 0x8b, 0xf4, 0x87, 0x3f, 0x48, 0xef, 0x97, 0x0e, 0x67, 0xed, 0xb7, 0xb9, 0xcd, 0xde, 0xe7, 0x36, - 0xfb, 0x98, 0xdb, 0xec, 0xf9, 0xd3, 0xfe, 0xf5, 0x60, 0xe4, 0xde, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xc7, 0xf1, 0x3e, 0x97, 0x0d, 0x03, 0x00, 0x00, + // 460 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0x71, 0x4c, 0xa0, 0xd9, 0x14, 0x51, 0x56, 0x41, 0xb2, 0x72, 0x88, 0xac, 0x00, 0x92, + 0x85, 0xd0, 0x9a, 0xa4, 0x08, 0x15, 0x24, 0x2e, 0x45, 0xbd, 0xa0, 0x56, 0xad, 0x5c, 0x89, 0x03, + 0xb7, 0xa9, 0x33, 0x72, 0x97, 0xd8, 0xbb, 0xd6, 0xee, 0xd8, 0x90, 0x37, 0x81, 0x97, 0xe1, 0xcc, + 0x91, 0x47, 0x40, 0xe1, 0x45, 0x90, 0x9d, 0x2a, 0x4d, 0xe3, 0x54, 0xcd, 0xcd, 0xfe, 0x76, 0x7e, + 0xf3, 0xe7, 0xfb, 0xd8, 0x8b, 0xe9, 0x81, 0x15, 0x52, 0x87, 0x90, 0xcb, 0xd0, 0x92, 0x36, 0x90, + 0x60, 0x58, 0x8e, 0xc2, 0x04, 0x15, 0x1a, 0x20, 0x9c, 0x88, 0xdc, 0x68, 0xd2, 0xfc, 0xe9, 0xa2, + 0x4c, 0x40, 0x2e, 0xc5, 0x55, 0x99, 0x28, 0x47, 0xfd, 0x97, 0x1b, 0xe9, 0x0b, 0x24, 0x68, 0xb4, + 0xe8, 0xbf, 0xb9, 0xae, 0xcd, 0x20, 0xbe, 0x94, 0x0a, 0xcd, 0x2c, 0xcc, 0xa7, 0x49, 0x25, 0xd8, + 
0x30, 0x43, 0x82, 0x0d, 0x83, 0xfb, 0xe1, 0x6d, 0x94, 0x29, 0x14, 0xc9, 0x0c, 0x1b, 0xc0, 0xdb, + 0xbb, 0x00, 0x1b, 0x5f, 0x62, 0x06, 0x0d, 0x6e, 0xff, 0x36, 0xae, 0x20, 0x99, 0x86, 0x52, 0x91, + 0x25, 0xb3, 0x0e, 0x0d, 0x7f, 0xb9, 0x6c, 0xf7, 0x7c, 0x71, 0xf7, 0xc7, 0x14, 0xac, 0xe5, 0xc7, + 0x6c, 0xa7, 0xba, 0x64, 0x02, 0x04, 0x9e, 0xe3, 0x3b, 0x41, 0x77, 0xfc, 0x5a, 0x5c, 0x5b, 0xb7, + 0x6c, 0x2c, 0xf2, 0x69, 0x52, 0x09, 0x56, 0x54, 0xd5, 0xa2, 0x1c, 0x89, 0xd3, 0x8b, 0xaf, 0x18, + 0xd3, 0x09, 0x12, 0x44, 0xcb, 0x0e, 0xdc, 0x67, 0xdd, 0xdc, 0xe8, 0x52, 0x5a, 0xa9, 0x15, 0x1a, + 0xaf, 0xe5, 0x3b, 0x41, 0x27, 0x5a, 0x95, 0xf8, 0x39, 0x63, 0x39, 0x18, 0xc8, 0x90, 0xd0, 0x58, + 0xcf, 0xf5, 0xdd, 0xa0, 0x3b, 0xde, 0x17, 0x1b, 0xc3, 0x12, 0xab, 0x8b, 0x8a, 0xb3, 0x25, 0x75, + 0xa4, 0xc8, 0xcc, 0xa2, 0x95, 0x36, 0xfc, 0x39, 0x7b, 0x64, 0x30, 0x4e, 0x41, 0x66, 0x67, 0x3a, + 0x95, 0xf1, 0xcc, 0xbb, 0x5f, 0x0f, 0xbe, 0x29, 0xf2, 0x21, 0xdb, 0xcd, 0x74, 0xa1, 0xe8, 0x34, + 0x27, 0xa9, 0x95, 0xf5, 0xda, 0xbe, 0x1b, 0x74, 0xa2, 0x1b, 0x1a, 0x1f, 0xb3, 0x1e, 0xa4, 0xa9, + 0xfe, 0xf6, 0x59, 0xa7, 0x45, 0x86, 0x47, 0xdf, 0x73, 0x50, 0xd5, 0xe2, 0xde, 0x03, 0xdf, 0x09, + 0x76, 0xa2, 0x8d, 0x6f, 0xfc, 0x15, 0x7b, 0x52, 0xd6, 0xd2, 0xa1, 0x54, 0x13, 0xa9, 0x92, 0x13, + 0x3d, 0x41, 0xef, 0x61, 0xbd, 0x41, 0xf3, 0xa1, 0xff, 0x81, 0x3d, 0x5e, 0x3b, 0x85, 0xef, 0x31, + 0x77, 0x8a, 0xb3, 0xda, 0xfe, 0x4e, 0x54, 0x7d, 0xf2, 0x1e, 0x6b, 0x97, 0x90, 0x16, 0x78, 0xe5, + 0xe0, 0xe2, 0xe7, 0x7d, 0xeb, 0xc0, 0x19, 0xfe, 0x74, 0xd8, 0xde, 0xaa, 0x2f, 0xc7, 0xd2, 0x12, + 0xff, 0xd4, 0x08, 0x51, 0x6c, 0x17, 0x62, 0x45, 0xaf, 0x45, 0xf8, 0x8e, 0xb5, 0x25, 0x61, 0x66, + 0xbd, 0x56, 0x9d, 0xcd, 0xb3, 0x2d, 0xb2, 0x89, 0x16, 0xc4, 0x61, 0xef, 0xf7, 0x7c, 0xe0, 0xfc, + 0x99, 0x0f, 0x9c, 0xbf, 0xf3, 0x81, 0xf3, 0xe3, 0xdf, 0xe0, 0xde, 0x97, 0x56, 0x39, 0xfa, 0x1f, + 0x00, 0x00, 0xff, 0xff, 0xa2, 0xef, 0xf0, 0x1a, 0xb1, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1/register.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1/register.go new file mode 100644 index 00000000..162f15ae --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1/register.go @@ -0,0 +1,9 @@ +package v1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("storage.k8s.io", "v1", "storageclasss", false, &StorageClass{}) + + k8s.RegisterList("storage.k8s.io", "v1", "storageclasss", false, &StorageClassList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1beta1/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1beta1/generated.pb.go index 90c93777..20b043db 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1beta1/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1beta1/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/storage/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto + k8s.io/api/storage/v1beta1/generated.proto It has these top-level messages: StorageClass @@ -17,11 +16,11 @@ package v1beta1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" +import _ "github.com/ericchiang/k8s/apis/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "github.com/ericchiang/k8s/apis/meta/v1" import _ "github.com/ericchiang/k8s/runtime" import _ "github.com/ericchiang/k8s/runtime/schema" import _ "github.com/ericchiang/k8s/util/intstr" -import _ "github.com/ericchiang/k8s/api/v1" import io "io" @@ -43,16 +42,34 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // according to etcd is in ObjectMeta.Name. type StorageClass struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Provisioner indicates the type of the provisioner. Provisioner *string `protobuf:"bytes,2,opt,name=provisioner" json:"provisioner,omitempty"` // Parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional - Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + ReclaimPolicy *string `protobuf:"bytes,4,opt,name=reclaimPolicy" json:"reclaimPolicy,omitempty"` + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + MountOptions []string `protobuf:"bytes,5,rep,name=mountOptions" json:"mountOptions,omitempty"` + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + AllowVolumeExpansion *bool `protobuf:"varint,6,opt,name=allowVolumeExpansion" json:"allowVolumeExpansion,omitempty"` + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. 
+ // +optional + VolumeBindingMode *string `protobuf:"bytes,7,opt,name=volumeBindingMode" json:"volumeBindingMode,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *StorageClass) Reset() { *m = StorageClass{} } @@ -60,7 +77,7 @@ func (m *StorageClass) String() string { return proto.CompactTextStri func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *StorageClass) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta { +func (m *StorageClass) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta { if m != nil { return m.Metadata } @@ -81,12 +98,40 @@ func (m *StorageClass) GetParameters() map[string]string { return nil } +func (m *StorageClass) GetReclaimPolicy() string { + if m != nil && m.ReclaimPolicy != nil { + return *m.ReclaimPolicy + } + return "" +} + +func (m *StorageClass) GetMountOptions() []string { + if m != nil { + return m.MountOptions + } + return nil +} + +func (m *StorageClass) GetAllowVolumeExpansion() bool { + if m != nil && m.AllowVolumeExpansion != nil { + return *m.AllowVolumeExpansion + } + return false +} + +func (m *StorageClass) GetVolumeBindingMode() string { + if m != nil && m.VolumeBindingMode != nil { + return *m.VolumeBindingMode + } + return "" +} + // StorageClassList is a collection of storage classes. type StorageClassList struct { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - Metadata *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + Metadata *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Items is the list of StorageClasses Items []*StorageClass `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -97,7 +142,7 @@ func (m *StorageClassList) String() string { return proto.CompactText func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *StorageClassList) GetMetadata() *k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta { +func (m *StorageClassList) GetMetadata() *k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta { if m != nil { return m.Metadata } @@ -112,8 +157,8 @@ func (m *StorageClassList) GetItems() []*StorageClass { } func init() { - proto.RegisterType((*StorageClass)(nil), "github.com/ericchiang.k8s.apis.storage.v1beta1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "github.com/ericchiang.k8s.apis.storage.v1beta1.StorageClassList") + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -163,6 +208,43 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], v) } } + if m.ReclaimPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i += copy(dAtA[i:], *m.ReclaimPolicy) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if 
m.AllowVolumeExpansion != nil { + dAtA[i] = 0x30 + i++ + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -212,24 +294,6 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -258,6 +322,23 @@ func (m *StorageClass) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -352,7 +433,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ObjectMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -414,7 +495,103 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -424,12 +601,27 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -439,70 +631,71 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Parameters == nil { - m.Parameters = make(map[string]string) + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s iNdEx = postIndex default: iNdEx = preIndex @@ -582,7 +775,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Metadata == nil { - m.Metadata = &k8s_io_kubernetes_pkg_apis_meta_v1.ListMeta{} + m.Metadata = &k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta{} } if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -746,34 +939,38 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/apis/storage/v1beta1/generated.proto", fileDescriptorGenerated) -} +func init() { proto.RegisterFile("k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 373 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x91, 0xcd, 0x6a, 0xdb, 0x40, - 0x14, 0x85, 0x3b, 0x32, 0xa6, 0xf5, 0xb8, 0x50, 0x23, 0xba, 0x50, 0xbd, 0x10, 0xc2, 0x2b, 0x53, - 0xdc, 0x19, 0x64, 0xba, 0x30, 0x86, 0x6e, 0x1a, 0x02, 0x26, 0xe4, 0x0f, 0x65, 0x97, 0xdd, 0xd8, - 0xbe, 0xc8, 0x13, 0x59, 0x3f, 0xcc, 0x5c, 0x09, 0xfc, 0x26, 0x79, 0x81, 0xec, 0xf2, 0x20, 0x59, - 0xe6, 0x11, 0x82, 0xf3, 0x22, 0x41, 0x96, 0x70, 0x84, 0xe5, 0x18, 0x93, 0x9d, 0x34, 0x73, 0xbe, - 0x73, 0xef, 0x39, 0x43, 0xc7, 0xc1, 0x48, 0x33, 0x19, 0xf3, 0x20, 0x9d, 0x82, 0x8a, 0x00, 0x41, - 0xf3, 0x24, 0xf0, 0xb9, 0x48, 0xa4, 0xe6, 0x1a, 0x63, 0x25, 0x7c, 0xe0, 0x99, 0x3b, 0x05, 0x14, - 0x2e, 0xf7, 0x21, 0x02, 0x25, 0x10, 0xe6, 0x2c, 0x51, 0x31, 0xc6, 0xe6, 0xef, 0x82, 0x65, 0xef, - 0x2c, 0x4b, 0x02, 0x9f, 0xe5, 0x2c, 0x2b, 0x59, 0x56, 0xb2, 0xdd, 0xe1, 0x81, 0x39, 0x21, 0xa0, - 0xe0, 0x59, 0xcd, 0xbf, 0xfb, 0x67, 0x3f, 0xa3, 0xd2, 0x08, 0x65, 0x08, 0x35, 0xf9, 0xdf, 0xc3, - 0x72, 0x3d, 0x5b, 0x40, 0x28, 0x6a, 0x94, 0xbb, 0x9f, 0x4a, 0x51, 0x2e, 0xb9, 0x8c, 0x50, 0xa3, - 0xaa, 0x21, 0x83, 0x0f, 0xb3, 0xec, 0x49, 0xd1, 0x7b, 0x30, 0xe8, 0xf7, 0x9b, 0xa2, 0x8d, 0x93, - 0xa5, 0xd0, 0xda, 0x3c, 0xa3, 0xdf, 0xf2, 0xc4, 0x73, 0x81, 0xc2, 0x22, 0x0e, 0xe9, 0xb7, 0x87, - 0x8c, 0x1d, 0x68, 0x32, 0xd7, 0xb2, 0xcc, 0x65, 0x57, 0xd3, 0x3b, 0x98, 0xe1, 0x05, 0xa0, 0xf0, - 0xb6, 0xbc, 0xe9, 0xd0, 0x76, 0xa2, 0xe2, 0x4c, 0x6a, 0x19, 0x47, 0xa0, 0x2c, 0xc3, 0x21, 0xfd, - 0x96, 0x57, 0x3d, 0x32, 0x17, 0x94, 0x26, 0x42, 0x89, 0x10, 0x10, 0x94, 0xb6, 0x1a, 0x4e, 0xa3, - 0xdf, 0x1e, 0x4e, 0xd8, 0xf1, 0x2f, 0xc7, 0xaa, 0xbb, 0xb3, 0xeb, 0xad, 0xd5, 0x69, 0x84, 0x6a, - 0xe5, 0x55, 0xbc, 0xbb, 0xff, 0xe8, 0x8f, 0x9d, 0x6b, 0xb3, 0x43, 0x1b, 0x01, 0xac, 0x36, 
0x29, - 0x5b, 0x5e, 0xfe, 0x69, 0xfe, 0xa4, 0xcd, 0x4c, 0x2c, 0x53, 0x28, 0x57, 0x2d, 0x7e, 0xc6, 0xc6, - 0x88, 0xf4, 0x1e, 0x09, 0xed, 0x54, 0x67, 0x9d, 0x4b, 0x8d, 0xe6, 0xa4, 0xd6, 0xd5, 0xe0, 0x98, - 0xae, 0x72, 0x76, 0xa7, 0xa9, 0x4b, 0xda, 0x94, 0x08, 0xa1, 0xb6, 0x8c, 0x4d, 0x05, 0xa3, 0xcf, - 0x56, 0xe0, 0x15, 0x36, 0xff, 0x7f, 0x3d, 0xad, 0x6d, 0xf2, 0xbc, 0xb6, 0xc9, 0xcb, 0xda, 0x26, - 0xf7, 0xaf, 0xf6, 0x97, 0xdb, 0xaf, 0xa5, 0xfc, 0x2d, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x5c, 0x08, - 0xd1, 0x54, 0x03, 0x00, 0x00, + // 465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0x71, 0x4c, 0x68, 0xb3, 0x29, 0xa2, 0xac, 0x7a, 0x30, 0x39, 0x44, 0x56, 0xc4, 0xc1, + 0x42, 0x68, 0x4d, 0x02, 0x42, 0x11, 0x12, 0x1c, 0x8a, 0x7a, 0x41, 0xad, 0x5a, 0x19, 0x09, 0x21, + 0x6e, 0x53, 0x67, 0xe4, 0x2e, 0xb1, 0x77, 0xad, 0xdd, 0xb1, 0x21, 0x6f, 0xc2, 0x89, 0x97, 0xe1, + 0xc2, 0x91, 0x47, 0x40, 0xe1, 0x45, 0x90, 0x9d, 0x28, 0x75, 0x93, 0x46, 0xcd, 0xcd, 0xfe, 0xf7, + 0xff, 0xfe, 0x9d, 0x9d, 0x9f, 0x3d, 0x9b, 0x8e, 0xad, 0x90, 0x3a, 0x84, 0x5c, 0x86, 0x96, 0xb4, + 0x81, 0x04, 0xc3, 0x72, 0x78, 0x89, 0x04, 0xc3, 0x30, 0x41, 0x85, 0x06, 0x08, 0x27, 0x22, 0x37, + 0x9a, 0x34, 0xef, 0x2d, 0xbc, 0x02, 0x72, 0x29, 0x96, 0x5e, 0xb1, 0xf4, 0xf6, 0x06, 0x8d, 0x9c, + 0x58, 0x9b, 0x2a, 0x64, 0x9d, 0xef, 0xbd, 0xba, 0xf6, 0x64, 0x10, 0x5f, 0x49, 0x85, 0x66, 0x16, + 0xe6, 0xd3, 0xa4, 0x12, 0x6c, 0x98, 0x21, 0xc1, 0x6d, 0x54, 0xb8, 0x8d, 0x32, 0x85, 0x22, 0x99, + 0xe1, 0x06, 0xf0, 0xfa, 0x2e, 0xc0, 0xc6, 0x57, 0x98, 0xc1, 0x06, 0xf7, 0x72, 0x1b, 0x57, 0x90, + 0x4c, 0x43, 0xa9, 0xc8, 0x92, 0x59, 0x87, 0x06, 0xbf, 0x5c, 0x76, 0xf0, 0x71, 0xb1, 0x8b, 0xf7, + 0x29, 0x58, 0xcb, 0x4f, 0xd9, 0x7e, 0xf5, 0x92, 0x09, 0x10, 0x78, 0x8e, 0xef, 0x04, 0xdd, 0xd1, + 0x0b, 0x71, 0xbd, 0xb7, 0x55, 0xb0, 0xc8, 0xa7, 0x49, 0x25, 0x58, 0x51, 0xb9, 0x45, 0x39, 0x14, + 0xe7, 0x97, 0x5f, 0x31, 0xa6, 0x33, 0x24, 0x88, 0x56, 0x09, 0xdc, 0x67, 0xdd, 0xdc, 0xe8, 0x52, + 0x5a, 0xa9, 0x15, 0x1a, 0xaf, 0xe5, 0x3b, 0x41, 0x27, 0x6a, 0x4a, 0xfc, 0x33, 0x63, 0x39, 0x18, + 0xc8, 0x90, 0xd0, 0x58, 0xcf, 0xf5, 0xdd, 0xa0, 0x3b, 0x1a, 0x8b, 0xed, 0x4d, 0x89, 0xe6, 0xb4, + 0xe2, 0x62, 0x85, 0x9e, 0x28, 0x32, 0xb3, 0xa8, 0x91, 0xc5, 0x9f, 0xb2, 0x87, 0x06, 0xe3, 0x14, + 0x64, 0x76, 0xa1, 0x53, 0x19, 0xcf, 0xbc, 0xfb, 0xf5, 0xed, 0x37, 0x45, 0x3e, 0x60, 0x07, 0x99, + 0x2e, 0x14, 0x9d, 0xe7, 0x24, 0xb5, 0xb2, 0x5e, 0xdb, 0x77, 0x83, 0x4e, 0x74, 0x43, 0xe3, 0x23, + 0x76, 0x04, 0x69, 0xaa, 0xbf, 0x7d, 0xd2, 0x69, 0x91, 0xe1, 0xc9, 0xf7, 0x1c, 0x54, 0x35, 0xbd, + 0xf7, 0xc0, 0x77, 0x82, 0xfd, 0xe8, 0xd6, 0x33, 0xfe, 0x9c, 0x3d, 0x2e, 0x6b, 0xe9, 0x58, 0xaa, + 0x89, 0x54, 0xc9, 0x99, 0x9e, 0xa0, 0xb7, 0x57, 0x4f, 0xb0, 0x79, 0xd0, 0x7b, 0xcb, 0x1e, 0xad, + 0x3d, 0x85, 0x1f, 0x32, 0x77, 0x8a, 0xb3, 0xba, 0x83, 0x4e, 0x54, 0x7d, 0xf2, 0x23, 0xd6, 0x2e, + 0x21, 0x2d, 0x70, 0xb9, 0xc6, 0xc5, 0xcf, 0x9b, 0xd6, 0xd8, 0x19, 0xfc, 0x74, 0xd8, 0x61, 0x73, + 0x2f, 0xa7, 0xd2, 0x12, 0xff, 0xb0, 0xd1, 0xa4, 0xd8, 0xad, 0xc9, 0x8a, 0x5e, 0xeb, 0xf1, 0x1d, + 0x6b, 0x4b, 0xc2, 0xcc, 0x7a, 0xad, 0xba, 0xa0, 0x60, 0xd7, 0x82, 0xa2, 0x05, 0x76, 0xfc, 0xe4, + 0xf7, 0xbc, 0xef, 0xfc, 0x99, 0xf7, 0x9d, 0xbf, 0xf3, 0xbe, 0xf3, 0xe3, 0x5f, 0xff, 0xde, 0x97, + 0xbd, 0xa5, 0xfd, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x64, 0x23, 0xff, 0xfa, 0xc2, 0x03, 0x00, + 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1beta1/register.go 
b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1beta1/register.go new file mode 100644 index 00000000..76dedc00 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/apis/storage/v1beta1/register.go @@ -0,0 +1,9 @@ +package v1beta1 + +import "github.com/ericchiang/k8s" + +func init() { + k8s.Register("storage.k8s.io", "v1beta1", "storageclasss", false, &StorageClass{}) + + k8s.RegisterList("storage.k8s.io", "v1beta1", "storageclasss", false, &StorageClassList{}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/client.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/client.go index c98f1b15..fb8254ac 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/client.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/client.go @@ -1,15 +1,23 @@ /* Package k8s implements a Kubernetes client. - c, err := k8s.NewInClusterClient() - if err != nil { - // handle error - } - extensions := c.ExtensionsV1Beta1() + import ( + "github.com/ericchiang/k8s" + appsv1 "github.com/ericchiang/k8s/apis/apps/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + ) - ingresses, err := extensions.ListIngresses(ctx, c.Namespace) - if err != nil { - // handle error + func listDeployments() (*apssv1.DeploymentList, error) { + c, err := k8s.NewInClusterClient() + if err != nil { + return nil, err + } + + var deployments appsv1.DeploymentList + if err := c.List(ctx, "my-namespace", &deployments); err != nil { + return nil, err + } + return deployments, nil } */ @@ -21,26 +29,19 @@ import ( "crypto/tls" "crypto/x509" "encoding/base64" - "encoding/binary" "errors" "fmt" "io" "io/ioutil" "net" "net/http" - "net/url" "os" - "path" "strconv" - "strings" "time" "golang.org/x/net/http2" - "github.com/ericchiang/k8s/api/unversioned" - "github.com/ericchiang/k8s/runtime" - "github.com/ericchiang/k8s/watch/versioned" - "github.com/golang/protobuf/proto" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" ) const ( @@ -55,23 +56,15 @@ const ( // String returns a pointer to a string. Useful for creating API objects // that take pointers instead of literals. -// -// cm := &v1.ConfigMap{ -// Metadata: &v1.ObjectMeta{ -// Name: k8s.String("myconfigmap"), -// Namespace: k8s.String("default"), -// }, -// Data: map[string]string{ -// "foo": "bar", -// }, -// } -// func String(s string) *string { return &s } -// Int is a convinence for converting an int literal to a pointer to an int. +// Int is a convenience for converting an int literal to a pointer to an int. func Int(i int) *int { return &i } -// Bool is a convinence for converting a bool literal to a pointer to a bool. +// Int32 is a convenience for converting an int32 literal to a pointer to an int32. +func Int32(i int32) *int32 { return &i } + +// Bool is a convenience for converting a bool literal to a pointer to a bool. func Bool(b bool) *bool { return &b } const ( @@ -292,7 +285,10 @@ func newClient(cluster Cluster, user AuthInfo, namespace string) (*Client, error } // See https://github.com/gtank/cryptopasta - tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12} + tlsConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: cluster.InsecureSkipTLSVerify, + } if len(ca) != 0 { tlsConfig.RootCAs = x509.NewCertPool() @@ -361,7 +357,7 @@ func newClient(cluster Cluster, user AuthInfo, namespace string) (*Client, error // APIError is an error from a unexpected status code. 
type APIError struct { // The status object returned by the Kubernetes API, - Status *unversioned.Status + Status *metav1.Status // Status code returned by the HTTP request. // @@ -378,18 +374,18 @@ func (e *APIError) Error() string { return fmt.Sprintf("%#v", e) } -func checkStatusCode(c *codec, statusCode int, body []byte) error { +func checkStatusCode(contentType string, statusCode int, body []byte) error { if statusCode/100 == 2 { return nil } - return newAPIError(c, statusCode, body) + return newAPIError(contentType, statusCode, body) } -func newAPIError(c *codec, statusCode int, body []byte) error { - status := new(unversioned.Status) - if err := c.unmarshal(body, status); err != nil { - return fmt.Errorf("decode error status: %v", err) +func newAPIError(contentType string, statusCode int, body []byte) error { + status := new(metav1.Status) + if err := unmarshal(body, contentType, status); err != nil { + return fmt.Errorf("decode error status %d: %v", statusCode, err) } return &APIError{status, statusCode} } @@ -401,118 +397,95 @@ func (c *Client) client() *http.Client { return c.Client } -// The following methods hold the logic for interacting with the Kubernetes API. Generated -// clients are thin wrappers on top of these methods. +// Create creates a resource of a registered type. The API version and resource +// type is determined by the type of the req argument. The result is unmarshaled +// into req. // -// This client implements specs in the "API Conventions" developer document, which can be -// found here: +// configMap := corev1.ConfigMap{ +// Metadata: &metav1.ObjectMeta{ +// Name: k8s.String("my-configmap"), +// Namespace: k8s.String("my-namespace"), +// }, +// Data: map[string]string{ +// "my-key": "my-val", +// }, +// } +// if err := client.Create(ctx, &configMap); err != nil { +// // handle error +// } +// // resource is updated with response of create request +// fmt.Println(conifgMap.Metaata.GetCreationTimestamp()) // -// https://github.com/kubernetes/kubernetes/blob/master/docs/devel/api-conventions.md - -func (c *Client) urlFor(apiGroup, apiVersion, namespace, resource, name string, options ...Option) string { - basePath := "apis/" - if apiGroup == "" { - basePath = "api/" - } - - var p string - if namespace != "" { - p = path.Join(basePath, apiGroup, apiVersion, "namespaces", namespace, resource, name) - } else { - p = path.Join(basePath, apiGroup, apiVersion, resource, name) - } - endpoint := "" - if strings.HasSuffix(c.Endpoint, "/") { - endpoint = c.Endpoint + p - } else { - endpoint = c.Endpoint + "/" + p - } - if len(options) == 0 { - return endpoint - } - - v := url.Values{} - for _, option := range options { - key, val := option.queryParam() - v.Set(key, val) - } - return endpoint + "?" + v.Encode() -} - -func (c *Client) urlForPath(path string) string { - if strings.HasPrefix(path, "/") { - path = path[1:] - } - if strings.HasSuffix(c.Endpoint, "/") { - return c.Endpoint + path - } - return c.Endpoint + "/" + path -} - -func (c *Client) create(ctx context.Context, codec *codec, verb, url string, req, resp interface{}) error { - body, err := codec.marshal(req) +func (c *Client) Create(ctx context.Context, req Resource, options ...Option) error { + url, err := resourceURL(c.Endpoint, req, false, options...) 
if err != nil { return err } + return c.do(ctx, "POST", url, req, req) +} - r, err := c.newRequest(ctx, verb, url, bytes.NewReader(body)) +func (c *Client) Delete(ctx context.Context, req Resource, options ...Option) error { + url, err := resourceURL(c.Endpoint, req, true, options...) if err != nil { return err } - r.Header.Set("Content-Type", codec.contentType) - r.Header.Set("Accept", codec.contentType) + return c.do(ctx, "DELETE", url, nil, nil) +} - re, err := c.client().Do(r) +func (c *Client) Update(ctx context.Context, req Resource, options ...Option) error { + url, err := resourceURL(c.Endpoint, req, true, options...) if err != nil { return err } - defer re.Body.Close() + return c.do(ctx, "PUT", url, req, req) +} - respBody, err := ioutil.ReadAll(re.Body) +func (c *Client) Get(ctx context.Context, namespace, name string, resp Resource, options ...Option) error { + url, err := resourceGetURL(c.Endpoint, namespace, name, resp, options...) if err != nil { - return fmt.Errorf("read body: %v", err) - } - - if err := checkStatusCode(codec, re.StatusCode, respBody); err != nil { return err } - return codec.unmarshal(respBody, resp) + return c.do(ctx, "GET", url, nil, resp) } -func (c *Client) delete(ctx context.Context, codec *codec, url string) error { - r, err := c.newRequest(ctx, "DELETE", url, nil) +func (c *Client) List(ctx context.Context, namespace string, resp ResourceList, options ...Option) error { + url, err := resourceListURL(c.Endpoint, namespace, resp, options...) if err != nil { return err } - r.Header.Set("Accept", codec.contentType) + return c.do(ctx, "GET", url, nil, resp) +} - re, err := c.client().Do(r) - if err != nil { - return err +func (c *Client) do(ctx context.Context, verb, url string, req, resp interface{}) error { + var ( + contentType string + body io.Reader + ) + if req != nil { + ct, data, err := marshal(req) + if err != nil { + return fmt.Errorf("encoding object: %v", err) + } + contentType = ct + body = bytes.NewReader(data) } - defer re.Body.Close() - - respBody, err := ioutil.ReadAll(re.Body) + r, err := http.NewRequest(verb, url, body) if err != nil { - return fmt.Errorf("read body: %v", err) + return fmt.Errorf("new request: %v", err) } - - if err := checkStatusCode(codec, re.StatusCode, respBody); err != nil { - return err + if contentType != "" { + r.Header.Set("Content-Type", contentType) + r.Header.Set("Accept", contentType) + } else if resp != nil { + r.Header.Set("Accept", contentTypeFor(resp)) } - return nil -} - -// get can be used to either get or list a given resource. 
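A minimal sketch of how the new generic Create/List methods above combine with the storage/v1 types registered earlier in this diff. It assumes the registered StorageClass satisfies the client's Resource interface, that metav1.ObjectMeta exposes the usual generated getters, and that an empty namespace addresses the cluster-scoped resource; the provisioner and parameter values are arbitrary examples, not part of the vendored code.

	package main

	import (
		"context"
		"fmt"
		"os"

		"github.com/ericchiang/k8s"
		metav1 "github.com/ericchiang/k8s/apis/meta/v1"
		storagev1 "github.com/ericchiang/k8s/apis/storage/v1"
	)

	func createAndListStorageClasses(ctx context.Context, client *k8s.Client) error {
		// StorageClass is registered as cluster-scoped, so no namespace is set
		// on the object metadata.
		sc := &storagev1.StorageClass{
			Metadata: &metav1.ObjectMeta{
				Name: k8s.String("fast"),
			},
			Provisioner:   k8s.String("kubernetes.io/gce-pd"),
			Parameters:    map[string]string{"type": "pd-ssd"},
			ReclaimPolicy: k8s.String("Delete"),
		}
		// Create POSTs the object and unmarshals the API server's response back into sc.
		if err := client.Create(ctx, sc); err != nil {
			return err
		}

		// List fills the registered list type; an empty namespace is assumed
		// here for the cluster-scoped resource.
		var list storagev1.StorageClassList
		if err := client.List(ctx, "", &list); err != nil {
			return err
		}
		for _, item := range list.Items {
			fmt.Println(item.GetMetadata().GetName())
		}
		return nil
	}

	func main() {
		client, err := k8s.NewInClusterClient()
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		if err := createAndListStorageClasses(context.Background(), client); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
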
-func (c *Client) get(ctx context.Context, codec *codec, url string, resp interface{}) error { - r, err := c.newRequest(ctx, "GET", url, nil) - if err != nil { - return err + if c.SetHeaders != nil { + c.SetHeaders(r.Header) } - r.Header.Set("Accept", codec.contentType) + re, err := c.client().Do(r) if err != nil { - return err + return fmt.Errorf("performing request: %v", err) } defer re.Body.Close() @@ -521,95 +494,14 @@ func (c *Client) get(ctx context.Context, codec *codec, url string, resp interfa return fmt.Errorf("read body: %v", err) } - if err := checkStatusCode(codec, re.StatusCode, respBody); err != nil { + respCT := re.Header.Get("Content-Type") + if err := checkStatusCode(respCT, re.StatusCode, respBody); err != nil { return err } - return codec.unmarshal(respBody, resp) -} - -var unknownPrefix = []byte{0x6b, 0x38, 0x73, 0x00} - -func parseUnknown(b []byte) (*runtime.Unknown, error) { - if !bytes.HasPrefix(b, unknownPrefix) { - return nil, errors.New("bytes did not start with expected prefix") - } - - var u runtime.Unknown - if err := proto.Unmarshal(b[len(unknownPrefix):], &u); err != nil { - return nil, err - } - return &u, nil -} - -type event struct { - event *versioned.Event - unknown *runtime.Unknown -} - -type watcher struct { - r io.ReadCloser -} - -func (w *watcher) Close() error { - return w.r.Close() -} - -// Decode the next event from a watch stream. -// -// See: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/protobuf.md#streaming-wire-format -func (w *watcher) next() (*versioned.Event, *runtime.Unknown, error) { - length := make([]byte, 4) - if _, err := io.ReadFull(w.r, length); err != nil { - return nil, nil, err - } - - body := make([]byte, int(binary.BigEndian.Uint32(length))) - if _, err := io.ReadFull(w.r, body); err != nil { - return nil, nil, fmt.Errorf("read frame body: %v", err) - } - - var event versioned.Event - if err := proto.Unmarshal(body, &event); err != nil { - return nil, nil, err - } - - if event.Object == nil { - return nil, nil, fmt.Errorf("event had no underlying object") - } - - unknown, err := parseUnknown(event.Object.Raw) - if err != nil { - return nil, nil, err - } - - return &event, unknown, nil -} - -func (c *Client) watch(ctx context.Context, url string) (*watcher, error) { - if strings.Contains(url, "?") { - url = url + "&watch=true" - } else { - url = url + "?watch=true" - } - r, err := c.newRequest(ctx, "GET", url, nil) - if err != nil { - return nil, err - } - r.Header.Set("Accept", "application/vnd.kubernetes.protobuf;type=watch") - resp, err := c.client().Do(r) - if err != nil { - return nil, err - } - - if resp.StatusCode/100 != 2 { - body, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, err + if resp != nil { + if err := unmarshal(respBody, respCT, resp); err != nil { + return fmt.Errorf("decode response: %v", err) } - return nil, newAPIError(pbCodec, resp.StatusCode, body) } - - w := &watcher{resp.Body} - return w, nil + return nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/codec.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/codec.go index d2096715..e4fdc0ab 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/codec.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/codec.go @@ -10,41 +10,56 @@ import ( "github.com/golang/protobuf/proto" ) -type codec struct { - contentType string - marshal func(interface{}) ([]byte, error) - unmarshal 
func([]byte, interface{}) error +const ( + contentTypePB = "application/vnd.kubernetes.protobuf" + contentTypeJSON = "application/json" +) + +func contentTypeFor(i interface{}) string { + if _, ok := i.(proto.Message); ok { + return contentTypePB + } + return contentTypeJSON } -var ( - // Kubernetes implements its own custom protobuf format to allow clients (and possibly - // servers) to use either JSON or protocol buffers. The protocol introduces a custom content - // type and magic bytes to signal the use of protobufs, and wraps each object with API group, - // version and resource data. - // - // The protocol spec which this client implements can be found here: - // - // https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/protobuf.md - // - pbCodec = &codec{ - contentType: "application/vnd.kubernetes.protobuf", - marshal: marshalPB, - unmarshal: unmarshalPB, +// marshal encodes an object and returns the content type of that resource +// and the marshaled representation. +// +// marshal prefers protobuf encoding, but falls back to JSON. +func marshal(i interface{}) (string, []byte, error) { + if _, ok := i.(proto.Message); ok { + data, err := marshalPB(i) + return contentTypePB, data, err } - jsonCodec = &codec{ - contentType: "application/json", - marshal: json.Marshal, - unmarshal: json.Unmarshal, + data, err := json.Marshal(i) + return contentTypeJSON, data, err +} + +// unmarshal decoded an object given the content type of the encoded form. +func unmarshal(data []byte, contentType string, i interface{}) error { + msg, isPBMsg := i.(proto.Message) + if contentType == contentTypePB && isPBMsg { + if err := unmarshalPB(data, msg); err != nil { + return fmt.Errorf("decode protobuf: %v", err) + } + return nil } -) + if isPBMsg { + // only decode into JSON of a protobuf message if the type + // explicitly implements json.Unmarshaler + if _, ok := i.(json.Unmarshaler); !ok { + return errors.New("cannot decode json payload into protobuf object") + } + } + if err := json.Unmarshal(data, i); err != nil { + return fmt.Errorf("decode json: %v", err) + } + return nil +} var magicBytes = []byte{0x6b, 0x38, 0x73, 0x00} -func unmarshalPB(b []byte, obj interface{}) error { - message, ok := obj.(proto.Message) - if !ok { - return fmt.Errorf("expected obj of type proto.Message, got %T", obj) - } +func unmarshalPB(b []byte, msg proto.Message) error { if len(b) < len(magicBytes) { return errors.New("payload is not a kubernetes protobuf object") } @@ -56,7 +71,7 @@ func unmarshalPB(b []byte, obj interface{}) error { if err := u.Unmarshal(b[len(magicBytes):]); err != nil { return fmt.Errorf("unmarshal unknown: %v", err) } - return proto.Unmarshal(u.Raw, message) + return proto.Unmarshal(u.Raw, msg) } func marshalPB(obj interface{}) ([]byte, error) { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/config.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/config.go index 4da56a0e..3b9972e5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/config.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/config.go @@ -28,143 +28,143 @@ type Config struct { // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. // +optional - Kind string `json:"kind,omitempty"` + Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). 
// Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify // a single value for the cluster version. // This field isn't really needed anyway, so we are deprecating it without replacement. // It will be ignored if it is present. // +optional - APIVersion string `json:"apiVersion,omitempty"` + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences"` + Preferences Preferences `json:"preferences" yaml:"preferences"` // Clusters is a map of referencable names to cluster configs - Clusters []NamedCluster `json:"clusters"` + Clusters []NamedCluster `json:"clusters" yaml:"clusters"` // AuthInfos is a map of referencable names to user configs - AuthInfos []NamedAuthInfo `json:"users"` + AuthInfos []NamedAuthInfo `json:"users" yaml:"users"` // Contexts is a map of referencable names to context configs - Contexts []NamedContext `json:"contexts"` + Contexts []NamedContext `json:"contexts" yaml:"contexts"` // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` + CurrentContext string `json:"current-context" yaml:"current-context"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` + Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` } type Preferences struct { // +optional - Colors bool `json:"colors,omitempty"` + Colors bool `json:"colors,omitempty" yaml:"colors,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` + Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` } // Cluster contains information about how to communicate with a kubernetes cluster type Cluster struct { // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` + Server string `json:"server" yaml:"server"` // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). // +optional - APIVersion string `json:"api-version,omitempty"` + APIVersion string `json:"api-version,omitempty" yaml:"api-version,omitempty"` // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. // +optional - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty" yaml:"insecure-skip-tls-verify,omitempty"` // CertificateAuthority is the path to a cert file for the certificate authority. // +optional - CertificateAuthority string `json:"certificate-authority,omitempty"` + CertificateAuthority string `json:"certificate-authority,omitempty" yaml:"certificate-authority,omitempty"` // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority // +optional - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty" yaml:"certificate-authority-data,omitempty"` // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` + Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` } // AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. type AuthInfo struct { // ClientCertificate is the path to a client cert file for TLS. // +optional - ClientCertificate string `json:"client-certificate,omitempty"` + ClientCertificate string `json:"client-certificate,omitempty" yaml:"client-certificate,omitempty"` // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate // +optional - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + ClientCertificateData []byte `json:"client-certificate-data,omitempty" yaml:"client-certificate-data,omitempty"` // ClientKey is the path to a client key file for TLS. // +optional - ClientKey string `json:"client-key,omitempty"` + ClientKey string `json:"client-key,omitempty" yaml:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData []byte `json:"client-key-data,omitempty" yaml:"client-key-data,omitempty"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" yaml:"token,omitempty"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional - TokenFile string `json:"tokenFile,omitempty"` + TokenFile string `json:"tokenFile,omitempty" yaml:"tokenFile,omitempty"` // Impersonate is the username to imperonate. The name matches the flag. // +optional - Impersonate string `json:"as,omitempty"` + Impersonate string `json:"as,omitempty" yaml:"as,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. // +optional - Username string `json:"username,omitempty"` + Username string `json:"username,omitempty" yaml:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. // +optional - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty" yaml:"password,omitempty"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty" yaml:"auth-provider,omitempty"` // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` + Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` } // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) type Context struct { // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` + Cluster string `json:"cluster" yaml:"cluster"` // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` + AuthInfo string `json:"user" yaml:"user"` // Namespace is the default namespace to use on unspecified requests // +optional - Namespace string `json:"namespace,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` + Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` } // NamedCluster relates nicknames to cluster information type NamedCluster struct { // Name is the nickname for this Cluster - Name string `json:"name"` + Name string `json:"name" yaml:"name"` // Cluster holds the cluster information - Cluster Cluster `json:"cluster"` + Cluster Cluster `json:"cluster" yaml:"cluster"` } // NamedContext relates nicknames to context information type NamedContext struct { // Name is the nickname for this Context - Name string `json:"name"` + Name string `json:"name" yaml:"name"` // Context holds the context information - Context Context `json:"context"` + Context Context `json:"context" yaml:"context"` } // NamedAuthInfo relates nicknames to auth information type NamedAuthInfo struct { // Name is the nickname for this AuthInfo - Name string `json:"name"` + Name string `json:"name" yaml:"name"` // AuthInfo holds the auth information - AuthInfo AuthInfo `json:"user"` + AuthInfo AuthInfo `json:"user" yaml:"user"` } // NamedExtension relates nicknames to extension information type NamedExtension struct { // Name is the nickname for this Extension - Name string `json:"name"` + Name string `json:"name" yaml:"name"` // Extension holds the extension information - Extension runtime.RawExtension `json:"extension"` + Extension runtime.RawExtension `json:"extension" yaml:"extension"` } // AuthProviderConfig holds the configuration for a specified auth provider. 
type AuthProviderConfig struct { - Name string `json:"name"` - Config map[string]string `json:"config"` + Name string `json:"name" yaml:"name"` + Config map[string]string `json:"config" yaml:"config"` } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/discovery.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/discovery.go index b2713cba..a219dae8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/discovery.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/discovery.go @@ -4,7 +4,7 @@ import ( "context" "path" - "github.com/ericchiang/k8s/api/unversioned" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" ) type Version struct { @@ -19,45 +19,48 @@ type Version struct { Platform string `json:"platform"` } -func (c *Client) Discovery() *Discovery { - return &Discovery{c} -} - // Discovery is a client used to determine the API version and supported // resources of the server. type Discovery struct { client *Client } +func NewDiscoveryClient(c *Client) *Discovery { + return &Discovery{c} +} + +func (d *Discovery) get(ctx context.Context, path string, resp interface{}) error { + return d.client.do(ctx, "GET", urlForPath(d.client.Endpoint, path), nil, resp) +} + func (d *Discovery) Version(ctx context.Context) (*Version, error) { var v Version - if err := d.client.get(ctx, jsonCodec, d.client.urlForPath("version"), &v); err != nil { + if err := d.get(ctx, "version", &v); err != nil { return nil, err } return &v, nil } -func (d *Discovery) APIGroups(ctx context.Context) (*unversioned.APIGroupList, error) { - var groups unversioned.APIGroupList - if err := d.client.get(ctx, pbCodec, d.client.urlForPath("apis"), &groups); err != nil { +func (d *Discovery) APIGroups(ctx context.Context) (*metav1.APIGroupList, error) { + var groups metav1.APIGroupList + if err := d.get(ctx, "apis", &groups); err != nil { return nil, err } return &groups, nil } -func (d *Discovery) APIGroup(ctx context.Context, name string) (*unversioned.APIGroup, error) { - var group unversioned.APIGroup - if err := d.client.get(ctx, pbCodec, d.client.urlForPath(path.Join("apis", name)), &group); err != nil { +func (d *Discovery) APIGroup(ctx context.Context, name string) (*metav1.APIGroup, error) { + var group metav1.APIGroup + if err := d.get(ctx, path.Join("apis", name), &group); err != nil { return nil, err } return &group, nil } -func (d *Discovery) APIResources(ctx context.Context, groupName, groupVersion string) (*unversioned.APIResourceList, error) { - var list unversioned.APIResourceList - if err := d.client.get(ctx, pbCodec, d.client.urlForPath(path.Join("apis", groupName, groupVersion)), &list); err != nil { +func (d *Discovery) APIResources(ctx context.Context, groupName, groupVersion string) (*metav1.APIResourceList, error) { + var list metav1.APIResourceList + if err := d.get(ctx, path.Join("apis", groupName, groupVersion), &list); err != nil { return nil, err } return &list, nil - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/gen.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/gen.go deleted file mode 100644 index 1cfb06c7..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/gen.go +++ /dev/null @@ -1,382 +0,0 @@ -// +build ignore - -package main - -import ( - "bytes" - "errors" - "fmt" - "go/types" - "io/ioutil" - "os" - "os/exec" - "path" - "sort" - "strings" - "text/template" - - "golang.org/x/tools/go/loader" -) - -func main() { - if 
err := load(); err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - os.Exit(2) - } -} - -func isInterface(obj interface{}) (*types.Interface, bool) { - switch obj := obj.(type) { - case *types.TypeName: - return isInterface(obj.Type()) - case *types.Named: - return isInterface(obj.Underlying()) - case *types.Interface: - return obj, true - default: - return nil, false - } -} - -type Resource struct { - Name string - Namespaced bool - HasList bool - Pluralized string -} - -type byName []Resource - -func (n byName) Len() int { return len(n) } -func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -func (n byName) Less(i, j int) bool { return n[i].Name < n[j].Name } - -type Package struct { - Name string - APIGroup string - APIVersion string - ImportPath string - ImportName string - Resources []Resource -} - -type byGroup []Package - -func (r byGroup) Len() int { return len(r) } -func (r byGroup) Swap(i, j int) { r[i], r[j] = r[j], r[i] } - -func (r byGroup) Less(i, j int) bool { - if r[i].APIGroup != r[j].APIGroup { - return r[i].APIGroup < r[j].APIGroup - } - return r[i].APIVersion < r[j].APIVersion -} - -// Incorrect but this is basically what Kubernetes does. -func pluralize(s string) string { - switch { - case strings.HasSuffix(s, "points"): - // NOTE: the k8s "endpoints" resource is already pluralized - return s - case strings.HasSuffix(s, "s"): - return s + "es" - case strings.HasSuffix(s, "y"): - return s[:len(s)-1] + "ies" - default: - return s + "s" - } -} - -var tmpl = template.Must(template.New("").Funcs(template.FuncMap{ - "pluralize": pluralize, -}).Parse(` -// {{ .Name }} returns a client for interacting with the {{ .APIGroup }}/{{ .APIVersion }} API group. -func (c *Client) {{ .Name }}() *{{ .Name }} { - return &{{ .Name }}{c} -} - -// {{ .Name }} is a client for interacting with the {{ .APIGroup }}/{{ .APIVersion }} API group. 
-type {{ .Name }} struct { - client *Client -} -{{ range $i, $r := .Resources }} -func (c *{{ $.Name }}) Create{{ $r.Name }}(ctx context.Context, obj *{{ $.ImportName }}.{{ $r.Name }}) (*{{ $.ImportName }}.{{ $r.Name }}, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !{{ $r.Namespaced }} && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if {{ $r.Namespaced }} { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("{{ $.APIGroup }}", "{{ $.APIVersion }}", ns, "{{ $r.Pluralized }}", "") - resp := new({{ $.ImportName }}.{{ $r.Name }}) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *{{ $.Name }}) Update{{ $r.Name }}(ctx context.Context, obj *{{ $.ImportName }}.{{ $r.Name }}) (*{{ $.ImportName }}.{{ $r.Name }}, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !{{ $r.Namespaced }} && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if {{ $r.Namespaced }} { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("{{ $.APIGroup }}", "{{ $.APIVersion }}", *md.Namespace, "{{ $r.Pluralized }}", *md.Name) - resp := new({{ $.ImportName }}.{{ $r.Name }}) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *{{ $.Name }}) Delete{{ $r.Name }}(ctx context.Context, name string{{ if $r.Namespaced }}, namespace string{{ end }}) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("{{ $.APIGroup }}", "{{ $.APIVersion }}", {{ if $r.Namespaced }}namespace{{ else }}AllNamespaces{{ end }}, "{{ $r.Pluralized }}", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *{{ $.Name }}) Get{{ $r.Name }}(ctx context.Context, name{{ if $r.Namespaced }}, namespace{{ end }} string) (*{{ $.ImportName }}.{{ $r.Name }}, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("{{ $.APIGroup }}", "{{ $.APIVersion }}", {{ if $r.Namespaced }}namespace{{ else }}AllNamespaces{{ end }}, "{{ $r.Pluralized }}", name) - resp := new({{ $.ImportName }}.{{ $r.Name }}) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -{{- if $r.HasList }} - -type {{ $.Name }}{{ $r.Name }}Watcher struct { - watcher *watcher -} - -func (w *{{ $.Name }}{{ $r.Name }}Watcher) Next() (*versioned.Event, *{{ $.ImportName }}.{{ $r.Name }}, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new({{ $.ImportName }}.{{ $r.Name }}) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *{{ $.Name }}{{ $r.Name }}Watcher) Close() error { - return w.watcher.Close() -} - -func (c *{{ $.Name }}) Watch{{ $r.Name | pluralize }}(ctx context.Context{{ if $r.Namespaced }}, namespace string{{ end }}, options ...Option) (*{{ $.Name }}{{ $r.Name }}Watcher, error) { - url := c.client.urlFor("{{ $.APIGroup }}", 
"{{ $.APIVersion }}", {{ if $r.Namespaced }}namespace{{ else }}AllNamespaces{{ end }}, "{{ $r.Pluralized }}", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &{{ $.Name }}{{ $r.Name }}Watcher{watcher}, nil -} - -func (c *{{ $.Name }}) List{{ $r.Name | pluralize }}(ctx context.Context{{ if $r.Namespaced }}, namespace string{{ end }}, options ...Option) (*{{ $.ImportName }}.{{ $r.Name }}List, error) { - url := c.client.urlFor("{{ $.APIGroup }}", "{{ $.APIVersion }}", {{ if $r.Namespaced }}namespace{{ else }}AllNamespaces{{ end }}, "{{ $r.Pluralized }}", "", options...) - resp := new({{ $.ImportName }}.{{ $r.Name }}List) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -}{{ end }} -{{ end }} -`)) - -var ( - apiGroupName = map[string]string{ - "authentication": "authentication.k8s.io", - "authorization": "authorization.k8s.io", - "certificates": "certificates.k8s.io", - "rbac": "rbac.authorization.k8s.io", - "storage": "storage.k8s.io", - } - notNamespaced = map[string]bool{ - "ClusterRole": true, - "ClusterRoleBinding": true, - - "ComponentStatus": true, - "Node": true, - "Namespace": true, - "PersistentVolume": true, - - "PodSecurityPolicy": true, - "ThirdPartyResource": true, - - "CertificateSigningRequest": true, - - "TokenReview": true, - - "SubjectAccessReview": true, - "SelfSubjectAccessReview": true, - - "ImageReview": true, - - "StorageClass": true, - } -) - -func clientName(apiGroup, apiVersion string) string { - switch apiGroup { - case "": - apiGroup = "Core" - case "rbac": - apiGroup = "RBAC" - default: - apiGroup = strings.Title(apiGroup) - } - r := strings.NewReplacer("alpha", "Alpha", "beta", "Beta") - return apiGroup + r.Replace(strings.Title(apiVersion)) -} - -func load() error { - out, err := exec.Command("go", "list", "./...").CombinedOutput() - if err != nil { - return fmt.Errorf("go list: %v %s", err, out) - } - - var conf loader.Config - if _, err := conf.FromArgs(strings.Fields(string(out)), false); err != nil { - return fmt.Errorf("from args: %v", err) - } - - prog, err := conf.Load() - if err != nil { - return fmt.Errorf("load: %v", err) - } - thisPkg, ok := prog.Imported["github.com/ericchiang/k8s"] - if !ok { - return errors.New("could not find this package") - } - - // Types defined in tpr.go. It's hacky, but to "load" interfaces as their - // go/types equilvalent, we either have to: - // - // * Define them in code somewhere (what we're doing here). - // * Manually construct them using go/types (blah). - // * Parse them from an inlined string (doesn't work in combination with other pkgs). 
- // - var interfaces []*types.Interface - for _, s := range []string{"object", "after16Object"} { - obj := thisPkg.Pkg.Scope().Lookup(s) - if obj == nil { - return errors.New("failed to lookup object interface") - } - intr, ok := isInterface(obj) - if !ok { - return errors.New("failed to convert to interface") - } - interfaces = append(interfaces, intr) - } - - var pkgs []Package - for name, pkgInfo := range prog.Imported { - pkg := Package{ - APIVersion: path.Base(name), - APIGroup: path.Base(path.Dir(name)), - ImportPath: name, - } - pkg.ImportName = pkg.APIGroup + pkg.APIVersion - - if pkg.APIGroup == "api" { - pkg.APIGroup = "" - } - - pkg.Name = clientName(pkg.APIGroup, pkg.APIVersion) - if name, ok := apiGroupName[pkg.APIGroup]; ok { - pkg.APIGroup = name - } - - for _, obj := range pkgInfo.Defs { - tn, ok := obj.(*types.TypeName) - if !ok { - continue - } - impl := false - for _, intr := range interfaces { - impl = impl || types.Implements(types.NewPointer(tn.Type()), intr) - } - if !impl { - continue - } - if tn.Name() == "JobTemplateSpec" { - continue - } - - pkg.Resources = append(pkg.Resources, Resource{ - Name: tn.Name(), - Pluralized: pluralize(strings.ToLower(tn.Name())), - HasList: pkgInfo.Pkg.Scope().Lookup(tn.Name()+"List") != nil, - Namespaced: !notNamespaced[tn.Name()], - }) - } - pkgs = append(pkgs, pkg) - } - - sort.Sort(byGroup(pkgs)) - - buff := new(bytes.Buffer) - buff.WriteString("package k8s\n\n") - buff.WriteString("import (\n") - buff.WriteString("\t\"context\"\n") - buff.WriteString("\t\"fmt\"\n\n") - for _, pkg := range pkgs { - if len(pkg.Resources) == 0 { - continue - } - fmt.Fprintf(buff, "\t%s \"%s\"\n", pkg.ImportName, pkg.ImportPath) - } - fmt.Fprintf(buff, "\t%q\n", "github.com/ericchiang/k8s/watch/versioned") - fmt.Fprintf(buff, "\t%q\n", "github.com/golang/protobuf/proto") - buff.WriteString(")\n") - - for _, pkg := range pkgs { - sort.Sort(byName(pkg.Resources)) - for _, resource := range pkg.Resources { - fmt.Println(pkg.APIGroup, pkg.APIVersion, resource.Name) - } - if len(pkg.Resources) != 0 { - if err := tmpl.Execute(buff, pkg); err != nil { - return fmt.Errorf("execute: %v", err) - } - } - } - return ioutil.WriteFile("types.go", buff.Bytes(), 0644) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/gen.sh b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/gen.sh deleted file mode 100755 index af587ecb..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/gen.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash - -set -ex - -# Clean up any existing build. -rm -rf assets/k8s.io -mkdir -p assets/k8s.io/kubernetes - -VERSIONS=( "1.4.7" "1.5.1" "1.6.0-rc.1" ) - -for VERSION in ${VERSIONS[@]}; do - if [ ! -f assets/v${VERSION}.zip ]; then - wget https://github.com/kubernetes/kubernetes/archive/v${VERSION}.zip -O assets/v${VERSION}.zip - fi - - # Copy source tree to assets/k8s.io/kubernetes. Newer versions overwrite existing ones. - unzip -q assets/v${VERSION}.zip -d assets/ - cp -r assets/kubernetes-${VERSION}/* assets/k8s.io/kubernetes - rm -rf assets/kubernetes-${VERSION} -done - -# Rewrite API machinery files to their equivalent. -apimachinery=assets/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/ -for file in $( find $apimachinery -type f -name '*.proto' ); do - path=assets/k8s.io/kubernetes/${file#$apimachinery} - mkdir -p $(dirname $path) - mv $file $path -done - -# Remove any existing generated code. 
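For reference, the naming scheme implemented by clientName in the deleted gen.go above can be exercised on its own; the sketch below copies that function as shown and prints a few sample group/version mappings. The expected outputs in the comments are derived from the function itself, not from the generated types.go, so treat them as an illustration rather than an authoritative list.

package main

import (
	"fmt"
	"strings"
)

// clientName is copied from the deleted gen.go above: it turns an API group
// and version into the name of the generated client type (strings.Title is
// kept to match the original, even though newer Go deprecates it).
func clientName(apiGroup, apiVersion string) string {
	switch apiGroup {
	case "":
		apiGroup = "Core"
	case "rbac":
		apiGroup = "RBAC"
	default:
		apiGroup = strings.Title(apiGroup)
	}
	r := strings.NewReplacer("alpha", "Alpha", "beta", "Beta")
	return apiGroup + r.Replace(strings.Title(apiVersion))
}

func main() {
	fmt.Println(clientName("", "v1"))           // CoreV1
	fmt.Println(clientName("apps", "v1beta1"))  // AppsV1Beta1
	fmt.Println(clientName("rbac", "v1alpha1")) // RBACV1Alpha1
}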
-rm -rf api apis config.go runtime util types.go watch - -# Generate Go code from proto definitions. -PKG=$PWD -cd assets - -protobuf=$( find k8s.io/kubernetes/pkg/{api,apis,util,runtime,watch} -name '*.proto' ) - -# Remote this ununused import: -# https://github.com/kubernetes/kubernetes/blob/v1.6.0-rc.1/pkg/api/v1/generated.proto#L29 -sed -i '/"k8s\.io\/apiserver\/pkg\/apis\/example\/v1\/generated.proto"/d' $protobuf - -# Rewrite all of the API machineary out of staging. -sed -i 's|"k8s.io/apimachinery/|"k8s.io/kubernetes/|g' $protobuf -sed -i 's/k8s\.io.apimachinery/k8s\.io.kubernetes/g' $protobuf - -for file in $protobuf; do - echo $file - # Generate protoc definitions at the base of this repo. - protoc --gofast_out=$PKG $file -done - -cd - - -mv k8s.io/kubernetes/pkg/* . -rm -rf k8s.io - -# Copy kubeconfig structs. -client_dir="client/unversioned/clientcmd/api/v1" -cp assets/k8s.io/kubernetes/pkg/${client_dir}/types.go config.go -sed -i 's|package v1|package k8s|g' config.go - -# Rewrite imports for the generated fiels. -sed -i 's|"k8s.io/kubernetes/pkg|"github.com/ericchiang/k8s|g' $( find {api,apis,config.go,util,runtime,watch} -name '*.go' ) -sed -i 's|"k8s.io.kubernetes.pkg.|"github.com/ericchiang.k8s.|g' $( find {api,apis,config.go,util,runtime,watch} -name '*.go' ) - -# Clean up assets. -rm -rf assets/k8s.io - -# Generate HTTP clients from Go structs. -go run gen.go - -# Fix JSON marshaling for types need by third party resources. -cat << EOF >> api/unversioned/time.go -package unversioned - -import ( - "encoding/json" - "time" -) - -// JSON marshaling logic for the Time type. Need to make -// third party resources JSON work. - -func (t Time) MarshalJSON() ([]byte, error) { - var seconds, nanos int64 - if t.Seconds != nil { - seconds = *t.Seconds - } - if t.Nanos != nil { - nanos = int64(*t.Nanos) - } - return json.Marshal(time.Unix(seconds, nanos)) -} - -func (t *Time) UnmarshalJSON(p []byte) error { - var t1 time.Time - if err := json.Unmarshal(p, &t1); err != nil { - return err - } - seconds := t1.Unix() - nanos := int32(t1.UnixNano()) - t.Seconds = &seconds - t.Nanos = &nanos - return nil -} -EOF -gofmt -w api/unversioned/time.go -cp api/unversioned/time.go apis/meta/v1 -sed -i 's|package unversioned|package v1|g' apis/meta/v1/time.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/labels.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/labels.go index 19639e38..aa71d1fb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/labels.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/labels.go @@ -35,10 +35,10 @@ func (l labelSelectorOption) queryParam() (string, string) { } func (l *LabelSelector) Selector() Option { - return labelSelectorOption(l.encode()) + return labelSelectorOption(l.String()) } -func (l *LabelSelector) encode() string { +func (l *LabelSelector) String() string { return strings.Join(l.stmts, ",") } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/resource.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/resource.go new file mode 100644 index 00000000..3277faf1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/resource.go @@ -0,0 +1,168 @@ +package k8s + +import ( + "errors" + "fmt" + "net/url" + "path" + "reflect" + "strings" + + metav1 "github.com/ericchiang/k8s/apis/meta/v1" +) + +type resourceType struct { + apiGroup string + apiVersion string + name string + namespaced 
bool +} + +var ( + resources = map[reflect.Type]resourceType{} + resourceLists = map[reflect.Type]resourceType{} +) + +// Resource is a Kubernetes resource, such as a Node or Pod. +type Resource interface { + GetMetadata() *metav1.ObjectMeta +} + +// Resource is list of common Kubernetes resources, such as a NodeList or +// PodList. +type ResourceList interface { + GetMetadata() *metav1.ListMeta +} + +func Register(apiGroup, apiVersion, name string, namespaced bool, r Resource) { + rt := reflect.TypeOf(r) + if _, ok := resources[rt]; ok { + panic(fmt.Sprintf("resource registered twice %T", r)) + } + resources[rt] = resourceType{apiGroup, apiVersion, name, namespaced} +} + +func RegisterList(apiGroup, apiVersion, name string, namespaced bool, l ResourceList) { + rt := reflect.TypeOf(l) + if _, ok := resources[rt]; ok { + panic(fmt.Sprintf("resource registered twice %T", l)) + } + resourceLists[rt] = resourceType{apiGroup, apiVersion, name, namespaced} +} + +func urlFor(endpoint, apiGroup, apiVersion, namespace, resource, name string, options ...Option) string { + basePath := "apis/" + if apiGroup == "" { + basePath = "api/" + } + + var p string + if namespace != "" { + p = path.Join(basePath, apiGroup, apiVersion, "namespaces", namespace, resource, name) + } else { + p = path.Join(basePath, apiGroup, apiVersion, resource, name) + } + e := "" + if strings.HasSuffix(endpoint, "/") { + e = endpoint + p + } else { + e = endpoint + "/" + p + } + if len(options) == 0 { + return e + } + + v := url.Values{} + for _, option := range options { + key, val := option.queryParam() + v.Set(key, val) + } + return e + "?" + v.Encode() +} + +func urlForPath(endpoint, path string) string { + if strings.HasPrefix(path, "/") { + path = path[1:] + } + if strings.HasSuffix(endpoint, "/") { + return endpoint + path + } + return endpoint + "/" + path +} + +func resourceURL(endpoint string, r Resource, withName bool, options ...Option) (string, error) { + t, ok := resources[reflect.TypeOf(r)] + if !ok { + return "", fmt.Errorf("unregistered type %T", r) + } + meta := r.GetMetadata() + if meta == nil { + return "", errors.New("resource has no object meta") + } + switch { + case t.namespaced && (meta.Namespace == nil || *meta.Namespace == ""): + return "", errors.New("no resource namespace provided") + case !t.namespaced && (meta.Namespace != nil && *meta.Namespace != ""): + return "", errors.New("resource not namespaced") + case withName && (meta.Name == nil || *meta.Name == ""): + return "", errors.New("no resource name provided") + } + name := "" + if withName { + name = *meta.Name + } + namespace := "" + if t.namespaced { + namespace = *meta.Namespace + } + + return urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, name, options...), nil +} + +func resourceGetURL(endpoint, namespace, name string, r Resource, options ...Option) (string, error) { + t, ok := resources[reflect.TypeOf(r)] + if !ok { + return "", fmt.Errorf("unregistered type %T", r) + } + + if !t.namespaced && namespace != "" { + return "", fmt.Errorf("type not namespaced") + } + if t.namespaced && namespace == "" { + return "", fmt.Errorf("no namespace provided") + } + + return urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, name, options...), nil +} + +func resourceListURL(endpoint, namespace string, r ResourceList, options ...Option) (string, error) { + t, ok := resourceLists[reflect.TypeOf(r)] + if !ok { + return "", fmt.Errorf("unregistered type %T", r) + } + + if !t.namespaced && namespace != "" { + return "", 
fmt.Errorf("type not namespaced") + } + + return urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, "", options...), nil +} + +func resourceWatchURL(endpoint, namespace string, r Resource, options ...Option) (string, error) { + t, ok := resources[reflect.TypeOf(r)] + if !ok { + return "", fmt.Errorf("unregistered type %T", r) + } + + if !t.namespaced && namespace != "" { + return "", fmt.Errorf("type not namespaced") + } + + url := urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, "", options...) + if strings.Contains(url, "?") { + url = url + "&watch=true" + } else { + url = url + "?watch=true" + } + return url, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/generated.pb.go index 9fceb5ef..de8ca3c4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/runtime/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/apimachinery/pkg/runtime/generated.proto /* Package runtime is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/runtime/generated.proto + k8s.io/apimachinery/pkg/runtime/generated.proto It has these top-level messages: RawExtension @@ -109,7 +108,7 @@ func (m *RawExtension) GetRaw() []byte { // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. // -// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true type TypeMeta struct { @@ -146,6 +145,7 @@ func (m *TypeMeta) GetKind() string { // metadata and field mutatation. 
// // +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true // +k8s:openapi-gen=true type Unknown struct { @@ -197,9 +197,9 @@ func (m *Unknown) GetContentType() string { } func init() { - proto.RegisterType((*RawExtension)(nil), "github.com/ericchiang.k8s.runtime.RawExtension") - proto.RegisterType((*TypeMeta)(nil), "github.com/ericchiang.k8s.runtime.TypeMeta") - proto.RegisterType((*Unknown)(nil), "github.com/ericchiang.k8s.runtime.Unknown") + proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") + proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -310,24 +310,6 @@ func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -879,27 +861,27 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/runtime/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 275 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x8f, 0xc1, 0x4a, 0xc3, 0x40, - 0x10, 0x86, 0xdd, 0xb6, 0xd0, 0x3a, 0x2d, 0x28, 0x7b, 0x8a, 0x82, 0x21, 0xe4, 0x62, 0x2f, 0x6e, - 0xd0, 0x93, 0x27, 0x0f, 0x4a, 0x8f, 0x5e, 0x82, 0x7a, 0xf0, 0x16, 0x9b, 0x21, 0x2c, 0xab, 0xb3, - 0x61, 0x33, 0x21, 0xfa, 0x26, 0x3e, 0x84, 0x0f, 0xe2, 0xd1, 0x47, 0x90, 0xf8, 0x22, 0x92, 0x35, - 0xad, 0xa5, 0x88, 0xb7, 0xe1, 0xdf, 0x6f, 0xfe, 0xfd, 0x06, 0x4e, 0xcc, 0x79, 0xa5, 0xb4, 0x4d, - 0x4c, 0xfd, 0x80, 0x8e, 0x90, 0xb1, 0x4a, 0x4a, 0x53, 0x24, 0xae, 0x26, 0xd6, 0x4f, 0x98, 0x14, - 0x48, 0xe8, 0x32, 0xc6, 0x5c, 0x95, 0xce, 0xb2, 0x95, 0x47, 0x3f, 0xb8, 0xfa, 0xc5, 0x55, 0x69, - 0x0a, 0xd5, 0xe3, 0x87, 0xa7, 0x7f, 0xb7, 0xd5, 0xac, 0x1f, 0x13, 0x4d, 0x5c, 0xb1, 0xdb, 0x6e, - 0x8c, 0x23, 0x98, 0xa5, 0x59, 0xb3, 0x78, 0x66, 0xa4, 0x4a, 0x5b, 0x92, 0xfb, 0x30, 0x74, 0x59, - 0x13, 0x88, 0x48, 0xcc, 0x67, 0x69, 0x37, 0xc6, 0x17, 0x30, 0xb9, 0x79, 0x29, 0xf1, 0x1a, 0x39, - 0x93, 0x21, 0x40, 0x56, 0xea, 0x3b, 0x74, 0x1d, 0xeb, 0xa1, 0xdd, 0x74, 0x23, 0x91, 0x12, 0x46, - 0x46, 0x53, 0x1e, 0x0c, 0xfc, 0x8b, 0x9f, 0xe3, 0x37, 0x01, 0xe3, 0x5b, 0x32, 0x64, 0x1b, 0x92, - 0x57, 0x30, 0xe1, 0xbe, 0xcb, 0x6f, 0x4f, 0xcf, 0x8e, 0xd5, 0xbf, 0x27, 0xa9, 0xd5, 0xd7, 0xe9, - 0x7a, 0x71, 0xa5, 0x38, 0x58, 0x2b, 0xca, 0x39, 0xec, 0x2d, 0x2d, 0x31, 0x12, 0x2f, 0x68, 0x69, - 0x73, 0x4d, 0x45, 0x30, 0xf4, 0x06, 0xdb, 0xb1, 0x8c, 0x60, 0xda, 0x47, 0x5d, 0x71, 0x30, 0xf2, - 0xd4, 0x66, 0x74, 0x79, 0xf0, 0xde, 0x86, 0xe2, 0xa3, 0x0d, 0xc5, 0x67, 0x1b, 0x8a, 0xd7, 0xaf, - 0x70, 0xe7, 0x7e, 0xdc, 
0xbb, 0x7c, 0x07, 0x00, 0x00, 0xff, 0xff, 0x56, 0xe9, 0xf9, 0xae, 0xad, - 0x01, 0x00, 0x00, + // 278 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xb1, 0x4e, 0xf3, 0x30, + 0x14, 0x85, 0x7f, 0xb7, 0x95, 0xda, 0xff, 0xb6, 0x12, 0xc8, 0x53, 0x60, 0x08, 0x55, 0xa6, 0xb2, + 0xd8, 0x12, 0x2c, 0x4c, 0x0c, 0x48, 0x19, 0x59, 0x22, 0x60, 0x60, 0xb3, 0x12, 0x2b, 0x58, 0xa1, + 0xd7, 0x96, 0x73, 0xab, 0xd0, 0x37, 0xe1, 0x29, 0x78, 0x0e, 0x46, 0x1e, 0x01, 0x85, 0x17, 0x41, + 0x31, 0x69, 0x15, 0x55, 0x42, 0x6c, 0x47, 0xc7, 0xe7, 0x3b, 0x3e, 0x17, 0x64, 0x75, 0x55, 0x0b, + 0x63, 0xa5, 0x72, 0x66, 0xad, 0xf2, 0x27, 0x83, 0xda, 0x6f, 0xa5, 0xab, 0x4a, 0xe9, 0x37, 0x48, + 0x66, 0xad, 0x65, 0xa9, 0x51, 0x7b, 0x45, 0xba, 0x10, 0xce, 0x5b, 0xb2, 0xfc, 0xec, 0x07, 0x10, + 0x43, 0x40, 0xb8, 0xaa, 0x14, 0x3d, 0x70, 0x7a, 0xf9, 0x5b, 0xe3, 0x86, 0xcc, 0xb3, 0x34, 0x48, + 0x35, 0xf9, 0xc3, 0xd6, 0x64, 0x09, 0x8b, 0x4c, 0x35, 0xe9, 0x0b, 0x69, 0xac, 0x8d, 0x45, 0x7e, + 0x0c, 0x63, 0xaf, 0x9a, 0x88, 0x2d, 0xd9, 0x6a, 0x91, 0x75, 0x32, 0xb9, 0x86, 0xd9, 0xdd, 0xd6, + 0xe9, 0x5b, 0x4d, 0x8a, 0xc7, 0x00, 0xca, 0x99, 0x07, 0xed, 0xbb, 0x6c, 0x08, 0xfd, 0xcf, 0x06, + 0x0e, 0xe7, 0x30, 0xa9, 0x0c, 0x16, 0xd1, 0x28, 0xbc, 0x04, 0x9d, 0xbc, 0x31, 0x98, 0xde, 0x63, + 0x85, 0xb6, 0x41, 0x9e, 0xc2, 0x8c, 0xfa, 0xae, 0x40, 0xcf, 0x2f, 0xce, 0xc5, 0x1f, 0x67, 0x89, + 0xdd, 0xe7, 0xd9, 0x1e, 0xdd, 0x8d, 0x1c, 0xed, 0x47, 0xf2, 0x15, 0x1c, 0xe5, 0x16, 0x49, 0x23, + 0xa5, 0x98, 0xdb, 0xc2, 0x60, 0x19, 0x8d, 0xc3, 0x86, 0x43, 0x9b, 0x2f, 0x61, 0xde, 0x5b, 0x5d, + 0x71, 0x34, 0x09, 0xa9, 0xa1, 0x75, 0x73, 0xf2, 0xde, 0xc6, 0xec, 0xa3, 0x8d, 0xd9, 0x67, 0x1b, + 0xb3, 0xd7, 0xaf, 0xf8, 0xdf, 0xe3, 0xb4, 0xdf, 0xf2, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x60, 0x1f, + 0x94, 0x77, 0xb5, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go index 6880ee2b..83bda636 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/runtime/schema/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/apimachinery/pkg/runtime/schema/generated.proto /* Package schema is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/runtime/schema/generated.proto + k8s.io/apimachinery/pkg/runtime/schema/generated.proto It has these top-level messages: */ @@ -29,18 +28,18 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package func init() { - proto.RegisterFile("github.com/ericchiang/k8s/runtime/schema/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 136 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0xc9, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0xc8, - 0x4e, 0xd7, 0x2f, 0x2a, 0xcd, 0x2b, 0xc9, 0xcc, 0x4d, 0xd5, 0x2f, 0x4e, 0xce, 0x48, 0xcd, 0x4d, - 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x52, 0x81, 0xe8, 0xd2, 0x43, 0xe8, 0xd2, 0x2b, 0xc8, 0x4e, 0xd7, 0x83, 0xea, 0xd2, 0x83, - 0xe8, 0x92, 0x32, 0xc4, 0x6e, 0x76, 0x69, 0x49, 0x66, 0x8e, 0x7e, 0x66, 0x5e, 0x49, 0x71, 0x49, - 0x11, 0xba, 0xc1, 0x4e, 0x12, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, - 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x51, 0x6c, 0x10, 0xc3, 0x00, 0x01, 0x00, 0x00, 0xff, 0xff, - 0xea, 0x33, 0x0e, 0xbb, 0xa9, 0x00, 0x00, 0x00, + // 138 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xcb, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2a, 0xcd, 0x2b, 0xc9, 0xcc, 0x4d, 0xd5, 0x2f, 0x4e, 0xce, 0x48, + 0xcd, 0x4d, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x52, 0x83, 0xe8, 0xd3, 0x43, 0xd6, 0xa7, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd5, + 0xa7, 0x07, 0xd1, 0x27, 0x65, 0x8c, 0xcb, 0xfc, 0xd2, 0x92, 0xcc, 0x1c, 0xfd, 0xcc, 0xbc, 0x92, + 0xe2, 0x92, 0x22, 0x74, 0xc3, 0x9d, 0x24, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, + 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0xa2, 0xd8, 0x20, 0xc6, 0x01, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xf9, 0x8e, 0xdb, 0x42, 0xaf, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/tprs.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/tprs.go deleted file mode 100644 index b204e863..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/tprs.go +++ /dev/null @@ -1,166 +0,0 @@ -package k8s - -import ( - "context" - "errors" - - "github.com/ericchiang/k8s/api/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" -) - -// ThirdPartyResources is a client used for interacting with user -// defined API groups. It uses JSON encoding instead of protobufs -// which are unsupported for these APIs. -// -// Users are expected to define their own third party resources. -// -// const metricsResource = "metrics" -// -// // First, define a third party resources with TypeMeta -// // and ObjectMeta fields. -// type Metric struct { -// *unversioned.TypeMeta `json:",inline"` -// *v1.ObjectMeta `json:"metadata,omitempty"` -// -// Timestamp time.Time `json:"timestamp"` -// Value []byte `json:"value"` -// } -// -// // Define a list wrapper. 
-// type MetricsList struct { -// *unversioned.TypeMeta `json:",inline"` -// *unversioned.ListMeta `json:"metadata,omitempty"` -// -// Items []Metric `json:"items"` -// } -// -// Register the new resource by creating a ThirdPartyResource type. -// -// // Create a ThirdPartyResource -// tpr := &v1beta1.ThirdPartyResource{ -// Metadata: &v1.ObjectMeta{ -// Name: k8s.String("metric.metrics.example.com"), -// }, -// Description: k8s.String("A custom third party resource"), -// Versions: []*v1beta1.APIVersion{ -// {Name: k8s.String("v1")}, -// }, -// } -// _, err := client.ExtensionsV1Beta1().CreateThirdPartyResource(ctx, trp) -// if err != nil { -// // handle error -// } -// -// After creating the resource type, create a ThirdPartyResources client then -// use interact with it like any other API group. For example to create a third -// party resource: -// -// metricsClient := client.ThirdPartyResources("metrics.example.com", "v1") -// -// metric := &Metric{ -// ObjectMeta: &v1.ObjectMeta{ -// Name: k8s.String("foo"), -// }, -// Timestamp: time.Now(), -// Value: 42, -// } -// -// err = metricsClient.Create(ctx, metricsResource, client.Namespace, metric, metric) -// if err != nil { -// // handle error -// } -// -// List a set of third party resources: -// -// var metrics MetricsList -// metricsClient.List(ctx, metricsResource, &metrics) -// -// Or delete: -// -// tprClient.Delete(ctx, metricsResource, client.Namespace, *metric.Name) -// -type ThirdPartyResources struct { - c *Client - - apiGroup string - apiVersion string -} - -// ThirdPartyResources returns a client for interacting with a ThirdPartyResource -// API group. -func (c *Client) ThirdPartyResources(apiGroup, apiVersion string) *ThirdPartyResources { - return &ThirdPartyResources{c, apiGroup, apiVersion} -} - -func checkResource(apiGroup, apiVersion, resource, namespace, name string) error { - if apiGroup == "" { - return errors.New("no api group provided") - } - if apiVersion == "" { - return errors.New("no api version provided") - } - if resource == "" { - return errors.New("no resource version provided") - } - if namespace == "" { - return errors.New("no namespace provided") - } - if name == "" { - return errors.New("no resource name provided") - } - return nil -} - -// object and after16Object are used by go/types to detect types that are likely -// to be Kubernetes resources. Types that implement this resources are likely -// resource. -// -// They're defined here but only used in gen.go. -type object interface { - GetMetadata() *v1.ObjectMeta -} - -// after16Object uses the new ObjectMeta's home. 
-type after16Object interface { - GetMetadata() *metav1.ObjectMeta -} - -func (t *ThirdPartyResources) Create(ctx context.Context, resource, namespace string, req, resp interface{}) error { - if err := checkResource(t.apiGroup, t.apiVersion, resource, namespace, "not required"); err != nil { - return err - } - url := t.c.urlFor(t.apiGroup, t.apiVersion, namespace, resource, "") - return t.c.create(ctx, jsonCodec, "POST", url, req, resp) -} - -func (t *ThirdPartyResources) Update(ctx context.Context, resource, namespace, name string, req, resp interface{}) error { - if err := checkResource(t.apiGroup, t.apiVersion, resource, namespace, "not required"); err != nil { - return err - } - url := t.c.urlFor(t.apiGroup, t.apiVersion, namespace, resource, name) - return t.c.create(ctx, jsonCodec, "PUT", url, req, resp) -} - -func (t *ThirdPartyResources) Get(ctx context.Context, resource, namespace, name string, resp interface{}) error { - if err := checkResource(t.apiGroup, t.apiVersion, resource, namespace, name); err != nil { - return err - } - url := t.c.urlFor(t.apiGroup, t.apiVersion, namespace, resource, name) - return t.c.get(ctx, jsonCodec, url, resp) -} - -func (t *ThirdPartyResources) Delete(ctx context.Context, resource, namespace, name string) error { - if err := checkResource(t.apiGroup, t.apiVersion, resource, namespace, name); err != nil { - return err - } - url := t.c.urlFor(t.apiGroup, t.apiVersion, namespace, resource, name) - return t.c.delete(ctx, jsonCodec, url) -} - -func (t *ThirdPartyResources) List(ctx context.Context, resource, namespace string, resp interface{}) error { - if err := checkResource(t.apiGroup, t.apiVersion, resource, namespace, "name not required"); err != nil { - return err - } - url := t.c.urlFor(t.apiGroup, t.apiVersion, namespace, resource, "") - return t.c.get(ctx, jsonCodec, url, resp) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/types.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/types.go deleted file mode 100644 index 6ba0b7a0..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/types.go +++ /dev/null @@ -1,7091 +0,0 @@ -package k8s - -import ( - "context" - "fmt" - - apiv1 "github.com/ericchiang/k8s/api/v1" - appsv1alpha1 "github.com/ericchiang/k8s/apis/apps/v1alpha1" - appsv1beta1 "github.com/ericchiang/k8s/apis/apps/v1beta1" - authenticationv1 "github.com/ericchiang/k8s/apis/authentication/v1" - authenticationv1beta1 "github.com/ericchiang/k8s/apis/authentication/v1beta1" - authorizationv1 "github.com/ericchiang/k8s/apis/authorization/v1" - authorizationv1beta1 "github.com/ericchiang/k8s/apis/authorization/v1beta1" - autoscalingv1 "github.com/ericchiang/k8s/apis/autoscaling/v1" - autoscalingv2alpha1 "github.com/ericchiang/k8s/apis/autoscaling/v2alpha1" - batchv1 "github.com/ericchiang/k8s/apis/batch/v1" - batchv2alpha1 "github.com/ericchiang/k8s/apis/batch/v2alpha1" - certificatesv1alpha1 "github.com/ericchiang/k8s/apis/certificates/v1alpha1" - certificatesv1beta1 "github.com/ericchiang/k8s/apis/certificates/v1beta1" - extensionsv1beta1 "github.com/ericchiang/k8s/apis/extensions/v1beta1" - imagepolicyv1alpha1 "github.com/ericchiang/k8s/apis/imagepolicy/v1alpha1" - policyv1alpha1 "github.com/ericchiang/k8s/apis/policy/v1alpha1" - policyv1beta1 "github.com/ericchiang/k8s/apis/policy/v1beta1" - rbacv1alpha1 "github.com/ericchiang/k8s/apis/rbac/v1alpha1" - rbacv1beta1 "github.com/ericchiang/k8s/apis/rbac/v1beta1" - settingsv1alpha1 
"github.com/ericchiang/k8s/apis/settings/v1alpha1" - storagev1 "github.com/ericchiang/k8s/apis/storage/v1" - storagev1beta1 "github.com/ericchiang/k8s/apis/storage/v1beta1" - "github.com/ericchiang/k8s/watch/versioned" - "github.com/golang/protobuf/proto" -) - -// CoreV1 returns a client for interacting with the /v1 API group. -func (c *Client) CoreV1() *CoreV1 { - return &CoreV1{c} -} - -// CoreV1 is a client for interacting with the /v1 API group. -type CoreV1 struct { - client *Client -} - -func (c *CoreV1) CreateBinding(ctx context.Context, obj *apiv1.Binding) (*apiv1.Binding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "bindings", "") - resp := new(apiv1.Binding) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateBinding(ctx context.Context, obj *apiv1.Binding) (*apiv1.Binding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "bindings", *md.Name) - resp := new(apiv1.Binding) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteBinding(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "bindings", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetBinding(ctx context.Context, name, namespace string) (*apiv1.Binding, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "bindings", name) - resp := new(apiv1.Binding) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateComponentStatus(ctx context.Context, obj *apiv1.ComponentStatus) (*apiv1.ComponentStatus, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "componentstatuses", "") - resp := new(apiv1.ComponentStatus) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateComponentStatus(ctx context.Context, obj *apiv1.ComponentStatus) (*apiv1.ComponentStatus, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given 
object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "componentstatuses", *md.Name) - resp := new(apiv1.ComponentStatus) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteComponentStatus(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "componentstatuses", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetComponentStatus(ctx context.Context, name string) (*apiv1.ComponentStatus, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "componentstatuses", name) - resp := new(apiv1.ComponentStatus) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1ComponentStatusWatcher struct { - watcher *watcher -} - -func (w *CoreV1ComponentStatusWatcher) Next() (*versioned.Event, *apiv1.ComponentStatus, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.ComponentStatus) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1ComponentStatusWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchComponentStatuses(ctx context.Context, options ...Option) (*CoreV1ComponentStatusWatcher, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "componentstatuses", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1ComponentStatusWatcher{watcher}, nil -} - -func (c *CoreV1) ListComponentStatuses(ctx context.Context, options ...Option) (*apiv1.ComponentStatusList, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "componentstatuses", "", options...) 
- resp := new(apiv1.ComponentStatusList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateConfigMap(ctx context.Context, obj *apiv1.ConfigMap) (*apiv1.ConfigMap, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "configmaps", "") - resp := new(apiv1.ConfigMap) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateConfigMap(ctx context.Context, obj *apiv1.ConfigMap) (*apiv1.ConfigMap, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "configmaps", *md.Name) - resp := new(apiv1.ConfigMap) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteConfigMap(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "configmaps", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetConfigMap(ctx context.Context, name, namespace string) (*apiv1.ConfigMap, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "configmaps", name) - resp := new(apiv1.ConfigMap) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1ConfigMapWatcher struct { - watcher *watcher -} - -func (w *CoreV1ConfigMapWatcher) Next() (*versioned.Event, *apiv1.ConfigMap, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.ConfigMap) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1ConfigMapWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchConfigMaps(ctx context.Context, namespace string, options ...Option) (*CoreV1ConfigMapWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "configmaps", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1ConfigMapWatcher{watcher}, nil -} - -func (c *CoreV1) ListConfigMaps(ctx context.Context, namespace string, options ...Option) (*apiv1.ConfigMapList, error) { - url := c.client.urlFor("", "v1", namespace, "configmaps", "", options...) 
- resp := new(apiv1.ConfigMapList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateEndpoints(ctx context.Context, obj *apiv1.Endpoints) (*apiv1.Endpoints, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "endpoints", "") - resp := new(apiv1.Endpoints) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateEndpoints(ctx context.Context, obj *apiv1.Endpoints) (*apiv1.Endpoints, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "endpoints", *md.Name) - resp := new(apiv1.Endpoints) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteEndpoints(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "endpoints", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetEndpoints(ctx context.Context, name, namespace string) (*apiv1.Endpoints, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "endpoints", name) - resp := new(apiv1.Endpoints) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1EndpointsWatcher struct { - watcher *watcher -} - -func (w *CoreV1EndpointsWatcher) Next() (*versioned.Event, *apiv1.Endpoints, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.Endpoints) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1EndpointsWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchEndpoints(ctx context.Context, namespace string, options ...Option) (*CoreV1EndpointsWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "endpoints", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1EndpointsWatcher{watcher}, nil -} - -func (c *CoreV1) ListEndpoints(ctx context.Context, namespace string, options ...Option) (*apiv1.EndpointsList, error) { - url := c.client.urlFor("", "v1", namespace, "endpoints", "", options...) 
- resp := new(apiv1.EndpointsList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateEvent(ctx context.Context, obj *apiv1.Event) (*apiv1.Event, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "events", "") - resp := new(apiv1.Event) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateEvent(ctx context.Context, obj *apiv1.Event) (*apiv1.Event, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "events", *md.Name) - resp := new(apiv1.Event) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteEvent(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "events", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetEvent(ctx context.Context, name, namespace string) (*apiv1.Event, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "events", name) - resp := new(apiv1.Event) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1EventWatcher struct { - watcher *watcher -} - -func (w *CoreV1EventWatcher) Next() (*versioned.Event, *apiv1.Event, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.Event) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1EventWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchEvents(ctx context.Context, namespace string, options ...Option) (*CoreV1EventWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "events", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1EventWatcher{watcher}, nil -} - -func (c *CoreV1) ListEvents(ctx context.Context, namespace string, options ...Option) (*apiv1.EventList, error) { - url := c.client.urlFor("", "v1", namespace, "events", "", options...) 
- resp := new(apiv1.EventList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateLimitRange(ctx context.Context, obj *apiv1.LimitRange) (*apiv1.LimitRange, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "limitranges", "") - resp := new(apiv1.LimitRange) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateLimitRange(ctx context.Context, obj *apiv1.LimitRange) (*apiv1.LimitRange, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "limitranges", *md.Name) - resp := new(apiv1.LimitRange) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteLimitRange(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "limitranges", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetLimitRange(ctx context.Context, name, namespace string) (*apiv1.LimitRange, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "limitranges", name) - resp := new(apiv1.LimitRange) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1LimitRangeWatcher struct { - watcher *watcher -} - -func (w *CoreV1LimitRangeWatcher) Next() (*versioned.Event, *apiv1.LimitRange, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.LimitRange) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1LimitRangeWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchLimitRanges(ctx context.Context, namespace string, options ...Option) (*CoreV1LimitRangeWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "limitranges", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1LimitRangeWatcher{watcher}, nil -} - -func (c *CoreV1) ListLimitRanges(ctx context.Context, namespace string, options ...Option) (*apiv1.LimitRangeList, error) { - url := c.client.urlFor("", "v1", namespace, "limitranges", "", options...) 
- resp := new(apiv1.LimitRangeList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateNamespace(ctx context.Context, obj *apiv1.Namespace) (*apiv1.Namespace, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "namespaces", "") - resp := new(apiv1.Namespace) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateNamespace(ctx context.Context, obj *apiv1.Namespace) (*apiv1.Namespace, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "namespaces", *md.Name) - resp := new(apiv1.Namespace) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteNamespace(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "namespaces", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetNamespace(ctx context.Context, name string) (*apiv1.Namespace, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "namespaces", name) - resp := new(apiv1.Namespace) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1NamespaceWatcher struct { - watcher *watcher -} - -func (w *CoreV1NamespaceWatcher) Next() (*versioned.Event, *apiv1.Namespace, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.Namespace) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1NamespaceWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchNamespaces(ctx context.Context, options ...Option) (*CoreV1NamespaceWatcher, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "namespaces", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1NamespaceWatcher{watcher}, nil -} - -func (c *CoreV1) ListNamespaces(ctx context.Context, options ...Option) (*apiv1.NamespaceList, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "namespaces", "", options...) 
- resp := new(apiv1.NamespaceList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateNode(ctx context.Context, obj *apiv1.Node) (*apiv1.Node, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "nodes", "") - resp := new(apiv1.Node) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateNode(ctx context.Context, obj *apiv1.Node) (*apiv1.Node, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "nodes", *md.Name) - resp := new(apiv1.Node) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteNode(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "nodes", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetNode(ctx context.Context, name string) (*apiv1.Node, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "nodes", name) - resp := new(apiv1.Node) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1NodeWatcher struct { - watcher *watcher -} - -func (w *CoreV1NodeWatcher) Next() (*versioned.Event, *apiv1.Node, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.Node) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1NodeWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchNodes(ctx context.Context, options ...Option) (*CoreV1NodeWatcher, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "nodes", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1NodeWatcher{watcher}, nil -} - -func (c *CoreV1) ListNodes(ctx context.Context, options ...Option) (*apiv1.NodeList, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "nodes", "", options...) 
- resp := new(apiv1.NodeList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreatePersistentVolume(ctx context.Context, obj *apiv1.PersistentVolume) (*apiv1.PersistentVolume, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "persistentvolumes", "") - resp := new(apiv1.PersistentVolume) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdatePersistentVolume(ctx context.Context, obj *apiv1.PersistentVolume) (*apiv1.PersistentVolume, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "persistentvolumes", *md.Name) - resp := new(apiv1.PersistentVolume) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeletePersistentVolume(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "persistentvolumes", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetPersistentVolume(ctx context.Context, name string) (*apiv1.PersistentVolume, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", AllNamespaces, "persistentvolumes", name) - resp := new(apiv1.PersistentVolume) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1PersistentVolumeWatcher struct { - watcher *watcher -} - -func (w *CoreV1PersistentVolumeWatcher) Next() (*versioned.Event, *apiv1.PersistentVolume, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.PersistentVolume) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1PersistentVolumeWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchPersistentVolumes(ctx context.Context, options ...Option) (*CoreV1PersistentVolumeWatcher, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "persistentvolumes", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1PersistentVolumeWatcher{watcher}, nil -} - -func (c *CoreV1) ListPersistentVolumes(ctx context.Context, options ...Option) (*apiv1.PersistentVolumeList, error) { - url := c.client.urlFor("", "v1", AllNamespaces, "persistentvolumes", "", options...) 
- resp := new(apiv1.PersistentVolumeList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreatePersistentVolumeClaim(ctx context.Context, obj *apiv1.PersistentVolumeClaim) (*apiv1.PersistentVolumeClaim, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "persistentvolumeclaims", "") - resp := new(apiv1.PersistentVolumeClaim) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdatePersistentVolumeClaim(ctx context.Context, obj *apiv1.PersistentVolumeClaim) (*apiv1.PersistentVolumeClaim, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "persistentvolumeclaims", *md.Name) - resp := new(apiv1.PersistentVolumeClaim) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeletePersistentVolumeClaim(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "persistentvolumeclaims", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetPersistentVolumeClaim(ctx context.Context, name, namespace string) (*apiv1.PersistentVolumeClaim, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "persistentvolumeclaims", name) - resp := new(apiv1.PersistentVolumeClaim) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1PersistentVolumeClaimWatcher struct { - watcher *watcher -} - -func (w *CoreV1PersistentVolumeClaimWatcher) Next() (*versioned.Event, *apiv1.PersistentVolumeClaim, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.PersistentVolumeClaim) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1PersistentVolumeClaimWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchPersistentVolumeClaims(ctx context.Context, namespace string, options ...Option) (*CoreV1PersistentVolumeClaimWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "persistentvolumeclaims", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1PersistentVolumeClaimWatcher{watcher}, nil -} - -func (c *CoreV1) ListPersistentVolumeClaims(ctx context.Context, namespace string, options ...Option) (*apiv1.PersistentVolumeClaimList, error) { - url := c.client.urlFor("", "v1", namespace, "persistentvolumeclaims", "", options...) - resp := new(apiv1.PersistentVolumeClaimList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreatePod(ctx context.Context, obj *apiv1.Pod) (*apiv1.Pod, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "pods", "") - resp := new(apiv1.Pod) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdatePod(ctx context.Context, obj *apiv1.Pod) (*apiv1.Pod, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "pods", *md.Name) - resp := new(apiv1.Pod) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeletePod(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "pods", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetPod(ctx context.Context, name, namespace string) (*apiv1.Pod, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "pods", name) - resp := new(apiv1.Pod) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1PodWatcher struct { - watcher *watcher -} - -func (w *CoreV1PodWatcher) Next() (*versioned.Event, *apiv1.Pod, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.Pod) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1PodWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchPods(ctx context.Context, namespace string, options ...Option) (*CoreV1PodWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "pods", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1PodWatcher{watcher}, nil -} - -func (c *CoreV1) ListPods(ctx context.Context, namespace string, options ...Option) (*apiv1.PodList, error) { - url := c.client.urlFor("", "v1", namespace, "pods", "", options...) 
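The Pod methods above follow the namespaced variant of the same template: Get, Delete, Watch and List take an explicit namespace, and Create/Update require the object's metadata namespace to be set. A short get-then-delete helper as a sketch, again assuming the surrounding package scope and a *CoreV1 obtained elsewhere (client construction is outside this hunk):

func deletePodIfPresent(ctx context.Context, core *CoreV1, name, namespace string) error {
	// GetPod and DeletePod are namespaced, so both take the namespace explicitly.
	if _, err := core.GetPod(ctx, name, namespace); err != nil {
		// The generated methods return plain errors; a caller that wants to
		// treat "not found" as success would have to inspect err here.
		return err
	}
	return core.DeletePod(ctx, name, namespace)
}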
- resp := new(apiv1.PodList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreatePodStatusResult(ctx context.Context, obj *apiv1.PodStatusResult) (*apiv1.PodStatusResult, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "podstatusresults", "") - resp := new(apiv1.PodStatusResult) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdatePodStatusResult(ctx context.Context, obj *apiv1.PodStatusResult) (*apiv1.PodStatusResult, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "podstatusresults", *md.Name) - resp := new(apiv1.PodStatusResult) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeletePodStatusResult(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "podstatusresults", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetPodStatusResult(ctx context.Context, name, namespace string) (*apiv1.PodStatusResult, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "podstatusresults", name) - resp := new(apiv1.PodStatusResult) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreatePodTemplate(ctx context.Context, obj *apiv1.PodTemplate) (*apiv1.PodTemplate, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "podtemplates", "") - resp := new(apiv1.PodTemplate) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdatePodTemplate(ctx context.Context, obj *apiv1.PodTemplate) (*apiv1.PodTemplate, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - 
md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "podtemplates", *md.Name) - resp := new(apiv1.PodTemplate) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeletePodTemplate(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "podtemplates", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetPodTemplate(ctx context.Context, name, namespace string) (*apiv1.PodTemplate, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "podtemplates", name) - resp := new(apiv1.PodTemplate) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1PodTemplateWatcher struct { - watcher *watcher -} - -func (w *CoreV1PodTemplateWatcher) Next() (*versioned.Event, *apiv1.PodTemplate, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.PodTemplate) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1PodTemplateWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchPodTemplates(ctx context.Context, namespace string, options ...Option) (*CoreV1PodTemplateWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "podtemplates", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1PodTemplateWatcher{watcher}, nil -} - -func (c *CoreV1) ListPodTemplates(ctx context.Context, namespace string, options ...Option) (*apiv1.PodTemplateList, error) { - url := c.client.urlFor("", "v1", namespace, "podtemplates", "", options...) 
- resp := new(apiv1.PodTemplateList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreatePodTemplateSpec(ctx context.Context, obj *apiv1.PodTemplateSpec) (*apiv1.PodTemplateSpec, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "podtemplatespecs", "") - resp := new(apiv1.PodTemplateSpec) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdatePodTemplateSpec(ctx context.Context, obj *apiv1.PodTemplateSpec) (*apiv1.PodTemplateSpec, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "podtemplatespecs", *md.Name) - resp := new(apiv1.PodTemplateSpec) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeletePodTemplateSpec(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "podtemplatespecs", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetPodTemplateSpec(ctx context.Context, name, namespace string) (*apiv1.PodTemplateSpec, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "podtemplatespecs", name) - resp := new(apiv1.PodTemplateSpec) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateRangeAllocation(ctx context.Context, obj *apiv1.RangeAllocation) (*apiv1.RangeAllocation, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "rangeallocations", "") - resp := new(apiv1.RangeAllocation) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateRangeAllocation(ctx context.Context, obj *apiv1.RangeAllocation) (*apiv1.RangeAllocation, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no 
resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "rangeallocations", *md.Name) - resp := new(apiv1.RangeAllocation) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteRangeAllocation(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "rangeallocations", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetRangeAllocation(ctx context.Context, name, namespace string) (*apiv1.RangeAllocation, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "rangeallocations", name) - resp := new(apiv1.RangeAllocation) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateReplicationController(ctx context.Context, obj *apiv1.ReplicationController) (*apiv1.ReplicationController, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "replicationcontrollers", "") - resp := new(apiv1.ReplicationController) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateReplicationController(ctx context.Context, obj *apiv1.ReplicationController) (*apiv1.ReplicationController, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "replicationcontrollers", *md.Name) - resp := new(apiv1.ReplicationController) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteReplicationController(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "replicationcontrollers", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetReplicationController(ctx context.Context, name, namespace string) (*apiv1.ReplicationController, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "replicationcontrollers", name) - resp := new(apiv1.ReplicationController) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1ReplicationControllerWatcher struct { - watcher *watcher -} - -func (w *CoreV1ReplicationControllerWatcher) Next() (*versioned.Event, *apiv1.ReplicationController, error) { - event, unknown, err := w.watcher.next() - 
if err != nil { - return nil, nil, err - } - resp := new(apiv1.ReplicationController) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1ReplicationControllerWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchReplicationControllers(ctx context.Context, namespace string, options ...Option) (*CoreV1ReplicationControllerWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "replicationcontrollers", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1ReplicationControllerWatcher{watcher}, nil -} - -func (c *CoreV1) ListReplicationControllers(ctx context.Context, namespace string, options ...Option) (*apiv1.ReplicationControllerList, error) { - url := c.client.urlFor("", "v1", namespace, "replicationcontrollers", "", options...) - resp := new(apiv1.ReplicationControllerList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateResourceQuota(ctx context.Context, obj *apiv1.ResourceQuota) (*apiv1.ResourceQuota, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "resourcequotas", "") - resp := new(apiv1.ResourceQuota) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateResourceQuota(ctx context.Context, obj *apiv1.ResourceQuota) (*apiv1.ResourceQuota, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "resourcequotas", *md.Name) - resp := new(apiv1.ResourceQuota) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteResourceQuota(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "resourcequotas", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetResourceQuota(ctx context.Context, name, namespace string) (*apiv1.ResourceQuota, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "resourcequotas", name) - resp := new(apiv1.ResourceQuota) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1ResourceQuotaWatcher struct { - watcher *watcher -} - -func (w *CoreV1ResourceQuotaWatcher) Next() (*versioned.Event, *apiv1.ResourceQuota, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.ResourceQuota) - if err := 
proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1ResourceQuotaWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchResourceQuotas(ctx context.Context, namespace string, options ...Option) (*CoreV1ResourceQuotaWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "resourcequotas", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1ResourceQuotaWatcher{watcher}, nil -} - -func (c *CoreV1) ListResourceQuotas(ctx context.Context, namespace string, options ...Option) (*apiv1.ResourceQuotaList, error) { - url := c.client.urlFor("", "v1", namespace, "resourcequotas", "", options...) - resp := new(apiv1.ResourceQuotaList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateSecret(ctx context.Context, obj *apiv1.Secret) (*apiv1.Secret, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "secrets", "") - resp := new(apiv1.Secret) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateSecret(ctx context.Context, obj *apiv1.Secret) (*apiv1.Secret, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "secrets", *md.Name) - resp := new(apiv1.Secret) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteSecret(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "secrets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetSecret(ctx context.Context, name, namespace string) (*apiv1.Secret, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "secrets", name) - resp := new(apiv1.Secret) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1SecretWatcher struct { - watcher *watcher -} - -func (w *CoreV1SecretWatcher) Next() (*versioned.Event, *apiv1.Secret, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.Secret) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1SecretWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchSecrets(ctx context.Context, namespace string, options ...Option) (*CoreV1SecretWatcher, error) { - url 
:= c.client.urlFor("", "v1", namespace, "secrets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1SecretWatcher{watcher}, nil -} - -func (c *CoreV1) ListSecrets(ctx context.Context, namespace string, options ...Option) (*apiv1.SecretList, error) { - url := c.client.urlFor("", "v1", namespace, "secrets", "", options...) - resp := new(apiv1.SecretList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateService(ctx context.Context, obj *apiv1.Service) (*apiv1.Service, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "services", "") - resp := new(apiv1.Service) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateService(ctx context.Context, obj *apiv1.Service) (*apiv1.Service, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "services", *md.Name) - resp := new(apiv1.Service) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteService(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "services", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetService(ctx context.Context, name, namespace string) (*apiv1.Service, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "services", name) - resp := new(apiv1.Service) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1ServiceWatcher struct { - watcher *watcher -} - -func (w *CoreV1ServiceWatcher) Next() (*versioned.Event, *apiv1.Service, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.Service) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1ServiceWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchServices(ctx context.Context, namespace string, options ...Option) (*CoreV1ServiceWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "services", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1ServiceWatcher{watcher}, nil -} - -func (c *CoreV1) ListServices(ctx context.Context, namespace string, options ...Option) (*apiv1.ServiceList, error) { - url := c.client.urlFor("", "v1", namespace, "services", "", options...) - resp := new(apiv1.ServiceList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) CreateServiceAccount(ctx context.Context, obj *apiv1.ServiceAccount) (*apiv1.ServiceAccount, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", ns, "serviceaccounts", "") - resp := new(apiv1.ServiceAccount) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) UpdateServiceAccount(ctx context.Context, obj *apiv1.ServiceAccount) (*apiv1.ServiceAccount, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("", "v1", *md.Namespace, "serviceaccounts", *md.Name) - resp := new(apiv1.ServiceAccount) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CoreV1) DeleteServiceAccount(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "serviceaccounts", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CoreV1) GetServiceAccount(ctx context.Context, name, namespace string) (*apiv1.ServiceAccount, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("", "v1", namespace, "serviceaccounts", name) - resp := new(apiv1.ServiceAccount) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CoreV1ServiceAccountWatcher struct { - watcher *watcher -} - -func (w *CoreV1ServiceAccountWatcher) Next() (*versioned.Event, *apiv1.ServiceAccount, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(apiv1.ServiceAccount) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CoreV1ServiceAccountWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CoreV1) WatchServiceAccounts(ctx context.Context, namespace string, options ...Option) (*CoreV1ServiceAccountWatcher, error) { - url := c.client.urlFor("", "v1", namespace, "serviceaccounts", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CoreV1ServiceAccountWatcher{watcher}, nil -} - -func (c *CoreV1) ListServiceAccounts(ctx context.Context, namespace string, options ...Option) (*apiv1.ServiceAccountList, error) { - url := c.client.urlFor("", "v1", namespace, "serviceaccounts", "", options...) - resp := new(apiv1.ServiceAccountList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AppsV1Alpha1 returns a client for interacting with the apps/v1alpha1 API group. -func (c *Client) AppsV1Alpha1() *AppsV1Alpha1 { - return &AppsV1Alpha1{c} -} - -// AppsV1Alpha1 is a client for interacting with the apps/v1alpha1 API group. -type AppsV1Alpha1 struct { - client *Client -} - -func (c *AppsV1Alpha1) CreatePetSet(ctx context.Context, obj *appsv1alpha1.PetSet) (*appsv1alpha1.PetSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", "v1alpha1", ns, "petsets", "") - resp := new(appsv1alpha1.PetSet) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Alpha1) UpdatePetSet(ctx context.Context, obj *appsv1alpha1.PetSet) (*appsv1alpha1.PetSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", "v1alpha1", *md.Namespace, "petsets", *md.Name) - resp := new(appsv1alpha1.PetSet) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Alpha1) DeletePetSet(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1alpha1", namespace, "petsets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AppsV1Alpha1) GetPetSet(ctx context.Context, name, namespace string) (*appsv1alpha1.PetSet, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1alpha1", namespace, "petsets", name) - resp := new(appsv1alpha1.PetSet) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type AppsV1Alpha1PetSetWatcher struct { - watcher *watcher -} - -func (w *AppsV1Alpha1PetSetWatcher) Next() (*versioned.Event, *appsv1alpha1.PetSet, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(appsv1alpha1.PetSet) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *AppsV1Alpha1PetSetWatcher) Close() error { - return w.watcher.Close() -} - -func (c *AppsV1Alpha1) WatchPetSets(ctx context.Context, namespace string, options 
...Option) (*AppsV1Alpha1PetSetWatcher, error) { - url := c.client.urlFor("apps", "v1alpha1", namespace, "petsets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &AppsV1Alpha1PetSetWatcher{watcher}, nil -} - -func (c *AppsV1Alpha1) ListPetSets(ctx context.Context, namespace string, options ...Option) (*appsv1alpha1.PetSetList, error) { - url := c.client.urlFor("apps", "v1alpha1", namespace, "petsets", "", options...) - resp := new(appsv1alpha1.PetSetList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AppsV1Beta1 returns a client for interacting with the apps/v1beta1 API group. -func (c *Client) AppsV1Beta1() *AppsV1Beta1 { - return &AppsV1Beta1{c} -} - -// AppsV1Beta1 is a client for interacting with the apps/v1beta1 API group. -type AppsV1Beta1 struct { - client *Client -} - -func (c *AppsV1Beta1) CreateDeployment(ctx context.Context, obj *appsv1beta1.Deployment) (*appsv1beta1.Deployment, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", "v1beta1", ns, "deployments", "") - resp := new(appsv1beta1.Deployment) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) UpdateDeployment(ctx context.Context, obj *appsv1beta1.Deployment) (*appsv1beta1.Deployment, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", "v1beta1", *md.Namespace, "deployments", *md.Name) - resp := new(appsv1beta1.Deployment) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) DeleteDeployment(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1beta1", namespace, "deployments", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AppsV1Beta1) GetDeployment(ctx context.Context, name, namespace string) (*appsv1beta1.Deployment, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1beta1", namespace, "deployments", name) - resp := new(appsv1beta1.Deployment) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type AppsV1Beta1DeploymentWatcher struct { - watcher *watcher -} - -func (w *AppsV1Beta1DeploymentWatcher) Next() (*versioned.Event, *appsv1beta1.Deployment, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(appsv1beta1.Deployment) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, 
resp, nil -} - -func (w *AppsV1Beta1DeploymentWatcher) Close() error { - return w.watcher.Close() -} - -func (c *AppsV1Beta1) WatchDeployments(ctx context.Context, namespace string, options ...Option) (*AppsV1Beta1DeploymentWatcher, error) { - url := c.client.urlFor("apps", "v1beta1", namespace, "deployments", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &AppsV1Beta1DeploymentWatcher{watcher}, nil -} - -func (c *AppsV1Beta1) ListDeployments(ctx context.Context, namespace string, options ...Option) (*appsv1beta1.DeploymentList, error) { - url := c.client.urlFor("apps", "v1beta1", namespace, "deployments", "", options...) - resp := new(appsv1beta1.DeploymentList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) CreateScale(ctx context.Context, obj *appsv1beta1.Scale) (*appsv1beta1.Scale, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", "v1beta1", ns, "scales", "") - resp := new(appsv1beta1.Scale) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) UpdateScale(ctx context.Context, obj *appsv1beta1.Scale) (*appsv1beta1.Scale, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", "v1beta1", *md.Namespace, "scales", *md.Name) - resp := new(appsv1beta1.Scale) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) DeleteScale(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1beta1", namespace, "scales", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AppsV1Beta1) GetScale(ctx context.Context, name, namespace string) (*appsv1beta1.Scale, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1beta1", namespace, "scales", name) - resp := new(appsv1beta1.Scale) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) CreateStatefulSet(ctx context.Context, obj *appsv1beta1.StatefulSet) (*appsv1beta1.StatefulSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", 
"v1beta1", ns, "statefulsets", "") - resp := new(appsv1beta1.StatefulSet) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) UpdateStatefulSet(ctx context.Context, obj *appsv1beta1.StatefulSet) (*appsv1beta1.StatefulSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("apps", "v1beta1", *md.Namespace, "statefulsets", *md.Name) - resp := new(appsv1beta1.StatefulSet) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AppsV1Beta1) DeleteStatefulSet(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1beta1", namespace, "statefulsets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AppsV1Beta1) GetStatefulSet(ctx context.Context, name, namespace string) (*appsv1beta1.StatefulSet, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("apps", "v1beta1", namespace, "statefulsets", name) - resp := new(appsv1beta1.StatefulSet) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type AppsV1Beta1StatefulSetWatcher struct { - watcher *watcher -} - -func (w *AppsV1Beta1StatefulSetWatcher) Next() (*versioned.Event, *appsv1beta1.StatefulSet, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(appsv1beta1.StatefulSet) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *AppsV1Beta1StatefulSetWatcher) Close() error { - return w.watcher.Close() -} - -func (c *AppsV1Beta1) WatchStatefulSets(ctx context.Context, namespace string, options ...Option) (*AppsV1Beta1StatefulSetWatcher, error) { - url := c.client.urlFor("apps", "v1beta1", namespace, "statefulsets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &AppsV1Beta1StatefulSetWatcher{watcher}, nil -} - -func (c *AppsV1Beta1) ListStatefulSets(ctx context.Context, namespace string, options ...Option) (*appsv1beta1.StatefulSetList, error) { - url := c.client.urlFor("apps", "v1beta1", namespace, "statefulsets", "", options...) - resp := new(appsv1beta1.StatefulSetList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AuthenticationV1 returns a client for interacting with the authentication.k8s.io/v1 API group. -func (c *Client) AuthenticationV1() *AuthenticationV1 { - return &AuthenticationV1{c} -} - -// AuthenticationV1 is a client for interacting with the authentication.k8s.io/v1 API group. 
-type AuthenticationV1 struct { - client *Client -} - -func (c *AuthenticationV1) CreateTokenReview(ctx context.Context, obj *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authentication.k8s.io", "v1", ns, "tokenreviews", "") - resp := new(authenticationv1.TokenReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthenticationV1) UpdateTokenReview(ctx context.Context, obj *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authentication.k8s.io", "v1", *md.Namespace, "tokenreviews", *md.Name) - resp := new(authenticationv1.TokenReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthenticationV1) DeleteTokenReview(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authentication.k8s.io", "v1", AllNamespaces, "tokenreviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AuthenticationV1) GetTokenReview(ctx context.Context, name string) (*authenticationv1.TokenReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authentication.k8s.io", "v1", AllNamespaces, "tokenreviews", name) - resp := new(authenticationv1.TokenReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AuthenticationV1Beta1 returns a client for interacting with the authentication.k8s.io/v1beta1 API group. -func (c *Client) AuthenticationV1Beta1() *AuthenticationV1Beta1 { - return &AuthenticationV1Beta1{c} -} - -// AuthenticationV1Beta1 is a client for interacting with the authentication.k8s.io/v1beta1 API group. 
-type AuthenticationV1Beta1 struct { - client *Client -} - -func (c *AuthenticationV1Beta1) CreateTokenReview(ctx context.Context, obj *authenticationv1beta1.TokenReview) (*authenticationv1beta1.TokenReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authentication.k8s.io", "v1beta1", ns, "tokenreviews", "") - resp := new(authenticationv1beta1.TokenReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthenticationV1Beta1) UpdateTokenReview(ctx context.Context, obj *authenticationv1beta1.TokenReview) (*authenticationv1beta1.TokenReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authentication.k8s.io", "v1beta1", *md.Namespace, "tokenreviews", *md.Name) - resp := new(authenticationv1beta1.TokenReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthenticationV1Beta1) DeleteTokenReview(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authentication.k8s.io", "v1beta1", AllNamespaces, "tokenreviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AuthenticationV1Beta1) GetTokenReview(ctx context.Context, name string) (*authenticationv1beta1.TokenReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authentication.k8s.io", "v1beta1", AllNamespaces, "tokenreviews", name) - resp := new(authenticationv1beta1.TokenReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AuthorizationV1 returns a client for interacting with the authorization.k8s.io/v1 API group. -func (c *Client) AuthorizationV1() *AuthorizationV1 { - return &AuthorizationV1{c} -} - -// AuthorizationV1 is a client for interacting with the authorization.k8s.io/v1 API group. 
-type AuthorizationV1 struct { - client *Client -} - -func (c *AuthorizationV1) CreateLocalSubjectAccessReview(ctx context.Context, obj *authorizationv1.LocalSubjectAccessReview) (*authorizationv1.LocalSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1", ns, "localsubjectaccessreviews", "") - resp := new(authorizationv1.LocalSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) UpdateLocalSubjectAccessReview(ctx context.Context, obj *authorizationv1.LocalSubjectAccessReview) (*authorizationv1.LocalSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1", *md.Namespace, "localsubjectaccessreviews", *md.Name) - resp := new(authorizationv1.LocalSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) DeleteLocalSubjectAccessReview(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1", namespace, "localsubjectaccessreviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AuthorizationV1) GetLocalSubjectAccessReview(ctx context.Context, name, namespace string) (*authorizationv1.LocalSubjectAccessReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1", namespace, "localsubjectaccessreviews", name) - resp := new(authorizationv1.LocalSubjectAccessReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) CreateSelfSubjectAccessReview(ctx context.Context, obj *authorizationv1.SelfSubjectAccessReview) (*authorizationv1.SelfSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1", ns, "selfsubjectaccessreviews", "") - resp := new(authorizationv1.SelfSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) UpdateSelfSubjectAccessReview(ctx context.Context, obj 
*authorizationv1.SelfSubjectAccessReview) (*authorizationv1.SelfSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1", *md.Namespace, "selfsubjectaccessreviews", *md.Name) - resp := new(authorizationv1.SelfSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) DeleteSelfSubjectAccessReview(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1", AllNamespaces, "selfsubjectaccessreviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AuthorizationV1) GetSelfSubjectAccessReview(ctx context.Context, name string) (*authorizationv1.SelfSubjectAccessReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1", AllNamespaces, "selfsubjectaccessreviews", name) - resp := new(authorizationv1.SelfSubjectAccessReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) CreateSubjectAccessReview(ctx context.Context, obj *authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1", ns, "subjectaccessreviews", "") - resp := new(authorizationv1.SubjectAccessReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) UpdateSubjectAccessReview(ctx context.Context, obj *authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1", *md.Namespace, "subjectaccessreviews", *md.Name) - resp := new(authorizationv1.SubjectAccessReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1) DeleteSubjectAccessReview(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1", AllNamespaces, "subjectaccessreviews", name) - return c.client.delete(ctx, 
pbCodec, url) -} - -func (c *AuthorizationV1) GetSubjectAccessReview(ctx context.Context, name string) (*authorizationv1.SubjectAccessReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1", AllNamespaces, "subjectaccessreviews", name) - resp := new(authorizationv1.SubjectAccessReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AuthorizationV1Beta1 returns a client for interacting with the authorization.k8s.io/v1beta1 API group. -func (c *Client) AuthorizationV1Beta1() *AuthorizationV1Beta1 { - return &AuthorizationV1Beta1{c} -} - -// AuthorizationV1Beta1 is a client for interacting with the authorization.k8s.io/v1beta1 API group. -type AuthorizationV1Beta1 struct { - client *Client -} - -func (c *AuthorizationV1Beta1) CreateLocalSubjectAccessReview(ctx context.Context, obj *authorizationv1beta1.LocalSubjectAccessReview) (*authorizationv1beta1.LocalSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", ns, "localsubjectaccessreviews", "") - resp := new(authorizationv1beta1.LocalSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) UpdateLocalSubjectAccessReview(ctx context.Context, obj *authorizationv1beta1.LocalSubjectAccessReview) (*authorizationv1beta1.LocalSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", *md.Namespace, "localsubjectaccessreviews", *md.Name) - resp := new(authorizationv1beta1.LocalSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) DeleteLocalSubjectAccessReview(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", namespace, "localsubjectaccessreviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AuthorizationV1Beta1) GetLocalSubjectAccessReview(ctx context.Context, name, namespace string) (*authorizationv1beta1.LocalSubjectAccessReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", namespace, "localsubjectaccessreviews", name) - resp := new(authorizationv1beta1.LocalSubjectAccessReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) 
CreateSelfSubjectAccessReview(ctx context.Context, obj *authorizationv1beta1.SelfSubjectAccessReview) (*authorizationv1beta1.SelfSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", ns, "selfsubjectaccessreviews", "") - resp := new(authorizationv1beta1.SelfSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) UpdateSelfSubjectAccessReview(ctx context.Context, obj *authorizationv1beta1.SelfSubjectAccessReview) (*authorizationv1beta1.SelfSubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", *md.Namespace, "selfsubjectaccessreviews", *md.Name) - resp := new(authorizationv1beta1.SelfSubjectAccessReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) DeleteSelfSubjectAccessReview(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", AllNamespaces, "selfsubjectaccessreviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AuthorizationV1Beta1) GetSelfSubjectAccessReview(ctx context.Context, name string) (*authorizationv1beta1.SelfSubjectAccessReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", AllNamespaces, "selfsubjectaccessreviews", name) - resp := new(authorizationv1beta1.SelfSubjectAccessReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) CreateSubjectAccessReview(ctx context.Context, obj *authorizationv1beta1.SubjectAccessReview) (*authorizationv1beta1.SubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", ns, "subjectaccessreviews", "") - resp := new(authorizationv1beta1.SubjectAccessReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) UpdateSubjectAccessReview(ctx context.Context, obj *authorizationv1beta1.SubjectAccessReview) 
(*authorizationv1beta1.SubjectAccessReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", *md.Namespace, "subjectaccessreviews", *md.Name) - resp := new(authorizationv1beta1.SubjectAccessReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AuthorizationV1Beta1) DeleteSubjectAccessReview(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", AllNamespaces, "subjectaccessreviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AuthorizationV1Beta1) GetSubjectAccessReview(ctx context.Context, name string) (*authorizationv1beta1.SubjectAccessReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("authorization.k8s.io", "v1beta1", AllNamespaces, "subjectaccessreviews", name) - resp := new(authorizationv1beta1.SubjectAccessReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AutoscalingV1 returns a client for interacting with the autoscaling/v1 API group. -func (c *Client) AutoscalingV1() *AutoscalingV1 { - return &AutoscalingV1{c} -} - -// AutoscalingV1 is a client for interacting with the autoscaling/v1 API group. 
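The constant `true`/`false` literals in conditions like `if !false && ns != ""` in the hunks above are the code generator's "namespaced" flag substituted verbatim; the compiler folds the dead branch away. Read as ordinary Go, the check reduces to the sketch below. This is a reading aid only; `checkNamespace` is a hypothetical helper and is not part of the vendored file.

// Illustrative reduction of the generated namespace checks.
package main

import "fmt"

// checkNamespace mirrors what the generated Create/Update methods do once the
// generator's namespaced flag is substituted: cluster-scoped resources must not
// carry a namespace, while namespaced resources must.
func checkNamespace(namespaced bool, ns string) error {
	if !namespaced && ns != "" {
		return fmt.Errorf("resource isn't namespaced")
	}
	if namespaced && ns == "" {
		return fmt.Errorf("no resource namespace provided")
	}
	return nil
}

func main() {
	// A SubjectAccessReview (cluster-scoped) with a namespace set fails the check.
	fmt.Println(checkNamespace(false, "kube-system")) // resource isn't namespaced
	// A Job (namespaced) without a namespace also fails.
	fmt.Println(checkNamespace(true, "")) // no resource namespace provided
}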
-type AutoscalingV1 struct { - client *Client -} - -func (c *AutoscalingV1) CreateHorizontalPodAutoscaler(ctx context.Context, obj *autoscalingv1.HorizontalPodAutoscaler) (*autoscalingv1.HorizontalPodAutoscaler, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("autoscaling", "v1", ns, "horizontalpodautoscalers", "") - resp := new(autoscalingv1.HorizontalPodAutoscaler) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AutoscalingV1) UpdateHorizontalPodAutoscaler(ctx context.Context, obj *autoscalingv1.HorizontalPodAutoscaler) (*autoscalingv1.HorizontalPodAutoscaler, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("autoscaling", "v1", *md.Namespace, "horizontalpodautoscalers", *md.Name) - resp := new(autoscalingv1.HorizontalPodAutoscaler) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AutoscalingV1) DeleteHorizontalPodAutoscaler(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("autoscaling", "v1", namespace, "horizontalpodautoscalers", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AutoscalingV1) GetHorizontalPodAutoscaler(ctx context.Context, name, namespace string) (*autoscalingv1.HorizontalPodAutoscaler, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("autoscaling", "v1", namespace, "horizontalpodautoscalers", name) - resp := new(autoscalingv1.HorizontalPodAutoscaler) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type AutoscalingV1HorizontalPodAutoscalerWatcher struct { - watcher *watcher -} - -func (w *AutoscalingV1HorizontalPodAutoscalerWatcher) Next() (*versioned.Event, *autoscalingv1.HorizontalPodAutoscaler, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(autoscalingv1.HorizontalPodAutoscaler) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *AutoscalingV1HorizontalPodAutoscalerWatcher) Close() error { - return w.watcher.Close() -} - -func (c *AutoscalingV1) WatchHorizontalPodAutoscalers(ctx context.Context, namespace string, options ...Option) (*AutoscalingV1HorizontalPodAutoscalerWatcher, error) { - url := c.client.urlFor("autoscaling", "v1", namespace, "horizontalpodautoscalers", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &AutoscalingV1HorizontalPodAutoscalerWatcher{watcher}, nil -} - -func (c *AutoscalingV1) ListHorizontalPodAutoscalers(ctx context.Context, namespace string, options ...Option) (*autoscalingv1.HorizontalPodAutoscalerList, error) { - url := c.client.urlFor("autoscaling", "v1", namespace, "horizontalpodautoscalers", "", options...) - resp := new(autoscalingv1.HorizontalPodAutoscalerList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *AutoscalingV1) CreateScale(ctx context.Context, obj *autoscalingv1.Scale) (*autoscalingv1.Scale, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("autoscaling", "v1", ns, "scales", "") - resp := new(autoscalingv1.Scale) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AutoscalingV1) UpdateScale(ctx context.Context, obj *autoscalingv1.Scale) (*autoscalingv1.Scale, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("autoscaling", "v1", *md.Namespace, "scales", *md.Name) - resp := new(autoscalingv1.Scale) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AutoscalingV1) DeleteScale(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("autoscaling", "v1", namespace, "scales", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AutoscalingV1) GetScale(ctx context.Context, name, namespace string) (*autoscalingv1.Scale, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("autoscaling", "v1", namespace, "scales", name) - resp := new(autoscalingv1.Scale) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// AutoscalingV2Alpha1 returns a client for interacting with the autoscaling/v2alpha1 API group. -func (c *Client) AutoscalingV2Alpha1() *AutoscalingV2Alpha1 { - return &AutoscalingV2Alpha1{c} -} - -// AutoscalingV2Alpha1 is a client for interacting with the autoscaling/v2alpha1 API group. 
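To show how the AutoscalingV1 client above was typically consumed, here is a minimal sketch. It assumes what appears to be the vendored github.com/ericchiang/k8s package, its NewInClusterClient constructor, and the proto-generated GetItems/GetMetadata/GetName getters; adjust these names to whatever the vendored revision actually exposes.

package main

import (
	"context"
	"log"

	"github.com/ericchiang/k8s"
)

func main() {
	// NewInClusterClient is assumed to exist in the vendored client.
	client, err := k8s.NewInClusterClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// List current HorizontalPodAutoscalers in one namespace.
	hpas, err := client.AutoscalingV1().ListHorizontalPodAutoscalers(ctx, "default")
	if err != nil {
		log.Fatal(err)
	}
	for _, hpa := range hpas.GetItems() { // generated list getter (assumed)
		log.Printf("hpa: %s", hpa.GetMetadata().GetName())
	}

	// Stream subsequent changes until the watcher or context ends.
	w, err := client.AutoscalingV1().WatchHorizontalPodAutoscalers(ctx, "default")
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	for {
		event, hpa, err := w.Next()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("event %v for hpa %s", event, hpa.GetMetadata().GetName())
	}
}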
-type AutoscalingV2Alpha1 struct { - client *Client -} - -func (c *AutoscalingV2Alpha1) CreateHorizontalPodAutoscaler(ctx context.Context, obj *autoscalingv2alpha1.HorizontalPodAutoscaler) (*autoscalingv2alpha1.HorizontalPodAutoscaler, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("autoscaling", "v2alpha1", ns, "horizontalpodautoscalers", "") - resp := new(autoscalingv2alpha1.HorizontalPodAutoscaler) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AutoscalingV2Alpha1) UpdateHorizontalPodAutoscaler(ctx context.Context, obj *autoscalingv2alpha1.HorizontalPodAutoscaler) (*autoscalingv2alpha1.HorizontalPodAutoscaler, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("autoscaling", "v2alpha1", *md.Namespace, "horizontalpodautoscalers", *md.Name) - resp := new(autoscalingv2alpha1.HorizontalPodAutoscaler) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *AutoscalingV2Alpha1) DeleteHorizontalPodAutoscaler(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("autoscaling", "v2alpha1", namespace, "horizontalpodautoscalers", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *AutoscalingV2Alpha1) GetHorizontalPodAutoscaler(ctx context.Context, name, namespace string) (*autoscalingv2alpha1.HorizontalPodAutoscaler, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("autoscaling", "v2alpha1", namespace, "horizontalpodautoscalers", name) - resp := new(autoscalingv2alpha1.HorizontalPodAutoscaler) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type AutoscalingV2Alpha1HorizontalPodAutoscalerWatcher struct { - watcher *watcher -} - -func (w *AutoscalingV2Alpha1HorizontalPodAutoscalerWatcher) Next() (*versioned.Event, *autoscalingv2alpha1.HorizontalPodAutoscaler, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(autoscalingv2alpha1.HorizontalPodAutoscaler) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *AutoscalingV2Alpha1HorizontalPodAutoscalerWatcher) Close() error { - return w.watcher.Close() -} - -func (c *AutoscalingV2Alpha1) WatchHorizontalPodAutoscalers(ctx context.Context, namespace string, options ...Option) (*AutoscalingV2Alpha1HorizontalPodAutoscalerWatcher, error) { - url := c.client.urlFor("autoscaling", "v2alpha1", namespace, "horizontalpodautoscalers", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &AutoscalingV2Alpha1HorizontalPodAutoscalerWatcher{watcher}, nil -} - -func (c *AutoscalingV2Alpha1) ListHorizontalPodAutoscalers(ctx context.Context, namespace string, options ...Option) (*autoscalingv2alpha1.HorizontalPodAutoscalerList, error) { - url := c.client.urlFor("autoscaling", "v2alpha1", namespace, "horizontalpodautoscalers", "", options...) - resp := new(autoscalingv2alpha1.HorizontalPodAutoscalerList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// BatchV1 returns a client for interacting with the batch/v1 API group. -func (c *Client) BatchV1() *BatchV1 { - return &BatchV1{c} -} - -// BatchV1 is a client for interacting with the batch/v1 API group. -type BatchV1 struct { - client *Client -} - -func (c *BatchV1) CreateJob(ctx context.Context, obj *batchv1.Job) (*batchv1.Job, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("batch", "v1", ns, "jobs", "") - resp := new(batchv1.Job) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *BatchV1) UpdateJob(ctx context.Context, obj *batchv1.Job) (*batchv1.Job, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("batch", "v1", *md.Namespace, "jobs", *md.Name) - resp := new(batchv1.Job) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *BatchV1) DeleteJob(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("batch", "v1", namespace, "jobs", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *BatchV1) GetJob(ctx context.Context, name, namespace string) (*batchv1.Job, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("batch", "v1", namespace, "jobs", name) - resp := new(batchv1.Job) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type BatchV1JobWatcher struct { - watcher *watcher -} - -func (w *BatchV1JobWatcher) Next() (*versioned.Event, *batchv1.Job, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(batchv1.Job) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *BatchV1JobWatcher) Close() error { - return w.watcher.Close() -} - -func (c *BatchV1) WatchJobs(ctx context.Context, namespace string, options ...Option) (*BatchV1JobWatcher, error) { - url := c.client.urlFor("batch", "v1", namespace, "jobs", 
"", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &BatchV1JobWatcher{watcher}, nil -} - -func (c *BatchV1) ListJobs(ctx context.Context, namespace string, options ...Option) (*batchv1.JobList, error) { - url := c.client.urlFor("batch", "v1", namespace, "jobs", "", options...) - resp := new(batchv1.JobList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// BatchV2Alpha1 returns a client for interacting with the batch/v2alpha1 API group. -func (c *Client) BatchV2Alpha1() *BatchV2Alpha1 { - return &BatchV2Alpha1{c} -} - -// BatchV2Alpha1 is a client for interacting with the batch/v2alpha1 API group. -type BatchV2Alpha1 struct { - client *Client -} - -func (c *BatchV2Alpha1) CreateCronJob(ctx context.Context, obj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("batch", "v2alpha1", ns, "cronjobs", "") - resp := new(batchv2alpha1.CronJob) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *BatchV2Alpha1) UpdateCronJob(ctx context.Context, obj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("batch", "v2alpha1", *md.Namespace, "cronjobs", *md.Name) - resp := new(batchv2alpha1.CronJob) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *BatchV2Alpha1) DeleteCronJob(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("batch", "v2alpha1", namespace, "cronjobs", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *BatchV2Alpha1) GetCronJob(ctx context.Context, name, namespace string) (*batchv2alpha1.CronJob, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("batch", "v2alpha1", namespace, "cronjobs", name) - resp := new(batchv2alpha1.CronJob) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type BatchV2Alpha1CronJobWatcher struct { - watcher *watcher -} - -func (w *BatchV2Alpha1CronJobWatcher) Next() (*versioned.Event, *batchv2alpha1.CronJob, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(batchv2alpha1.CronJob) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *BatchV2Alpha1CronJobWatcher) Close() error { - return w.watcher.Close() -} - -func (c *BatchV2Alpha1) WatchCronJobs(ctx context.Context, 
namespace string, options ...Option) (*BatchV2Alpha1CronJobWatcher, error) { - url := c.client.urlFor("batch", "v2alpha1", namespace, "cronjobs", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &BatchV2Alpha1CronJobWatcher{watcher}, nil -} - -func (c *BatchV2Alpha1) ListCronJobs(ctx context.Context, namespace string, options ...Option) (*batchv2alpha1.CronJobList, error) { - url := c.client.urlFor("batch", "v2alpha1", namespace, "cronjobs", "", options...) - resp := new(batchv2alpha1.CronJobList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *BatchV2Alpha1) CreateJobTemplate(ctx context.Context, obj *batchv2alpha1.JobTemplate) (*batchv2alpha1.JobTemplate, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("batch", "v2alpha1", ns, "jobtemplates", "") - resp := new(batchv2alpha1.JobTemplate) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *BatchV2Alpha1) UpdateJobTemplate(ctx context.Context, obj *batchv2alpha1.JobTemplate) (*batchv2alpha1.JobTemplate, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("batch", "v2alpha1", *md.Namespace, "jobtemplates", *md.Name) - resp := new(batchv2alpha1.JobTemplate) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *BatchV2Alpha1) DeleteJobTemplate(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("batch", "v2alpha1", namespace, "jobtemplates", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *BatchV2Alpha1) GetJobTemplate(ctx context.Context, name, namespace string) (*batchv2alpha1.JobTemplate, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("batch", "v2alpha1", namespace, "jobtemplates", name) - resp := new(batchv2alpha1.JobTemplate) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// CertificatesV1Alpha1 returns a client for interacting with the certificates.k8s.io/v1alpha1 API group. -func (c *Client) CertificatesV1Alpha1() *CertificatesV1Alpha1 { - return &CertificatesV1Alpha1{c} -} - -// CertificatesV1Alpha1 is a client for interacting with the certificates.k8s.io/v1alpha1 API group. 
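A similar sketch for the BatchV1 and BatchV2Alpha1 clients shown above. The package and function names are illustrative; only the client methods come from the vendored hunk, and the list/metadata getters are assumptions about the generated types.

// Sketch of driving the batch clients; not part of the vendored package.
package batchexample

import (
	"context"
	"log"

	"github.com/ericchiang/k8s"
)

// logJobs lists batch/v1 Jobs in one namespace and logs their names.
func logJobs(ctx context.Context, client *k8s.Client, namespace string) error {
	jobs, err := client.BatchV1().ListJobs(ctx, namespace)
	if err != nil {
		return err
	}
	for _, job := range jobs.GetItems() { // generated list getter (assumed)
		log.Printf("job: %s", job.GetMetadata().GetName())
	}
	return nil
}

// tailCronJobs follows batch/v2alpha1 CronJob events until an error occurs.
func tailCronJobs(ctx context.Context, client *k8s.Client, namespace string) error {
	w, err := client.BatchV2Alpha1().WatchCronJobs(ctx, namespace)
	if err != nil {
		return err
	}
	defer w.Close()
	for {
		event, cj, err := w.Next()
		if err != nil {
			return err
		}
		log.Printf("event %v for cronjob %s", event, cj.GetMetadata().GetName())
	}
}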
-type CertificatesV1Alpha1 struct { - client *Client -} - -func (c *CertificatesV1Alpha1) CreateCertificateSigningRequest(ctx context.Context, obj *certificatesv1alpha1.CertificateSigningRequest) (*certificatesv1alpha1.CertificateSigningRequest, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("certificates.k8s.io", "v1alpha1", ns, "certificatesigningrequests", "") - resp := new(certificatesv1alpha1.CertificateSigningRequest) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CertificatesV1Alpha1) UpdateCertificateSigningRequest(ctx context.Context, obj *certificatesv1alpha1.CertificateSigningRequest) (*certificatesv1alpha1.CertificateSigningRequest, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("certificates.k8s.io", "v1alpha1", *md.Namespace, "certificatesigningrequests", *md.Name) - resp := new(certificatesv1alpha1.CertificateSigningRequest) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CertificatesV1Alpha1) DeleteCertificateSigningRequest(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("certificates.k8s.io", "v1alpha1", AllNamespaces, "certificatesigningrequests", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CertificatesV1Alpha1) GetCertificateSigningRequest(ctx context.Context, name string) (*certificatesv1alpha1.CertificateSigningRequest, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("certificates.k8s.io", "v1alpha1", AllNamespaces, "certificatesigningrequests", name) - resp := new(certificatesv1alpha1.CertificateSigningRequest) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CertificatesV1Alpha1CertificateSigningRequestWatcher struct { - watcher *watcher -} - -func (w *CertificatesV1Alpha1CertificateSigningRequestWatcher) Next() (*versioned.Event, *certificatesv1alpha1.CertificateSigningRequest, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(certificatesv1alpha1.CertificateSigningRequest) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CertificatesV1Alpha1CertificateSigningRequestWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CertificatesV1Alpha1) WatchCertificateSigningRequests(ctx context.Context, options ...Option) (*CertificatesV1Alpha1CertificateSigningRequestWatcher, error) { - url := c.client.urlFor("certificates.k8s.io", "v1alpha1", AllNamespaces, 
"certificatesigningrequests", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CertificatesV1Alpha1CertificateSigningRequestWatcher{watcher}, nil -} - -func (c *CertificatesV1Alpha1) ListCertificateSigningRequests(ctx context.Context, options ...Option) (*certificatesv1alpha1.CertificateSigningRequestList, error) { - url := c.client.urlFor("certificates.k8s.io", "v1alpha1", AllNamespaces, "certificatesigningrequests", "", options...) - resp := new(certificatesv1alpha1.CertificateSigningRequestList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// CertificatesV1Beta1 returns a client for interacting with the certificates.k8s.io/v1beta1 API group. -func (c *Client) CertificatesV1Beta1() *CertificatesV1Beta1 { - return &CertificatesV1Beta1{c} -} - -// CertificatesV1Beta1 is a client for interacting with the certificates.k8s.io/v1beta1 API group. -type CertificatesV1Beta1 struct { - client *Client -} - -func (c *CertificatesV1Beta1) CreateCertificateSigningRequest(ctx context.Context, obj *certificatesv1beta1.CertificateSigningRequest) (*certificatesv1beta1.CertificateSigningRequest, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("certificates.k8s.io", "v1beta1", ns, "certificatesigningrequests", "") - resp := new(certificatesv1beta1.CertificateSigningRequest) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CertificatesV1Beta1) UpdateCertificateSigningRequest(ctx context.Context, obj *certificatesv1beta1.CertificateSigningRequest) (*certificatesv1beta1.CertificateSigningRequest, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("certificates.k8s.io", "v1beta1", *md.Namespace, "certificatesigningrequests", *md.Name) - resp := new(certificatesv1beta1.CertificateSigningRequest) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *CertificatesV1Beta1) DeleteCertificateSigningRequest(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("certificates.k8s.io", "v1beta1", AllNamespaces, "certificatesigningrequests", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *CertificatesV1Beta1) GetCertificateSigningRequest(ctx context.Context, name string) (*certificatesv1beta1.CertificateSigningRequest, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("certificates.k8s.io", "v1beta1", AllNamespaces, "certificatesigningrequests", name) - resp := new(certificatesv1beta1.CertificateSigningRequest) - if err := 
c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type CertificatesV1Beta1CertificateSigningRequestWatcher struct { - watcher *watcher -} - -func (w *CertificatesV1Beta1CertificateSigningRequestWatcher) Next() (*versioned.Event, *certificatesv1beta1.CertificateSigningRequest, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(certificatesv1beta1.CertificateSigningRequest) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *CertificatesV1Beta1CertificateSigningRequestWatcher) Close() error { - return w.watcher.Close() -} - -func (c *CertificatesV1Beta1) WatchCertificateSigningRequests(ctx context.Context, options ...Option) (*CertificatesV1Beta1CertificateSigningRequestWatcher, error) { - url := c.client.urlFor("certificates.k8s.io", "v1beta1", AllNamespaces, "certificatesigningrequests", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &CertificatesV1Beta1CertificateSigningRequestWatcher{watcher}, nil -} - -func (c *CertificatesV1Beta1) ListCertificateSigningRequests(ctx context.Context, options ...Option) (*certificatesv1beta1.CertificateSigningRequestList, error) { - url := c.client.urlFor("certificates.k8s.io", "v1beta1", AllNamespaces, "certificatesigningrequests", "", options...) - resp := new(certificatesv1beta1.CertificateSigningRequestList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// ExtensionsV1Beta1 returns a client for interacting with the extensions/v1beta1 API group. -func (c *Client) ExtensionsV1Beta1() *ExtensionsV1Beta1 { - return &ExtensionsV1Beta1{c} -} - -// ExtensionsV1Beta1 is a client for interacting with the extensions/v1beta1 API group. 
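For the cluster-scoped certificates.k8s.io clients above, the watch and list calls take no namespace argument. A minimal sketch, again with illustrative package and function names and assumed metadata getters:

// Sketch only; not part of the vendored package.
package csrexample

import (
	"context"
	"log"

	"github.com/ericchiang/k8s"
)

// tailCSRs streams CertificateSigningRequest events cluster-wide.
func tailCSRs(ctx context.Context, client *k8s.Client) error {
	w, err := client.CertificatesV1Beta1().WatchCertificateSigningRequests(ctx)
	if err != nil {
		return err
	}
	defer w.Close()
	for {
		event, csr, err := w.Next()
		if err != nil {
			return err
		}
		log.Printf("event %v for csr %s", event, csr.GetMetadata().GetName())
	}
}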
-type ExtensionsV1Beta1 struct { - client *Client -} - -func (c *ExtensionsV1Beta1) CreateDaemonSet(ctx context.Context, obj *extensionsv1beta1.DaemonSet) (*extensionsv1beta1.DaemonSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "daemonsets", "") - resp := new(extensionsv1beta1.DaemonSet) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateDaemonSet(ctx context.Context, obj *extensionsv1beta1.DaemonSet) (*extensionsv1beta1.DaemonSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "daemonsets", *md.Name) - resp := new(extensionsv1beta1.DaemonSet) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteDaemonSet(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "daemonsets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetDaemonSet(ctx context.Context, name, namespace string) (*extensionsv1beta1.DaemonSet, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "daemonsets", name) - resp := new(extensionsv1beta1.DaemonSet) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1DaemonSetWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1DaemonSetWatcher) Next() (*versioned.Event, *extensionsv1beta1.DaemonSet, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.DaemonSet) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1DaemonSetWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchDaemonSets(ctx context.Context, namespace string, options ...Option) (*ExtensionsV1Beta1DaemonSetWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "daemonsets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1DaemonSetWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListDaemonSets(ctx context.Context, namespace string, options ...Option) (*extensionsv1beta1.DaemonSetList, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "daemonsets", "", options...) 
- resp := new(extensionsv1beta1.DaemonSetList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreateDeployment(ctx context.Context, obj *extensionsv1beta1.Deployment) (*extensionsv1beta1.Deployment, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "deployments", "") - resp := new(extensionsv1beta1.Deployment) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateDeployment(ctx context.Context, obj *extensionsv1beta1.Deployment) (*extensionsv1beta1.Deployment, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "deployments", *md.Name) - resp := new(extensionsv1beta1.Deployment) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteDeployment(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "deployments", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetDeployment(ctx context.Context, name, namespace string) (*extensionsv1beta1.Deployment, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "deployments", name) - resp := new(extensionsv1beta1.Deployment) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1DeploymentWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1DeploymentWatcher) Next() (*versioned.Event, *extensionsv1beta1.Deployment, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.Deployment) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1DeploymentWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchDeployments(ctx context.Context, namespace string, options ...Option) (*ExtensionsV1Beta1DeploymentWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "deployments", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1DeploymentWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListDeployments(ctx context.Context, namespace string, options ...Option) (*extensionsv1beta1.DeploymentList, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "deployments", "", options...) - resp := new(extensionsv1beta1.DeploymentList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreateIngress(ctx context.Context, obj *extensionsv1beta1.Ingress) (*extensionsv1beta1.Ingress, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "ingresses", "") - resp := new(extensionsv1beta1.Ingress) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateIngress(ctx context.Context, obj *extensionsv1beta1.Ingress) (*extensionsv1beta1.Ingress, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "ingresses", *md.Name) - resp := new(extensionsv1beta1.Ingress) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteIngress(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "ingresses", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetIngress(ctx context.Context, name, namespace string) (*extensionsv1beta1.Ingress, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "ingresses", name) - resp := new(extensionsv1beta1.Ingress) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1IngressWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1IngressWatcher) Next() (*versioned.Event, *extensionsv1beta1.Ingress, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.Ingress) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1IngressWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchIngresses(ctx context.Context, namespace string, options ...Option) (*ExtensionsV1Beta1IngressWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, 
"ingresses", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1IngressWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListIngresses(ctx context.Context, namespace string, options ...Option) (*extensionsv1beta1.IngressList, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "ingresses", "", options...) - resp := new(extensionsv1beta1.IngressList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreateNetworkPolicy(ctx context.Context, obj *extensionsv1beta1.NetworkPolicy) (*extensionsv1beta1.NetworkPolicy, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "networkpolicies", "") - resp := new(extensionsv1beta1.NetworkPolicy) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateNetworkPolicy(ctx context.Context, obj *extensionsv1beta1.NetworkPolicy) (*extensionsv1beta1.NetworkPolicy, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "networkpolicies", *md.Name) - resp := new(extensionsv1beta1.NetworkPolicy) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteNetworkPolicy(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "networkpolicies", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetNetworkPolicy(ctx context.Context, name, namespace string) (*extensionsv1beta1.NetworkPolicy, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "networkpolicies", name) - resp := new(extensionsv1beta1.NetworkPolicy) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1NetworkPolicyWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1NetworkPolicyWatcher) Next() (*versioned.Event, *extensionsv1beta1.NetworkPolicy, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.NetworkPolicy) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1NetworkPolicyWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchNetworkPolicies(ctx 
context.Context, namespace string, options ...Option) (*ExtensionsV1Beta1NetworkPolicyWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "networkpolicies", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1NetworkPolicyWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListNetworkPolicies(ctx context.Context, namespace string, options ...Option) (*extensionsv1beta1.NetworkPolicyList, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "networkpolicies", "", options...) - resp := new(extensionsv1beta1.NetworkPolicyList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreatePodSecurityPolicy(ctx context.Context, obj *extensionsv1beta1.PodSecurityPolicy) (*extensionsv1beta1.PodSecurityPolicy, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "podsecuritypolicies", "") - resp := new(extensionsv1beta1.PodSecurityPolicy) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdatePodSecurityPolicy(ctx context.Context, obj *extensionsv1beta1.PodSecurityPolicy) (*extensionsv1beta1.PodSecurityPolicy, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "podsecuritypolicies", *md.Name) - resp := new(extensionsv1beta1.PodSecurityPolicy) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeletePodSecurityPolicy(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "podsecuritypolicies", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetPodSecurityPolicy(ctx context.Context, name string) (*extensionsv1beta1.PodSecurityPolicy, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "podsecuritypolicies", name) - resp := new(extensionsv1beta1.PodSecurityPolicy) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1PodSecurityPolicyWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1PodSecurityPolicyWatcher) Next() (*versioned.Event, *extensionsv1beta1.PodSecurityPolicy, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.PodSecurityPolicy) - if err := 
proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1PodSecurityPolicyWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchPodSecurityPolicies(ctx context.Context, options ...Option) (*ExtensionsV1Beta1PodSecurityPolicyWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "podsecuritypolicies", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1PodSecurityPolicyWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListPodSecurityPolicies(ctx context.Context, options ...Option) (*extensionsv1beta1.PodSecurityPolicyList, error) { - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "podsecuritypolicies", "", options...) - resp := new(extensionsv1beta1.PodSecurityPolicyList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreateReplicaSet(ctx context.Context, obj *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "replicasets", "") - resp := new(extensionsv1beta1.ReplicaSet) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateReplicaSet(ctx context.Context, obj *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "replicasets", *md.Name) - resp := new(extensionsv1beta1.ReplicaSet) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteReplicaSet(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "replicasets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetReplicaSet(ctx context.Context, name, namespace string) (*extensionsv1beta1.ReplicaSet, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "replicasets", name) - resp := new(extensionsv1beta1.ReplicaSet) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1ReplicaSetWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1ReplicaSetWatcher) Next() (*versioned.Event, *extensionsv1beta1.ReplicaSet, error) { - event, 
unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.ReplicaSet) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1ReplicaSetWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchReplicaSets(ctx context.Context, namespace string, options ...Option) (*ExtensionsV1Beta1ReplicaSetWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "replicasets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1ReplicaSetWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListReplicaSets(ctx context.Context, namespace string, options ...Option) (*extensionsv1beta1.ReplicaSetList, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "replicasets", "", options...) - resp := new(extensionsv1beta1.ReplicaSetList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreateScale(ctx context.Context, obj *extensionsv1beta1.Scale) (*extensionsv1beta1.Scale, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "scales", "") - resp := new(extensionsv1beta1.Scale) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateScale(ctx context.Context, obj *extensionsv1beta1.Scale) (*extensionsv1beta1.Scale, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "scales", *md.Name) - resp := new(extensionsv1beta1.Scale) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteScale(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "scales", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetScale(ctx context.Context, name, namespace string) (*extensionsv1beta1.Scale, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "scales", name) - resp := new(extensionsv1beta1.Scale) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreateThirdPartyResource(ctx context.Context, obj *extensionsv1beta1.ThirdPartyResource) (*extensionsv1beta1.ThirdPartyResource, error) { - md := 
obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "thirdpartyresources", "") - resp := new(extensionsv1beta1.ThirdPartyResource) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateThirdPartyResource(ctx context.Context, obj *extensionsv1beta1.ThirdPartyResource) (*extensionsv1beta1.ThirdPartyResource, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "thirdpartyresources", *md.Name) - resp := new(extensionsv1beta1.ThirdPartyResource) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteThirdPartyResource(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "thirdpartyresources", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetThirdPartyResource(ctx context.Context, name string) (*extensionsv1beta1.ThirdPartyResource, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "thirdpartyresources", name) - resp := new(extensionsv1beta1.ThirdPartyResource) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1ThirdPartyResourceWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1ThirdPartyResourceWatcher) Next() (*versioned.Event, *extensionsv1beta1.ThirdPartyResource, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.ThirdPartyResource) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1ThirdPartyResourceWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchThirdPartyResources(ctx context.Context, options ...Option) (*ExtensionsV1Beta1ThirdPartyResourceWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "thirdpartyresources", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1ThirdPartyResourceWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListThirdPartyResources(ctx context.Context, options ...Option) (*extensionsv1beta1.ThirdPartyResourceList, error) { - url := c.client.urlFor("extensions", "v1beta1", AllNamespaces, "thirdpartyresources", "", options...) 
- resp := new(extensionsv1beta1.ThirdPartyResourceList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) CreateThirdPartyResourceData(ctx context.Context, obj *extensionsv1beta1.ThirdPartyResourceData) (*extensionsv1beta1.ThirdPartyResourceData, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", ns, "thirdpartyresourcedatas", "") - resp := new(extensionsv1beta1.ThirdPartyResourceData) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) UpdateThirdPartyResourceData(ctx context.Context, obj *extensionsv1beta1.ThirdPartyResourceData) (*extensionsv1beta1.ThirdPartyResourceData, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("extensions", "v1beta1", *md.Namespace, "thirdpartyresourcedatas", *md.Name) - resp := new(extensionsv1beta1.ThirdPartyResourceData) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ExtensionsV1Beta1) DeleteThirdPartyResourceData(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "thirdpartyresourcedatas", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ExtensionsV1Beta1) GetThirdPartyResourceData(ctx context.Context, name, namespace string) (*extensionsv1beta1.ThirdPartyResourceData, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("extensions", "v1beta1", namespace, "thirdpartyresourcedatas", name) - resp := new(extensionsv1beta1.ThirdPartyResourceData) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type ExtensionsV1Beta1ThirdPartyResourceDataWatcher struct { - watcher *watcher -} - -func (w *ExtensionsV1Beta1ThirdPartyResourceDataWatcher) Next() (*versioned.Event, *extensionsv1beta1.ThirdPartyResourceData, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(extensionsv1beta1.ThirdPartyResourceData) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *ExtensionsV1Beta1ThirdPartyResourceDataWatcher) Close() error { - return w.watcher.Close() -} - -func (c *ExtensionsV1Beta1) WatchThirdPartyResourceDatas(ctx context.Context, namespace string, options ...Option) (*ExtensionsV1Beta1ThirdPartyResourceDataWatcher, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "thirdpartyresourcedatas", "", 
options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &ExtensionsV1Beta1ThirdPartyResourceDataWatcher{watcher}, nil -} - -func (c *ExtensionsV1Beta1) ListThirdPartyResourceDatas(ctx context.Context, namespace string, options ...Option) (*extensionsv1beta1.ThirdPartyResourceDataList, error) { - url := c.client.urlFor("extensions", "v1beta1", namespace, "thirdpartyresourcedatas", "", options...) - resp := new(extensionsv1beta1.ThirdPartyResourceDataList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// ImagepolicyV1Alpha1 returns a client for interacting with the imagepolicy/v1alpha1 API group. -func (c *Client) ImagepolicyV1Alpha1() *ImagepolicyV1Alpha1 { - return &ImagepolicyV1Alpha1{c} -} - -// ImagepolicyV1Alpha1 is a client for interacting with the imagepolicy/v1alpha1 API group. -type ImagepolicyV1Alpha1 struct { - client *Client -} - -func (c *ImagepolicyV1Alpha1) CreateImageReview(ctx context.Context, obj *imagepolicyv1alpha1.ImageReview) (*imagepolicyv1alpha1.ImageReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("imagepolicy", "v1alpha1", ns, "imagereviews", "") - resp := new(imagepolicyv1alpha1.ImageReview) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ImagepolicyV1Alpha1) UpdateImageReview(ctx context.Context, obj *imagepolicyv1alpha1.ImageReview) (*imagepolicyv1alpha1.ImageReview, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("imagepolicy", "v1alpha1", *md.Namespace, "imagereviews", *md.Name) - resp := new(imagepolicyv1alpha1.ImageReview) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *ImagepolicyV1Alpha1) DeleteImageReview(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("imagepolicy", "v1alpha1", AllNamespaces, "imagereviews", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *ImagepolicyV1Alpha1) GetImageReview(ctx context.Context, name string) (*imagepolicyv1alpha1.ImageReview, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("imagepolicy", "v1alpha1", AllNamespaces, "imagereviews", name) - resp := new(imagepolicyv1alpha1.ImageReview) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// PolicyV1Alpha1 returns a client for interacting with the policy/v1alpha1 API group. 
-func (c *Client) PolicyV1Alpha1() *PolicyV1Alpha1 { - return &PolicyV1Alpha1{c} -} - -// PolicyV1Alpha1 is a client for interacting with the policy/v1alpha1 API group. -type PolicyV1Alpha1 struct { - client *Client -} - -func (c *PolicyV1Alpha1) CreateEviction(ctx context.Context, obj *policyv1alpha1.Eviction) (*policyv1alpha1.Eviction, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1alpha1", ns, "evictions", "") - resp := new(policyv1alpha1.Eviction) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Alpha1) UpdateEviction(ctx context.Context, obj *policyv1alpha1.Eviction) (*policyv1alpha1.Eviction, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1alpha1", *md.Namespace, "evictions", *md.Name) - resp := new(policyv1alpha1.Eviction) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Alpha1) DeleteEviction(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1alpha1", namespace, "evictions", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *PolicyV1Alpha1) GetEviction(ctx context.Context, name, namespace string) (*policyv1alpha1.Eviction, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1alpha1", namespace, "evictions", name) - resp := new(policyv1alpha1.Eviction) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Alpha1) CreatePodDisruptionBudget(ctx context.Context, obj *policyv1alpha1.PodDisruptionBudget) (*policyv1alpha1.PodDisruptionBudget, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1alpha1", ns, "poddisruptionbudgets", "") - resp := new(policyv1alpha1.PodDisruptionBudget) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Alpha1) UpdatePodDisruptionBudget(ctx context.Context, obj *policyv1alpha1.PodDisruptionBudget) (*policyv1alpha1.PodDisruptionBudget, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given 
object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1alpha1", *md.Namespace, "poddisruptionbudgets", *md.Name) - resp := new(policyv1alpha1.PodDisruptionBudget) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Alpha1) DeletePodDisruptionBudget(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1alpha1", namespace, "poddisruptionbudgets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *PolicyV1Alpha1) GetPodDisruptionBudget(ctx context.Context, name, namespace string) (*policyv1alpha1.PodDisruptionBudget, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1alpha1", namespace, "poddisruptionbudgets", name) - resp := new(policyv1alpha1.PodDisruptionBudget) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type PolicyV1Alpha1PodDisruptionBudgetWatcher struct { - watcher *watcher -} - -func (w *PolicyV1Alpha1PodDisruptionBudgetWatcher) Next() (*versioned.Event, *policyv1alpha1.PodDisruptionBudget, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(policyv1alpha1.PodDisruptionBudget) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *PolicyV1Alpha1PodDisruptionBudgetWatcher) Close() error { - return w.watcher.Close() -} - -func (c *PolicyV1Alpha1) WatchPodDisruptionBudgets(ctx context.Context, namespace string, options ...Option) (*PolicyV1Alpha1PodDisruptionBudgetWatcher, error) { - url := c.client.urlFor("policy", "v1alpha1", namespace, "poddisruptionbudgets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &PolicyV1Alpha1PodDisruptionBudgetWatcher{watcher}, nil -} - -func (c *PolicyV1Alpha1) ListPodDisruptionBudgets(ctx context.Context, namespace string, options ...Option) (*policyv1alpha1.PodDisruptionBudgetList, error) { - url := c.client.urlFor("policy", "v1alpha1", namespace, "poddisruptionbudgets", "", options...) - resp := new(policyv1alpha1.PodDisruptionBudgetList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// PolicyV1Beta1 returns a client for interacting with the policy/v1beta1 API group. -func (c *Client) PolicyV1Beta1() *PolicyV1Beta1 { - return &PolicyV1Beta1{c} -} - -// PolicyV1Beta1 is a client for interacting with the policy/v1beta1 API group. 
-type PolicyV1Beta1 struct { - client *Client -} - -func (c *PolicyV1Beta1) CreateEviction(ctx context.Context, obj *policyv1beta1.Eviction) (*policyv1beta1.Eviction, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1beta1", ns, "evictions", "") - resp := new(policyv1beta1.Eviction) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Beta1) UpdateEviction(ctx context.Context, obj *policyv1beta1.Eviction) (*policyv1beta1.Eviction, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1beta1", *md.Namespace, "evictions", *md.Name) - resp := new(policyv1beta1.Eviction) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Beta1) DeleteEviction(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1beta1", namespace, "evictions", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *PolicyV1Beta1) GetEviction(ctx context.Context, name, namespace string) (*policyv1beta1.Eviction, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1beta1", namespace, "evictions", name) - resp := new(policyv1beta1.Eviction) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Beta1) CreatePodDisruptionBudget(ctx context.Context, obj *policyv1beta1.PodDisruptionBudget) (*policyv1beta1.PodDisruptionBudget, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1beta1", ns, "poddisruptionbudgets", "") - resp := new(policyv1beta1.PodDisruptionBudget) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Beta1) UpdatePodDisruptionBudget(ctx context.Context, obj *policyv1beta1.PodDisruptionBudget) (*policyv1beta1.PodDisruptionBudget, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return 
nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("policy", "v1beta1", *md.Namespace, "poddisruptionbudgets", *md.Name) - resp := new(policyv1beta1.PodDisruptionBudget) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *PolicyV1Beta1) DeletePodDisruptionBudget(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1beta1", namespace, "poddisruptionbudgets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *PolicyV1Beta1) GetPodDisruptionBudget(ctx context.Context, name, namespace string) (*policyv1beta1.PodDisruptionBudget, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("policy", "v1beta1", namespace, "poddisruptionbudgets", name) - resp := new(policyv1beta1.PodDisruptionBudget) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type PolicyV1Beta1PodDisruptionBudgetWatcher struct { - watcher *watcher -} - -func (w *PolicyV1Beta1PodDisruptionBudgetWatcher) Next() (*versioned.Event, *policyv1beta1.PodDisruptionBudget, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(policyv1beta1.PodDisruptionBudget) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *PolicyV1Beta1PodDisruptionBudgetWatcher) Close() error { - return w.watcher.Close() -} - -func (c *PolicyV1Beta1) WatchPodDisruptionBudgets(ctx context.Context, namespace string, options ...Option) (*PolicyV1Beta1PodDisruptionBudgetWatcher, error) { - url := c.client.urlFor("policy", "v1beta1", namespace, "poddisruptionbudgets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &PolicyV1Beta1PodDisruptionBudgetWatcher{watcher}, nil -} - -func (c *PolicyV1Beta1) ListPodDisruptionBudgets(ctx context.Context, namespace string, options ...Option) (*policyv1beta1.PodDisruptionBudgetList, error) { - url := c.client.urlFor("policy", "v1beta1", namespace, "poddisruptionbudgets", "", options...) - resp := new(policyv1beta1.PodDisruptionBudgetList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// RBACV1Alpha1 returns a client for interacting with the rbac.authorization.k8s.io/v1alpha1 API group. -func (c *Client) RBACV1Alpha1() *RBACV1Alpha1 { - return &RBACV1Alpha1{c} -} - -// RBACV1Alpha1 is a client for interacting with the rbac.authorization.k8s.io/v1alpha1 API group. 
-type RBACV1Alpha1 struct { - client *Client -} - -func (c *RBACV1Alpha1) CreateClusterRole(ctx context.Context, obj *rbacv1alpha1.ClusterRole) (*rbacv1alpha1.ClusterRole, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", ns, "clusterroles", "") - resp := new(rbacv1alpha1.ClusterRole) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) UpdateClusterRole(ctx context.Context, obj *rbacv1alpha1.ClusterRole) (*rbacv1alpha1.ClusterRole, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", *md.Namespace, "clusterroles", *md.Name) - resp := new(rbacv1alpha1.ClusterRole) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) DeleteClusterRole(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterroles", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Alpha1) GetClusterRole(ctx context.Context, name string) (*rbacv1alpha1.ClusterRole, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterroles", name) - resp := new(rbacv1alpha1.ClusterRole) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Alpha1ClusterRoleWatcher struct { - watcher *watcher -} - -func (w *RBACV1Alpha1ClusterRoleWatcher) Next() (*versioned.Event, *rbacv1alpha1.ClusterRole, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(rbacv1alpha1.ClusterRole) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Alpha1ClusterRoleWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Alpha1) WatchClusterRoles(ctx context.Context, options ...Option) (*RBACV1Alpha1ClusterRoleWatcher, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterroles", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Alpha1ClusterRoleWatcher{watcher}, nil -} - -func (c *RBACV1Alpha1) ListClusterRoles(ctx context.Context, options ...Option) (*rbacv1alpha1.ClusterRoleList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterroles", "", options...) 
- resp := new(rbacv1alpha1.ClusterRoleList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) CreateClusterRoleBinding(ctx context.Context, obj *rbacv1alpha1.ClusterRoleBinding) (*rbacv1alpha1.ClusterRoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", ns, "clusterrolebindings", "") - resp := new(rbacv1alpha1.ClusterRoleBinding) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) UpdateClusterRoleBinding(ctx context.Context, obj *rbacv1alpha1.ClusterRoleBinding) (*rbacv1alpha1.ClusterRoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", *md.Namespace, "clusterrolebindings", *md.Name) - resp := new(rbacv1alpha1.ClusterRoleBinding) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) DeleteClusterRoleBinding(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterrolebindings", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Alpha1) GetClusterRoleBinding(ctx context.Context, name string) (*rbacv1alpha1.ClusterRoleBinding, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterrolebindings", name) - resp := new(rbacv1alpha1.ClusterRoleBinding) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Alpha1ClusterRoleBindingWatcher struct { - watcher *watcher -} - -func (w *RBACV1Alpha1ClusterRoleBindingWatcher) Next() (*versioned.Event, *rbacv1alpha1.ClusterRoleBinding, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(rbacv1alpha1.ClusterRoleBinding) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Alpha1ClusterRoleBindingWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Alpha1) WatchClusterRoleBindings(ctx context.Context, options ...Option) (*RBACV1Alpha1ClusterRoleBindingWatcher, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterrolebindings", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Alpha1ClusterRoleBindingWatcher{watcher}, nil -} - -func (c *RBACV1Alpha1) ListClusterRoleBindings(ctx context.Context, options ...Option) (*rbacv1alpha1.ClusterRoleBindingList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", AllNamespaces, "clusterrolebindings", "", options...) - resp := new(rbacv1alpha1.ClusterRoleBindingList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) CreateRole(ctx context.Context, obj *rbacv1alpha1.Role) (*rbacv1alpha1.Role, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", ns, "roles", "") - resp := new(rbacv1alpha1.Role) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) UpdateRole(ctx context.Context, obj *rbacv1alpha1.Role) (*rbacv1alpha1.Role, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", *md.Namespace, "roles", *md.Name) - resp := new(rbacv1alpha1.Role) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) DeleteRole(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "roles", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Alpha1) GetRole(ctx context.Context, name, namespace string) (*rbacv1alpha1.Role, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "roles", name) - resp := new(rbacv1alpha1.Role) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Alpha1RoleWatcher struct { - watcher *watcher -} - -func (w *RBACV1Alpha1RoleWatcher) Next() (*versioned.Event, *rbacv1alpha1.Role, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(rbacv1alpha1.Role) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Alpha1RoleWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Alpha1) WatchRoles(ctx context.Context, namespace string, options ...Option) (*RBACV1Alpha1RoleWatcher, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "roles", "", options...) 
- watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Alpha1RoleWatcher{watcher}, nil -} - -func (c *RBACV1Alpha1) ListRoles(ctx context.Context, namespace string, options ...Option) (*rbacv1alpha1.RoleList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "roles", "", options...) - resp := new(rbacv1alpha1.RoleList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) CreateRoleBinding(ctx context.Context, obj *rbacv1alpha1.RoleBinding) (*rbacv1alpha1.RoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", ns, "rolebindings", "") - resp := new(rbacv1alpha1.RoleBinding) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) UpdateRoleBinding(ctx context.Context, obj *rbacv1alpha1.RoleBinding) (*rbacv1alpha1.RoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", *md.Namespace, "rolebindings", *md.Name) - resp := new(rbacv1alpha1.RoleBinding) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Alpha1) DeleteRoleBinding(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "rolebindings", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Alpha1) GetRoleBinding(ctx context.Context, name, namespace string) (*rbacv1alpha1.RoleBinding, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "rolebindings", name) - resp := new(rbacv1alpha1.RoleBinding) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Alpha1RoleBindingWatcher struct { - watcher *watcher -} - -func (w *RBACV1Alpha1RoleBindingWatcher) Next() (*versioned.Event, *rbacv1alpha1.RoleBinding, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(rbacv1alpha1.RoleBinding) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Alpha1RoleBindingWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Alpha1) WatchRoleBindings(ctx context.Context, namespace string, options ...Option) (*RBACV1Alpha1RoleBindingWatcher, error) { - url := 
c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "rolebindings", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Alpha1RoleBindingWatcher{watcher}, nil -} - -func (c *RBACV1Alpha1) ListRoleBindings(ctx context.Context, namespace string, options ...Option) (*rbacv1alpha1.RoleBindingList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1alpha1", namespace, "rolebindings", "", options...) - resp := new(rbacv1alpha1.RoleBindingList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// RBACV1Beta1 returns a client for interacting with the rbac.authorization.k8s.io/v1beta1 API group. -func (c *Client) RBACV1Beta1() *RBACV1Beta1 { - return &RBACV1Beta1{c} -} - -// RBACV1Beta1 is a client for interacting with the rbac.authorization.k8s.io/v1beta1 API group. -type RBACV1Beta1 struct { - client *Client -} - -func (c *RBACV1Beta1) CreateClusterRole(ctx context.Context, obj *rbacv1beta1.ClusterRole) (*rbacv1beta1.ClusterRole, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", ns, "clusterroles", "") - resp := new(rbacv1beta1.ClusterRole) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) UpdateClusterRole(ctx context.Context, obj *rbacv1beta1.ClusterRole) (*rbacv1beta1.ClusterRole, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", *md.Namespace, "clusterroles", *md.Name) - resp := new(rbacv1beta1.ClusterRole) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) DeleteClusterRole(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterroles", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Beta1) GetClusterRole(ctx context.Context, name string) (*rbacv1beta1.ClusterRole, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterroles", name) - resp := new(rbacv1beta1.ClusterRole) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Beta1ClusterRoleWatcher struct { - watcher *watcher -} - -func (w *RBACV1Beta1ClusterRoleWatcher) Next() (*versioned.Event, *rbacv1beta1.ClusterRole, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - 
resp := new(rbacv1beta1.ClusterRole) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Beta1ClusterRoleWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Beta1) WatchClusterRoles(ctx context.Context, options ...Option) (*RBACV1Beta1ClusterRoleWatcher, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterroles", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Beta1ClusterRoleWatcher{watcher}, nil -} - -func (c *RBACV1Beta1) ListClusterRoles(ctx context.Context, options ...Option) (*rbacv1beta1.ClusterRoleList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterroles", "", options...) - resp := new(rbacv1beta1.ClusterRoleList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) CreateClusterRoleBinding(ctx context.Context, obj *rbacv1beta1.ClusterRoleBinding) (*rbacv1beta1.ClusterRoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", ns, "clusterrolebindings", "") - resp := new(rbacv1beta1.ClusterRoleBinding) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) UpdateClusterRoleBinding(ctx context.Context, obj *rbacv1beta1.ClusterRoleBinding) (*rbacv1beta1.ClusterRoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", *md.Namespace, "clusterrolebindings", *md.Name) - resp := new(rbacv1beta1.ClusterRoleBinding) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) DeleteClusterRoleBinding(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterrolebindings", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Beta1) GetClusterRoleBinding(ctx context.Context, name string) (*rbacv1beta1.ClusterRoleBinding, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterrolebindings", name) - resp := new(rbacv1beta1.ClusterRoleBinding) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Beta1ClusterRoleBindingWatcher struct { - watcher *watcher -} - -func (w 
*RBACV1Beta1ClusterRoleBindingWatcher) Next() (*versioned.Event, *rbacv1beta1.ClusterRoleBinding, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(rbacv1beta1.ClusterRoleBinding) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Beta1ClusterRoleBindingWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Beta1) WatchClusterRoleBindings(ctx context.Context, options ...Option) (*RBACV1Beta1ClusterRoleBindingWatcher, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterrolebindings", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Beta1ClusterRoleBindingWatcher{watcher}, nil -} - -func (c *RBACV1Beta1) ListClusterRoleBindings(ctx context.Context, options ...Option) (*rbacv1beta1.ClusterRoleBindingList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", AllNamespaces, "clusterrolebindings", "", options...) - resp := new(rbacv1beta1.ClusterRoleBindingList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) CreateRole(ctx context.Context, obj *rbacv1beta1.Role) (*rbacv1beta1.Role, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", ns, "roles", "") - resp := new(rbacv1beta1.Role) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) UpdateRole(ctx context.Context, obj *rbacv1beta1.Role) (*rbacv1beta1.Role, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", *md.Namespace, "roles", *md.Name) - resp := new(rbacv1beta1.Role) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) DeleteRole(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "roles", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Beta1) GetRole(ctx context.Context, name, namespace string) (*rbacv1beta1.Role, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "roles", name) - resp := new(rbacv1beta1.Role) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Beta1RoleWatcher struct { - watcher *watcher -} 
- -func (w *RBACV1Beta1RoleWatcher) Next() (*versioned.Event, *rbacv1beta1.Role, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(rbacv1beta1.Role) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Beta1RoleWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Beta1) WatchRoles(ctx context.Context, namespace string, options ...Option) (*RBACV1Beta1RoleWatcher, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "roles", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Beta1RoleWatcher{watcher}, nil -} - -func (c *RBACV1Beta1) ListRoles(ctx context.Context, namespace string, options ...Option) (*rbacv1beta1.RoleList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "roles", "", options...) - resp := new(rbacv1beta1.RoleList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) CreateRoleBinding(ctx context.Context, obj *rbacv1beta1.RoleBinding) (*rbacv1beta1.RoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", ns, "rolebindings", "") - resp := new(rbacv1beta1.RoleBinding) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) UpdateRoleBinding(ctx context.Context, obj *rbacv1beta1.RoleBinding) (*rbacv1beta1.RoleBinding, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", *md.Namespace, "rolebindings", *md.Name) - resp := new(rbacv1beta1.RoleBinding) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *RBACV1Beta1) DeleteRoleBinding(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "rolebindings", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *RBACV1Beta1) GetRoleBinding(ctx context.Context, name, namespace string) (*rbacv1beta1.RoleBinding, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "rolebindings", name) - resp := new(rbacv1beta1.RoleBinding) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type RBACV1Beta1RoleBindingWatcher struct { - watcher *watcher -} - -func 
(w *RBACV1Beta1RoleBindingWatcher) Next() (*versioned.Event, *rbacv1beta1.RoleBinding, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(rbacv1beta1.RoleBinding) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *RBACV1Beta1RoleBindingWatcher) Close() error { - return w.watcher.Close() -} - -func (c *RBACV1Beta1) WatchRoleBindings(ctx context.Context, namespace string, options ...Option) (*RBACV1Beta1RoleBindingWatcher, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "rolebindings", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &RBACV1Beta1RoleBindingWatcher{watcher}, nil -} - -func (c *RBACV1Beta1) ListRoleBindings(ctx context.Context, namespace string, options ...Option) (*rbacv1beta1.RoleBindingList, error) { - url := c.client.urlFor("rbac.authorization.k8s.io", "v1beta1", namespace, "rolebindings", "", options...) - resp := new(rbacv1beta1.RoleBindingList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// SettingsV1Alpha1 returns a client for interacting with the settings/v1alpha1 API group. -func (c *Client) SettingsV1Alpha1() *SettingsV1Alpha1 { - return &SettingsV1Alpha1{c} -} - -// SettingsV1Alpha1 is a client for interacting with the settings/v1alpha1 API group. -type SettingsV1Alpha1 struct { - client *Client -} - -func (c *SettingsV1Alpha1) CreatePodPreset(ctx context.Context, obj *settingsv1alpha1.PodPreset) (*settingsv1alpha1.PodPreset, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("settings", "v1alpha1", ns, "podpresets", "") - resp := new(settingsv1alpha1.PodPreset) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *SettingsV1Alpha1) UpdatePodPreset(ctx context.Context, obj *settingsv1alpha1.PodPreset) (*settingsv1alpha1.PodPreset, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !true && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if true { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("settings", "v1alpha1", *md.Namespace, "podpresets", *md.Name) - resp := new(settingsv1alpha1.PodPreset) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *SettingsV1Alpha1) DeletePodPreset(ctx context.Context, name string, namespace string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("settings", "v1alpha1", namespace, "podpresets", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *SettingsV1Alpha1) GetPodPreset(ctx context.Context, name, namespace string) (*settingsv1alpha1.PodPreset, error) { - if name == "" { - 
return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("settings", "v1alpha1", namespace, "podpresets", name) - resp := new(settingsv1alpha1.PodPreset) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type SettingsV1Alpha1PodPresetWatcher struct { - watcher *watcher -} - -func (w *SettingsV1Alpha1PodPresetWatcher) Next() (*versioned.Event, *settingsv1alpha1.PodPreset, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(settingsv1alpha1.PodPreset) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *SettingsV1Alpha1PodPresetWatcher) Close() error { - return w.watcher.Close() -} - -func (c *SettingsV1Alpha1) WatchPodPresets(ctx context.Context, namespace string, options ...Option) (*SettingsV1Alpha1PodPresetWatcher, error) { - url := c.client.urlFor("settings", "v1alpha1", namespace, "podpresets", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &SettingsV1Alpha1PodPresetWatcher{watcher}, nil -} - -func (c *SettingsV1Alpha1) ListPodPresets(ctx context.Context, namespace string, options ...Option) (*settingsv1alpha1.PodPresetList, error) { - url := c.client.urlFor("settings", "v1alpha1", namespace, "podpresets", "", options...) - resp := new(settingsv1alpha1.PodPresetList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// StorageV1 returns a client for interacting with the storage.k8s.io/v1 API group. -func (c *Client) StorageV1() *StorageV1 { - return &StorageV1{c} -} - -// StorageV1 is a client for interacting with the storage.k8s.io/v1 API group. 
-type StorageV1 struct { - client *Client -} - -func (c *StorageV1) CreateStorageClass(ctx context.Context, obj *storagev1.StorageClass) (*storagev1.StorageClass, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("storage.k8s.io", "v1", ns, "storageclasses", "") - resp := new(storagev1.StorageClass) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *StorageV1) UpdateStorageClass(ctx context.Context, obj *storagev1.StorageClass) (*storagev1.StorageClass, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("storage.k8s.io", "v1", *md.Namespace, "storageclasses", *md.Name) - resp := new(storagev1.StorageClass) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *StorageV1) DeleteStorageClass(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("storage.k8s.io", "v1", AllNamespaces, "storageclasses", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *StorageV1) GetStorageClass(ctx context.Context, name string) (*storagev1.StorageClass, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("storage.k8s.io", "v1", AllNamespaces, "storageclasses", name) - resp := new(storagev1.StorageClass) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type StorageV1StorageClassWatcher struct { - watcher *watcher -} - -func (w *StorageV1StorageClassWatcher) Next() (*versioned.Event, *storagev1.StorageClass, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(storagev1.StorageClass) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *StorageV1StorageClassWatcher) Close() error { - return w.watcher.Close() -} - -func (c *StorageV1) WatchStorageClasses(ctx context.Context, options ...Option) (*StorageV1StorageClassWatcher, error) { - url := c.client.urlFor("storage.k8s.io", "v1", AllNamespaces, "storageclasses", "", options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &StorageV1StorageClassWatcher{watcher}, nil -} - -func (c *StorageV1) ListStorageClasses(ctx context.Context, options ...Option) (*storagev1.StorageClassList, error) { - url := c.client.urlFor("storage.k8s.io", "v1", AllNamespaces, "storageclasses", "", options...) 
- resp := new(storagev1.StorageClassList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - - -// StorageV1Beta1 returns a client for interacting with the storage.k8s.io/v1beta1 API group. -func (c *Client) StorageV1Beta1() *StorageV1Beta1 { - return &StorageV1Beta1{c} -} - -// StorageV1Beta1 is a client for interacting with the storage.k8s.io/v1beta1 API group. -type StorageV1Beta1 struct { - client *Client -} - -func (c *StorageV1Beta1) CreateStorageClass(ctx context.Context, obj *storagev1beta1.StorageClass) (*storagev1beta1.StorageClass, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != ""{ - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("storage.k8s.io", "v1beta1", ns, "storageclasses", "") - resp := new(storagev1beta1.StorageClass) - err := c.client.create(ctx, pbCodec, "POST", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *StorageV1Beta1) UpdateStorageClass(ctx context.Context, obj *storagev1beta1.StorageClass) (*storagev1beta1.StorageClass, error) { - md := obj.GetMetadata() - if md.Name != nil && *md.Name == "" { - return nil, fmt.Errorf("no name for given object") - } - - ns := "" - if md.Namespace != nil { - ns = *md.Namespace - } - if !false && ns != "" { - return nil, fmt.Errorf("resource isn't namespaced") - } - - if false { - if ns == "" { - return nil, fmt.Errorf("no resource namespace provided") - } - md.Namespace = &ns - } - url := c.client.urlFor("storage.k8s.io", "v1beta1", *md.Namespace, "storageclasses", *md.Name) - resp := new(storagev1beta1.StorageClass) - err := c.client.create(ctx, pbCodec, "PUT", url, obj, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *StorageV1Beta1) DeleteStorageClass(ctx context.Context, name string) error { - if name == "" { - return fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("storage.k8s.io", "v1beta1", AllNamespaces, "storageclasses", name) - return c.client.delete(ctx, pbCodec, url) -} - -func (c *StorageV1Beta1) GetStorageClass(ctx context.Context, name string) (*storagev1beta1.StorageClass, error) { - if name == "" { - return nil, fmt.Errorf("create: no name for given object") - } - url := c.client.urlFor("storage.k8s.io", "v1beta1", AllNamespaces, "storageclasses", name) - resp := new(storagev1beta1.StorageClass) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - -type StorageV1Beta1StorageClassWatcher struct { - watcher *watcher -} - -func (w *StorageV1Beta1StorageClassWatcher) Next() (*versioned.Event, *storagev1beta1.StorageClass, error) { - event, unknown, err := w.watcher.next() - if err != nil { - return nil, nil, err - } - resp := new(storagev1beta1.StorageClass) - if err := proto.Unmarshal(unknown.Raw, resp); err != nil { - return nil, nil, err - } - return event, resp, nil -} - -func (w *StorageV1Beta1StorageClassWatcher) Close() error { - return w.watcher.Close() -} - -func (c *StorageV1Beta1) WatchStorageClasses(ctx context.Context, options ...Option) (*StorageV1Beta1StorageClassWatcher, error) { - url := c.client.urlFor("storage.k8s.io", "v1beta1", AllNamespaces, "storageclasses", "", 
options...) - watcher, err := c.client.watch(ctx, url) - if err != nil { - return nil, err - } - return &StorageV1Beta1StorageClassWatcher{watcher}, nil -} - -func (c *StorageV1Beta1) ListStorageClasses(ctx context.Context, options ...Option) (*storagev1beta1.StorageClassList, error) { - url := c.client.urlFor("storage.k8s.io", "v1beta1", AllNamespaces, "storageclasses", "", options...) - resp := new(storagev1beta1.StorageClassList) - if err := c.client.get(ctx, pbCodec, url, resp); err != nil { - return nil, err - } - return resp, nil -} - diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go index bdfa1ca3..36a796a5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/util/intstr/generated.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/apimachinery/pkg/util/intstr/generated.proto /* Package intstr is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/util/intstr/generated.proto + k8s.io/apimachinery/pkg/util/intstr/generated.proto It has these top-level messages: IntOrString @@ -73,7 +72,7 @@ func (m *IntOrString) GetStrVal() string { } func init() { - proto.RegisterType((*IntOrString)(nil), "github.com/ericchiang.k8s.util.intstr.IntOrString") + proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -112,24 +111,6 @@ func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -398,21 +379,21 @@ var ( ) func init() { - proto.RegisterFile("github.com/ericchiang/k8s/util/intstr/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 180 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0xcc, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0xc8, - 0x4e, 0xd7, 0x2f, 0x2d, 0xc9, 0xcc, 0xd1, 0xcf, 0xcc, 0x2b, 0x29, 0x2e, 0x29, 0xd2, 0x4f, 0x4f, - 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x84, - 0x68, 0xd1, 0x43, 0x68, 0xd1, 0x2b, 0xc8, 0x4e, 0xd7, 0x03, 0x69, 0xd1, 0x83, 0x68, 0x51, 0x0a, - 0xe4, 0xe2, 0xf6, 0xcc, 0x2b, 0xf1, 0x2f, 0x0a, 0x2e, 0x29, 0xca, 0xcc, 0x4b, 0x17, 0x12, 0xe2, - 0x62, 0x29, 0xa9, 
0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x02, 0xb3, 0x85, 0xc4, - 0xb8, 0xd8, 0x32, 0xf3, 0x4a, 0xc2, 0x12, 0x73, 0x24, 0x98, 0x14, 0x18, 0x35, 0x58, 0x83, 0xa0, - 0x3c, 0x90, 0x78, 0x71, 0x49, 0x11, 0x48, 0x9c, 0x59, 0x81, 0x51, 0x83, 0x33, 0x08, 0xca, 0x73, - 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x67, 0x3c, - 0x96, 0x63, 0x88, 0x62, 0x83, 0x58, 0x06, 0x08, 0x00, 0x00, 0xff, 0xff, 0xcd, 0x20, 0xf2, 0x02, - 0xc3, 0x00, 0x00, 0x00, + // 182 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xce, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2d, 0xc9, 0xcc, 0xd1, 0xcf, 0xcc, 0x2b, 0x29, 0x2e, 0x29, 0xd2, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x52, 0x86, 0x68, 0xd2, 0x43, 0xd6, 0xa4, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd2, 0xa4, 0x07, 0xd1, + 0xa4, 0x14, 0xc8, 0xc5, 0xed, 0x99, 0x57, 0xe2, 0x5f, 0x14, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, + 0x24, 0xc4, 0xc5, 0x52, 0x52, 0x59, 0x90, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x1c, 0x04, 0x66, + 0x0b, 0x89, 0x71, 0xb1, 0x65, 0xe6, 0x95, 0x84, 0x25, 0xe6, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xb0, + 0x06, 0x41, 0x79, 0x20, 0xf1, 0xe2, 0x92, 0x22, 0x90, 0x38, 0xb3, 0x02, 0xa3, 0x06, 0x67, 0x10, + 0x94, 0xe7, 0x24, 0x71, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, + 0xce, 0x78, 0x2c, 0xc7, 0x10, 0xc5, 0x06, 0xb1, 0x0c, 0x10, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xa6, + 0x56, 0x6e, 0xc7, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/watch.go b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/watch.go new file mode 100644 index 00000000..91c82dc1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ericchiang/k8s/watch.go @@ -0,0 +1,199 @@ +package k8s + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + + "github.com/ericchiang/k8s/runtime" + "github.com/ericchiang/k8s/watch/versioned" + "github.com/golang/protobuf/proto" +) + +// Decode events from a watch stream. +// +// See: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/protobuf.md#streaming-wire-format + +// Watcher receives a stream of events tracking a particular resource within +// a namespace or across all namespaces. +// +// Watcher does not automatically reconnect. If a watch fails, a new watch must +// be initialized. +type Watcher struct { + watcher interface { + Next(Resource) (string, error) + Close() error + } +} + +// Next decodes the next event from the watch stream. Errors are fatal, and +// indicate that the watcher should no longer be used, and must be recreated. +func (w *Watcher) Next(r Resource) (string, error) { + return w.watcher.Next(r) +} + +// Close closes the active connection with the API server being used for +// the watch. 
+func (w *Watcher) Close() error { + return w.watcher.Close() +} + +type watcherJSON struct { + d *json.Decoder + c io.Closer +} + +func (w *watcherJSON) Close() error { + return w.c.Close() +} + +func (w *watcherJSON) Next(r Resource) (string, error) { + var event struct { + Type string `json:"type"` + Object json.RawMessage `json:"object"` + } + if err := w.d.Decode(&event); err != nil { + return "", fmt.Errorf("decode event: %v", err) + } + if event.Type == "" { + return "", errors.New("watch event had no type field") + } + if err := json.Unmarshal([]byte(event.Object), r); err != nil { + return "", fmt.Errorf("decode resource: %v", err) + } + return event.Type, nil +} + +type watcherPB struct { + r io.ReadCloser +} + +func (w *watcherPB) Next(r Resource) (string, error) { + msg, ok := r.(proto.Message) + if !ok { + return "", errors.New("object was not a protobuf message") + } + event, unknown, err := w.next() + if err != nil { + return "", err + } + if event.Type == nil || *event.Type == "" { + return "", errors.New("watch event had no type field") + } + if err := proto.Unmarshal(unknown.Raw, msg); err != nil { + return "", err + } + return *event.Type, nil +} + +func (w *watcherPB) Close() error { + return w.r.Close() +} + +func (w *watcherPB) next() (*versioned.Event, *runtime.Unknown, error) { + length := make([]byte, 4) + if _, err := io.ReadFull(w.r, length); err != nil { + return nil, nil, err + } + + body := make([]byte, int(binary.BigEndian.Uint32(length))) + if _, err := io.ReadFull(w.r, body); err != nil { + return nil, nil, fmt.Errorf("read frame body: %v", err) + } + + var event versioned.Event + if err := proto.Unmarshal(body, &event); err != nil { + return nil, nil, err + } + + if event.Object == nil { + return nil, nil, fmt.Errorf("event had no underlying object") + } + + unknown, err := parseUnknown(event.Object.Raw) + if err != nil { + return nil, nil, err + } + + return &event, unknown, nil +} + +var unknownPrefix = []byte{0x6b, 0x38, 0x73, 0x00} + +func parseUnknown(b []byte) (*runtime.Unknown, error) { + if !bytes.HasPrefix(b, unknownPrefix) { + return nil, errors.New("bytes did not start with expected prefix") + } + + var u runtime.Unknown + if err := proto.Unmarshal(b[len(unknownPrefix):], &u); err != nil { + return nil, err + } + return &u, nil +} + +// Watch creates a watch on a resource. It takes an example Resource to +// determine what endpoint to watch. +// +// Watch does not automatically reconnect. If a watch fails, a new watch must +// be initialized. +// +// // Watch configmaps in the "kube-system" namespace +// var configMap corev1.ConfigMap +// watcher, err := client.Watch(ctx, "kube-system", &configMap) +// if err != nil { +// // handle error +// } +// defer watcher.Close() // Always close the returned watcher. +// +// for { +// cm := new(corev1.ConfigMap) +// eventType, err := watcher.Next(cm) +// if err != nil { +// // watcher encountered an error, exit or create a new watcher +// } +// fmt.Println(eventType, *cm.Metadata.Name) +// } +// +func (c *Client) Watch(ctx context.Context, namespace string, r Resource, options ...Option) (*Watcher, error) { + url, err := resourceWatchURL(c.Endpoint, namespace, r, options...)
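The watcherPB.next and parseUnknown functions above define the streaming wire format: a 4-byte big-endian length prefix, a versioned.Event protobuf body, and a runtime.Unknown payload led by the 0x6b 0x38 0x73 0x00 ("k8s\0") magic. The Watch method body continues below; in between, here is a hedged, standalone sketch of how a test harness could frame a payload the same way. The placeholder bytes and helper names are illustrative assumptions, not part of the vendored package.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// unknownMagic mirrors the "k8s\0" prefix that parseUnknown strips above.
var unknownMagic = []byte{0x6b, 0x38, 0x73, 0x00}

// wrapUnknown prepends the magic prefix to a marshalled runtime.Unknown,
// producing the bytes that end up in the event's Object.Raw field.
func wrapUnknown(unknownPB []byte) []byte {
	return append(append([]byte{}, unknownMagic...), unknownPB...)
}

// frameEvent prepends the 4-byte big-endian length that watcherPB.next
// reads with io.ReadFull before unmarshalling the versioned.Event body.
func frameEvent(eventPB []byte) []byte {
	var buf bytes.Buffer
	length := make([]byte, 4)
	binary.BigEndian.PutUint32(length, uint32(len(eventPB)))
	buf.Write(length)
	buf.Write(eventPB)
	return buf.Bytes()
}

func main() {
	// Placeholder bytes stand in for real protobuf encodings.
	payload := wrapUnknown([]byte("marshalled-runtime-Unknown"))
	frame := frameEvent([]byte("marshalled-versioned-Event")) // real traffic nests the payload inside the Event
	fmt.Printf("payload prefix: % x\n", payload[:4])
	fmt.Printf("frame length prefix: % x\n", frame[:4])
}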
+ if err != nil { + return nil, err + } + + ct := contentTypeFor(r) + + req, err := c.newRequest(ctx, "GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", ct) + + resp, err := c.client().Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode/100 != 2 { + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + return nil, newAPIError(resp.Header.Get("Content-Type"), resp.StatusCode, body) + } + + if ct == contentTypePB { + return &Watcher{&watcherPB{r: resp.Body}}, nil + } + + return &Watcher{&watcherJSON{ + d: json.NewDecoder(resp.Body), + c: resp.Body, + }}, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/example/main.go b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/example/main.go new file mode 100644 index 00000000..68588097 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/example/main.go @@ -0,0 +1,101 @@ +// +build darwin + +package main + +import ( + "bufio" + "io/ioutil" + "log" + "os" + "runtime" + "time" + + "github.com/fsnotify/fsevents" +) + +func main() { + path, err := ioutil.TempDir("", "fsexample") + if err != nil { + log.Fatalf("Failed to create TempDir: %v", err) + } + dev, err := fsevents.DeviceForPath(path) + if err != nil { + log.Fatalf("Failed to retrieve device for path: %v", err) + } + log.Print(dev) + log.Println(fsevents.EventIDForDeviceBeforeTime(dev, time.Now())) + + es := &fsevents.EventStream{ + Paths: []string{path}, + Latency: 500 * time.Millisecond, + Device: dev, + Flags: fsevents.FileEvents | fsevents.WatchRoot} + es.Start() + ec := es.Events + + log.Println("Device UUID", fsevents.GetDeviceUUID(dev)) + + go func() { + for msg := range ec { + for _, event := range msg { + logEvent(event) + } + } + }() + + in := bufio.NewReader(os.Stdin) + + if false { + log.Print("Started, press enter to GC") + in.ReadString('\n') + runtime.GC() + log.Print("GC'd, press enter to quit") + in.ReadString('\n') + } else { + log.Print("Started, press enter to stop") + in.ReadString('\n') + es.Stop() + + log.Print("Stopped, press enter to restart") + in.ReadString('\n') + es.Resume = true + es.Start() + + log.Print("Restarted, press enter to quit") + in.ReadString('\n') + es.Stop() + } +} + +var noteDescription = map[fsevents.EventFlags]string{ + fsevents.MustScanSubDirs: "MustScanSubdirs", + fsevents.UserDropped: "UserDropped", + fsevents.KernelDropped: "KernelDropped", + fsevents.EventIDsWrapped: "EventIDsWrapped", + fsevents.HistoryDone: "HistoryDone", + fsevents.RootChanged: "RootChanged", + fsevents.Mount: "Mount", + fsevents.Unmount: "Unmount", + + fsevents.ItemCreated: "Created", + fsevents.ItemRemoved: "Removed", + fsevents.ItemInodeMetaMod: "InodeMetaMod", + fsevents.ItemRenamed: "Renamed", + fsevents.ItemModified: "Modified", + fsevents.ItemFinderInfoMod: "FinderInfoMod", + fsevents.ItemChangeOwner: "ChangeOwner", + fsevents.ItemXattrMod: "XAttrMod", + fsevents.ItemIsFile: "IsFile", + fsevents.ItemIsDir: "IsDir", + fsevents.ItemIsSymlink: "IsSymLink", +} + +func logEvent(event fsevents.Event) { + note := "" + for bit, description := range noteDescription { + if event.Flags&bit == bit { + note += description + " " + } + } + log.Printf("EventID: %d Path: %s Flags: %s", event.ID, event.Path, note) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/wrap.go b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/wrap.go index 
26184ab4..698aec34 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/wrap.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/wrap.go @@ -1,4 +1,4 @@ -// +build darwin +// +build darwin,go1.10 package fsevents @@ -109,14 +109,14 @@ func GetStreamRefPaths(f FSEventStreamRef) []string { // in the FSEvents database func GetDeviceUUID(deviceID int32) string { uuid := C.FSEventsCopyUUIDForDevice(C.dev_t(deviceID)) - if uuid == nil { + if uuid == C.CFUUIDRef(0) { return "" } return cfStringToGoString(C.CFUUIDCreateString(nil, uuid)) } func cfStringToGoString(cfs C.CFStringRef) string { - if cfs == nil { + if cfs == 0 { return "" } cfStr := C.CFStringCreateCopy(nil, cfs) @@ -173,7 +173,7 @@ func createPaths(paths []string) (C.CFArrayRef, error) { defer C.free(unsafe.Pointer(cpath)) str := C.CFStringCreateWithCString(nil, cpath, C.kCFStringEncodingUTF8) - C.CFArrayAppendValue(cPaths, unsafe.Pointer(str)) + C.CFArrayAppendValue(C.CFMutableArrayRef(cPaths), unsafe.Pointer(str)) } var err error if len(errs) > 0 { @@ -229,7 +229,7 @@ func (es *EventStream) start(paths []string, callbackInfo uintptr) { go func() { runtime.LockOSThread() es.rlref = CFRunLoopRef(C.CFRunLoopGetCurrent()) - C.FSEventStreamScheduleWithRunLoop(es.stream, es.rlref, C.kCFRunLoopDefaultMode) + C.FSEventStreamScheduleWithRunLoop(es.stream, C.CFRunLoopRef(es.rlref), C.kCFRunLoopDefaultMode) C.FSEventStreamStart(es.stream) close(started) C.CFRunLoopRun() @@ -265,5 +265,5 @@ func stop(stream FSEventStreamRef, rlref CFRunLoopRef) { C.FSEventStreamStop(stream) C.FSEventStreamInvalidate(stream) C.FSEventStreamRelease(stream) - C.CFRunLoopStop(rlref) + C.CFRunLoopStop(C.CFRunLoopRef(rlref)) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/wrap_deprecated.go b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/wrap_deprecated.go new file mode 100644 index 00000000..5493428b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/wrap_deprecated.go @@ -0,0 +1,276 @@ +// +build darwin,!go1.10 + +package fsevents + +/* +#cgo LDFLAGS: -framework CoreServices +#include +#include + +static CFArrayRef ArrayCreateMutable(int len) { + return CFArrayCreateMutable(NULL, len, &kCFTypeArrayCallBacks); +} + +extern void fsevtCallback(FSEventStreamRef p0, uintptr_t info, size_t p1, char** p2, FSEventStreamEventFlags* p3, FSEventStreamEventId* p4); + +static FSEventStreamRef EventStreamCreateRelativeToDevice(FSEventStreamContext * context, uintptr_t info, dev_t dev, CFArrayRef paths, FSEventStreamEventId since, CFTimeInterval latency, FSEventStreamCreateFlags flags) { + context->info = (void*) info; + return FSEventStreamCreateRelativeToDevice(NULL, (FSEventStreamCallback) fsevtCallback, context, dev, paths, since, latency, flags); +} + +static FSEventStreamRef EventStreamCreate(FSEventStreamContext * context, uintptr_t info, CFArrayRef paths, FSEventStreamEventId since, CFTimeInterval latency, FSEventStreamCreateFlags flags) { + context->info = (void*) info; + return FSEventStreamCreate(NULL, (FSEventStreamCallback) fsevtCallback, context, paths, since, latency, flags); +} +*/ +import "C" +import ( + "fmt" + "log" + "path/filepath" + "reflect" + "runtime" + "time" + "unsafe" +) + +// eventIDSinceNow is a sentinel to begin watching events "since now". 
+// NOTE: Go 1.9.2 broke compatibility here, for 1.9.1 and earlier we did: +// uint64(C.kFSEventStreamEventIdSinceNow + (1 << 64)) +// But 1.9.2+ complains about overflow and requires: +// uint64(C.kFSEventStreamEventIdSinceNow) +// There does not seem to be an easy way to rectify, so hardcoding the value +// here from FSEvents.h: +// kFSEventStreamEventIdSinceNow = 0xFFFFFFFFFFFFFFFFULL +const eventIDSinceNow = uint64(0xFFFFFFFFFFFFFFFF) + +// LatestEventID returns the most recently generated event ID, system-wide. +func LatestEventID() uint64 { + return uint64(C.FSEventsGetCurrentEventId()) +} + +// arguments are released by C at the end of the callback. Ensure copies +// are made if data is expected to persist beyond this function ending. +// +//export fsevtCallback +func fsevtCallback(stream C.FSEventStreamRef, info uintptr, numEvents C.size_t, cpaths **C.char, cflags *C.FSEventStreamEventFlags, cids *C.FSEventStreamEventId) { + l := int(numEvents) + events := make([]Event, l) + + es := registry.Get(info) + if es == nil { + log.Printf("failed to retrieve registry %d", info) + return + } + // These slices are backed by C data. Ensure data is copied out + // if it expected to exist outside of this function. + paths := (*[1 << 30]*C.char)(unsafe.Pointer(cpaths))[:l:l] + ids := (*[1 << 30]C.FSEventStreamEventId)(unsafe.Pointer(cids))[:l:l] + flags := (*[1 << 30]C.FSEventStreamEventFlags)(unsafe.Pointer(cflags))[:l:l] + for i := range events { + events[i] = Event{ + Path: C.GoString(paths[i]), + Flags: EventFlags(flags[i]), + ID: uint64(ids[i]), + } + es.EventID = uint64(ids[i]) + } + + es.Events <- events +} + +// FSEventStreamRef wraps C.FSEventStreamRef +type FSEventStreamRef C.FSEventStreamRef + +// GetStreamRefEventID retrieves the last EventID from the ref +func GetStreamRefEventID(f FSEventStreamRef) uint64 { + return uint64(C.FSEventStreamGetLatestEventId(f)) +} + +// GetStreamRefDeviceID retrieves the device ID the stream is watching +func GetStreamRefDeviceID(f FSEventStreamRef) int32 { + return int32(C.FSEventStreamGetDeviceBeingWatched(f)) +} + +// GetStreamRefDescription retrieves debugging description information +// about the StreamRef +func GetStreamRefDescription(f FSEventStreamRef) string { + return cfStringToGoString(C.FSEventStreamCopyDescription(f)) +} + +// GetStreamRefPaths returns a copy of the paths being watched by +// this stream +func GetStreamRefPaths(f FSEventStreamRef) []string { + arr := C.FSEventStreamCopyPathsBeingWatched(f) + l := cfArrayLen(arr) + + ss := make([]string, l) + for i := range ss { + void := C.CFArrayGetValueAtIndex(arr, C.CFIndex(i)) + ss[i] = cfStringToGoString(C.CFStringRef(void)) + } + return ss +} + +// GetDeviceUUID retrieves the UUID required to identify an EventID +// in the FSEvents database +func GetDeviceUUID(deviceID int32) string { + uuid := C.FSEventsCopyUUIDForDevice(C.dev_t(deviceID)) + if uuid == nil { + return "" + } + return cfStringToGoString(C.CFUUIDCreateString(nil, uuid)) +} + +func cfStringToGoString(cfs C.CFStringRef) string { + if cfs == nil { + return "" + } + cfStr := C.CFStringCreateCopy(nil, cfs) + length := C.CFStringGetLength(cfStr) + if length == 0 { + // short-cut for empty strings + return "" + } + cfRange := C.CFRange{0, length} + enc := C.CFStringEncoding(C.kCFStringEncodingUTF8) + // first find the buffer size necessary + var usedBufLen C.CFIndex + if C.CFStringGetBytes(cfStr, cfRange, enc, 0, C.false, nil, 0, &usedBufLen) == 0 { + return "" + } + + bs := make([]byte, usedBufLen) + buf := 
(*C.UInt8)(unsafe.Pointer(&bs[0])) + if C.CFStringGetBytes(cfStr, cfRange, enc, 0, C.false, buf, usedBufLen, nil) == 0 { + return "" + } + + // Create a string (byte array) backed by C byte array + header := (*reflect.SliceHeader)(unsafe.Pointer(&bs)) + strHeader := &reflect.StringHeader{ + Data: header.Data, + Len: header.Len, + } + return *(*string)(unsafe.Pointer(strHeader)) +} + +// CFRunLoopRef wraps C.CFRunLoopRef +type CFRunLoopRef C.CFRunLoopRef + +// EventIDForDeviceBeforeTime returns an event ID before a given time. +func EventIDForDeviceBeforeTime(dev int32, before time.Time) uint64 { + tm := C.CFAbsoluteTime(before.Unix()) + return uint64(C.FSEventsGetLastEventIdForDeviceBeforeTime(C.dev_t(dev), tm)) +} + +// createPaths accepts the user defined set of paths and returns FSEvents +// compatible array of paths +func createPaths(paths []string) (C.CFArrayRef, error) { + cPaths := C.ArrayCreateMutable(C.int(len(paths))) + var errs []error + for _, path := range paths { + p, err := filepath.Abs(path) + if err != nil { + // hack up some reporting errors, but don't prevent execution + // because of them + errs = append(errs, err) + } + cpath := C.CString(p) + defer C.free(unsafe.Pointer(cpath)) + + str := C.CFStringCreateWithCString(nil, cpath, C.kCFStringEncodingUTF8) + C.CFArrayAppendValue(cPaths, unsafe.Pointer(str)) + } + var err error + if len(errs) > 0 { + err = fmt.Errorf("%q", errs) + } + return cPaths, err +} + +// CFArrayLen retrieves the length of CFArray type +// See https://developer.apple.com/library/mac/documentation/CoreFoundation/Reference/CFArrayRef/#//apple_ref/c/func/CFArrayGetCount +func cfArrayLen(ref C.CFArrayRef) int { + // FIXME: this will probably crash on 32bit, untested + // requires OS X v10.0 + return int(C.CFArrayGetCount(ref)) +} + +func setupStream(paths []string, flags CreateFlags, callbackInfo uintptr, eventID uint64, latency time.Duration, deviceID int32) FSEventStreamRef { + cPaths, err := createPaths(paths) + if err != nil { + log.Printf("Error creating paths: %s", err) + } + defer C.CFRelease(C.CFTypeRef(cPaths)) + + since := C.FSEventStreamEventId(eventID) + context := C.FSEventStreamContext{} + info := C.uintptr_t(callbackInfo) + cfinv := C.CFTimeInterval(float64(latency) / float64(time.Second)) + + var ref C.FSEventStreamRef + if deviceID != 0 { + ref = C.EventStreamCreateRelativeToDevice(&context, info, + C.dev_t(deviceID), cPaths, since, cfinv, + C.FSEventStreamCreateFlags(flags)) + } else { + ref = C.EventStreamCreate(&context, info, cPaths, since, + cfinv, C.FSEventStreamCreateFlags(flags)) + } + + return FSEventStreamRef(ref) +} + +func (es *EventStream) start(paths []string, callbackInfo uintptr) { + + since := eventIDSinceNow + if es.Resume { + since = es.EventID + } + + es.stream = setupStream(paths, es.Flags, callbackInfo, since, es.Latency, es.Device) + + started := make(chan struct{}) + + go func() { + runtime.LockOSThread() + es.rlref = CFRunLoopRef(C.CFRunLoopGetCurrent()) + C.FSEventStreamScheduleWithRunLoop(es.stream, es.rlref, C.kCFRunLoopDefaultMode) + C.FSEventStreamStart(es.stream) + close(started) + C.CFRunLoopRun() + }() + + if !es.hasFinalizer { + // TODO: There is no guarantee this run before program exit + // and could result in panics at exit. + runtime.SetFinalizer(es, finalizer) + es.hasFinalizer = true + } + + <-started +} + +func finalizer(es *EventStream) { + // If an EventStream is freed without Stop being called it will + // cause a panic. This avoids that, and closes the stream instead. 
+ es.Stop() +} + +// flush drains the event stream of undelivered events +func flush(stream FSEventStreamRef, sync bool) { + if sync { + C.FSEventStreamFlushSync(stream) + } else { + C.FSEventStreamFlushAsync(stream) + } +} + +// stop requests fsevents stops streaming events +func stop(stream FSEventStreamRef, rlref CFRunLoopRef) { + C.FSEventStreamStop(stream) + C.FSEventStreamInvalidate(stream) + C.FSEventStreamRelease(stream) + C.CFRunLoopStop(rlref) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/AUTHORS index 0a5bf8f6..5ab5d41c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -8,8 +8,10 @@ # Please keep the list sorted. +Aaron L Adrien Bustany Amit Krishnan +Anmol Sethi Bjørn Erik Pedersen Bruno Bigras Caleb Spare @@ -26,6 +28,7 @@ Kelvin Fo Ken-ichirou MATSUZAWA Matt Layher Nathan Youngman +Nickolai Zeldovich Patrick Paul Hammond Pawel Knap @@ -33,12 +36,15 @@ Pieter Droogendijk Pursuit92 Riku Voipio Rob Figueiredo +Rodrigo Chiossi Slawek Ligus Soge Zhang Tiffany Jernigan Tilak Sharma +Tom Payne Travis Cline Tudor Golubenco +Vahe Khachikyan Yukang bronze1man debrando diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index 8c732c1d..be4d7ea2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + ## v1.4.2 / 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/fen.go index ced39cb8..b5a41c89 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/fen.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/fen.go @@ -35,3 +35,9 @@ func (w *Watcher) Add(name string) error { func (w *Watcher) Remove(name string) error { return nil } + +// SetRecursive enables watches to also monitor subdirectories. Currently +// only supported under Windows. 
+func (w *Watcher) SetRecursive() error { + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/inotify.go index d9fd1b88..98cb3739 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/inotify.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -162,6 +162,12 @@ func (w *Watcher) Remove(name string) error { return nil } +// SetRecursive enables watches to also monitor subdirectories. Currently +// only supported under Windows. +func (w *Watcher) SetRecursive() error { + return errors.New("recursion not supported") +} + type watch struct { wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/kqueue.go index c2b4acb1..fc0975df 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -22,7 +22,7 @@ import ( type Watcher struct { Events chan Event Errors chan error - done chan bool // Channel for sending a "quit message" to the reader goroutine + done chan struct{} // Channel for sending a "quit message" to the reader goroutine kq int // File descriptor (as returned by the kqueue() syscall). @@ -56,7 +56,7 @@ func NewWatcher() (*Watcher, error) { externalWatches: make(map[string]bool), Events: make(chan Event), Errors: make(chan error), - done: make(chan bool), + done: make(chan struct{}), } go w.readEvents() @@ -71,10 +71,8 @@ func (w *Watcher) Close() error { return nil } w.isClosed = true - w.mu.Unlock() // copy paths to remove while locked - w.mu.Lock() var pathsToRemove = make([]string, 0, len(w.watches)) for name := range w.watches { pathsToRemove = append(pathsToRemove, name) @@ -82,15 +80,12 @@ func (w *Watcher) Close() error { w.mu.Unlock() // unlock before calling Remove, which also locks - var err error for _, name := range pathsToRemove { - if e := w.Remove(name); e != nil && err == nil { - err = e - } + w.Remove(name) } - // Send "quit" message to the reader goroutine: - w.done <- true + // send a "quit" message to the reader goroutine + close(w.done) return nil } @@ -152,6 +147,12 @@ func (w *Watcher) Remove(name string) error { return nil } +// SetRecursive enables watches to also monitor subdirectories. Currently +// only supported under Windows. +func (w *Watcher) SetRecursive() error { + return errors.New("recursion not supported") +} + // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME @@ -266,17 +267,12 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { func (w *Watcher) readEvents() { eventBuffer := make([]unix.Kevent_t, 10) +loop: for { // See if there is a message on the "done" channel select { case <-w.done: - err := unix.Close(w.kq) - if err != nil { - w.Errors <- err - } - close(w.Events) - close(w.Errors) - return + break loop default: } @@ -284,7 +280,11 @@ func (w *Watcher) readEvents() { kevents, err := read(w.kq, eventBuffer, &keventWaitTime) // EINTR is okay, the syscall was interrupted before timeout expired. 
if err != nil && err != unix.EINTR { - w.Errors <- err + select { + case w.Errors <- err: + case <-w.done: + break loop + } continue } @@ -319,8 +319,12 @@ func (w *Watcher) readEvents() { if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { w.sendDirectoryChangeEvents(event.Name) } else { - // Send the event on the Events channel - w.Events <- event + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } } if event.Op&Remove == Remove { @@ -352,6 +356,18 @@ func (w *Watcher) readEvents() { kevents = kevents[1:] } } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) } // newEvent returns an platform-independent Event based on kqueue Fflags. @@ -407,7 +423,11 @@ func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { // Get all files files, err := ioutil.ReadDir(dirPath) if err != nil { - w.Errors <- err + select { + case w.Errors <- err: + case <-w.done: + return + } } // Search for new files @@ -428,7 +448,11 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf w.mu.Unlock() if !doesExist { // Send create event - w.Events <- newCreateEvent(filePath) + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } } // like watchDirectoryFiles (but without doing another ReadDir) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/windows.go index 09436f31..a5a7ccbe 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsnotify/windows.go @@ -13,20 +13,22 @@ import ( "path/filepath" "runtime" "sync" + "sync/atomic" "syscall" "unsafe" ) // Watcher watches a set of files, delivering events to a channel. type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + watchSubTree uint32 } // NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. @@ -95,6 +97,13 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } +// SetRecursive enables watches to also monitor subdirectories. Currently +// only supported under Windows. 
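An aside on the kqueue.go hunks above: every send on Events or Errors is now paired with a case on w.done, and the channels are closed only after the read loop exits, which removes the close-time deadlock noted in the changelog. The windows.go hunk resumes below; this is a minimal, self-contained sketch of that shutdown pattern using hypothetical names, not the fsnotify types.

package main

import (
	"errors"
	"fmt"
	"time"
)

// readLoop mirrors the pattern now used by kqueue.go's readEvents: every
// send competes with <-done, so closing done can never strand this
// goroutine on an events or errs channel that nobody is draining.
func readLoop(events chan<- string, errs chan<- error, done <-chan struct{}) {
	defer close(errs)
	defer close(events)
	for i := 0; ; i++ {
		select {
		case events <- fmt.Sprintf("event %d", i):
		case <-done:
			return
		}
		if i%3 == 2 {
			select {
			case errs <- errors.New("transient failure"):
			case <-done:
				return
			}
		}
	}
}

func main() {
	events := make(chan string)
	errs := make(chan error)
	done := make(chan struct{})
	go readLoop(events, errs, done)

	fmt.Println(<-events) // consume a single event, then stop reading
	close(done)           // the goroutine still exits cleanly
	time.Sleep(10 * time.Millisecond)
}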
+func (w *Watcher) SetRecursive() error { + atomic.StoreUint32(&w.watchSubTree, 1) + return nil +} + const ( // Options for AddWatch sysFSONESHOT = 0x80000000 @@ -348,7 +357,8 @@ func (w *Watcher) startRead(watch *watch) error { return nil } e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + uint32(unsafe.Sizeof(watch.buf)), atomic.LoadUint32(&w.watchSubTree) != 0, + mask, nil, &watch.ov, 0) if e != nil { err := os.NewSyscallError("ReadDirectoryChanges", e) if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/LICENSE new file mode 100644 index 00000000..36275367 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/LICENSE @@ -0,0 +1,7 @@ +Copyright © 2013-2018 Yasuhiro Matsumoto, http://mattn.kaoriya.net + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
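SetRecursive, added to every backend above, is specific to this vendored fork of fsnotify (upstream exposes no such method): inotify and kqueue return "recursion not supported", FEN treats it as a no-op, and the Windows backend flips the atomic watchSubTree flag that startRead passes to ReadDirectoryChanges. A hedged usage sketch against the vendored package; the watched path is an illustrative assumption.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Ask for recursive watching before adding paths. Only the Windows
	// backend honours this (via the watch-subtree flag shown above);
	// inotify and kqueue report that recursion is not supported.
	if err := w.SetRecursive(); err != nil {
		log.Printf("recursive watch unavailable: %v", err)
	}

	// Illustrative path; on Windows the whole subtree below it is watched.
	if err := w.Add(`C:\data`); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}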
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/pmezard/go-difflib/LICENSE similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE rename to vendor/github.com/elastic/beats/vendor/github.com/pmezard/go-difflib/LICENSE diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/elastic/beats/vendor/github.com/pmezard/go-difflib/difflib/difflib.go similarity index 95% rename from vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go rename to vendor/github.com/elastic/beats/vendor/github.com/pmezard/go-difflib/difflib/difflib.go index 64cc40fe..003e99fa 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -559,10 +559,14 @@ type UnifiedDiff struct { func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() - w := func(format string, args ...interface{}) error { + wf := func(format string, args ...interface{}) error { _, err := buf.WriteString(fmt.Sprintf(format, args...)) return err } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } if len(diff.Eol) == 0 { diff.Eol = "\n" @@ -581,26 +585,28 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } } } first, last := g[0], g[len(g)-1] range1 := formatRangeUnified(first.I1, last.I2) range2 := formatRangeUnified(first.J1, last.J2) - if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { return err } for _, c := range g { i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 if c.Tag == 'e' { for _, line := range diff.A[i1:i2] { - if err := w(" " + line); err != nil { + if err := ws(" " + line); err != nil { return err } } @@ -608,14 +614,14 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { } if c.Tag == 'r' || c.Tag == 'd' { for _, line := range diff.A[i1:i2] { - if err := w("-" + line); err != nil { + if err := ws("-" + line); err != nil { return err } } } if c.Tag == 'r' || c.Tag == 'i' { for _, line := range diff.B[j1:j2] { - if err := w("+" + line); err != nil { + if err := ws("+" + line); err != nil { return err } } @@ -669,12 +675,18 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() var diffErr error - w := func(format string, args ...interface{}) { + wf := func(format string, args ...interface{}) { _, err := buf.WriteString(fmt.Sprintf(format, args...)) if diffErr == nil && err != nil { diffErr = err } } + ws := func(s string) { + _, err := buf.WriteString(s) + if 
diffErr == nil && err != nil { + diffErr = err + } + } if len(diff.Eol) == 0 { diff.Eol = "\n" @@ -700,15 +712,17 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - w("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } } first, last := g[0], g[len(g)-1] - w("***************" + diff.Eol) + ws("***************" + diff.Eol) range1 := formatRangeContext(first.I1, last.I2) - w("*** %s ****%s", range1, diff.Eol) + wf("*** %s ****%s", range1, diff.Eol) for _, c := range g { if c.Tag == 'r' || c.Tag == 'd' { for _, cc := range g { @@ -716,7 +730,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { continue } for _, line := range diff.A[cc.I1:cc.I2] { - w(prefix[cc.Tag] + line) + ws(prefix[cc.Tag] + line) } } break @@ -724,7 +738,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { } range2 := formatRangeContext(first.J1, last.J2) - w("--- %s ----%s", range2, diff.Eol) + wf("--- %s ----%s", range2, diff.Eol) for _, c := range g { if c.Tag == 'r' || c.Tag == 'i' { for _, cc := range g { @@ -732,7 +746,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { continue } for _, line := range diff.B[cc.J1:cc.J2] { - w(prefix[cc.Tag] + line) + ws(prefix[cc.Tag] + line) } } break diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/CONTRIBUTING.md similarity index 81% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/CONTRIBUTING.md rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 5705f0fb..40503edb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/CONTRIBUTING.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -2,9 +2,9 @@ Prometheus uses GitHub to manage reviews of pull requests. -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/LICENSE similarity index 99% rename from vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/LICENSE rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/LICENSE index 8dada3ed..261eeb9e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/LICENSE +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/LICENSE @@ -178,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 00000000..35993c41 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1 @@ +* Tobias Schmidt diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 00000000..5c8f7262 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,71 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +pkgs = $(shell $(GO) list ./... | grep -v /vendor/) + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) + +ifdef DEBUG + bindata_flags = -debug +endif + +STATICCHECK_IGNORE = + +all: format staticcheck build test + +style: + @echo ">> checking code style" + @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' + +check_license: + @echo ">> checking license header" + @./scripts/check_license.sh + +test: fixtures/.unpacked sysfs/fixtures/.unpacked + @echo ">> running all tests" + @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples) + +format: + @echo ">> formatting code" + @$(GO) fmt $(pkgs) + +vet: + @echo ">> vetting code" + @$(GO) vet $(pkgs) + +staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" + @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + +%/.unpacked: %.ttar + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +$(FIRST_GOPATH)/bin/staticcheck: + @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck + +.PHONY: all style check_license format test vet staticcheck + +# Declaring the binaries at their default locations as PHONY targets is a hack +# to ensure the latest version is downloaded on every make execution. 
+# If this is not desired, copy/symlink these binaries to a different path and +# set the respective environment variables. +.PHONY: $(GOPATH)/bin/staticcheck diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/NOTICE similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/NOTICE rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/NOTICE diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/README.md b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/README.md similarity index 78% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/README.md rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/README.md index 6e7ee6b8..20954947 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/README.md @@ -8,3 +8,4 @@ backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 00000000..d3a82680 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
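Building on the BuddyInfo documentation above, where Sizes[n] counts free blocks of 2^n * PAGE_SIZE bytes per node and zone, here is a hedged sketch of a consumer of the exported API; the fs.NewBuddyInfo and parseBuddyInfo implementations continue directly below. The 4 KiB page size is an assumption made for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewBuddyInfo parses /proc/buddyinfo into one BuddyInfo per node/zone,
	// with Sizes[n] counting free blocks of 2^n pages.
	infos, err := procfs.NewBuddyInfo()
	if err != nil {
		log.Fatal(err)
	}

	const pageSize = 4096.0 // assumed 4 KiB pages; real code would query the kernel
	for _, bi := range infos {
		var freeBytes float64
		for order, count := range bi.Sizes {
			freeBytes += count * float64(uint64(1)<<uint(order)) * pageSize
		}
		fmt.Printf("node %s zone %s: %.0f MiB free\n", bi.Node, bi.Zone, freeBytes/(1<<20))
	}
}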
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/doc.go similarity index 100% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/doc.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/doc.go diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/fixtures.ttar new file mode 100644 index 00000000..3ee8291e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -0,0 +1,446 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ +Directory: fixtures +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/cmdline +Lines: 1 +vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/comm +Lines: 1 +vim +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/exe +SymlinkTo: /usr/bin/vim +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/10 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/io +Lines: 7 +rchar: 750339 +wchar: 818609 +syscr: 
7405 +syscw: 5245 +read_bytes: 1024 +write_bytes: 2048 +cancelled_write_bytes: -1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 65536 65536 bytes +Max address space 8589934592 unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/mountstats +Lines: 19 +device rootfs mounted on / with fstype rootfs +device sysfs mounted on /sys with fstype sysfs +device proc mounted on /proc with fstype proc +device /dev/sda1 mounted on / with fstype ext4 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + age: 13968 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/net/dev +Lines: 4 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/ns +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/mnt +SymlinkTo: mnt:[4026531840] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/net +SymlinkTo: net:[4026531993] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/stat +Lines: 1 +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/26232/cmdline +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/comm +Lines: 1 +ata_sff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/4 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/stat +Lines: 1 +33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26233 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26233/cmdline +Lines: 1 +com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/584 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat 
/proc/self/stat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/short +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/short/buddyinfo +Lines: 3 +Node 0, zone +Node 0, zone +Node 0, zone +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/sizemismatch +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/valid +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/valid/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/fs/xfs/stat +Lines: 23 +extent_alloc 92447 97589 92448 93751 +abt 0 0 0 0 +blk_map 1767055 188820 184891 92447 92448 2140766 0 +bmbt 0 0 0 0 +dir 185039 92447 92444 136422 +trans 706 944304 0 +ig 185045 58807 0 126238 0 33637 22 +log 2883 113448 9 17360 739 +push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 +xstrat 92447 0 +rw 107739 94045 +attr 4 0 0 0 +icluster 8677 7849 135802 +vnodes 92601 0 0 0 92444 92444 92444 0 +buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 +abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 +abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 +bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 +fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +qm 0 0 0 0 0 0 0 0 +xpc 399724544 92823103 86219234 +debug 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/mdstat +Lines: 26 +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] + +md127 : active raid1 sdi2[0] sdj2[1] + 312319552 blocks [2/2] [UU] + +md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] + 248896 blocks [2/2] [UU] + +md4 : inactive raid1 sda3[0] sdb3[1] + 4883648 blocks [2/2] [UU] + +md6 : active raid1 sdb2[2] sda2[0] + 195310144 blocks [2/1] [U_] + [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md8 : active raid1 sdb1[1] sda1[0] + 195310144 blocks [2/2] [UU] + [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + +unused devices: +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/dev +Lines: 6 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 +docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs +Lines: 21 +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP C0A80016:0CEA wlc + -> C0A85216:0CEA Tunnel 100 248 2 + -> C0A85318:0CEA Tunnel 100 248 2 + -> C0A85315:0CEA Tunnel 100 248 1 +TCP C0A80039:0CEA wlc + -> C0A85416:0CEA Tunnel 0 0 0 + -> C0A85215:0CEA Tunnel 100 1499 0 + -> C0A83215:0CEA Tunnel 100 1498 0 +TCP C0A80037:0CEA wlc + -> C0A8321A:0CEA Tunnel 0 0 0 + -> C0A83120:0CEA Tunnel 100 0 0 +TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh + -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 +FWM 10001000 wlc + -> C0A8321A:0CEA Route 0 0 1 + -> C0A83215:0CEA Route 0 0 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs_stats +Lines: 6 + Total Incoming Outgoing Incoming Outgoing + Conns Packets Packets Bytes Bytes + 16AA370 E33656E5 0 51D8C8883AB3 0 + + Conns/s Pkts/s Pkts/s Bytes/s Bytes/s + 4 1FB3C 0 1282A8F 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net/rpc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfs +Lines: 5 +net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfsd +Lines: 11 +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/xfrm_stat +Lines: 28 +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 
+XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 4 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 4 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 +XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/self +SymlinkTo: 26231 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/stat +Lines: 16 +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 1 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/symlinktargets +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/README +Lines: 2 +This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
+They are otherwise ignored by the tests +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/def +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/ghi +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 00000000..b6c6b2ce --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,82 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} + +// XFSStats retrieves XFS filesystem runtime statistics. +func (fs FS) XFSStats() (*xfs.Stats, error) { + f, err := os.Open(fs.Path("fs/xfs/stat")) + if err != nil { + return nil, err + } + defer f.Close() + + return xfs.ParseStats(f) +} + +// NFSClientRPCStats retrieves NFS client RPC statistics. +func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfs")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseClientRPCStats(f) +} + +// NFSdServerRPCStats retrieves NFS daemon RPC statistics. 
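// --- A minimal usage sketch (editorial illustration, not part of the vendored
// patch) for the FS accessors defined in fs.go above. Assumes the canonical
// import path github.com/prometheus/procfs; error handling kept deliberately
// terse.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // "/proc"
	if err != nil {
		log.Fatal(err)
	}

	// NFS client counters parsed from /proc/net/rpc/nfs.
	clientStats, err := fs.NFSClientRPCStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("NFS client RPC count:", clientStats.ClientRPC.RPCCount)

	// XFS runtime statistics parsed from /proc/fs/xfs/stat.
	xfsStats, err := fs.XFSStats()
	if err != nil {
		log.Fatal(err)
	}
	_ = xfsStats // field layout lives in the xfs subpackage, not shown in this patch hunk
}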
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfsd")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseServerRPCStats(f) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 00000000..1ad21c91 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import "strconv" + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/ipvs.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/ipvs.go similarity index 78% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/ipvs.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/ipvs.go index e7012f73..e36d4a3b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/ipvs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/ipvs.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( @@ -31,14 +44,16 @@ type IPVSStats struct { type IPVSBackendStatus struct { // The local (virtual) IP address. LocalAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The transport protocol (TCP, UDP). - Proto string // The remote (real) IP address. RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 // The remote (real) port. RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). 
+ Proto string // The current number of active connections for this virtual/real address pair. ActiveConn uint64 // The current number of inactive connections for this virtual/real address pair. @@ -142,13 +157,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { status []IPVSBackendStatus scanner = bufio.NewScanner(file) proto string + localMark string localAddress net.IP localPort uint16 err error ) for scanner.Scan() { - fields := strings.Fields(string(scanner.Text())) + fields := strings.Fields(scanner.Text()) if len(fields) == 0 { continue } @@ -160,10 +176,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { continue } proto = fields[0] + localMark = "" localAddress, localPort, err = parseIPPort(fields[1]) if err != nil { return nil, err } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 case fields[0] == "->": if len(fields) < 6 { continue @@ -187,6 +212,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { status = append(status, IPVSBackendStatus{ LocalAddress: localAddress, LocalPort: localPort, + LocalMark: localMark, RemoteAddress: remoteAddress, RemotePort: remotePort, Proto: proto, @@ -200,22 +226,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { } func parseIPPort(s string) (net.IP, uint16, error) { - tmp := strings.SplitN(s, ":", 2) - - if len(tmp) != 2 { - return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) - } + var ( + ip net.IP + err error + ) - if len(tmp[0]) != 8 && len(tmp[0]) != 32 { - return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) } - ip, err := hex.DecodeString(tmp[0]) - if err != nil { - return nil, 0, err + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) } - - port, err := strconv.ParseUint(tmp[1], 16, 16) + port, err := strconv.ParseUint(portString, 16, 16) if err != nil { return nil, 0, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/mdstat.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/mdstat.go similarity index 85% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/mdstat.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/mdstat.go index d7a248c0..9dc19583 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/mdstat.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/mdstat.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
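// --- A hypothetical in-package sketch (editorial illustration, not part of the
// vendored patch) for the reworked parseIPPort in the ipvs.go hunk above: the
// 13-character form is 8 hex digits of IPv4 address, a colon, and 4 hex digits
// of port, e.g. "C0A80016:0CEA" -> 192.168.0.22:3306. The helper name is
// invented; parseIPPort is unexported, so this would live inside package procfs.
package procfs

import "fmt"

func demoParseIPPort() error {
	ip, port, err := parseIPPort("C0A80016:0CEA")
	if err != nil {
		return err
	}
	fmt.Printf("%s:%d\n", ip, port) // 192.168.0.22:3306
	return nil
}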
+ package procfs import ( diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 00000000..e95ddbc6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,569 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10Len = 10 + fieldTransport11Len = 13 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. 
+ WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. + SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. + PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { + // The name of the operation. + Operation string + // Number of requests performed for this operation. + Requests uint64 + // Number of times an actual RPC request has been transmitted for this operation. + Transmissions uint64 + // Number of times a request has had a major timeout. + MajorTimeouts uint64 + // Number of bytes sent for this operation, including RPC headers and payload. + BytesSent uint64 + // Number of bytes received for this operation, including RPC headers and payload. + BytesReceived uint64 + // Duration all requests spent queued for transmission before they were sent. + CumulativeQueueTime time.Duration + // Duration it took to get a reply back after the request was transmitted. + CumulativeTotalResponseTime time.Duration + // Duration from when a request was enqueued to when it was completely handled. + CumulativeTotalRequestTime time.Duration +} + +// A NFSTransportStats contains statistics for the NFS mount RPC requests and +// responses. +type NFSTransportStats struct { + // The local port used for the NFS mount. 
+ Port uint64 + // Number of times the client has had to establish a connection from scratch + // to the NFS server. + Bind uint64 + // Number of times the client has made a TCP connection to the NFS server. + Connect uint64 + // Duration (in jiffies, a kernel internal unit of time) the NFS mount has + // spent waiting for connections to the server to be established. + ConnectIdleTime uint64 + // Duration since the NFS mount last saw any RPC traffic. + IdleTime time.Duration + // Number of RPC requests for this mount sent to the NFS server. + Sends uint64 + // Number of RPC responses for this mount received from the NFS server. + Receives uint64 + // Number of times the NFS server sent a response with a transaction ID + // unknown to this client. + BadTransactionIDs uint64 + // A running counter, incremented on each request as the current difference + // ebetween sends and receives. + CumulativeActiveRequests uint64 + // A running counter, incremented on each request by the current backlog + // queue size. + CumulativeBacklog uint64 + + // Stats below only available with stat version 1.1. + + // Maximum number of simultaneously active RPC requests ever used. + MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. + CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. +func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
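// --- A hypothetical in-package sketch (editorial illustration, not part of the
// vendored patch): feeding parseMountStats a hand-written reader. Since
// parseMountStats is unexported, this would have to live inside package procfs
// (e.g. in a test file); the helper name is invented for demonstration.
package procfs

import (
	"fmt"
	"strings"
)

func demoParseMountStats() error {
	// Plain device lines in the layout parseMount validates:
	// "device <device> mounted on <mount> with fstype <type>".
	input := "device rootfs mounted on / with fstype rootfs\n" +
		"device /dev/sda1 mounted on /home with fstype ext4\n"

	mounts, err := parseMountStats(strings.NewReader(input))
	if err != nil {
		return err
	}
	for _, m := range mounts {
		fmt.Printf("%s -> %s (%s)\n", m.Device, m.Mount, m.Type)
	}
	// NFS entries (fstype nfs/nfs4 with a "statvers=..." field) additionally
	// get m.Stats populated with a *MountStatsNFS by parseMountStatsNFS below.
	return nil
}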
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[2:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. 
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + switch statVersion { + case statVersion10: + if len(ss) != fieldTransport10Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + if len(ss) != fieldTransport11Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. 
+ ns := make([]uint64, fieldTransport11Len) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + return &NFSTransportStats{ + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/net_dev.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/net_dev.go similarity index 90% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/net_dev.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/net_dev.go index ce3b2638..6c17affe 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/net_dev.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/net_dev.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( @@ -78,7 +91,7 @@ func newNetDev(file string) (NetDev, error) { nd[line.Name] = *line } - return nd, nil + return nd, s.Err() } // parseLine parses a single line from the /proc/net/dev file. Header lines diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/nfs.go new file mode 100644 index 00000000..e2185b78 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/nfs.go @@ -0,0 +1,263 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nfsd implements parsing of /proc/net/rpc/nfsd. +// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +package nfs + +// ReplyCache models the "rc" line. +type ReplyCache struct { + Hits uint64 + Misses uint64 + NoCache uint64 +} + +// FileHandles models the "fh" line. +type FileHandles struct { + Stale uint64 + TotalLookups uint64 + AnonLookups uint64 + DirNoCache uint64 + NoDirNoCache uint64 +} + +// InputOutput models the "io" line. +type InputOutput struct { + Read uint64 + Write uint64 +} + +// Threads models the "th" line. 
+type Threads struct { + Threads uint64 + FullCnt uint64 +} + +// ReadAheadCache models the "ra" line. +type ReadAheadCache struct { + CacheSize uint64 + CacheHistogram []uint64 + NotFound uint64 +} + +// Network models the "net" line. +type Network struct { + NetCount uint64 + UDPCount uint64 + TCPCount uint64 + TCPConnect uint64 +} + +// ClientRPC models the nfs "rpc" line. +type ClientRPC struct { + RPCCount uint64 + Retransmissions uint64 + AuthRefreshes uint64 +} + +// ServerRPC models the nfsd "rpc" line. +type ServerRPC struct { + RPCCount uint64 + BadCnt uint64 + BadFmt uint64 + BadAuth uint64 + BadcInt uint64 +} + +// V2Stats models the "proc2" line. +type V2Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Root uint64 + Lookup uint64 + ReadLink uint64 + Read uint64 + WrCache uint64 + Write uint64 + Create uint64 + Remove uint64 + Rename uint64 + Link uint64 + SymLink uint64 + MkDir uint64 + RmDir uint64 + ReadDir uint64 + FsStat uint64 +} + +// V3Stats models the "proc3" line. +type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. +type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientId uint64 + SetClientIdConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetAcl uint64 + SetAcl uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeId uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateId uint64 + FreeStateId uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientId uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? 
+ Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// RPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. +type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 00000000..8f568f01 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
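// --- A hypothetical in-package sketch (editorial illustration, not part of the
// vendored patch): the procN lines in /proc/net/rpc/nfs are count-prefixed,
// which the parse helpers below verify (for example, parseV2Stats checks that
// the first value equals the number of counters that follow). The helper name
// demoParseV2Stats is invented; parseV2Stats itself is unexported, so this
// would have to sit inside package nfs.
package nfs

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

func demoParseV2Stats() error {
	// Sample "proc2" line: the leading 18 announces eighteen per-procedure
	// counters, mirroring the fixture in fixtures.ttar above.
	line := "proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2"
	values, err := util.ParseUint64s(strings.Fields(line)[1:])
	if err != nil {
		return err
	}

	v2, err := parseV2Stats(values)
	if err != nil {
		return err
	}
	fmt.Println("NFSv2 GETATTR calls:", v2.GetAttr) // 69 in this sample
	return nil
}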
+ +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. 
+ if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientId: v[13], + SetClientIdConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetAcl: v[33], + SetAcl: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeId: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateId: v[50], + FreeStateId: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientId: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 00000000..c0d3a5ad --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs +func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { + stats := &ClientRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFS metric line %q", line) + } + + values, err := util.ParseUint64s(parts[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ClientRPC, err = parseClientRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ClientV4Stats, err = parseClientV4Stats(values) + default: + return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFS file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go new file mode 100644 index 00000000..57bb4a35 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc.go similarity index 82% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc.go index 0d0a6a90..7cf5b8ac 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc.go @@ -1,6 +1,20 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( + "bytes" "fmt" "io/ioutil" "os" @@ -113,7 +127,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil } // Comm returns the command name of a process. 
@@ -192,6 +206,18 @@ func (p Proc) FileDescriptorsLen() (int, error) { return len(fds), nil } +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + func (p Proc) fileDescriptors() ([]string, error) { d, err := os.Open(p.path("fd")) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_io.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_io.go similarity index 63% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_io.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_io.go index b4e31d7b..0251c83b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_io.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_io.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( @@ -47,9 +60,6 @@ func (p Proc) NewIO() (ProcIO, error) { _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - if err != nil { - return pio, err - } - return pio, nil + return pio, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_limits.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_limits.go similarity index 78% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_limits.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_limits.go index 2df997ce..f04ba6fd 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_limits.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_limits.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( @@ -13,46 +26,46 @@ import ( // http://man7.org/linux/man-pages/man2/getrlimit.2.html. type ProcLimits struct { // CPU time limit in seconds. - CPUTime int + CPUTime int64 // Maximum size of files that the process may create. 
- FileSize int + FileSize int64 // Maximum size of the process's data segment (initialized data, // uninitialized data, and heap). - DataSize int + DataSize int64 // Maximum size of the process stack in bytes. - StackSize int + StackSize int64 // Maximum size of a core file. - CoreFileSize int + CoreFileSize int64 // Limit of the process's resident set in pages. - ResidentSet int + ResidentSet int64 // Maximum number of processes that can be created for the real user ID of // the calling process. - Processes int + Processes int64 // Value one greater than the maximum file descriptor number that can be // opened by this process. - OpenFiles int + OpenFiles int64 // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int + LockedMemory int64 // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int + AddressSpace int64 // Limit on the combined number of flock(2) locks and fcntl(2) leases that // this process may establish. - FileLocks int + FileLocks int64 // Limit of signals that may be queued for the real user ID of the calling // process. - PendingSignals int + PendingSignals int64 // Limit on the number of bytes that can be allocated for POSIX message // queues for the real user ID of the calling process. - MsqqueueSize int + MsqqueueSize int64 // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int + NicePriority int64 // Limit of the real-time priority set using sched_setscheduler(2) or // sched_setparam(2). - RealtimePriority int + RealtimePriority int64 // Limit (in microseconds) on the amount of CPU time that a process // scheduled under a real-time scheduling policy may consume without making // a blocking system call. - RealtimeTimeout int + RealtimeTimeout int64 } const ( @@ -125,13 +138,13 @@ func (p Proc) NewLimits() (ProcLimits, error) { return l, s.Err() } -func parseInt(s string) (int, error) { +func parseInt(s string) (int64, error) { if s == limitsUnlimited { return -1, nil } - i, err := strconv.ParseInt(s, 10, 32) + i, err := strconv.ParseInt(s, 10, 64) if err != nil { return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) } - return int(i), nil + return i, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_ns.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_ns.go similarity index 69% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_ns.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_ns.go index befdd269..d06c26eb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_ns.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_ns.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package procfs import ( diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_stat.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_stat.go similarity index 88% rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_stat.go rename to vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_stat.go index 724e271b..3cf2a9f1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/procfs/proc_stat.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/proc_stat.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 00000000..61eb6b0e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,232 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. 
+ ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. +func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns an information about current kernel/system statistics. 
+func (fs FS) NewStat() (Stat, error) { + // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + + f, err := os.Open(fs.Path("stat")) + if err != nil { + return Stat{}, err + } + defer f.Close() + + stat := Stat{} + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return stat, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/ttar new file mode 100755 index 00000000..b0171a12 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/ttar @@ -0,0 +1,389 @@ +#!/usr/bin/env bash + +# Purpose: plain text tar format +# Limitations: - only suitable for text files, directories, and symlinks +# - stores only filename, content, and mode +# - not designed for untrusted input +# +# Note: must work with bash version 3.2 (macOS) + +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit -o nounset + +# Sanitize environment (for instance, standard sorting of glob matches) +export LC_ALL=C + +path="" +CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." + exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ + +function usage { + bname=$(basename "$0") + cat << USAGE +Usage: $bname [-C ] -c -f (create archive) + $bname -t -f (list archive contents) + $bname [-C ] -x -f (extract archive) + +Options: + -C (change directory) + -v (verbose) + +Example: Change to sysfs directory, create ttar file from fixtures directory + $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ +USAGE +exit "$1" +} + +function vecho { + if [ "${VERBOSE:-}" == "yes" ]; then + echo >&7 "$@" + fi +} + +function set_cmd { + if [ -n "$CMD" ]; then + echo "ERROR: more than one command given" + echo + usage 2 + fi + CMD=$1 +} + +unset VERBOSE + +while getopts :cf:htxvC: opt; do + case $opt in + c) + set_cmd "create" + ;; + f) + ARCHIVE=$OPTARG + ;; + h) + usage 0 + ;; + t) + set_cmd "list" + ;; + x) + set_cmd "extract" + ;; + v) + VERBOSE=yes + exec 7>&1 + ;; + C) + CDIR=$OPTARG + ;; + *) + echo >&2 "ERROR: invalid option -$OPTARG" + echo + usage 1 + ;; + esac +done + +# Remove processed options from arguments +shift $(( OPTIND - 1 )); + +if [ "${CMD:-}" == "" ]; then + echo >&2 "ERROR: no command given" + echo + usage 1 +elif [ "${ARCHIVE:-}" == "" ]; then + echo >&2 "ERROR: no archive name given" + echo + usage 1 +fi + +function list { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! 
-e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceeded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceeded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceeded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). + echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -e "$path" ] || [ -L "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. + touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo "SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 
's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." + echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 00000000..ffe9df50 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. 
Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + XfrmFwdHdrError int + XfrmOutStateInvalid int + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. +func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldnt parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 00000000..2bc0ef34 --- /dev/null +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. + fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. + us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. 
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. +func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. 
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. + l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 00000000..d86794b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. 
+// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. +type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. 
+type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/samuel/go-parser/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/samuel/go-parser/LICENSE new file mode 100644 index 00000000..bc00498c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/samuel/go-parser/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Samuel Stauffer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/samuel/go-parser/README.md b/vendor/github.com/elastic/beats/vendor/github.com/samuel/go-parser/README.md new file mode 100644 index 00000000..859cfb19 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/samuel/go-parser/README.md @@ -0,0 +1,11 @@ +Go Parser Library +================= + +[![GoDoc](https://godoc.org/github.com/samuel/go-parser/parser?status.svg)](https://godoc.org/github.com/samuel/go-parser/parser) + +WARNING: This package isn't maintained or complete in any way. I recommend using https://github.com/PuerkitoBio/pigeon instead. + +License +------- + +3-clause BSD. See LICENSE file. 
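Reviewer note (not part of the vendored diff): the procfs additions above expose small io.Reader-based parsing APIs. The sketch below is illustrative only. It uses the two exported entry points introduced in this change, nfs.ParseClientRPCStats and xfs.ParseStats, and the proc files their doc comments name (/proc/net/rpc/nfs and /proc/fs/xfs/stat); the main wrapper and the particular fields printed are my own choices, not anything Beats itself does.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
	"github.com/prometheus/procfs/xfs"
)

func main() {
	// NFS client RPC statistics, read from /proc/net/rpc/nfs.
	nf, err := os.Open("/proc/net/rpc/nfs")
	if err != nil {
		log.Fatal(err)
	}
	defer nf.Close()

	clientStats, err := nfs.ParseClientRPCStats(nf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("NFS RPC calls: %d, retransmissions: %d\n",
		clientStats.ClientRPC.RPCCount, clientStats.ClientRPC.Retransmissions)

	// Aggregated XFS runtime statistics, read from /proc/fs/xfs/stat.
	xf, err := os.Open("/proc/fs/xfs/stat")
	if err != nil {
		log.Fatal(err)
	}
	defer xf.Close()

	xfsStats, err := xfs.ParseStats(xf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("XFS reads: %d, writes: %d\n",
		xfsStats.ReadWrite.Read, xfsStats.ReadWrite.Write)
}

Error handling is deliberately minimal here; as the switch statements in the parsers above show, both entry points return a descriptive error for any unknown or malformed metric line.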
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go index dc642df3..82ffacbd 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go @@ -3,14 +3,19 @@ package disk import ( + "context" "path" - "syscall" "unsafe" "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" ) func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { var ret []PartitionStat count, err := Getfsstat(nil, MntWait) @@ -88,13 +93,17 @@ func Partitions(all bool) ([]PartitionStat, error) { } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs_t, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := syscall.Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := unix.Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 @@ -102,6 +111,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func getFsType(stat syscall.Statfs_t) string { +func getFsType(stat unix.Statfs_t) string { return common.IntToString(stat.Fstypename[:]) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go index 21077978..480e2377 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go @@ -4,7 +4,6 @@ package disk /* -#cgo CFLAGS: -mmacosx-version-min=10.10 -DMACOSX_DEPLOYMENT_TARGET=10.10 #cgo LDFLAGS: -lobjc -framework Foundation -framework IOKit #include @@ -27,6 +26,7 @@ typedef struct import "C" import ( + "context" "errors" "strings" "unsafe" @@ -35,6 +35,10 @@ import ( ) func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { if C.StartIOCounterFetch() == 0 { return nil, errors.New("Unable to fetch disk list") } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go index 60fd7a6c..fe76d83e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go @@ -3,8 +3,16 @@ package disk -import "github.com/shirou/gopsutil/internal/common" +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) 
+} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go index db521842..22eb5079 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go @@ -1,17 +1,33 @@ -// +build !darwin,!linux,!freebsd,!openbsd,!windows +// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris package disk -import "github.com/shirou/gopsutil/internal/common" +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { return nil, common.ErrNotImplementedError } func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { return []PartitionStat{}, common.ErrNotImplementedError } func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go index 8569d14c..bfb6580c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go @@ -4,20 +4,26 @@ package disk import ( "bytes" + "context" "encoding/binary" "path" "strconv" - "syscall" "unsafe" + "golang.org/x/sys/unix" + "github.com/shirou/gopsutil/internal/common" ) func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { var ret []PartitionStat // get length - count, err := syscall.Getfsstat(nil, MNT_WAIT) + count, err := unix.Getfsstat(nil, MNT_WAIT) if err != nil { return ret, err } @@ -95,11 +101,15 @@ func Partitions(all bool) ([]PartitionStat, error) { } func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) 
+} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { // statinfo->devinfo->devstat // /usr/include/devinfo.h ret := make(map[string]IOCountersStat) - r, err := syscall.Sysctl("kern.devstat.all") + r, err := unix.Sysctl("kern.devstat.all") if err != nil { return nil, err } @@ -149,13 +159,17 @@ func (b Bintime) Compute() float64 { // Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go // change Statfs_t to Statfs in order to get more information func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) } - r0, _, e1 := syscall.Syscall(syscall.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := unix.Syscall(unix.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 @@ -175,6 +189,6 @@ func parseDevstat(buf []byte) (Devstat, error) { return ds, nil } -func getFsType(stat syscall.Statfs_t) string { +func getFsType(stat unix.Statfs_t) string { return common.IntToString(stat.Fstypename[:]) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_linux.go index 5595b764..f5eb5261 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_linux.go @@ -3,11 +3,14 @@ package disk import ( + "context" "fmt" "os/exec" + "path/filepath" "strconv" "strings" - "syscall" + + "golang.org/x/sys/unix" "github.com/shirou/gopsutil/internal/common" ) @@ -214,10 +217,12 @@ var fsTypeMap = map[int64]string{ // Partitions returns disk partitions. If all is false, returns // physical devices only (e.g. hard disks, cd-rom drives, USB keys) // and ignore all others (e.g. memory partitions such as /dev/shm) -// -// should use setmntent(3) but this implement use /etc/mtab file func Partitions(all bool) ([]PartitionStat, error) { - filename := common.HostEtc("mtab") + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + filename := common.HostProc("self/mounts") lines, err := common.ReadLines(filename) if err != nil { return nil, err @@ -273,6 +278,10 @@ func getFileSystems() ([]string, error) { } func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { filename := common.HostProc("diskstats") lines, err := common.ReadLines(filename) if err != nil { @@ -281,6 +290,11 @@ func IOCounters(names ...string) (map[string]IOCountersStat, error) { ret := make(map[string]IOCountersStat, 0) empty := IOCountersStat{} + // use only basename such as "/dev/sda1" to "sda1" + for i, name := range names { + names[i] = filepath.Base(name) + } + for _, line := range lines { fields := strings.Fields(line) if len(fields) < 14 { @@ -364,6 +378,10 @@ func IOCounters(names ...string) (map[string]IOCountersStat, error) { // GetDiskSerialNumber returns Serial Number of given device or empty string // on error. 
Name of device is expected, eg. /dev/sda func GetDiskSerialNumber(name string) string { + return GetDiskSerialNumberWithContext(context.Background(), name) +} + +func GetDiskSerialNumberWithContext(ctx context.Context, name string) string { n := fmt.Sprintf("--name=%s", name) udevadm, err := exec.LookPath("/sbin/udevadm") if err != nil { @@ -388,7 +406,7 @@ func GetDiskSerialNumber(name string) string { return "" } -func getFsType(stat syscall.Statfs_t) string { +func getFsType(stat unix.Statfs_t) string { t := int64(stat.Type) ret, ok := fsTypeMap[t] if !ok { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go index 28866df7..0ac752a5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go @@ -4,19 +4,24 @@ package disk import ( "bytes" + "context" "encoding/binary" "path" - "syscall" "unsafe" "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" ) func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { var ret []PartitionStat // get length - count, err := syscall.Getfsstat(nil, MNT_WAIT) + count, err := unix.Getfsstat(nil, MNT_WAIT) if err != nil { return ret, err } @@ -64,9 +69,13 @@ func Partitions(all bool) ([]PartitionStat, error) { } func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { ret := make(map[string]IOCountersStat) - r, err := syscall.Sysctl("hw.diskstats") + r, err := unix.SysctlRaw("hw.diskstats") if err != nil { return nil, err } @@ -106,13 +115,17 @@ func IOCounters(names ...string) (map[string]IOCountersStat, error) { // Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go // change Statfs_t to Statfs in order to get more information func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) } - r0, _, e1 := syscall.Syscall(syscall.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := unix.Syscall(unix.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 @@ -133,8 +146,12 @@ func parseDiskstats(buf []byte) (Diskstats, error) { } func Usage(path string) (*UsageStat, error) { - stat := syscall.Statfs_t{} - err := syscall.Statfs(path, &stat) + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + stat := unix.Statfs_t{} + err := unix.Statfs(path, &stat) if err != nil { return nil, err } @@ -157,6 +174,6 @@ func Usage(path string) (*UsageStat, error) { return ret, nil } -func getFsType(stat syscall.Statfs_t) string { +func getFsType(stat unix.Statfs_t) string { return common.IntToString(stat.F_fstypename[:]) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go 
b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go new file mode 100644 index 00000000..c6608357 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go @@ -0,0 +1,127 @@ +// +build solaris + +package disk + +import ( + "bufio" + "context" + "fmt" + "math" + "os" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +const ( + // _DEFAULT_NUM_MOUNTS is set to `cat /etc/mnttab | wc -l` rounded up to the + // nearest power of two. + _DEFAULT_NUM_MOUNTS = 32 + + // _MNTTAB default place to read mount information + _MNTTAB = "/etc/mnttab" +) + +var ( + // A blacklist of read-only virtual filesystems. Writable filesystems are of + // operational concern and must not be included in this list. + fsTypeBlacklist = map[string]struct{}{ + "ctfs": struct{}{}, + "dev": struct{}{}, + "fd": struct{}{}, + "lofs": struct{}{}, + "lxproc": struct{}{}, + "mntfs": struct{}{}, + "objfs": struct{}{}, + "proc": struct{}{}, + } +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + ret := make([]PartitionStat, 0, _DEFAULT_NUM_MOUNTS) + + // Scan mnttab(4) + f, err := os.Open(_MNTTAB) + if err != nil { + } + defer func() { + if err == nil { + err = f.Close() + } else { + f.Close() + } + }() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := strings.Split(scanner.Text(), "\t") + + if _, found := fsTypeBlacklist[fields[2]]; found { + continue + } + + ret = append(ret, PartitionStat{ + // NOTE(seanc@): Device isn't exactly accurate: from mnttab(4): "The name + // of the resource that has been mounted." Ideally this value would come + // from Statvfs_t.Fsid but I'm leaving it to the caller to traverse + // unix.Statvfs(). + Device: fields[0], + Mountpoint: fields[1], + Fstype: fields[2], + Opts: fields[3], + }) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("unable to scan %q: %v", _MNTTAB, err) + } + + return ret, err +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + statvfs := unix.Statvfs_t{} + if err := unix.Statvfs(path, &statvfs); err != nil { + return nil, fmt.Errorf("unable to call statvfs(2) on %q: %v", path, err) + } + + usageStat := &UsageStat{ + Path: path, + Fstype: common.IntToString(statvfs.Basetype[:]), + Total: statvfs.Blocks * statvfs.Frsize, + Free: statvfs.Bfree * statvfs.Frsize, + Used: (statvfs.Blocks - statvfs.Bfree) * statvfs.Frsize, + + // NOTE: ZFS (and FreeBZSD's UFS2) use dynamic inode/dnode allocation. + // Explicitly return a near-zero value for InodesUsedPercent so that nothing + // attempts to garbage collect based on a lack of available inodes/dnodes. + // Similarly, don't use the zero value to prevent divide-by-zero situations + // and inject a faux near-zero value. Filesystems evolve. Has your + // filesystem evolved? Probably not if you care about the number of + // available inodes. 
+ InodesTotal: 1024.0 * 1024.0, + InodesUsed: 1024.0, + InodesFree: math.MaxUint64, + InodesUsedPercent: (1024.0 / (1024.0 * 1024.0)) * 100.0, + } + + usageStat.UsedPercent = (float64(usageStat.Used) / float64(usageStat.Total)) * 100.0 + + return usageStat, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_unix.go index f0616c30..bafef513 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_unix.go @@ -2,11 +2,22 @@ package disk -import "syscall" +import ( + "context" + "golang.org/x/sys/unix" +) + +// Usage returns a file system usage. path is a filessytem path such +// as "/", not device file path like "/dev/vda1". If you want to use +// a return value of disk.Partitions, use "Mountpoint" not "Device". func Usage(path string) (*UsageStat, error) { - stat := syscall.Statfs_t{} - err := syscall.Statfs(path, &stat) + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + stat := unix.Statfs_t{} + err := unix.Statfs(path, &stat) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_windows.go index ca16a4aa..7389b5a7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/disk_windows.go @@ -4,12 +4,11 @@ package disk import ( "bytes" - "syscall" + "context" "unsafe" - "github.com/StackExchange/wmi" - "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" ) var ( @@ -37,13 +36,17 @@ type Win32_PerfFormattedData struct { const WaitMSec = 500 func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { ret := &UsageStat{} lpFreeBytesAvailable := int64(0) lpTotalNumberOfBytes := int64(0) lpTotalNumberOfFreeBytes := int64(0) diskret, _, err := procGetDiskFreeSpaceExW.Call( - uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), + uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(path))), uintptr(unsafe.Pointer(&lpFreeBytesAvailable)), uintptr(unsafe.Pointer(&lpTotalNumberOfBytes)), uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes))) @@ -65,6 +68,10 @@ func Usage(path string) (*UsageStat, error) { } func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { var ret []PartitionStat lpBuffer := make([]byte, 254) diskret, _, err := procGetLogicalDriveStringsW.Call( @@ -79,20 +86,20 @@ func Partitions(all bool) ([]PartitionStat, error) { if path == "A:" || path == "B:" { // skip floppy drives continue } - typepath, _ := syscall.UTF16PtrFromString(path) + typepath, _ := windows.UTF16PtrFromString(path) typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath))) if typeret == 0 { - return ret, syscall.GetLastError() + return ret, windows.GetLastError() } - // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 5: DRIVE_CDROM + // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 4: DRIVE_REMOTE 5: DRIVE_CDROM - if typeret == 2 || 
typeret == 3 || typeret == 5 { + if typeret == 2 || typeret == 3 || typeret == 4 || typeret == 5 { lpVolumeNameBuffer := make([]byte, 256) lpVolumeSerialNumber := int64(0) lpMaximumComponentLength := int64(0) lpFileSystemFlags := int64(0) lpFileSystemNameBuffer := make([]byte, 256) - volpath, _ := syscall.UTF16PtrFromString(string(v) + ":/") + volpath, _ := windows.UTF16PtrFromString(string(v) + ":/") driveret, _, err := provGetVolumeInformation.Call( uintptr(unsafe.Pointer(volpath)), uintptr(unsafe.Pointer(&lpVolumeNameBuffer[0])), @@ -103,7 +110,7 @@ func Partitions(all bool) ([]PartitionStat, error) { uintptr(unsafe.Pointer(&lpFileSystemNameBuffer[0])), uintptr(len(lpFileSystemNameBuffer))) if driveret == 0 { - if typeret == 5 { + if typeret == 5 || typeret == 2 { continue //device is not ready will happen if there is no disk in the drive } return ret, err @@ -130,10 +137,16 @@ func Partitions(all bool) ([]PartitionStat, error) { } func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { ret := make(map[string]IOCountersStat, 0) var dst []Win32_PerfFormattedData - err := wmi.Query("SELECT * FROM Win32_PerfFormattedData_PerfDisk_LogicalDisk ", &dst) + ctx, cancel := context.WithTimeout(context.Background(), common.Timeout) + defer cancel() + err := common.WMIQueryWithContext(ctx, "SELECT * FROM Win32_PerfFormattedData_PerfDisk_LogicalDisk", &dst) if err != nil { return ret, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go deleted file mode 100644 index dd6ddc4f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build ignore -// Hand writing: _Ctype_struct___0 - -/* -Input to cgo -godefs. - -*/ - -package disk - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -// because statinfo has long double snap_time, redefine with changing long long -struct statinfo2 { - long cp_time[CPUSTATES]; - long tk_nin; - long tk_nout; - struct devinfo *dinfo; - long long snap_time; -}; -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeofLongDouble = C.sizeof_longlong - - DEVSTAT_NO_DATA = 0x00 - DEVSTAT_READ = 0x01 - DEVSTAT_WRITE = 0x02 - DEVSTAT_FREE = 0x03 - - // from sys/mount.h - MNT_RDONLY = 0x00000001 /* read only filesystem */ - MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ - MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ - MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ - MNT_UNION = 0x00000020 /* union with underlying filesystem */ - MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ - MNT_SUIDDIR = 0x00100000 /* special handling of SUID on dirs */ - MNT_SOFTDEP = 0x00200000 /* soft updates being done */ - MNT_NOSYMFOLLOW = 0x00400000 /* do not follow symlinks */ - MNT_GJOURNAL = 0x02000000 /* GEOM journal support enabled */ - MNT_MULTILABEL = 0x04000000 /* MAC support for individual objects */ - MNT_ACLS = 0x08000000 /* ACL support enabled */ - MNT_NOATIME = 0x10000000 /* disable update of file access time */ - MNT_NOCLUSTERR = 0x40000000 /* disable cluster read */ - MNT_NOCLUSTERW = 0x80000000 /* disable cluster write */ - MNT_NFS4ACLS = 0x00000010 - - MNT_WAIT = 1 /* synchronously wait for I/O to complete */ - MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ - MNT_LAZY = 3 /* push data not written by filesystem syncer */ - MNT_SUSPEND = 4 /* Suspend file system after sync */ -) - -const ( - sizeOfDevstat = C.sizeof_struct_devstat -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong - _C_long_double C.longlong -) - -type Statfs C.struct_statfs -type Fsid C.struct_fsid - -type Devstat C.struct_devstat -type Bintime C.struct_bintime diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go deleted file mode 100644 index 1e3ddef5..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build ignore -// Hand writing: _Ctype_struct___0 - -/* -Input to cgo -godefs. -*/ - -package disk - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeofLongDouble = C.sizeof_longlong - - DEVSTAT_NO_DATA = 0x00 - DEVSTAT_READ = 0x01 - DEVSTAT_WRITE = 0x02 - DEVSTAT_FREE = 0x03 - - // from sys/mount.h - MNT_RDONLY = 0x00000001 /* read only filesystem */ - MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ - MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ - MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ - MNT_NODEV = 0x00000010 /* don't interpret special files */ - MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ - - MNT_WAIT = 1 /* synchronously wait for I/O to complete */ - MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ - MNT_LAZY = 3 /* push data not written by filesystem syncer */ -) - -const ( - sizeOfDiskstats = C.sizeof_struct_diskstats -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong - _C_long_double C.longlong -) - -type Statfs C.struct_statfs -type Diskstats C.struct_diskstats -type Fsid C.fsid_t -type Timeval C.struct_timeval - -type Diskstat C.struct_diskstat -type Bintime C.struct_bintime diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/doc.go new file mode 100644 index 00000000..6a65fe26 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/doc.go @@ -0,0 +1 @@ +package gopsutil diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common.go index cb6d3f3a..fcee6be8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common.go @@ -9,10 +9,10 @@ package common import ( "bufio" "bytes" + "context" "errors" "fmt" "io/ioutil" - "log" "net/url" "os" "os/exec" @@ -27,7 +27,7 @@ import ( var ( Timeout = 3 * time.Second - ErrTimeout = errors.New("Command timed out.") + ErrTimeout = errors.New("command timed out") ) type Invoker interface { @@ -37,8 +37,24 @@ type Invoker interface { type Invoke struct{} func (i Invoke) Command(name string, arg ...string) ([]byte, error) { - cmd := exec.Command(name, arg...) - return CombinedOutputTimeout(cmd, Timeout) + ctxt, cancel := context.WithTimeout(context.Background(), Timeout) + defer cancel() + + cmd := exec.CommandContext(ctxt, name, arg...) + + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Start(); err != nil { + return buf.Bytes(), err + } + + if err := cmd.Wait(); err != nil { + return buf.Bytes(), err + } + + return buf.Bytes(), nil } type FakeInvoke struct { @@ -300,42 +316,8 @@ func HostEtc(combineWith ...string) string { return GetEnv("HOST_ETC", "/etc", combineWith...) } -// CombinedOutputTimeout runs the given command with the given timeout and -// returns the combined output of stdout and stderr. -// If the command times out, it attempts to kill the process. 
-// copied from https://github.com/influxdata/telegraf -func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) { - var b bytes.Buffer - c.Stdout = &b - c.Stderr = &b - if err := c.Start(); err != nil { - return nil, err - } - err := WaitTimeout(c, timeout) - return b.Bytes(), err -} - -// WaitTimeout waits for the given command to finish with a timeout. -// It assumes the command has already been started. -// If the command times out, it attempts to kill the process. -// copied from https://github.com/influxdata/telegraf -func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { - timer := time.NewTimer(timeout) - done := make(chan error) - go func() { done <- c.Wait() }() - select { - case err := <-done: - timer.Stop() - return err - case <-timer.C: - if err := c.Process.Kill(); err != nil { - log.Printf("FATAL error killing process: %s", err) - return err - } - // wait for the command to return after killing it - <-done - return ErrTimeout - } +func HostVar(combineWith ...string) string { + return GetEnv("HOST_VAR", "/var", combineWith...) } // https://gist.github.com/kylelemons/1525278 @@ -380,3 +362,19 @@ func Pipeline(cmds ...*exec.Cmd) ([]byte, []byte, error) { // Return the pipeline output and the collected standard error return output.Bytes(), stderr.Bytes(), nil } + +// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running +// sysctl commands (see DoSysctrl). +func getSysctrlEnv(env []string) []string { + foundLC := false + for i, line := range env { + if strings.HasPrefix(line, "LC_ALL") { + env[i] = "LC_ALL=C" + foundLC = true + } + } + if !foundLC { + env = append(env, "LC_ALL=C") + } + return env +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go index 2e1552ae..2b6d4c14 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go @@ -6,21 +6,19 @@ import ( "os" "os/exec" "strings" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func DoSysctrl(mib string) ([]string, error) { - err := os.Setenv("LC_ALL", "C") - if err != nil { - return []string{}, err - } - sysctl, err := exec.LookPath("/usr/sbin/sysctl") if err != nil { return []string{}, err } - out, err := exec.Command(sysctl, "-n", mib).Output() + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() if err != nil { return []string{}, err } @@ -36,8 +34,8 @@ func CallSyscall(mib []int32) ([]byte, uint64, error) { // get required buffer size length := uint64(0) - _, _, err := syscall.Syscall6( - syscall.SYS___SYSCTL, + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), uintptr(miblen), 0, @@ -54,8 +52,8 @@ func CallSyscall(mib []int32) ([]byte, uint64, error) { } // get proc info itself buf := make([]byte, length) - _, _, err = syscall.Syscall6( - syscall.SYS___SYSCTL, + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), uintptr(miblen), uintptr(unsafe.Pointer(&buf[0])), diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go index bda4ecfa..107e2c9c 100644 --- 
a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go @@ -6,20 +6,19 @@ import ( "os" "os/exec" "strings" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func DoSysctrl(mib string) ([]string, error) { - err := os.Setenv("LC_ALL", "C") - if err != nil { - return []string{}, err - } sysctl, err := exec.LookPath("/sbin/sysctl") if err != nil { return []string{}, err } - out, err := exec.Command(sysctl, "-n", mib).Output() + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() if err != nil { return []string{}, err } @@ -36,8 +35,8 @@ func CallSyscall(mib []int32) ([]byte, uint64, error) { // get required buffer size length := uint64(0) - _, _, err := syscall.Syscall6( - syscall.SYS___SYSCTL, + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, uintptr(mibptr), uintptr(miblen), 0, @@ -54,8 +53,8 @@ func CallSyscall(mib []int32) ([]byte, uint64, error) { } // get proc info itself buf := make([]byte, length) - _, _, err = syscall.Syscall6( - syscall.SYS___SYSCTL, + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, uintptr(mibptr), uintptr(miblen), uintptr(unsafe.Pointer(&buf[0])), diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go index 3d0fc50d..4e829e05 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go @@ -9,15 +9,13 @@ import ( ) func DoSysctrl(mib string) ([]string, error) { - err := os.Setenv("LC_ALL", "C") - if err != nil { - return []string{}, err - } sysctl, err := exec.LookPath("/sbin/sysctl") if err != nil { return []string{}, err } - out, err := exec.Command(sysctl, "-n", mib).Output() + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() if err != nil { return []string{}, err } @@ -35,7 +33,7 @@ func NumProcs() (uint64, error) { } defer f.Close() - list, err := f.Readdir(-1) + list, err := f.Readdirnames(-1) if err != nil { return 0, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go index 959d9e56..398f7854 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go @@ -6,20 +6,19 @@ import ( "os" "os/exec" "strings" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func DoSysctrl(mib string) ([]string, error) { - err := os.Setenv("LC_ALL", "C") - if err != nil { - return []string{}, err - } sysctl, err := exec.LookPath("/sbin/sysctl") if err != nil { return []string{}, err } - out, err := exec.Command(sysctl, "-n", mib).Output() + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() if err != nil { return []string{}, err } @@ -36,8 +35,8 @@ func CallSyscall(mib []int32) ([]byte, uint64, error) { // get required buffer size length := uint64(0) - _, _, err := syscall.Syscall6( - syscall.SYS___SYSCTL, + _, _, err := unix.Syscall6( + 
unix.SYS___SYSCTL, uintptr(mibptr), uintptr(miblen), 0, @@ -54,8 +53,8 @@ func CallSyscall(mib []int32) ([]byte, uint64, error) { } // get proc info itself buf := make([]byte, length) - _, _, err = syscall.Syscall6( - syscall.SYS___SYSCTL, + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, uintptr(mibptr), uintptr(miblen), uintptr(unsafe.Pointer(&buf[0])), diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go index d727378c..1dffe615 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go @@ -3,8 +3,11 @@ package common import ( - "syscall" + "context" "unsafe" + + "github.com/StackExchange/wmi" + "golang.org/x/sys/windows" ) // for double values @@ -44,9 +47,10 @@ const ( ) var ( - Modkernel32 = syscall.NewLazyDLL("kernel32.dll") - ModNt = syscall.NewLazyDLL("ntdll.dll") - ModPdh = syscall.NewLazyDLL("pdh.dll") + Modkernel32 = windows.NewLazyDLL("kernel32.dll") + ModNt = windows.NewLazyDLL("ntdll.dll") + ModPdh = windows.NewLazyDLL("pdh.dll") + ModPsapi = windows.NewLazyDLL("psapi.dll") ProcGetSystemTimes = Modkernel32.NewProc("GetSystemTimes") ProcNtQuerySystemInformation = ModNt.NewProc("NtQuerySystemInformation") @@ -77,13 +81,13 @@ func BytePtrToString(p *uint8) string { type CounterInfo struct { PostName string CounterName string - Counter syscall.Handle + Counter windows.Handle } // CreateQuery XXX // copied from https://github.com/mackerelio/mackerel-agent/ -func CreateQuery() (syscall.Handle, error) { - var query syscall.Handle +func CreateQuery() (windows.Handle, error) { + var query windows.Handle r, _, err := PdhOpenQuery.Call(0, 0, uintptr(unsafe.Pointer(&query))) if r != 0 { return 0, err @@ -92,11 +96,11 @@ func CreateQuery() (syscall.Handle, error) { } // CreateCounter XXX -func CreateCounter(query syscall.Handle, pname, cname string) (*CounterInfo, error) { - var counter syscall.Handle +func CreateCounter(query windows.Handle, pname, cname string) (*CounterInfo, error) { + var counter windows.Handle r, _, err := PdhAddCounter.Call( uintptr(query), - uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(cname))), + uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(cname))), 0, uintptr(unsafe.Pointer(&counter))) if r != 0 { @@ -108,3 +112,18 @@ func CreateCounter(query syscall.Handle, pname, cname string) (*CounterInfo, err Counter: counter, }, nil } + +// WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging +func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error { + errChan := make(chan error, 1) + go func() { + errChan <- wmi.Query(query, dst, connectServerArgs...) 
+ }() + + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-errChan: + return err + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net.go index 48660ec7..428e68e1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net.go @@ -1,6 +1,7 @@ package net import ( + "context" "encoding/json" "fmt" "net" @@ -111,6 +112,10 @@ func (n InterfaceAddr) String() string { } func Interfaces() ([]InterfaceStat, error) { + return InterfacesWithContext(context.Background()) +} + +func InterfacesWithContext(ctx context.Context) ([]InterfaceStat, error) { is, err := net.Interfaces() if err != nil { return nil, err diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_darwin.go index f1065c6d..2afb0f08 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_darwin.go @@ -3,6 +3,7 @@ package net import ( + "context" "errors" "fmt" "os/exec" @@ -18,7 +19,7 @@ var ( const endOfLine = "\n" -func parseNetstatLine(line string) (stat *IOCountersStat, linkId *uint, err error) { +func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err error) { var ( numericValue uint64 columns = strings.Fields(line) @@ -35,8 +36,8 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkId *uint, err erro if err != nil { return } - linkIdUint := uint(numericValue) - linkId = &linkIdUint + linkIDUint := uint(numericValue) + linkID = &linkIDUint } base := 1 @@ -91,7 +92,7 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkId *uint, err erro } type netstatInterface struct { - linkId *uint + linkID *uint stat *IOCountersStat } @@ -112,7 +113,7 @@ func parseNetstatOutput(output string) ([]netstatInterface, error) { for index := 0; index < numberInterfaces; index++ { nsIface := netstatInterface{} - if nsIface.stat, nsIface.linkId, err = parseNetstatLine(lines[index+1]); err != nil { + if nsIface.stat, nsIface.linkID, err = parseNetstatLine(lines[index+1]); err != nil { return nil, err } interfaces[index] = nsIface @@ -126,7 +127,7 @@ type mapInterfaceNameUsage map[string]uint func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { output := make(mapInterfaceNameUsage) for index := range ifaces { - if ifaces[index].linkId != nil { + if ifaces[index].linkID != nil { ifaceName := ifaces[index].stat.Name usage, ok := output[ifaceName] if ok { @@ -164,6 +165,10 @@ func (min mapInterfaceNameUsage) notTruncated() []string { // lo0 16384 ::1/128 ::1 869107 - 169411755 869107 - 169411755 - - // lo0 16384 127 127.0.0.1 869107 - 169411755 869107 - 169411755 - - func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { var ( ret []IOCountersStat retIndex int @@ -192,7 +197,7 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { if !ifaceUsage.isTruncated() { // no truncated interface name, return stats of all interface with for index := range nsInterfaces { - if nsInterfaces[index].linkId != nil { + if nsInterfaces[index].linkID != nil { 
ret[retIndex] = *nsInterfaces[index].stat retIndex++ } @@ -212,7 +217,7 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { for _, interfaceName := range interfaceNames { truncated := true for index := range nsInterfaces { - if nsInterfaces[index].linkId != nil && nsInterfaces[index].stat.Name == interfaceName { + if nsInterfaces[index].linkID != nil && nsInterfaces[index].stat.Name == interfaceName { // handle the non truncated name to avoid execute netstat for them again ret[retIndex] = *nsInterfaces[index].stat retIndex++ @@ -234,7 +239,7 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { continue } for index := range parsedIfaces { - if parsedIfaces[index].linkId != nil { + if parsedIfaces[index].linkID != nil { ret = append(ret, *parsedIfaces[index].stat) break } @@ -251,10 +256,18 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { // NetIOCountersByFile is an method which is added just a compatibility for linux. func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCounters(pernic) } func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, errors.New("NetFilterCounters not implemented for darwin") } @@ -263,5 +276,9 @@ func FilterCounters() ([]FilterStat, error) { // just the protocols in the list are returned. // Not Implemented for Darwin func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, errors.New("NetProtoCounters not implemented for darwin") } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_fallback.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_fallback.go index 653bd47e..7c5e632f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_fallback.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_fallback.go @@ -2,24 +2,48 @@ package net -import "github.com/shirou/gopsutil/internal/common" +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { return []IOCountersStat{}, common.ErrNotImplementedError } func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return []FilterStat{}, common.ErrNotImplementedError } func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return []ProtoCountersStat{}, common.ErrNotImplementedError } func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) 
([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_freebsd.go index 2b546550..9daed8d7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_freebsd.go @@ -3,6 +3,7 @@ package net import ( + "context" "errors" "os/exec" "strconv" @@ -12,6 +13,10 @@ import ( ) func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { netstat, err := exec.LookPath("/usr/bin/netstat") if err != nil { return nil, err @@ -92,10 +97,18 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { // NetIOCountersByFile is an method which is added just a compatibility for linux. func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCounters(pernic) } func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, errors.New("NetFilterCounters not implemented for freebsd") } @@ -104,5 +117,9 @@ func FilterCounters() ([]FilterStat, error) { // just the protocols in the list are returned. // Not Implemented for FreeBSD func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, errors.New("NetProtoCounters not implemented for freebsd") } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_linux.go index 12214d92..fc2b22ea 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_linux.go @@ -3,6 +3,8 @@ package net import ( + "bytes" + "context" "encoding/hex" "errors" "fmt" @@ -22,11 +24,19 @@ import ( // every network interface installed on the system is returned // separately. 
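The hunks in this gopsutil update follow one pattern throughout: every exported call gains a *WithContext twin, the original delegates via context.Background(), and potentially slow work is raced against ctx.Done() (see the WMIQueryWithContext helper added to common_windows.go above). The sketch below reproduces that shape with hypothetical names (fetchStats, Stats, StatsWithContext); it is illustrative only and not code from the patch.

package main

import (
	"context"
	"fmt"
	"time"
)

// fetchStats stands in for a slow collector such as a WMI query or an
// external netstat invocation.
func fetchStats() (string, error) {
	time.Sleep(50 * time.Millisecond)
	return "ok", nil
}

// Stats keeps the old signature and simply delegates, mirroring how
// IOCounters now calls IOCountersWithContext(context.Background(), ...).
func Stats() (string, error) {
	return StatsWithContext(context.Background())
}

// StatsWithContext runs the work in a goroutine and races it against
// ctx.Done(), the same approach WMIQueryWithContext uses in this diff.
func StatsWithContext(ctx context.Context) (string, error) {
	type result struct {
		v   string
		err error
	}
	ch := make(chan result, 1)
	go func() {
		v, err := fetchStats()
		ch <- result{v, err}
	}()
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case r := <-ch:
		return r.v, r.err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	v, err := StatsWithContext(ctx)
	fmt.Println("stats:", v, "err:", err)
}

The buffered channel lets the goroutine finish and exit even when the caller abandons the result after a timeout, which is why the helpers in this patch use the same construction.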
func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { filename := common.HostProc("net/dev") return IOCountersByFile(pernic, filename) } func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { lines, err := common.ReadLines(filename) if err != nil { return nil, err @@ -131,6 +141,10 @@ var netProtocols = []string{ // Available protocols: // ip,icmp,icmpmsg,tcp,udp,udplite func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { if len(protocols) == 0 { protocols = netProtocols } @@ -190,6 +204,10 @@ func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { // the currently in use conntrack count and the max. // If the file does not exist or is invalid it will return nil. func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { countfile := common.HostProc("sys/net/netfilter/nf_conntrack_count") maxfile := common.HostProc("sys/net/netfilter/nf_conntrack_max") @@ -261,17 +279,17 @@ var kindUNIX = netConnectionKindType{ } var netConnectionKindMap = map[string][]netConnectionKindType{ - "all": []netConnectionKindType{kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX}, - "tcp": []netConnectionKindType{kindTCP4, kindTCP6}, - "tcp4": []netConnectionKindType{kindTCP4}, - "tcp6": []netConnectionKindType{kindTCP6}, - "udp": []netConnectionKindType{kindUDP4, kindUDP6}, - "udp4": []netConnectionKindType{kindUDP4}, - "udp6": []netConnectionKindType{kindUDP6}, - "unix": []netConnectionKindType{kindUNIX}, - "inet": []netConnectionKindType{kindTCP4, kindTCP6, kindUDP4, kindUDP6}, - "inet4": []netConnectionKindType{kindTCP4, kindUDP4}, - "inet6": []netConnectionKindType{kindTCP6, kindUDP6}, + "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX}, + "tcp": {kindTCP4, kindTCP6}, + "tcp4": {kindTCP4}, + "tcp6": {kindTCP6}, + "udp": {kindUDP4, kindUDP6}, + "udp4": {kindUDP4}, + "udp6": {kindUDP6}, + "unix": {kindUNIX}, + "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "inet4": {kindTCP4, kindUDP4}, + "inet6": {kindTCP6, kindUDP6}, } type inodeMap struct { @@ -293,17 +311,29 @@ type connTmp struct { // Return a list of network connections opened. func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPid(kind, 0) } // Return a list of network connections opened returning at most `max` // connections for each running process. func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { return ConnectionsPidMax(kind, 0, max) } // Return a list of network connections opened by a process. 
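As a usage note for the connection-listing functions touched here, the sketch below shows how a caller might list TCP sockets through this package. It assumes the import path visible in the file headers ("github.com/shirou/gopsutil/net") and a standalone main program; it is not part of the patch. Note that in this version the ctx argument of ConnectionsWithContext is accepted for API symmetry, while the Linux implementation simply delegates to ConnectionsPid(kind, 0).

package main

import (
	"fmt"

	gopsnet "github.com/shirou/gopsutil/net"
)

func main() {
	// "tcp" resolves to the kindTCP4/kindTCP6 entry of netConnectionKindMap
	// in net_linux.go, so both IPv4 and IPv6 sockets are returned.
	conns, err := gopsnet.Connections("tcp")
	if err != nil {
		fmt.Println("connections:", err)
		return
	}
	for _, c := range conns {
		// Laddr/Raddr are Addr values with IP and Port fields; Status holds
		// the socket state (e.g. LISTEN, ESTABLISHED).
		fmt.Printf("%s:%d -> %s:%d %s\n", c.Laddr.IP, c.Laddr.Port, c.Raddr.IP, c.Raddr.Port, c.Status)
	}
}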
func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -321,13 +351,17 @@ func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { } } if err != nil { - return nil, fmt.Errorf("cound not get pid(s), %d", pid) + return nil, fmt.Errorf("cound not get pid(s), %d: %s", pid, err) } return statsFromInodes(root, pid, tmap, inodes) } // Return up to `max` network connections opened by a process. func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -415,12 +449,12 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro dir := fmt.Sprintf("%s/%d/fd", root, pid) f, err := os.Open(dir) if err != nil { - return ret, nil + return ret, err } defer f.Close() files, err := f.Readdir(max) if err != nil { - return ret, nil + return ret, err } for _, fd := range files { inodePath := fmt.Sprintf("%s/%d/fd/%s", root, pid, fd.Name()) @@ -458,6 +492,10 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro // FIXME: Import process occures import cycle. // move to common made other platform breaking. Need consider. func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { var ret []int32 d, err := os.Open(common.HostProc()) @@ -540,6 +578,10 @@ func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { for _, pid := range pids { t, err := getProcInodes(root, pid, max) if err != nil { + // skip if permission error or no longer exists + if os.IsPermission(err) || os.IsNotExist(err) { + continue + } return ret, err } if len(t) == 0 { @@ -587,6 +629,10 @@ func decodeAddress(family uint32, src string) (Addr, error) { // Reverse reverses array of bytes. func Reverse(s []byte) []byte { + return ReverseWithContext(context.Background(), s) +} + +func ReverseWithContext(ctx context.Context, s []byte) []byte { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } @@ -613,14 +659,22 @@ func processInet(file string, kind netConnectionKindType, inodes map[string][]in // IPv6 not supported, return empty. return []connTmp{}, nil } - lines, err := common.ReadLines(file) + + // Read the contents of the /proc file with a single read sys call. 
+ // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := ioutil.ReadFile(file) if err != nil { return nil, err } + + lines := bytes.Split(contents, []byte("\n")) + var ret []connTmp // skip first line for _, line := range lines[1:] { - l := strings.Fields(line) + l := strings.Fields(string(line)) if len(l) < 10 { continue } @@ -667,15 +721,21 @@ func processInet(file string, kind netConnectionKindType, inodes map[string][]in } func processUnix(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { - lines, err := common.ReadLines(file) + // Read the contents of the /proc file with a single read sys call. + // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := ioutil.ReadFile(file) if err != nil { return nil, err } + lines := bytes.Split(contents, []byte("\n")) + var ret []connTmp // skip first line for _, line := range lines[1:] { - tokens := strings.Fields(line) + tokens := strings.Fields(string(line)) if len(tokens) < 6 { continue } @@ -690,7 +750,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in pairs, exists := inodes[inode] if !exists { pairs = []inodeMap{ - inodeMap{}, + {}, } } for _, pair := range pairs { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_openbsd.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_openbsd.go index 85cc70c4..4b194eb6 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_openbsd.go @@ -3,14 +3,20 @@ package net import ( + "context" "errors" + "fmt" "os/exec" + "regexp" "strconv" "strings" + "syscall" "github.com/shirou/gopsutil/internal/common" ) +var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) + func ParseNetstat(output string, mode string, iocs map[string]IOCountersStat) error { lines := strings.Split(output, "\n") @@ -92,7 +98,11 @@ func ParseNetstat(output string, mode string, } func IOCounters(pernic bool) ([]IOCountersStat, error) { - netstat, err := exec.LookPath("/usr/bin/netstat") + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + netstat, err := exec.LookPath("netstat") if err != nil { return nil, err } @@ -131,10 +141,18 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { // NetIOCountersByFile is an method which is added just a compatibility for linux. func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCounters(pernic) } func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, errors.New("NetFilterCounters not implemented for openbsd") } @@ -143,11 +161,152 @@ func FilterCounters() ([]FilterStat, error) { // just the protocols in the list are returned. 
// Not Implemented for OpenBSD func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, errors.New("NetProtoCounters not implemented for openbsd") } +func parseNetstatLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 5 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + var netType, netFamily uint32 + switch f[0] { + case "tcp": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET + case "udp": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET + case "tcp6": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET6 + case "udp6": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET6 + default: + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[0]) + } + + laddr, raddr, err := parseNetstatAddr(f[3], f[4], netFamily) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s %s", f[3], f[4]) + } + + n := ConnectionStat{ + Fd: uint32(0), // not supported + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(0), // not supported + } + if len(f) == 6 { + n.Status = f[5] + } + + return n, nil +} + +func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + matches := portMatch.FindStringSubmatch(l) + if matches == nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + host := matches[1] + port := matches[2] + if host == "*" { + switch family { + case syscall.AF_INET: + host = "0.0.0.0" + case syscall.AF_INET6: + host = "::" + default: + return Addr{}, fmt.Errorf("unknown family, %d", family) + } + } + lport, err := strconv.Atoi(port) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + laddr, err = parse(local) + if remote != "*.*" { // remote addr exists + raddr, err = parse(remote) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} + // Return a list of network connections opened. -// Not Implemented for OpenBSD func Connections(kind string) ([]ConnectionStat, error) { - return nil, errors.New("Connections not implemented for openbsd") + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-na"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + // nothing to add + case "inet4": + args = append(args, "-finet") + case "inet6": + args = append(args, "-finet6") + case "tcp": + args = append(args, "-ptcp") + case "tcp4": + args = append(args, "-ptcp", "-finet") + case "tcp6": + args = append(args, "-ptcp", "-finet6") + case "udp": + args = append(args, "-pudp") + case "udp4": + args = append(args, "-pudp", "-finet") + case "udp6": + args = append(args, "-pudp", "-finet6") + case "unix": + return ret, common.ErrNotImplementedError + } + + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + out, err := invoke.Command(netstat, args...) 
+ + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + for _, line := range lines { + if !(strings.HasPrefix(line, "tcp") || strings.HasPrefix(line, "udp")) { + continue + } + n, err := parseNetstatLine(line) + if err != nil { + continue + } + + ret = append(ret, n) + } + + return ret, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_unix.go index 1224128a..5ceb9cc5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_unix.go @@ -3,6 +3,7 @@ package net import ( + "context" "strings" "github.com/shirou/gopsutil/internal/common" @@ -10,17 +11,29 @@ import ( // Return a list of network connections opened. func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPid(kind, 0) } // Return a list of network connections opened returning at most `max` // connections for each running process. func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } // Return a list of network connections opened by a process. func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { var ret []ConnectionStat args := []string{"-i"} @@ -75,5 +88,9 @@ func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { // Return up to `max` network connections opened by a process. 
func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_windows.go index 996e8322..7fff20e2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/shirou/gopsutil/net/net_windows.go @@ -3,16 +3,17 @@ package net import ( + "context" "errors" "net" "os" - "syscall" "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" ) var ( - modiphlpapi = syscall.NewLazyDLL("iphlpapi.dll") + modiphlpapi = windows.NewLazyDLL("iphlpapi.dll") procGetExtendedTCPTable = modiphlpapi.NewProc("GetExtendedTcpTable") procGetExtendedUDPTable = modiphlpapi.NewProc("GetExtendedUdpTable") ) @@ -30,6 +31,10 @@ const ( ) func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { ifs, err := net.Interfaces() if err != nil { return nil, err @@ -41,8 +46,8 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { Name: ifi.Name, } - row := syscall.MibIfRow{Index: uint32(ifi.Index)} - e := syscall.GetIfEntry(&row) + row := windows.MibIfRow{Index: uint32(ifi.Index)} + e := windows.GetIfEntry(&row) if e != nil { return nil, os.NewSyscallError("GetIfEntry", e) } @@ -66,11 +71,19 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { // NetIOCountersByFile is an method which is added just a compatibility for linux. func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCounters(pernic) } // Return a list of network connections opened by a process func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { var ret []ConnectionStat return ret, common.ErrNotImplementedError @@ -79,10 +92,18 @@ func Connections(kind string) ([]ConnectionStat, error) { // Return a list of network connections opened returning at most `max` // connections for each running process. func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, errors.New("NetFilterCounters not implemented for windows") } @@ -91,5 +112,9 @@ func FilterCounters() ([]FilterStat, error) { // just the protocols in the list are returned. 
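Nearly every gopsutil hunk in this patch follows the same mechanical shape: an exported function keeps its old signature and delegates to a new `FooWithContext` twin that takes a `context.Context` as its first argument. A small sketch of that wrapper pattern, using hypothetical names rather than gopsutil's own API:

```go
package main

import (
	"context"
	"fmt"
)

// CounterStat stands in for gopsutil's result types; the names here are
// illustrative only.
type CounterStat struct{ Name string }

// Counters keeps its old signature and simply delegates, so existing callers
// compile unchanged.
func Counters(pernic bool) ([]CounterStat, error) {
	return CountersWithContext(context.Background(), pernic)
}

// CountersWithContext carries the real implementation and can honor
// cancellation where the underlying collection supports it.
func CountersWithContext(ctx context.Context, pernic bool) ([]CounterStat, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	return []CounterStat{{Name: "eth0"}}, nil
}

func main() {
	stats, _ := Counters(true)
	fmt.Println(stats)
}
```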
// Not Implemented for Windows func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, errors.New("NetProtoCounters not implemented for windows") } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/Godeps/Godeps.json b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/Godeps/Godeps.json deleted file mode 100644 index b206a609..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/Godeps/Godeps.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "ImportPath": "github.com/stretchr/testify", - "GoVersion": "go1.5", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "2df174808ee097f90d259e432cc04442cf60be21" - }, - { - "ImportPath": "github.com/pmezard/go-difflib/difflib", - "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" - }, - { - "ImportPath": "github.com/stretchr/objx", - "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" - } - ] -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/Godeps/Readme b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/Godeps/Readme deleted file mode 100644 index 4cdaa53d..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/README.md b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/README.md deleted file mode 100644 index aaf2aa0a..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/README.md +++ /dev/null @@ -1,332 +0,0 @@ -Testify - Thou Shalt Write Tests -================================ - -[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) - -Go code (golang) set of packages that provide many tools for testifying that your code will behave as you intend. - -Features include: - - * [Easy assertions](#assert-package) - * [Mocking](#mock-package) - * [HTTP response trapping](#http-package) - * [Testing suite interfaces and functions](#suite-package) - -Get started: - - * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date) - * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing - * Check out the API Documentation http://godoc.org/github.com/stretchr/testify - * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc) - * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development) - - - -[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package -------------------------------------------------------------------------------------------- - -The `assert` package provides some helpful methods that allow you to write better test code in Go. 
- - * Prints friendly, easy to read failure descriptions - * Allows for very readable code - * Optionally annotate each assertion with a message - -See it in action: - -```go -package yours - -import ( - "testing" - "github.com/stretchr/testify/assert" -) - -func TestSomething(t *testing.T) { - - // assert equality - assert.Equal(t, 123, 123, "they should be equal") - - // assert inequality - assert.NotEqual(t, 123, 456, "they should not be equal") - - // assert for nil (good for errors) - assert.Nil(t, object) - - // assert for not nil (good when you expect something) - if assert.NotNil(t, object) { - - // now we know that object isn't nil, we are safe to make - // further assertions without causing any errors - assert.Equal(t, "Something", object.Value) - - } - -} -``` - - * Every assert func takes the `testing.T` object as the first argument. This is how it writes the errors out through the normal `go test` capabilities. - * Every assert func returns a bool indicating whether the assertion was successful or not, this is useful for if you want to go on making further assertions under certain conditions. - -if you assert many times, use the below: - -```go -package yours - -import ( - "testing" - "github.com/stretchr/testify/assert" -) - -func TestSomething(t *testing.T) { - assert := assert.New(t) - - // assert equality - assert.Equal(123, 123, "they should be equal") - - // assert inequality - assert.NotEqual(123, 456, "they should not be equal") - - // assert for nil (good for errors) - assert.Nil(object) - - // assert for not nil (good when you expect something) - if assert.NotNil(object) { - - // now we know that object isn't nil, we are safe to make - // further assertions without causing any errors - assert.Equal("Something", object.Value) - } -} -``` - -[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package ---------------------------------------------------------------------------------------------- - -The `require` package provides same global functions as the `assert` package, but instead of returning a boolean result they terminate current test. - -See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details. - - -[`http`](http://godoc.org/github.com/stretchr/testify/http "API documentation") package ---------------------------------------------------------------------------------------- - -The `http` package contains test objects useful for testing code that relies on the `net/http` package. Check out the [(deprecated) API documentation for the `http` package](http://godoc.org/github.com/stretchr/testify/http). - -We recommend you use [httptest](http://golang.org/pkg/net/http/httptest) instead. - -[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package ----------------------------------------------------------------------------------------- - -The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code. - -An example test function that tests a piece of code that relies on an external object `testObj`, can setup expectations (testify) and assert that they indeed happened: - -```go -package yours - -import ( - "testing" - "github.com/stretchr/testify/mock" -) - -/* - Test objects -*/ - -// MyMockedObject is a mocked object that implements an interface -// that describes an object that the code I am testing relies on. 
-type MyMockedObject struct{ - mock.Mock -} - -// DoSomething is a method on MyMockedObject that implements some interface -// and just records the activity, and returns what the Mock object tells it to. -// -// In the real object, this method would do something useful, but since this -// is a mocked object - we're just going to stub it out. -// -// NOTE: This method is not being tested here, code that uses this object is. -func (m *MyMockedObject) DoSomething(number int) (bool, error) { - - args := m.Called(number) - return args.Bool(0), args.Error(1) - -} - -/* - Actual test functions -*/ - -// TestSomething is an example of how to use our test object to -// make assertions about some target code we are testing. -func TestSomething(t *testing.T) { - - // create an instance of our test object - testObj := new(MyMockedObject) - - // setup expectations - testObj.On("DoSomething", 123).Return(true, nil) - - // call the code we are testing - targetFuncThatDoesSomethingWithObj(testObj) - - // assert that the expectations were met - testObj.AssertExpectations(t) - -} -``` - -For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock). - -You can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, making using mocks much quicker. - -[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package ------------------------------------------------------------------------------------------ - -The `suite` package provides functionality that you might be used to from more common object oriented languages. With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with 'go test' as per normal. - -An example suite is shown below: - -```go -// Basic imports -import ( - "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" -) - -// Define the suite, and absorb the built-in basic suite -// functionality from testify - including a T() method which -// returns the current testing context -type ExampleTestSuite struct { - suite.Suite - VariableThatShouldStartAtFive int -} - -// Make sure that VariableThatShouldStartAtFive is set to five -// before each test -func (suite *ExampleTestSuite) SetupTest() { - suite.VariableThatShouldStartAtFive = 5 -} - -// All methods that begin with "Test" are run as tests within a -// suite. -func (suite *ExampleTestSuite) TestExample() { - assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) -} - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestExampleTestSuite(t *testing.T) { - suite.Run(t, new(ExampleTestSuite)) -} -``` - -For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go) - -For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite). - -`Suite` object has assertion methods: - -```go -// Basic imports -import ( - "testing" - "github.com/stretchr/testify/suite" -) - -// Define the suite, and absorb the built-in basic suite -// functionality from testify - including assertion methods. 
-type ExampleTestSuite struct { - suite.Suite - VariableThatShouldStartAtFive int -} - -// Make sure that VariableThatShouldStartAtFive is set to five -// before each test -func (suite *ExampleTestSuite) SetupTest() { - suite.VariableThatShouldStartAtFive = 5 -} - -// All methods that begin with "Test" are run as tests within a -// suite. -func (suite *ExampleTestSuite) TestExample() { - suite.Equal(suite.VariableThatShouldStartAtFive, 5) -} - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestExampleTestSuite(t *testing.T) { - suite.Run(t, new(ExampleTestSuite)) -} -``` - ------- - -Installation -============ - -To install Testify, use `go get`: - - * Latest version: go get github.com/stretchr/testify - * Specific version: go get gopkg.in/stretchr/testify.v1 - -This will then make the following packages available to you: - - github.com/stretchr/testify/assert - github.com/stretchr/testify/mock - github.com/stretchr/testify/http - -Import the `testify/assert` package into your code using this template: - -```go -package yours - -import ( - "testing" - "github.com/stretchr/testify/assert" -) - -func TestSomething(t *testing.T) { - - assert.True(t, true, "True is true!") - -} -``` - ------- - -Staying up to date -================== - -To update Testify to the latest version, use `go get -u github.com/stretchr/testify`. - ------- - -Version History -=============== - - * 1.0 - New package versioning strategy adopted. - ------- - -Contributing -============ - -Please feel free to submit issues, fork the repository and send pull requests! - -When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it. - ------- - -Licence -======= -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
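The new assertion_format.go below is generated with testify's `_codegen` tool (the template it uses, assertion_format.go.tmpl, is added further down): each `...f` variant only prepends its format string to the variadic arguments and forwards to the existing assertion. A minimal sketch of that forwarding trick outside testify, with hypothetical function names:

```go
package main

import "fmt"

// check is a stand-in for an existing assertion that accepts optional
// message-and-args, testify-style.
func check(ok bool, msgAndArgs ...interface{}) bool {
	if !ok && len(msgAndArgs) > 0 {
		// treat the first element as a format string and the rest as its args
		fmt.Printf(msgAndArgs[0].(string)+"\n", msgAndArgs[1:]...)
	}
	return ok
}

// checkf is the generated-style wrapper: it folds msg and args back into the
// single variadic slice that check expects.
func checkf(ok bool, msg string, args ...interface{}) bool {
	return check(ok, append([]interface{}{msg}, args...)...)
}

func main() {
	checkf(false, "expected %d, got %d", 1, 2) // prints: expected 1, got 2
}
```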
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 00000000..3e172f2c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,405 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return DirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")) +// +// Returns whether the assertion was successful (true) or not (false). +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
+// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +// +// Returns whether the assertion was successful (true) or not (false). +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return FileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. 
+// +// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) 
+} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + return Len(t, object, length, append([]interface{}{msg}, args...)...) +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return Nil(t, object, append([]interface{}{msg}, args...)...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + return NoError(t, err, append([]interface{}{msg}, args...)...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return NotEmpty(t, object, append([]interface{}{msg}, args...)...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type and returns the truth. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. 
+// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type and returns the truth. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return Zero(t, i, append([]interface{}{msg}, args...)...) 
+} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl new file mode 100644 index 00000000..c5cc66f4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentFormat}} +func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { + return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_forward.go index e6a79604..7c4f497b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,387 +1,798 @@ /* * CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen * THIS FILE MUST NOT BE EDITED BY HAND -*/ + */ package assert import ( - http "net/http" url "net/url" time "time" ) - // Condition uses a Comparison to assert a complex condition. func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { return Condition(a.t, comp, msgAndArgs...) } +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + return Conditionf(a.t, comp, msg, args...) +} // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. -// -// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { return Contains(a.t, s, contains, msgAndArgs...) } +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + return DirExistsf(a.t, path, msg, args...) 
+} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatchf(a.t, listA, listB, msg, args...) +} // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. -// +// // a.Empty(obj) -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + return Emptyf(a.t, object, msg, args...) +} // Equal asserts that two objects are equal. -// -// a.Equal(123, 123, "123 and 123 should be equal") -// +// +// a.Equal(123, 123) +// // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return Equal(a.t, expected, actual, msgAndArgs...) } - // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. -// +// // actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// +// a.EqualError(err, expectedErrorString) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { return EqualError(a.t, theError, errString, msgAndArgs...) } +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + return EqualErrorf(a.t, theError, errString, msg, args...) 
+} // EqualValues asserts that two objects are equal or convertable to the same types // and equal. -// -// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// +// +// a.EqualValues(uint32(123), int32(123)) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return EqualValues(a.t, expected, actual, msgAndArgs...) } +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Equalf(a.t, expected, actual, msg, args...) +} // Error asserts that a function returned an error (i.e. not `nil`). -// +// // actualObj, err := SomeFunction() -// if a.Error(err, "An error was expected") { -// assert.Equal(t, err, expectedError) +// if a.Error(err) { +// assert.Equal(t, expectedError, err) // } -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { return Error(a.t, err, msgAndArgs...) } +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + return Errorf(a.t, err, msg, args...) +} -// Exactly asserts that two objects are equal is value and type. -// -// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return Exactly(a.t, expected, actual, msgAndArgs...) } +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Exactlyf(a.t, expected, actual, msg, args...) +} // Fail reports a failure through func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { return Fail(a.t, failureMessage, msgAndArgs...) } - // FailNow fails test func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { return FailNow(a.t, failureMessage, msgAndArgs...) 
} +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { + return FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + return Failf(a.t, failureMessage, msg, args...) +} // False asserts that the specified value is false. -// -// a.False(myBool, "myBool should be false") -// +// +// a.False(myBool) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { return False(a.t, value, msgAndArgs...) } +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + return Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + return FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + return FileExistsf(a.t, path, msg, args...) +} // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. -// +// // a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// +// // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, method, url, values, str) +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) } +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. -// +// // a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// +// // Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, method, url, values, str) +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) } +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) +} // HTTPError asserts that a specified handler returns an error status code. -// +// // a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// +// // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPError(a.t, handler, method, url, values) +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPError(a.t, handler, method, url, values, msgAndArgs...) } +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPErrorf(a.t, handler, method, url, values, msg, args...) +} // HTTPRedirect asserts that a specified handler returns a redirect status code. -// +// // a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// +// // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, method, url, values) +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) } +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) +} // HTTPSuccess asserts that a specified handler returns a success status code. -// +// // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// +// // Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, method, url, values) +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) } +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) +} // Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +// +// a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { return Implements(a.t, interfaceObject, object, msgAndArgs...) } +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + return Implementsf(a.t, interfaceObject, object, msg, args...) +} // InDelta asserts that the two numerals are within delta of each other. -// +// // a.InDelta(math.Pi, (22 / 7.0), 0.01) -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { return InDelta(a.t, expected, actual, delta, msgAndArgs...) } +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} // InDeltaSlice is the same as InDelta, except it compares two slices. func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) } +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaf(a.t, expected, actual, delta, msg, args...) +} // InEpsilon asserts that expected and actual have a relative error less than epsilon -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) } +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) } +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { return IsType(a.t, expectedType, object, msgAndArgs...) } +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + return IsTypef(a.t, expectedType, object, msg, args...) +} // JSONEq asserts that two JSON strings are equivalent. -// +// // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { return JSONEq(a.t, expected, actual, msgAndArgs...) } +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + return JSONEqf(a.t, expected, actual, msg, args...) +} // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3, "The size of slice is not 3") -// +// +// a.Len(mySlice, 3) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { return Len(a.t, object, length, msgAndArgs...) } +// Lenf asserts that the specified object has specific length. 
+// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + return Lenf(a.t, object, length, msg, args...) +} // Nil asserts that the specified object is nil. -// -// a.Nil(err, "err should be nothing") -// +// +// a.Nil(err) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { return Nil(a.t, object, msgAndArgs...) } +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + return Nilf(a.t, object, msg, args...) +} // NoError asserts that a function returned no error (i.e. `nil`). -// +// // actualObj, err := SomeFunction() // if a.NoError(err) { -// assert.Equal(t, actualObj, expectedObj) +// assert.Equal(t, expectedObj, actualObj) // } -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { return NoError(a.t, err, msgAndArgs...) } +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + return NoErrorf(a.t, err, msg, args...) +} // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. -// -// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { return NotContains(a.t, s, contains, msgAndArgs...) } +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return NotContainsf(a.t, s, contains, msg, args...) +} // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. -// +// // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) // } -// +// // Returns whether the assertion was successful (true) or not (false). 
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { return NotEmpty(a.t, object, msgAndArgs...) } +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + return NotEmptyf(a.t, object, msg, args...) +} // NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// +// +// a.NotEqual(obj1, obj2) +// // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return NotEqual(a.t, expected, actual, msgAndArgs...) } +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return NotEqualf(a.t, expected, actual, msg, args...) +} // NotNil asserts that the specified object is not nil. -// -// a.NotNil(err, "err should be something") -// +// +// a.NotNil(err) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { return NotNil(a.t, object, msgAndArgs...) } +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + return NotNilf(a.t, object, msg, args...) +} // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// +// +// a.NotPanics(func(){ RemainCalm() }) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return NotPanics(a.t, f, msgAndArgs...) } +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + return NotPanicsf(a.t, f, msg, args...) +} // NotRegexp asserts that a specified regexp does not match a string. -// +// // a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp("^start", "it's not starting") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { return NotRegexp(a.t, rx, str, msgAndArgs...) 
} +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + return NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return NotSubsetf(a.t, list, subset, msg, args...) +} // NotZero asserts that i is not the zero value for its type and returns the truth. func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { return NotZero(a.t, i, msgAndArgs...) } +// NotZerof asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { + return NotZerof(a.t, i, msg, args...) +} // Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// +// +// a.Panics(func(){ GoCrazy() }) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return Panics(a.t, f, msgAndArgs...) } +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + return PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + return PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + return Panicsf(a.t, f, msg, args...) +} // Regexp asserts that a specified regexp matches a string. -// +// // a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp("start...$", "it's not starting") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { return Regexp(a.t, rx, str, msgAndArgs...) } +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + return Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subsetf(a.t, list, subset, msg, args...) +} // True asserts that the specified value is true. -// -// a.True(myBool, "myBool should be true") -// +// +// a.True(myBool) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { return True(a.t, value, msgAndArgs...) } +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + return Truef(a.t, value, msg, args...) +} // WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) } +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDurationf(a.t, expected, actual, delta, msg, args...) 
+} // Zero asserts that i is the zero value for its type and returns the truth. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { return Zero(a.t, i, msgAndArgs...) } + +// Zerof asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + return Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertions.go index d7c16c59..9d387bc7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/assertions.go @@ -4,8 +4,10 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "math" + "os" "reflect" "regexp" "runtime" @@ -18,6 +20,8 @@ import ( "github.com/pmezard/go-difflib/difflib" ) +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl + // TestingT is an interface wrapper around *testing.T type TestingT interface { Errorf(format string, args ...interface{}) @@ -38,7 +42,15 @@ func ObjectsAreEqual(expected, actual interface{}) bool { if expected == nil || actual == nil { return expected == actual } - + if exp, ok := expected.([]byte); ok { + act, ok := actual.([]byte) + if !ok { + return false + } else if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) + } return reflect.DeepEqual(expected, actual) } @@ -65,7 +77,7 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { /* CallerInfo is necessary because the assert functions use the testing object internally, causing it to print the file:line of the assert method, rather than where -the problem actually occured in calling code.*/ +the problem actually occurred in calling code.*/ // CallerInfo returns an array of strings containing the file and line number // of each stack frame leading from the current test to the assert call that @@ -82,7 +94,9 @@ func CallerInfo() []string { for i := 0; ; i++ { pc, file, line, ok = runtime.Caller(i) if !ok { - return nil + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break } // This is a huge edge case, but it will panic if this is the case, see #180 @@ -90,18 +104,30 @@ func CallerInfo() []string { break } - parts := strings.Split(file, "/") - dir := parts[len(parts)-2] - file = parts[len(parts)-1] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - f := runtime.FuncForPC(pc) if f == nil { break } name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. 
+ if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + if len(parts) > 1 { + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + // Drop the package segments := strings.Split(name, ".") name = segments[len(segments)-1] @@ -141,7 +167,7 @@ func getWhitespaceString() string { parts := strings.Split(file, "/") file = parts[len(parts)-1] - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) } @@ -158,22 +184,18 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { return "" } -// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's -// test printing (see inner comment for specifics) -func indentMessageLines(message string, tabs int) string { +// Aligns the provided message so that all lines after the first line start at the same location as the first line. +// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). +// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// basis on which the alignment occurs). +func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + // no need to align first line because it starts at the correct location (after the label) if i != 0 { - outBuf.WriteRune('\n') - } - for ii := 0; ii < tabs; ii++ { - outBuf.WriteRune('\t') - // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter - // by 1 prematurely. - if ii == 0 && i > 0 { - ii++ - } + // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab + outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") } outBuf.WriteString(scanner.Text()) } @@ -205,42 +227,70 @@ func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + content := []labeledContent{ + {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")}, + {"Error", failureMessage}, + } - message := messageFromMsgAndArgs(msgAndArgs...) + // Add test name if the Go version supports it + if n, ok := t.(interface { + Name() string + }); ok { + content = append(content, labeledContent{"Test", n.Name()}) + } - errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t") + message := messageFromMsgAndArgs(msgAndArgs...) if len(message) > 0 { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n"+ - "\r\tMessages:\t%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2), - message) - } else { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2)) + content = append(content, labeledContent{"Messages", message}) } + t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...)) + return false } +type labeledContent struct { + label string + content string +} + +// labeledOutput returns a string consisting of the provided labeledContent. 
Each labeled output is appended in the following manner: +// +// \r\t{{label}}:{{align_spaces}}\t{{content}}\n +// +// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. +// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +// alignment is achieved, "\t{{content}}\n" is added for the output. +// +// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. +func labeledOutput(content ...labeledContent) string { + longestLabel := 0 + for _, v := range content { + if len(v.label) > longestLabel { + longestLabel = len(v.label) + } + } + var output string + for _, v := range content { + output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" + } + return output +} + // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - interfaceType := reflect.TypeOf(interfaceObject).Elem() + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) + } if !reflect.TypeOf(object).Implements(interfaceType) { return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) } return true - } // IsType asserts that the specified objects are of the same type. @@ -255,41 +305,70 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// assert.Equal(t, 123, 123) // // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", + expected, actual, err), msgAndArgs...) + } if !ObjectsAreEqual(expected, actual) { diff := diff(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)%s", expected, actual, diff), msgAndArgs...) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) } return true } +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. +// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. 
+func formatUnequalValues(expected, actual interface{}) (e string, a string) { + if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf("%T(%#v)", expected, expected), + fmt.Sprintf("%T(%#v)", actual, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// assert.EqualValues(t, uint32(123), int32(123)) // // Returns whether the assertion was successful (true) or not (false). func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if !ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) } return true } -// Exactly asserts that two objects are equal is value and type. +// Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// assert.Exactly(t, int32(123), int64(123)) // // Returns whether the assertion was successful (true) or not (false). func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -307,7 +386,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err, "err should be something") +// assert.NotNil(t, err) // // Returns whether the assertion was successful (true) or not (false). func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { @@ -334,7 +413,7 @@ func isNil(object interface{}) bool { // Nil asserts that the specified object is nil. // -// assert.Nil(t, err, "err should be nothing") +// assert.Nil(t, err) // // Returns whether the assertion was successful (true) or not (false). func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { @@ -344,66 +423,32 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) } -var numericZeros = []interface{}{ - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - float32(0), - float64(0), -} - // isEmpty gets whether the specified object is considered empty or not. 
func isEmpty(object interface{}) bool { + // get nil case out of the way if object == nil { return true - } else if object == "" { - return true - } else if object == false { - return true - } - - for _, v := range numericZeros { - if object == v { - return true - } } objValue := reflect.ValueOf(object) switch objValue.Kind() { - case reflect.Map: - fallthrough - case reflect.Slice, reflect.Chan: - { - return (objValue.Len() == 0) - } - case reflect.Struct: - switch object.(type) { - case time.Time: - return object.(time.Time).IsZero() - } + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: - { - if objValue.IsNil() { - return true - } - switch object.(type) { - case *time.Time: - return object.(*time.Time).IsZero() - default: - return false - } + if objValue.IsNil() { + return true } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) } - return false } // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either @@ -457,7 +502,7 @@ func getLen(x interface{}) (ok bool, length int) { // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// assert.Len(t, mySlice, 3) // // Returns whether the assertion was successful (true) or not (false). func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { @@ -474,7 +519,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // True asserts that the specified value is true. // -// assert.True(t, myBool, "myBool should be true") +// assert.True(t, myBool) // // Returns whether the assertion was successful (true) or not (false). func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { @@ -489,7 +534,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { // False asserts that the specified value is false. // -// assert.False(t, myBool, "myBool should be false") +// assert.False(t, myBool) // // Returns whether the assertion was successful (true) or not (false). func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { @@ -504,10 +549,17 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// assert.NotEqual(t, obj1, obj2) // // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", + expected, actual, err), msgAndArgs...) + } if ObjectsAreEqual(expected, actual) { return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) @@ -558,9 +610,9 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) { // Contains asserts that the specified string, list(array, slice...) 
or map contains the // specified substring or element. // -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") // // Returns whether the assertion was successful (true) or not (false). func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { @@ -580,9 +632,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") // // Returns whether the assertion was successful (true) or not (false). func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { @@ -599,6 +651,148 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// +// Returns whether the assertion was successful (true) or not (false). +func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return true // we consider nil to be equal to the nil set + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + } + } + + return true +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// +// Returns whether the assertion was successful (true) or not (false). +func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) 
+ } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])) +// +// Returns whether the assertion was successful (true) or not (false). +func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if isEmpty(listA) && isEmpty(listB) { + return true + } + + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() + + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) + } + + if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) + } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { result := comp() @@ -636,9 +830,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") +// assert.Panics(t, func(){ GoCrazy() }) // // Returns whether the assertion was successful (true) or not (false). func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { @@ -650,11 +842,28 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { return true } +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. 
+// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// +// Returns whether the assertion was successful (true) or not (false). +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + funcDidPanic, panicValue := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...) + } + + return true +} + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") +// assert.NotPanics(t, func(){ RemainCalm() }) // // Returns whether the assertion was successful (true) or not (false). func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { @@ -668,7 +877,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // WithinDuration asserts that the two times are within duration delta of each other. // -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) // // Returns whether the assertion was successful (true) or not (false). func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { @@ -708,6 +917,8 @@ func toFloat(x interface{}) (float64, bool) { xf = float64(xn) case float64: xf = float64(xn) + case time.Duration: + xf = float64(xn) default: xok = false } @@ -730,7 +941,7 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) } if math.IsNaN(bf) { @@ -757,7 +968,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn expectedSlice := reflect.ValueOf(expected) for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) if !result { return result } @@ -766,6 +977,47 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn return true } +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same numbe of keys", msgAndArgs...) + } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) 
+ } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) + } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) if !aok { @@ -776,7 +1028,7 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { } bf, bok := toFloat(actual) if !bok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) } return math.Abs(af-bf) / math.Abs(af), nil @@ -792,7 +1044,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) } return true @@ -827,51 +1079,55 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // // actualObj, err := SomeFunction() // if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) +// assert.Equal(t, expectedObj, actualObj) // } // // Returns whether the assertion was successful (true) or not (false). func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if isNil(err) { - return true + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) } - return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...) + return true } // Error asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) // } // // Returns whether the assertion was successful (true) or not (false). func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - message := messageFromMsgAndArgs(msgAndArgs...) - return NotNil(t, err, "An error is expected but got nil. %s", message) + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + return true } // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } +// assert.EqualError(t, err, expectedErrorString) // // Returns whether the assertion was successful (true) or not (false). func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - if !NotNil(t, theError, "An error is expected but got nil. %s", message) { + if !Error(t, theError, msgAndArgs...) { return false } - s := "An error with value \"%s\" is expected but got \"%s\". %s" - return Equal(t, errString, theError.Error(), - s, errString, theError.Error(), message) + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "actual : %q", expected, actual), msgAndArgs...) + } + return true } // matchRegexp return true if a specified regexp matches a string. 
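The assertions.go hunks above add InDeltaMapValues and rework NoError, Error, and EqualError so they report failures directly (a %+v dump of the error, or a quoted expected/actual pair) instead of delegating to NotNil and Equal. As a minimal sketch of how the updated API reads from a caller, assuming an ordinary Go test file (the package name, test name, and values below are illustrative, not part of the diff):

    package example_test

    import (
        "errors"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestUpdatedAssertions(t *testing.T) {
        // InDeltaMapValues compares every value of two maps that share the
        // same keys; each pair must be within the given delta.
        expected := map[string]float64{"cpu": 0.50, "mem": 0.25}
        actual := map[string]float64{"cpu": 0.51, "mem": 0.24}
        assert.InDeltaMapValues(t, expected, actual, 0.02)

        // EqualError now compares the error text itself and, on mismatch,
        // prints quoted expected/actual strings rather than a generic
        // Equal failure.
        assert.EqualError(t, errors.New("boom"), "boom")

        // NoError fails with "Received unexpected error:" plus the %+v form
        // of the error; passing nil still succeeds.
        assert.NoError(t, nil)
    }

All three assertions pass as written; they are meant only to show the call shapes introduced by this revision.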
@@ -938,6 +1194,36 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { return true } +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) + } + return true +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if !info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) + } + return true +} + // JSONEq asserts that two JSON strings are equivalent. // // assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) @@ -986,9 +1272,8 @@ func diff(expected interface{}, actual interface{}) string { return "" } - spew.Config.SortKeys = true - e := spew.Sdump(expected) - a := spew.Sdump(actual) + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ A: difflib.SplitLines(e), @@ -1002,3 +1287,26 @@ func diff(expected interface{}, actual interface{}) string { return "\n\nDiff:\n" + diff } + +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. 
+func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/forward_assertions.go index b867e95e..9ad56851 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -13,4 +13,4 @@ func New(t TestingT) *Assertions { } } -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/http_assertions.go index e1b9442b..3101e78d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -8,16 +8,16 @@ import ( "strings" ) -// httpCode is a helper that returns HTTP code of the response. It returns -1 -// if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) if err != nil { - return -1 + return -1, err } handler(w, req) - return w.Code + return w.Code, nil } // HTTPSuccess asserts that a specified handler returns a success status code. @@ -25,12 +25,19 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) i // assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) return false } - return code >= http.StatusOK && code <= http.StatusPartialContent + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode } // HTTPRedirect asserts that a specified handler returns a redirect status code. 
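The http_assertions.go hunk above changes httpCode to return an error alongside the status code, and the HTTP assertions now accept optional msgAndArgs and fail with the requested URL and the status code actually received. A minimal sketch of the updated calling convention, with throwaway inline handlers (handler bodies, paths, and messages are illustrative):

    package example_test

    import (
        "net/http"
        "net/url"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestHTTPAssertions(t *testing.T) {
        ok := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }
        missing := func(w http.ResponseWriter, r *http.Request) { http.NotFound(w, r) }

        // msgAndArgs is new in this revision; on failure the assertion also
        // reports the URL it hit and the status code it got back.
        assert.HTTPSuccess(t, ok, "GET", "/health", nil, "health endpoint should answer 2xx")
        assert.HTTPError(t, missing, "GET", "/nope", url.Values{})
    }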
@@ -38,12 +45,19 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value // assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) return false } - return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode } // HTTPError asserts that a specified handler returns an error status code. @@ -51,12 +65,19 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu // assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) return false } - return code >= http.StatusBadRequest + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode } // HTTPBody is a helper that returns HTTP body of the response. It returns @@ -77,7 +98,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s // assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { body := HTTPBody(handler, method, url, values) contains := strings.Contains(body, fmt.Sprint(str)) @@ -94,12 +115,12 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, // assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { body := HTTPBody(handler, method, url, values) contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) } return !contains diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/doc.go deleted file mode 100644 index 377d5cc5..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend. -// -// testify contains the following packages: -// -// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system. -// -// The http package contains tools to make it easier to test http activity using the Go testing system. -// -// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected. -// -// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces. -package testify - -// blank imports help docs. -import ( - // assert package - _ "github.com/stretchr/testify/assert" - // http package - _ "github.com/stretchr/testify/http" - // mock package - _ "github.com/stretchr/testify/mock" -) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/doc.go deleted file mode 100644 index 695167c6..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package http DEPRECATED USE net/http/httptest -package http diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/test_response_writer.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/test_response_writer.go deleted file mode 100644 index 5c3f813f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/test_response_writer.go +++ /dev/null @@ -1,49 +0,0 @@ -package http - -import ( - "net/http" -) - -// TestResponseWriter DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. -type TestResponseWriter struct { - - // StatusCode is the last int written by the call to WriteHeader(int) - StatusCode int - - // Output is a string containing the written bytes using the Write([]byte) func. - Output string - - // header is the internal storage of the http.Header object - header http.Header -} - -// Header DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. -func (rw *TestResponseWriter) Header() http.Header { - - if rw.header == nil { - rw.header = make(http.Header) - } - - return rw.header -} - -// Write DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. 
-func (rw *TestResponseWriter) Write(bytes []byte) (int, error) { - - // assume 200 success if no header has been set - if rw.StatusCode == 0 { - rw.WriteHeader(200) - } - - // add these bytes to the output string - rw.Output = rw.Output + string(bytes) - - // return normal values - return 0, nil - -} - -// WriteHeader DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. -func (rw *TestResponseWriter) WriteHeader(i int) { - rw.StatusCode = i -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/test_round_tripper.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/test_round_tripper.go deleted file mode 100644 index b1e32f1d..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/http/test_round_tripper.go +++ /dev/null @@ -1,17 +0,0 @@ -package http - -import ( - "github.com/stretchr/testify/mock" - "net/http" -) - -// TestRoundTripper DEPRECATED USE net/http/httptest -type TestRoundTripper struct { - mock.Mock -} - -// RoundTrip DEPRECATED USE net/http/httptest -func (t *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - args := t.Called(req) - return args.Get(0).(*http.Response), args.Error(1) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/mock/doc.go deleted file mode 100644 index 7324128e..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/mock/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package mock provides a system by which it is possible to mock your objects -// and verify calls are happening as expected. -// -// Example Usage -// -// The mock package provides an object, Mock, that tracks activity on another object. It is usually -// embedded into a test object as shown below: -// -// type MyTestObject struct { -// // add a Mock object instance -// mock.Mock -// -// // other fields go here as normal -// } -// -// When implementing the methods of an interface, you wire your functions up -// to call the Mock.Called(args...) method, and return the appropriate values. -// -// For example, to mock a method that saves the name and age of a person and returns -// the year of their birth or an error, you might write this: -// -// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { -// args := o.Called(firstname, lastname, age) -// return args.Int(0), args.Error(1) -// } -// -// The Int, Error and Bool methods are examples of strongly typed getters that take the argument -// index position. Given this argument list: -// -// (12, true, "Something") -// -// You could read them out strongly typed like this: -// -// args.Int(0) -// args.Bool(1) -// args.String(2) -// -// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: -// -// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) -// -// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those -// cases you should check for nil first. 
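A minimal, self-contained sketch (not part of this patch) that exercises the SavePersonDetails example from the package comment above; the test body and argument values are hypothetical.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// MyTestObject embeds mock.Mock as described in the package comment.
type MyTestObject struct {
	mock.Mock
}

// SavePersonDetails wires the call through Mock.Called and returns the stubbed values.
func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
	args := o.Called(firstname, lastname, age)
	return args.Int(0), args.Error(1)
}

func TestSavePersonDetails(t *testing.T) {
	o := new(MyTestObject)

	// Expect exactly one call with these arguments and stub the return values.
	o.On("SavePersonDetails", "Jane", "Doe", 30).Return(1990, nil).Once()

	year, err := o.SavePersonDetails("Jane", "Doe", 30)
	assert.NoError(t, err)
	assert.Equal(t, 1990, year)

	// Fails the test if any expectation registered with On was left unmet.
	o.AssertExpectations(t)
}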
-package mock diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/mock/mock.go deleted file mode 100644 index 637896b4..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/mock/mock.go +++ /dev/null @@ -1,683 +0,0 @@ -package mock - -import ( - "fmt" - "reflect" - "regexp" - "runtime" - "strings" - "sync" - "time" - - "github.com/stretchr/objx" - "github.com/stretchr/testify/assert" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Logf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - FailNow() -} - -/* - Call -*/ - -// Call represents a method call and is used for setting expectations, -// as well as recording activity. -type Call struct { - Parent *Mock - - // The name of the method that was or will be called. - Method string - - // Holds the arguments of the method. - Arguments Arguments - - // Holds the arguments that should be returned when - // this method is called. - ReturnArguments Arguments - - // The number of times to return the return arguments when setting - // expectations. 0 means to always return the value. - Repeatability int - - // Holds a channel that will be used to block the Return until it either - // recieves a message or is closed. nil means it returns immediately. - WaitFor <-chan time.Time - - // Holds a handler used to manipulate arguments content that are passed by - // reference. It's useful when mocking methods such as unmarshalers or - // decoders. - RunFn func(Arguments) -} - -func newCall(parent *Mock, methodName string, methodArguments ...interface{}) *Call { - return &Call{ - Parent: parent, - Method: methodName, - Arguments: methodArguments, - ReturnArguments: make([]interface{}, 0), - Repeatability: 0, - WaitFor: nil, - RunFn: nil, - } -} - -func (c *Call) lock() { - c.Parent.mutex.Lock() -} - -func (c *Call) unlock() { - c.Parent.mutex.Unlock() -} - -// Return specifies the return arguments for the expectation. -// -// Mock.On("DoSomething").Return(errors.New("failed")) -func (c *Call) Return(returnArguments ...interface{}) *Call { - c.lock() - defer c.unlock() - - c.ReturnArguments = returnArguments - - return c -} - -// Once indicates that that the mock should only return the value once. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() -func (c *Call) Once() *Call { - return c.Times(1) -} - -// Twice indicates that that the mock should only return the value twice. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() -func (c *Call) Twice() *Call { - return c.Times(2) -} - -// Times indicates that that the mock should only return the indicated number -// of times. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) -func (c *Call) Times(i int) *Call { - c.lock() - defer c.unlock() - c.Repeatability = i - return c -} - -// WaitUntil sets the channel that will block the mock's return until its closed -// or a message is received. 
-// -// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) -func (c *Call) WaitUntil(w <-chan time.Time) *Call { - c.lock() - defer c.unlock() - c.WaitFor = w - return c -} - -// After sets how long to block until the call returns -// -// Mock.On("MyMethod", arg1, arg2).After(time.Second) -func (c *Call) After(d time.Duration) *Call { - return c.WaitUntil(time.After(d)) -} - -// Run sets a handler to be called before returning. It can be used when -// mocking a method such as unmarshalers that takes a pointer to a struct and -// sets properties in such struct -// -// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(function(args Arguments) { -// arg := args.Get(0).(*map[string]interface{}) -// arg["foo"] = "bar" -// }) -func (c *Call) Run(fn func(Arguments)) *Call { - c.lock() - defer c.unlock() - c.RunFn = fn - return c -} - -// On chains a new expectation description onto the mocked interface. This -// allows syntax like. -// -// Mock. -// On("MyMethod", 1).Return(nil). -// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) -func (c *Call) On(methodName string, arguments ...interface{}) *Call { - return c.Parent.On(methodName, arguments...) -} - -// Mock is the workhorse used to track activity on another object. -// For an example of its usage, refer to the "Example Usage" section at the top -// of this document. -type Mock struct { - // Represents the calls that are expected of - // an object. - ExpectedCalls []*Call - - // Holds the calls that were made to this mocked object. - Calls []Call - - // TestData holds any data that might be useful for testing. Testify ignores - // this data completely allowing you to do whatever you like with it. - testData objx.Map - - mutex sync.Mutex -} - -// TestData holds any data that might be useful for testing. Testify ignores -// this data completely allowing you to do whatever you like with it. -func (m *Mock) TestData() objx.Map { - - if m.testData == nil { - m.testData = make(objx.Map) - } - - return m.testData -} - -/* - Setting expectations -*/ - -// On starts a description of an expectation of the specified method -// being called. -// -// Mock.On("MyMethod", arg1, arg2) -func (m *Mock) On(methodName string, arguments ...interface{}) *Call { - for _, arg := range arguments { - if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { - panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) - } - } - - m.mutex.Lock() - defer m.mutex.Unlock() - c := newCall(m, methodName, arguments...) 
- m.ExpectedCalls = append(m.ExpectedCalls, c) - return c -} - -// /* -// Recording and responding to activity -// */ - -func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - m.mutex.Lock() - defer m.mutex.Unlock() - for i, call := range m.ExpectedCalls { - if call.Method == method && call.Repeatability > -1 { - - _, diffCount := call.Arguments.Diff(arguments) - if diffCount == 0 { - return i, call - } - - } - } - return -1, nil -} - -func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) { - diffCount := 0 - var closestCall *Call - - for _, call := range m.expectedCalls() { - if call.Method == method { - - _, tempDiffCount := call.Arguments.Diff(arguments) - if tempDiffCount < diffCount || diffCount == 0 { - diffCount = tempDiffCount - closestCall = call - } - - } - } - - if closestCall == nil { - return false, nil - } - - return true, closestCall -} - -func callString(method string, arguments Arguments, includeArgumentValues bool) string { - - var argValsString string - if includeArgumentValues { - var argVals []string - for argIndex, arg := range arguments { - argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) - } - argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) - } - - return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) -} - -// Called tells the mock object that a method has been called, and gets an array -// of arguments to return. Panics if the call is unexpected (i.e. not preceeded by -// appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. -func (m *Mock) Called(arguments ...interface{}) Arguments { - // get the calling function's name - pc, _, _, ok := runtime.Caller(1) - if !ok { - panic("Couldn't get the caller information") - } - functionPath := runtime.FuncForPC(pc).Name() - //Next four lines are required to use GCCGO function naming conventions. - //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock - //uses inteface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree - //With GCCGO we need to remove interface information starting from pN
. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] - } - parts := strings.Split(functionPath, ".") - functionName := parts[len(parts)-1] - - found, call := m.findExpectedCall(functionName, arguments...) - - if found < 0 { - // we have to fail here - because we don't know what to do - // as the return arguments. This is because: - // - // a) this is a totally unexpected call to this method, - // b) the arguments are not what was expected, or - // c) the developer has forgotten to add an accompanying On...Return pair. - - closestFound, closestCall := m.findClosestCall(functionName, arguments...) - - if closestFound { - panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true))) - } else { - panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) - } - } else { - m.mutex.Lock() - switch { - case call.Repeatability == 1: - call.Repeatability = -1 - - case call.Repeatability > 1: - call.Repeatability-- - } - m.mutex.Unlock() - } - - // add the call - m.mutex.Lock() - m.Calls = append(m.Calls, *newCall(m, functionName, arguments...)) - m.mutex.Unlock() - - // block if specified - if call.WaitFor != nil { - <-call.WaitFor - } - - if call.RunFn != nil { - call.RunFn(arguments) - } - - return call.ReturnArguments -} - -/* - Assertions -*/ - -// AssertExpectationsForObjects asserts that everything specified with On and Return -// of the specified objects was in fact called as expected. -// -// Calls may have occurred in any order. -func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { - var success = true - for _, obj := range testObjects { - mockObj := obj.(Mock) - success = success && mockObj.AssertExpectations(t) - } - return success -} - -// AssertExpectations asserts that everything specified with On and Return was -// in fact called as expected. Calls may have occurred in any order. -func (m *Mock) AssertExpectations(t TestingT) bool { - var somethingMissing bool - var failedExpectations int - - // iterate through each expectation - expectedCalls := m.expectedCalls() - for _, expectedCall := range expectedCalls { - if !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) { - somethingMissing = true - failedExpectations++ - t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } else { - m.mutex.Lock() - if expectedCall.Repeatability > 0 { - somethingMissing = true - failedExpectations++ - } else { - t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } - m.mutex.Unlock() - } - } - - if somethingMissing { - t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) - } - - return !somethingMissing -} - -// AssertNumberOfCalls asserts that the method was called expectedCalls times. 
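Continuing the hypothetical example_test sketch above (same MyTestObject and imports, plus mock.Anything from this file), a short illustration of the call-count assertions defined here; it is not part of the patch.

func TestCallCountChecks(t *testing.T) {
	o := new(MyTestObject)

	// Repeatability stays at 0, so this expectation matches any number of calls.
	o.On("SavePersonDetails", mock.Anything, mock.Anything, mock.Anything).Return(0, nil)

	o.SavePersonDetails("Jane", "Doe", 30)
	o.SavePersonDetails("John", "Doe", 31)

	o.AssertNumberOfCalls(t, "SavePersonDetails", 2)
	o.AssertCalled(t, "SavePersonDetails", "Jane", "Doe", 30)
	o.AssertNotCalled(t, "SavePersonDetails", "Nobody", "Here", 0)
}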
-func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { - var actualCalls int - for _, call := range m.calls() { - if call.Method == methodName { - actualCalls++ - } - } - return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) -} - -// AssertCalled asserts that the method was called. -func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { - t.Logf("%v", m.expectedCalls()) - return false - } - return true -} - -// AssertNotCalled asserts that the method was not called. -func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { - t.Logf("%v", m.expectedCalls()) - return false - } - return true -} - -func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { - for _, call := range m.calls() { - if call.Method == methodName { - - _, differences := Arguments(expected).Diff(call.Arguments) - - if differences == 0 { - // found the expected call - return true - } - - } - } - // we didn't find the expected call - return false -} - -func (m *Mock) expectedCalls() []*Call { - m.mutex.Lock() - defer m.mutex.Unlock() - return append([]*Call{}, m.ExpectedCalls...) -} - -func (m *Mock) calls() []Call { - m.mutex.Lock() - defer m.mutex.Unlock() - return append([]Call{}, m.Calls...) -} - -/* - Arguments -*/ - -// Arguments holds an array of method arguments or return values. -type Arguments []interface{} - -const ( - // Anything is used in Diff and Assert when the argument being tested - // shouldn't be taken into consideration. - Anything string = "mock.Anything" -) - -// AnythingOfTypeArgument is a string that contains the type of an argument -// for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string - -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. -// -// For example: -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) -func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) -} - -// argumentMatcher performs custom argument matching, returning whether or -// not the argument is matched by the expectation fixture function. -type argumentMatcher struct { - // fn is a function which accepts one argument, and returns a bool. - fn reflect.Value -} - -func (f argumentMatcher) Matches(argument interface{}) bool { - expectType := f.fn.Type().In(0) - - if reflect.TypeOf(argument).AssignableTo(expectType) { - result := f.fn.Call([]reflect.Value{reflect.ValueOf(argument)}) - return result[0].Bool() - } - return false -} - -func (f argumentMatcher) String() string { - return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) -} - -// MatchedBy can be used to match a mock call based on only certain properties -// from a complex struct or some calculation. It takes a function that will be -// evaluated with the called argument and will return true when there's a match -// and false otherwise. 
-// -// Example: -// m.On("Do", func(req *http.Request) bool { return req.Host == "example.com" }) -// -// |fn|, must be a function accepting a single argument (of the expected type) -// which returns a bool. If |fn| doesn't match the required signature, -// MathedBy() panics. -func MatchedBy(fn interface{}) argumentMatcher { - fnType := reflect.TypeOf(fn) - - if fnType.Kind() != reflect.Func { - panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) - } - if fnType.NumIn() != 1 { - panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) - } - if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { - panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) - } - - return argumentMatcher{fn: reflect.ValueOf(fn)} -} - -// Get Returns the argument at the specified index. -func (args Arguments) Get(index int) interface{} { - if index+1 > len(args) { - panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) - } - return args[index] -} - -// Is gets whether the objects match the arguments specified. -func (args Arguments) Is(objects ...interface{}) bool { - for i, obj := range args { - if obj != objects[i] { - return false - } - } - return true -} - -// Diff gets a string describing the differences between the arguments -// and the specified objects. -// -// Returns the diff string and number of differences found. -func (args Arguments) Diff(objects []interface{}) (string, int) { - - var output = "\n" - var differences int - - var maxArgCount = len(args) - if len(objects) > maxArgCount { - maxArgCount = len(objects) - } - - for i := 0; i < maxArgCount; i++ { - var actual, expected interface{} - - if len(objects) <= i { - actual = "(Missing)" - } else { - actual = objects[i] - } - - if len(args) <= i { - expected = "(Missing)" - } else { - expected = args[i] - } - - if matcher, ok := expected.(argumentMatcher); ok { - if matcher.Matches(actual) { - output = fmt.Sprintf("%s\t%d: \u2705 %s matched by %s\n", output, i, actual, matcher) - } else { - differences++ - output = fmt.Sprintf("%s\t%d: \u2705 %s not matched by %s\n", output, i, actual, matcher) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) - } - - } else { - - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected) - } - } - - } - - if differences == 0 { - return "No differences.", differences - } - - return output, differences - -} - -// Assert compares the arguments with the specified objects and fails if -// they do not exactly match. -func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { - - // get the differences - diff, diffCount := args.Diff(objects) - - if diffCount == 0 { - return true - } - - // there are differences... report them... 
- t.Logf(diff) - t.Errorf("%sArguments do not match.", assert.CallerInfo()) - - return false - -} - -// String gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -// -// If no index is provided, String() returns a complete string representation -// of the arguments. -func (args Arguments) String(indexOrNil ...int) string { - - if len(indexOrNil) == 0 { - // normal String() method - return a string representation of the args - var argsStr []string - for _, arg := range args { - argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) - } - return strings.Join(argsStr, ",") - } else if len(indexOrNil) == 1 { - // Index has been specified - get the argument at that index - var index = indexOrNil[0] - var s string - var ok bool - if s, ok = args.Get(index).(string); !ok { - panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) - } - return s - } - - panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - -} - -// Int gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Int(index int) int { - var s int - var ok bool - if s, ok = args.Get(index).(int); !ok { - panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Error gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Error(index int) error { - obj := args.Get(index) - var s error - var ok bool - if obj == nil { - return nil - } - if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Bool gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Bool(index int) bool { - var s bool - var ok bool - if s, ok = args.Get(index).(bool); !ok { - panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/doc.go deleted file mode 100644 index 169de392..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Package require implements the same assertions as the `assert` package but -// stops test execution when a test fails. -// -// Example Usage -// -// The following is a complete example using require in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/require" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// require.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// Assertions -// -// The `require` package have same global functions as in the `assert` package, -// but instead of returning a boolean result they call `t.FailNow()`. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. 
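A small sketch (not part of this patch) of the behaviour the require package comment above describes; the test data and struct are hypothetical, and a failed require call stops the test via t.FailNow() rather than returning false.

package example_test

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestDecodeUser(t *testing.T) {
	var user struct {
		Name string `json:"name"`
		Age  int    `json:"age"`
	}

	err := json.Unmarshal([]byte(`{"name": "Jane", "age": 30}`), &user)

	// Unlike assert.NoError, a failure here aborts the test immediately, so
	// the field checks below never run against a half-decoded struct.
	require.NoError(t, err, "payload should decode cleanly")
	require.Equal(t, "Jane", user.Name)
	require.Equal(t, 30, user.Age)
}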
-package require diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/forward_requirements.go deleted file mode 100644 index d3c2ab9b..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/forward_requirements.go +++ /dev/null @@ -1,16 +0,0 @@ -package require - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require.go deleted file mode 100644 index 1bcfcb0d..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require.go +++ /dev/null @@ -1,464 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND -*/ - -package require - -import ( - - assert "github.com/stretchr/testify/assert" - http "net/http" - url "net/url" - time "time" -) - - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { - if !assert.Condition(t, comp, msgAndArgs...) { - t.FailNow() - } -} - - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). -func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if !assert.Contains(t, s, contains, msgAndArgs...) { - t.FailNow() - } -} - - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -// -// Returns whether the assertion was successful (true) or not (false). -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.Empty(t, object, msgAndArgs...) { - t.FailNow() - } -} - - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.Equal(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { - if !assert.EqualError(t, theError, errString, msgAndArgs...) 
{ - t.FailNow() - } -} - - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.EqualValues(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func Error(t TestingT, err error, msgAndArgs ...interface{}) { - if !assert.Error(t, err, msgAndArgs...) { - t.FailNow() - } -} - - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.Exactly(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if !assert.Fail(t, failureMessage, msgAndArgs...) { - t.FailNow() - } -} - - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if !assert.FailNow(t, failureMessage, msgAndArgs...) { - t.FailNow() - } -} - - -// False asserts that the specified value is false. -// -// assert.False(t, myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func False(t TestingT, value bool, msgAndArgs ...interface{}) { - if !assert.False(t, value, msgAndArgs...) { - t.FailNow() - } -} - - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { - if !assert.HTTPBodyContains(t, handler, method, url, values, str) { - t.FailNow() - } -} - - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { - if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) { - t.FailNow() - } -} - - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { - if !assert.HTTPError(t, handler, method, url, values) { - t.FailNow() - } -} - - -// HTTPRedirect asserts that a specified handler returns a redirect status code. 
-// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { - if !assert.HTTPRedirect(t, handler, method, url, values) { - t.FailNow() - } -} - - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { - if !assert.HTTPSuccess(t, handler, method, url, values) { - t.FailNow() - } -} - - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { - t.FailNow() - } -} - - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() - } -} - - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() - } -} - - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { - t.FailNow() - } -} - - -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() - } -} - - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - if !assert.IsType(t, expectedType, object, msgAndArgs...) { - t.FailNow() - } -} - - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { - if !assert.JSONEq(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). 
-func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { - if !assert.Len(t, object, length, msgAndArgs...) { - t.FailNow() - } -} - - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.Nil(t, object, msgAndArgs...) { - t.FailNow() - } -} - - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NoError(t TestingT, err error, msgAndArgs ...interface{}) { - if !assert.NoError(t, err, msgAndArgs...) { - t.FailNow() - } -} - - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if !assert.NotContains(t, s, contains, msgAndArgs...) { - t.FailNow() - } -} - - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.NotEmpty(t, object, msgAndArgs...) { - t.FailNow() - } -} - - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.NotEqual(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.NotNil(t, object, msgAndArgs...) { - t.FailNow() - } -} - - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if !assert.NotPanics(t, f, msgAndArgs...) { - t.FailNow() - } -} - - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). 
-func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if !assert.NotRegexp(t, rx, str, msgAndArgs...) { - t.FailNow() - } -} - - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if !assert.NotZero(t, i, msgAndArgs...) { - t.FailNow() - } -} - - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if !assert.Panics(t, f, msgAndArgs...) { - t.FailNow() - } -} - - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if !assert.Regexp(t, rx, str, msgAndArgs...) { - t.FailNow() - } -} - - -// True asserts that the specified value is true. -// -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func True(t TestingT, value bool, msgAndArgs ...interface{}) { - if !assert.True(t, value, msgAndArgs...) { - t.FailNow() - } -} - - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() - } -} - - -// Zero asserts that i is the zero value for its type and returns the truth. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if !assert.Zero(t, i, msgAndArgs...) { - t.FailNow() - } -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require.go.tmpl deleted file mode 100644 index ab1b1e9f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ /dev/null @@ -1,6 +0,0 @@ -{{.Comment}} -func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { - if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { - t.FailNow() - } -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require_forward.go deleted file mode 100644 index 58324f10..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require_forward.go +++ /dev/null @@ -1,388 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND -*/ - -package require - -import ( - - assert "github.com/stretchr/testify/assert" - http "net/http" - url "net/url" - time "time" -) - - -// Condition uses a Comparison to assert a complex condition. 
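An illustrative sketch (not part of this patch) of the forwarded form defined in require_forward.go: require.New from forward_requirements.go above returns an *Assertions whose methods pass the stored t to the package-level functions, so t does not have to be threaded through every call. The test body is hypothetical.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestWithForwardedAssertions(t *testing.T) {
	r := require.New(t)

	values := []int{1, 2, 3}
	r.Len(values, 3)
	r.Contains(values, 2)
	r.NotContains(values, 7)
}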
-func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { - Condition(a.t, comp, msgAndArgs...) -} - - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { - Contains(a.t, s, contains, msgAndArgs...) -} - - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { - Empty(a.t, object, msgAndArgs...) -} - - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - Equal(a.t, expected, actual, msgAndArgs...) -} - - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { - EqualError(a.t, theError, errString, msgAndArgs...) -} - - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - EqualValues(a.t, expected, actual, msgAndArgs...) -} - - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { - Error(a.t, err, msgAndArgs...) -} - - -// Exactly asserts that two objects are equal is value and type. -// -// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - Exactly(a.t, expected, actual, msgAndArgs...) -} - - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { - Fail(a.t, failureMessage, msgAndArgs...) -} - - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { - FailNow(a.t, failureMessage, msgAndArgs...) -} - - -// False asserts that the specified value is false. 
-// -// a.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { - False(a.t, value, msgAndArgs...) -} - - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { - HTTPBodyContains(a.t, handler, method, url, values, str) -} - - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { - HTTPBodyNotContains(a.t, handler, method, url, values, str) -} - - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) { - HTTPError(a.t, handler, method, url, values) -} - - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) { - HTTPRedirect(a.t, handler, method, url, values) -} - - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) { - HTTPSuccess(a.t, handler, method, url, values) -} - - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - Implements(a.t, interfaceObject, object, msgAndArgs...) -} - - -// InDelta asserts that the two numerals are within delta of each other. -// -// a.InDelta(math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - - -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) -} - - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - IsType(a.t, expectedType, object, msgAndArgs...) -} - - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { - JSONEq(a.t, expected, actual, msgAndArgs...) -} - - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { - Len(a.t, object, length, msgAndArgs...) -} - - -// Nil asserts that the specified object is nil. -// -// a.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { - Nil(a.t, object, msgAndArgs...) -} - - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { - NoError(a.t, err, msgAndArgs...) -} - - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { - NotContains(a.t, s, contains, msgAndArgs...) -} - - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { - NotEmpty(a.t, object, msgAndArgs...) -} - - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - NotEqual(a.t, expected, actual, msgAndArgs...) 
-} - - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { - NotNil(a.t, object, msgAndArgs...) -} - - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { - NotPanics(a.t, f, msgAndArgs...) -} - - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { - NotRegexp(a.t, rx, str, msgAndArgs...) -} - - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { - NotZero(a.t, i, msgAndArgs...) -} - - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { - Panics(a.t, f, msgAndArgs...) -} - - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { - Regexp(a.t, rx, str, msgAndArgs...) -} - - -// True asserts that the specified value is true. -// -// a.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { - True(a.t, value, msgAndArgs...) -} - - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - - -// Zero asserts that i is the zero value for its type and returns the truth. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { - Zero(a.t, i, msgAndArgs...) 
-} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl deleted file mode 100644 index b93569e0..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl +++ /dev/null @@ -1,4 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { - {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/requirements.go deleted file mode 100644 index 41147562..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/require/requirements.go +++ /dev/null @@ -1,9 +0,0 @@ -package require - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) - FailNow() -} - -//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/doc.go deleted file mode 100644 index f91a245d..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package suite contains logic for creating testing suite structs -// and running the methods on those structs as tests. The most useful -// piece of this package is that you can create setup/teardown methods -// on your testing suites, which will run before/after the whole suite -// or individual tests (depending on which interface(s) you -// implement). -// -// A testing suite is usually built by first extending the built-in -// suite functionality from suite.Suite in testify. Alternatively, -// you could reproduce that logic on your own if you wanted (you -// just need to implement the TestingSuite interface from -// suite/interfaces.go). -// -// After that, you can implement any of the interfaces in -// suite/interfaces.go to add setup/teardown functionality to your -// suite, and add any methods that start with "Test" to add tests. -// Methods that do not match any suite interfaces and do not begin -// with "Test" will not be run by testify, and can safely be used as -// helper methods. -// -// Once you've built your testing suite, you need to run the suite -// (using suite.Run from testify) inside any function that matches the -// identity that "go test" is already looking for (i.e. -// func(*testing.T)). -// -// Regular expression to select test suites specified command-line -// argument "-run". Regular expression to select the methods -// of test suites specified command-line argument "-m". -// Suite object has assertion methods. 
-// -// A crude example: -// // Basic imports -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// ) -// -// // Define the suite, and absorb the built-in basic suite -// // functionality from testify - including a T() method which -// // returns the current testing context -// type ExampleTestSuite struct { -// suite.Suite -// VariableThatShouldStartAtFive int -// } -// -// // Make sure that VariableThatShouldStartAtFive is set to five -// // before each test -// func (suite *ExampleTestSuite) SetupTest() { -// suite.VariableThatShouldStartAtFive = 5 -// } -// -// // All methods that begin with "Test" are run as tests within a -// // suite. -// func (suite *ExampleTestSuite) TestExample() { -// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) -// suite.Equal(5, suite.VariableThatShouldStartAtFive) -// } -// -// // In order for 'go test' to run this suite, we need to create -// // a normal test function and pass our suite to suite.Run -// func TestExampleTestSuite(t *testing.T) { -// suite.Run(t, new(ExampleTestSuite)) -// } -package suite diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/interfaces.go deleted file mode 100644 index 20969472..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/interfaces.go +++ /dev/null @@ -1,34 +0,0 @@ -package suite - -import "testing" - -// TestingSuite can store and return the current *testing.T context -// generated by 'go test'. -type TestingSuite interface { - T() *testing.T - SetT(*testing.T) -} - -// SetupAllSuite has a SetupSuite method, which will run before the -// tests in the suite are run. -type SetupAllSuite interface { - SetupSuite() -} - -// SetupTestSuite has a SetupTest method, which will run before each -// test in the suite. -type SetupTestSuite interface { - SetupTest() -} - -// TearDownAllSuite has a TearDownSuite method, which will run after -// all the tests in the suite have been run. -type TearDownAllSuite interface { - TearDownSuite() -} - -// TearDownTestSuite has a TearDownTest method, which will run after -// each test in the suite. -type TearDownTestSuite interface { - TearDownTest() -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/suite.go deleted file mode 100644 index f831e251..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/suite/suite.go +++ /dev/null @@ -1,115 +0,0 @@ -package suite - -import ( - "flag" - "fmt" - "os" - "reflect" - "regexp" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var matchMethod = flag.String("m", "", "regular expression to select tests of the suite to run") - -// Suite is a basic testing suite with methods for storing and -// retrieving the current *testing.T context. -type Suite struct { - *assert.Assertions - require *require.Assertions - t *testing.T -} - -// T retrieves the current *testing.T context. -func (suite *Suite) T() *testing.T { - return suite.t -} - -// SetT sets the current *testing.T context. -func (suite *Suite) SetT(t *testing.T) { - suite.t = t - suite.Assertions = assert.New(t) - suite.require = require.New(t) -} - -// Require returns a require context for suite. 
-func (suite *Suite) Require() *require.Assertions { - if suite.require == nil { - suite.require = require.New(suite.T()) - } - return suite.require -} - -// Assert returns an assert context for suite. Normally, you can call -// `suite.NoError(expected, actual)`, but for situations where the embedded -// methods are overridden (for example, you might want to override -// assert.Assertions with require.Assertions), this method is provided so you -// can call `suite.Assert().NoError()`. -func (suite *Suite) Assert() *assert.Assertions { - if suite.Assertions == nil { - suite.Assertions = assert.New(suite.T()) - } - return suite.Assertions -} - -// Run takes a testing suite and runs all of the tests attached -// to it. -func Run(t *testing.T, suite TestingSuite) { - suite.SetT(t) - - if setupAllSuite, ok := suite.(SetupAllSuite); ok { - setupAllSuite.SetupSuite() - } - defer func() { - if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { - tearDownAllSuite.TearDownSuite() - } - }() - - methodFinder := reflect.TypeOf(suite) - tests := []testing.InternalTest{} - for index := 0; index < methodFinder.NumMethod(); index++ { - method := methodFinder.Method(index) - ok, err := methodFilter(method.Name) - if err != nil { - fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) - os.Exit(1) - } - if ok { - test := testing.InternalTest{ - Name: method.Name, - F: func(t *testing.T) { - parentT := suite.T() - suite.SetT(t) - if setupTestSuite, ok := suite.(SetupTestSuite); ok { - setupTestSuite.SetupTest() - } - defer func() { - if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { - tearDownTestSuite.TearDownTest() - } - suite.SetT(parentT) - }() - method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) - }, - } - tests = append(tests, test) - } - } - - if !testing.RunTests(func(_, _ string) (bool, error) { return true, nil }, - tests) { - t.Fail() - } -} - -// Filtering method according to set regular expression -// specified command-line argument -m -func methodFilter(name string) (bool, error) { - if ok, _ := regexp.MatchString("^Test", name); !ok { - return false, nil - } - return regexp.MatchString(*matchMethod, name) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index a8d27a3f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2015 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
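The suite machinery deleted above (the TestingSuite contract, the optional Setup*/TearDown* interfaces, suite.Run's reflection over Test* methods, and the -m regexp filter) is easiest to see end to end. A small sketch under the assumption that the vendored API matches upstream testify; DBSuite and its fields are invented for illustration:

    package example_test

    import (
    	"testing"

    	"github.com/stretchr/testify/suite"
    )

    // DBSuite embeds suite.Suite and opts into the lifecycle hooks by
    // implementing the interfaces from suite/interfaces.go.
    type DBSuite struct {
    	suite.Suite
    	conn string
    }

    // SetupSuite runs once, before any Test* method (SetupAllSuite).
    func (s *DBSuite) SetupSuite() { s.conn = "fake-connection" }

    // SetupTest runs before every Test* method (SetupTestSuite).
    func (s *DBSuite) SetupTest() {}

    // TearDownSuite runs once, after all Test* methods (TearDownAllSuite).
    func (s *DBSuite) TearDownSuite() { s.conn = "" }

    // Only methods matching ^Test are collected by suite.Run; the -m flag
    // registered above (matchMethod) can further filter them by regexp when
    // passed through to the test binary.
    func (s *DBSuite) TestPing() {
    	s.Require().NotEmpty(s.conn)
    }

    // The bridge function that "go test" actually discovers.
    func TestDBSuite(t *testing.T) {
    	suite.Run(t, new(DBSuite))
    }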
- -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) -) - -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } - - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) - if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - } -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } - } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 457e4123..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2015 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. 
-func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 14f02dc1..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("<nil>") - maxNewlineBytes = []byte("<max depth reached>\n") - maxShortBytes = []byte("<max>") - circularBytes = []byte("<already shown>") - circularShortBytes = []byte("<shown>") - invalidAngleBytes = []byte("<invalid>") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface.
However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. 
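The handleMethods/catchPanic pair above is what lets spew call user-supplied Stringer and error methods safely. A hedged sketch of the behaviour being protected against, using the upstream go-spew import path; the loud type is invented for illustration and the exact panic marker text is whatever the constants above define:

    package main

    import (
    	"fmt"

    	"github.com/davecgh/go-spew/spew"
    )

    // loud implements fmt.Stringer badly: String always panics.
    type loud struct{ n int }

    func (l loud) String() string { panic("broken Stringer") }

    func main() {
    	// handleMethods defers catchPanic before calling String, so the panic
    	// is rendered inline as a "(PANIC=...)" marker instead of crashing.
    	spew.Dump(loud{n: 1})

    	// With DisableMethods set, the Stringer is skipped entirely and the
    	// struct fields are dumped as-is.
    	cfg := spew.ConfigState{Indent: " ", DisableMethods: true}
    	cfg.Dump(loud{n: 1})

    	fmt.Println("still running")
    }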
-type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. 
-func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index ee1ab07b..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. 
- // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "disableunsafe" build tag specified. - DisablePointerMethods bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) 
-} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. 
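These ConfigState wrappers mirror the top-level helpers one for one, which mainly matters when you want settings that do not touch the global spew.Config. A sketch of the deterministic-output case that the SortKeys/sortValues machinery above exists for (the map contents are illustrative):

    package main

    import (
    	"os"

    	"github.com/davecgh/go-spew/spew"
    )

    func main() {
    	// A local ConfigState keeps these settings away from the global
    	// spew.Config. SortKeys routes map keys through the sorting helpers
    	// above, so repeated runs produce identical, diffable output.
    	cfg := spew.ConfigState{Indent: "\t", SortKeys: true}

    	m := map[string]int{"charlie": 3, "alpha": 1, "bravo": 2}
    	cfg.Dump(m)             // keys printed in sorted order
    	cfg.Fdump(os.Stderr, m) // same formatting, arbitrary io.Writer
    	_ = cfg.Sdump(m)        // or capture the output as a string
    }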
The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. 
-// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index 5be0c406..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) 
- -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index 36a2b6cc..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,511 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
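The doc.go text above repeatedly points at the hexdump -C treatment of byte slices; a tiny sketch of what that claim refers to (output shape described loosely, not verbatim):

    package main

    import "github.com/davecgh/go-spew/spew"

    func main() {
    	// []byte and [N]byte values are rendered as an offset/hex/ASCII table
    	// (see dumpSlice below), rather than as a plain list of integers.
    	spew.Dump([]byte("hello, hexdump"))

    	// Other slices still get the recursive per-element treatment.
    	spew.Dump([]int{1, 2, 3})
    }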
- */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. 
- d.w.Write(openParenBytes) - switch { - case nilFound == true: - d.w.Write(nilAngleBytes) - - case cycleFound == true: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // TODO(davec): Fix up the disableUnsafe bits... - - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. 
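dumpPtr's pointer bookkeeping above is what keeps Dump from recursing forever on self-referential data. A sketch with an invented node type:

    package main

    import "github.com/davecgh/go-spew/spew"

    // node forms a trivially circular structure.
    type node struct {
    	name string
    	next *node
    }

    func main() {
    	a := &node{name: "a"}
    	b := &node{name: "b", next: a}
    	a.next = b // a -> b -> a

    	// dumpPtr records each dereferenced address in d.pointers, so when the
    	// walk reaches a pointer it has already shown at a shallower depth it
    	// prints a circular marker instead of recursing forever.
    	spew.Dump(a)
    }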
- if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
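The MaxDepth checks woven through the slice, map, and struct cases here are the knob for trimming very deep structures. A sketch using invented inner/outer types:

    package main

    import "github.com/davecgh/go-spew/spew"

    type inner struct{ leaf string }
    type outer struct{ in inner }

    func main() {
    	// With MaxDepth set, anything nested deeper than one level is replaced
    	// by the max-depth marker instead of being expanded.
    	cfg := spew.NewDefaultConfig()
    	cfg.MaxDepth = 1
    	cfg.Dump(outer{in: inner{leaf: "hidden"}})

    	// The default (0) means no limit.
    	spew.Dump(outer{in: inner{leaf: "visible"}})
    }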
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. 
See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index ecf3b80e..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. -func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. 
-// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound == true: - f.fs.Write(nilAngleBytes) - - case cycleFound == true: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. 
- if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. -*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index d8233f54..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. 
It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md deleted file mode 100644 index 21999458..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md +++ /dev/null @@ -1,23 +0,0 @@ -objx - by Mat Ryer and Tyler Bunnell - -The MIT License (MIT) - -Copyright (c) 2014 Stretchr, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
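Before the diff moves on to the vendored objx files, here is a minimal, hedged sketch of the go-spew API that the three deleted files above (dump.go, format.go, spew.go) provided, in case any Beats test code needs to be checked for remaining references. It uses the upstream import path rather than the deeply nested vendor path, and the endpoint struct and its values are invented for illustration.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type endpoint struct {
	Host string
	Port int
	Tags []string
}

func main() {
	e := &endpoint{Host: "localhost", Port: 9200, Tags: []string{"es", "http"}}

	// Dump writes an indented, annotated view (types, lengths, pointer
	// addresses) to stdout; Sdump returns the same text as a string.
	spew.Dump(e)
	fmt.Println(len(spew.Sdump(e)) > 0)

	// The wrappers in spew.go route every argument through NewFormatter,
	// so %v, %+v and %#v pick up spew's extra detail.
	spew.Printf("endpoint = %+v\n", e)
	fmt.Printf("plain fmt for comparison: %+v\n", e)
}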
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md deleted file mode 100644 index 4aa18068..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# objx - - * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go deleted file mode 100644 index 721bcac7..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go +++ /dev/null @@ -1,179 +0,0 @@ -package objx - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` - -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) - -// Get gets the value using the specified selector and -// returns it inside a new Obj object. -// -// If it cannot find the value, Get will return a nil -// value inside an instance of Obj. -// -// Get can only operate directly on map[string]interface{} and []interface. -// -// Example -// -// To access the title of the third chapter of the second book, do: -// -// o.Get("books[1].chapters[2].title") -func (m Map) Get(selector string) *Value { - rawObj := access(m, selector, nil, false, false) - return &Value{data: rawObj} -} - -// Set sets the value using the specified selector and -// returns the object on which Set was called. -// -// Set can only operate directly on map[string]interface{} and []interface -// -// Example -// -// To set the title of the third chapter of the second book, do: -// -// o.Set("books[1].chapters[2].title","Time to Go") -func (m Map) Set(selector string, value interface{}) Map { - access(m, selector, value, true, false) - return m -} - -// access accesses the object using the selector and performs the -// appropriate action. -func access(current, selector, value interface{}, isSet, panics bool) interface{} { - - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - - if index >= len(array) { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) - } - return nil - } - - return array[index] - } - - return nil - - case string: - - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - // https://github.com/stretchr/objx/issues/12 - if strings.Contains(thisSel, "[") { - - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - - if len(arrayMatches) > 0 { - - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. 
Must use array[int].") - } - - } - } - - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) - } - - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil - } else { - current = curMSI[thisSel] - } - default: - current = nil - } - - if current == nil && panics { - panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) - } - - // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) - } - current = nil - } - } - } - - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet, panics) - } - - } - - return current - -} - -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - panic("objx: array access argument is not an integer type (this should never happen)") - } - - return value -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html deleted file mode 100644 index 379ffc3c..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html +++ /dev/null @@ -1,86 +0,0 @@ - - - - Codegen - - - - - -

[codegen/index.html: deleted code-generation helper page; its HTML and JavaScript were lost in extraction, leaving only the form labels "Template", "Use {x} as a placeholder for each argument.", "Arguments (comma separated)", "One block per line", and "Output".]
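The accessors.go file removed above implements objx's dotted selector syntax, including bracketed slice indexes such as books[1].chapters[0].title. The following sketch shows how that API was typically used; the data, keys and default values are made up, and the upstream import path is assumed instead of the vendored one.

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// Nested data built by hand; in practice this usually comes from
	// objx.MustFromJSON (see map.go further down in this diff).
	m := objx.Map{
		"books": []interface{}{
			map[string]interface{}{"title": "first"},
			map[string]interface{}{
				"title": "second",
				"chapters": []interface{}{
					map[string]interface{}{"title": "intro"},
				},
			},
		},
	}

	// Get walks map keys separated by '.' and slice indexes in brackets.
	fmt.Println(m.Get("books[1].chapters[0].title").Str())

	// Missing paths yield an empty Value, so callers pass a default.
	fmt.Println(m.Get("books[1].isbn").Str("unknown"))

	// Set uses the same selector syntax and mutates the map in place.
	m.Set("books[1].chapters[0].title", "Time to Go")
	fmt.Println(m.Get("books[1].chapters[0].title").Str())
}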

- - - - - - - - diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a2..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. - SignatureSeparator = "_" -) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go deleted file mode 100644 index 9cdfa9f9..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go +++ /dev/null @@ -1,117 +0,0 @@ -package objx - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" -) - -// JSON converts the contained object to a JSON string -// representation -func (m Map) JSON() (string, error) { - - result, err := json.Marshal(m) - - if err != nil { - err = errors.New("objx: JSON encode failed with: " + err.Error()) - } - - return string(result), err - -} - -// MustJSON converts the contained object to a JSON string -// representation and panics if there is an error -func (m Map) MustJSON() string { - result, err := m.JSON() - if err != nil { - panic(err.Error()) - } - return result -} - -// Base64 converts the contained object to a Base64 string -// representation of the JSON string representation -func (m Map) Base64() (string, error) { - - var buf bytes.Buffer - - jsonData, err := m.JSON() - if err != nil { - return "", err - } - - encoder := base64.NewEncoder(base64.StdEncoding, &buf) - encoder.Write([]byte(jsonData)) - encoder.Close() - - return buf.String(), nil - -} - -// MustBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and panics -// if there is an error -func (m Map) MustBase64() string { - result, err := m.Base64() - if err != nil { - panic(err.Error()) - } - return result -} - -// SignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key. -func (m Map) SignedBase64(key string) (string, error) { - - base64, err := m.Base64() - if err != nil { - return "", err - } - - sig := HashWithKey(base64, key) - - return base64 + SignatureSeparator + sig, nil - -} - -// MustSignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key and panics if there is an error -func (m Map) MustSignedBase64(key string) string { - result, err := m.SignedBase64(key) - if err != nil { - panic(err.Error()) - } - return result -} - -/* - URL Query - ------------------------------------------------ -*/ - -// URLValues creates a url.Values object from an Obj. 
This -// function requires that the wrapped object be a map[string]interface{} -func (m Map) URLValues() url.Values { - - vals := make(url.Values) - - for k, v := range m { - //TODO: can this be done without sprintf? - vals.Set(k, fmt.Sprintf("%v", v)) - } - - return vals -} - -// URLQuery gets an encoded URL query representing the given -// Obj. This function requires that the wrapped object be a -// map[string]interface{} -func (m Map) URLQuery() (string, error) { - return m.URLValues().Encode(), nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go deleted file mode 100644 index 47bf85e4..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go +++ /dev/null @@ -1,72 +0,0 @@ -// objx - Go package for dealing with maps, slices, JSON and other data. -// -// Overview -// -// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -// a powerful `Get` method (among others) that allows you to easily and quickly get -// access to data within the map, without having to worry too much about type assertions, -// missing data, default values etc. -// -// Pattern -// -// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s -// easy. -// -// Call one of the `objx.` functions to create your `objx.Map` to get going: -// -// m, err := objx.FromJSON(json) -// -// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -// the rest will be optimistic and try to figure things out without panicking. -// -// Use `Get` to access the value you're interested in. You can use dot and array -// notation too: -// -// m.Get("places[0].latlng") -// -// Once you have saught the `Value` you're interested in, you can use the `Is*` methods -// to determine its type. -// -// if m.Get("code").IsStr() { /* ... */ } -// -// Or you can just assume the type, and use one of the strong type methods to -// extract the real value: -// -// m.Get("code").Int() -// -// If there's no value there (or if it's the wrong type) then a default value -// will be returned, or you can be explicit about the default value. -// -// Get("code").Int(-1) -// -// If you're dealing with a slice of data as a value, Objx provides many useful -// methods for iterating, manipulating and selecting that data. You can find out more -// by exploring the index below. -// -// Reading data -// -// A simple example of how to use Objx: -// -// // use MustFromJSON to make an objx.Map from some JSON -// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) -// -// // get the details -// name := m.Get("name").Str() -// age := m.Get("age").Int() -// -// // get their nickname (or use their name if they -// // don't have one) -// nickname := m.Get("nickname").Str(name) -// -// Ranging -// -// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For -// example, to `range` the data, do what you would expect: -// -// m := objx.MustFromJSON(json) -// for key, value := range m { -// -// /* ... do your magic ... 
*/ -// -// } -package objx diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go deleted file mode 100644 index eb6ed8e2..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go +++ /dev/null @@ -1,222 +0,0 @@ -package objx - -import ( - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "net/url" - "strings" -) - -// MSIConvertable is an interface that defines methods for converting your -// custom types to a map[string]interface{} representation. -type MSIConvertable interface { - // MSI gets a map[string]interface{} (msi) representing the - // object. - MSI() map[string]interface{} -} - -// Map provides extended functionality for working with -// untyped data, in particular map[string]interface (msi). -type Map map[string]interface{} - -// Value returns the internal value instance -func (m Map) Value() *Value { - return &Value{data: m} -} - -// Nil represents a nil Map. -var Nil Map = New(nil) - -// New creates a new Map containing the map[string]interface{} in the data argument. -// If the data argument is not a map[string]interface, New attempts to call the -// MSI() method on the MSIConvertable interface to create one. -func New(data interface{}) Map { - if _, ok := data.(map[string]interface{}); !ok { - if converter, ok := data.(MSIConvertable); ok { - data = converter.MSI() - } else { - return nil - } - } - return Map(data.(map[string]interface{})) -} - -// MSI creates a map[string]interface{} and puts it inside a new Map. -// -// The arguments follow a key, value pattern. -// -// Panics -// -// Panics if any key arugment is non-string or if there are an odd number of arguments. -// -// Example -// -// To easily create Maps: -// -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) -// -// // creates an Map equivalent to -// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) -func MSI(keyAndValuePairs ...interface{}) Map { - - newMap := make(map[string]interface{}) - keyAndValuePairsLen := len(keyAndValuePairs) - - if keyAndValuePairsLen%2 != 0 { - panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") - } - - for i := 0; i < keyAndValuePairsLen; i = i + 2 { - - key := keyAndValuePairs[i] - value := keyAndValuePairs[i+1] - - // make sure the key is a string - keyString, keyStringOK := key.(string) - if !keyStringOK { - panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") - } - - newMap[keyString] = value - - } - - return New(newMap) -} - -// ****** Conversion Constructors - -// MustFromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Panics if the JSON is invalid. -func MustFromJSON(jsonString string) Map { - o, err := FromJSON(jsonString) - - if err != nil { - panic("objx: MustFromJSON failed with error: " + err.Error()) - } - - return o -} - -// FromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Returns an error if the JSON is invalid. 
-func FromJSON(jsonString string) (Map, error) { - - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) - - if err != nil { - return Nil, err - } - - return New(data), nil - -} - -// FromBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by Base64 -func FromBase64(base64String string) (Map, error) { - - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - - decoded, err := ioutil.ReadAll(decoder) - if err != nil { - return nil, err - } - - return FromJSON(string(decoded)) -} - -// MustFromBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromBase64(base64String string) Map { - - result, err := FromBase64(base64String) - - if err != nil { - panic("objx: MustFromBase64 failed with error: " + err.Error()) - } - - return result -} - -// FromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by SignedBase64 -func FromSignedBase64(base64String, key string) (Map, error) { - parts := strings.Split(base64String, SignatureSeparator) - if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed.") - } - - sig := HashWithKey(parts[0], key) - if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match.") - } - - return FromBase64(parts[0]) -} - -// MustFromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromSignedBase64(base64String, key string) Map { - - result, err := FromSignedBase64(base64String, key) - - if err != nil { - panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) - } - - return result -} - -// FromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -func FromURLQuery(query string) (Map, error) { - - vals, err := url.ParseQuery(query) - - if err != nil { - return nil, err - } - - m := make(map[string]interface{}) - for k, vals := range vals { - m[k] = vals[0] - } - - return New(m), nil -} - -// MustFromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -// -// Panics if it encounters an error -func MustFromURLQuery(query string) Map { - - o, err := FromURLQuery(query) - - if err != nil { - panic("objx: MustFromURLQuery failed with error: " + err.Error()) - } - - return o - -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go deleted file mode 100644 index b35c8639..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go +++ /dev/null @@ -1,81 +0,0 @@ -package objx - -// Exclude returns a new Map with the keys in the specified []string -// excluded. 
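map.go (deleted above) pairs with conversions.go and security.go: SignedBase64 appends a key-derived SHA-1 signature that FromSignedBase64 recomputes and checks. A short sketch of that round trip, assuming the upstream import path; the key and payload are placeholders.

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// MSI builds a Map from alternating key/value arguments and panics on
	// a non-string key or an odd argument count.
	m := objx.MSI("name", "Mat", "age", 30)

	signed, err := m.SignedBase64("example-key") // placeholder key
	if err != nil {
		panic(err)
	}

	// FromSignedBase64 rejects the payload if the signature, recomputed
	// with the same key, does not match.
	back, err := objx.FromSignedBase64(signed, "example-key")
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Get("name").Str(), back.MustJSON())
}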
-func (d Map) Exclude(exclude []string) Map { - - excluded := make(Map) - for k, v := range d { - var shouldInclude bool = true - for _, toExclude := range exclude { - if k == toExclude { - shouldInclude = false - break - } - } - if shouldInclude { - excluded[k] = v - } - } - - return excluded -} - -// Copy creates a shallow copy of the Obj. -func (m Map) Copy() Map { - copied := make(map[string]interface{}) - for k, v := range m { - copied[k] = v - } - return New(copied) -} - -// Merge blends the specified map with a copy of this map and returns the result. -// -// Keys that appear in both will be selected from the specified map. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) Merge(merge Map) Map { - return m.Copy().MergeHere(merge) -} - -// Merge blends the specified map with this map and returns the current map. -// -// Keys that appear in both will be selected from the specified map. The original map -// will be modified. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) MergeHere(merge Map) Map { - - for k, v := range merge { - m[k] = v - } - - return m - -} - -// Transform builds a new Obj giving the transformer a chance -// to change the keys and values as it goes. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { - newMap := make(map[string]interface{}) - for k, v := range m { - modifiedKey, modifiedVal := transformer(k, v) - newMap[modifiedKey] = modifiedVal - } - return New(newMap) -} - -// TransformKeys builds a new map using the specified key mapping. -// -// Unspecified keys will be unaltered. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) TransformKeys(mapping map[string]string) Map { - return m.Transform(func(key string, value interface{}) (string, interface{}) { - - if newKey, ok := mapping[key]; ok { - return newKey, value - } - - return key, value - }) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go deleted file mode 100644 index fdd6be9c..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go +++ /dev/null @@ -1,14 +0,0 @@ -package objx - -import ( - "crypto/sha1" - "encoding/hex" -) - -// HashWithKey hashes the specified string using the security -// key. -func HashWithKey(data, key string) string { - hash := sha1.New() - hash.Write([]byte(data + ":" + key)) - return hex.EncodeToString(hash.Sum(nil)) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go deleted file mode 100644 index d9e0b479..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go +++ /dev/null @@ -1,17 +0,0 @@ -package objx - -// Has gets whether there is something at the specified selector -// or not. -// -// If m is nil, Has will always return false. -func (m Map) Has(selector string) bool { - if m == nil { - return false - } - return !m.Get(selector).IsNil() -} - -// IsNil gets whether the data is nil or not. 
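mutations.go, removed just above, provides the copy-and-combine helpers (Copy, Merge, MergeHere, Exclude, Transform, TransformKeys). A brief sketch of how they compose, again with invented keys and the upstream import path:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	base := objx.Map{"host": "localhost", "port": 9200, "debug": true}
	override := objx.Map{"port": 9300}

	// Merge works on a shallow copy; keys present in both maps take the
	// value from the argument. MergeHere would mutate base instead.
	merged := base.Merge(override)

	// Exclude drops the listed keys; TransformKeys renames per the mapping
	// and leaves unmapped keys untouched.
	cleaned := merged.Exclude([]string{"debug"})
	renamed := cleaned.TransformKeys(map[string]string{"host": "hostname"})

	fmt.Println(renamed.MustJSON()) // {"hostname":"localhost","port":9300}
}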
-func (v *Value) IsNil() bool { - return v == nil || v.data == nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go deleted file mode 100644 index f3ecb29b..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ /dev/null @@ -1,2881 +0,0 @@ -package objx - -/* - Inter (interface{} and []interface{}) - -------------------------------------------------- -*/ - -// Inter gets the value as a interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Inter(optionalDefault ...interface{}) interface{} { - if s, ok := v.data.(interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInter gets the value as a interface{}. -// -// Panics if the object is not a interface{}. -func (v *Value) MustInter() interface{} { - return v.data.(interface{}) -} - -// InterSlice gets the value as a []interface{}, returns the optionalDefault -// value or nil if the value is not a []interface{}. -func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { - if s, ok := v.data.([]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInterSlice gets the value as a []interface{}. -// -// Panics if the object is not a []interface{}. -func (v *Value) MustInterSlice() []interface{} { - return v.data.([]interface{}) -} - -// IsInter gets whether the object contained is a interface{} or not. -func (v *Value) IsInter() bool { - _, ok := v.data.(interface{}) - return ok -} - -// IsInterSlice gets whether the object contained is a []interface{} or not. -func (v *Value) IsInterSlice() bool { - _, ok := v.data.([]interface{}) - return ok -} - -// EachInter calls the specified callback for each object -// in the []interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - - for index, val := range v.MustInterSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInter uses the specified decider function to select items -// from the []interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - - var selected []interface{} - - v.EachInter(func(index int, val interface{}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInter uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]interface{}. 
-func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - - groups := make(map[string][]interface{}) - - v.EachInter(func(index int, val interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInter uses the specified function to replace each interface{}s -// by iterating each item. The data in the returned result will be a -// []interface{} containing the replaced items. -func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - - arr := v.MustInterSlice() - replaced := make([]interface{}, len(arr)) - - v.EachInter(func(index int, val interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInter uses the specified collector function to collect a value -// for each of the interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - - arr := v.MustInterSlice() - collected := make([]interface{}, len(arr)) - - v.EachInter(func(index int, val interface{}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - MSI (map[string]interface{} and []map[string]interface{}) - -------------------------------------------------- -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. -// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. -func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { - if s, ok := v.data.([]map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSISlice gets the value as a []map[string]interface{}. -// -// Panics if the object is not a []map[string]interface{}. -func (v *Value) MustMSISlice() []map[string]interface{} { - return v.data.([]map[string]interface{}) -} - -// IsMSI gets whether the object contained is a map[string]interface{} or not. -func (v *Value) IsMSI() bool { - _, ok := v.data.(map[string]interface{}) - return ok -} - -// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. -func (v *Value) IsMSISlice() bool { - _, ok := v.data.([]map[string]interface{}) - return ok -} - -// EachMSI calls the specified callback for each object -// in the []map[string]interface{}. -// -// Panics if the object is the wrong type. 
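type_specific_codegen.go repeats one generated pattern per Go type: a getter with an optional default, a panicking Must variant, Is checks, and Each/Where/Group/Replace/Collect helpers over the slice form. Note that, as vendored here, the Where* helpers append an item when the decider returns false, which is the opposite of what their doc comments state, so the sketch below sticks to Replace, Collect and Each. It uses the string variants, which appear further down and follow the same shape as the Inter and MSI ones above; the tag values are invented and the upstream import path is assumed.

package main

import (
	"fmt"
	"strings"

	"github.com/stretchr/objx"
)

func main() {
	v := objx.Map{"tags": []string{"beats", "filebeat", "metricbeat"}}.Get("tags")

	// Is* reports the underlying type; Str falls back to its optional
	// default here because the data is a []string, not a string.
	fmt.Println(v.IsStrSlice(), v.Str("fallback"))

	// ReplaceStr maps string -> string, CollectStr maps string -> interface{}.
	upper := v.ReplaceStr(func(_ int, s string) string { return strings.ToUpper(s) })
	lengths := v.CollectStr(func(_ int, s string) interface{} { return len(s) })

	// EachStr iterates until the callback returns false.
	upper.EachStr(func(i int, s string) bool {
		fmt.Println(i, s)
		return true
	})
	fmt.Println(lengths.MustInterSlice())
}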
-func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - - for index, val := range v.MustMSISlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereMSI uses the specified decider function to select items -// from the []map[string]interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - - var selected []map[string]interface{} - - v.EachMSI(func(index int, val map[string]interface{}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupMSI uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]map[string]interface{}. -func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - - groups := make(map[string][]map[string]interface{}) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]map[string]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceMSI uses the specified function to replace each map[string]interface{}s -// by iterating each item. The data in the returned result will be a -// []map[string]interface{} containing the replaced items. -func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - - arr := v.MustMSISlice() - replaced := make([]map[string]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectMSI uses the specified collector function to collect a value -// for each of the map[string]interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - - arr := v.MustMSISlice() - collected := make([]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - ObjxMap ((Map) and [](Map)) - -------------------------------------------------- -*/ - -// ObjxMap gets the value as a (Map), returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { - if s, ok := v.data.((Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return New(nil) -} - -// MustObjxMap gets the value as a (Map). -// -// Panics if the object is not a (Map). -func (v *Value) MustObjxMap() Map { - return v.data.((Map)) -} - -// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault -// value or nil if the value is not a [](Map). -func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { - if s, ok := v.data.([](Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustObjxMapSlice gets the value as a [](Map). -// -// Panics if the object is not a [](Map). 
-func (v *Value) MustObjxMapSlice() [](Map) { - return v.data.([](Map)) -} - -// IsObjxMap gets whether the object contained is a (Map) or not. -func (v *Value) IsObjxMap() bool { - _, ok := v.data.((Map)) - return ok -} - -// IsObjxMapSlice gets whether the object contained is a [](Map) or not. -func (v *Value) IsObjxMapSlice() bool { - _, ok := v.data.([](Map)) - return ok -} - -// EachObjxMap calls the specified callback for each object -// in the [](Map). -// -// Panics if the object is the wrong type. -func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - - for index, val := range v.MustObjxMapSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereObjxMap uses the specified decider function to select items -// from the [](Map). The object contained in the result will contain -// only the selected items. -func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - - var selected [](Map) - - v.EachObjxMap(func(index int, val Map) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupObjxMap uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][](Map). -func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - - groups := make(map[string][](Map)) - - v.EachObjxMap(func(index int, val Map) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([](Map), 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceObjxMap uses the specified function to replace each (Map)s -// by iterating each item. The data in the returned result will be a -// [](Map) containing the replaced items. -func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - - arr := v.MustObjxMapSlice() - replaced := make([](Map), len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectObjxMap uses the specified collector function to collect a value -// for each of the (Map)s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - - arr := v.MustObjxMapSlice() - collected := make([]interface{}, len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Bool (bool and []bool) - -------------------------------------------------- -*/ - -// Bool gets the value as a bool, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Bool(optionalDefault ...bool) bool { - if s, ok := v.data.(bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return false -} - -// MustBool gets the value as a bool. -// -// Panics if the object is not a bool. -func (v *Value) MustBool() bool { - return v.data.(bool) -} - -// BoolSlice gets the value as a []bool, returns the optionalDefault -// value or nil if the value is not a []bool. 
-func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { - if s, ok := v.data.([]bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustBoolSlice gets the value as a []bool. -// -// Panics if the object is not a []bool. -func (v *Value) MustBoolSlice() []bool { - return v.data.([]bool) -} - -// IsBool gets whether the object contained is a bool or not. -func (v *Value) IsBool() bool { - _, ok := v.data.(bool) - return ok -} - -// IsBoolSlice gets whether the object contained is a []bool or not. -func (v *Value) IsBoolSlice() bool { - _, ok := v.data.([]bool) - return ok -} - -// EachBool calls the specified callback for each object -// in the []bool. -// -// Panics if the object is the wrong type. -func (v *Value) EachBool(callback func(int, bool) bool) *Value { - - for index, val := range v.MustBoolSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereBool uses the specified decider function to select items -// from the []bool. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - - var selected []bool - - v.EachBool(func(index int, val bool) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupBool uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]bool. -func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - - groups := make(map[string][]bool) - - v.EachBool(func(index int, val bool) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]bool, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceBool uses the specified function to replace each bools -// by iterating each item. The data in the returned result will be a -// []bool containing the replaced items. -func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - - arr := v.MustBoolSlice() - replaced := make([]bool, len(arr)) - - v.EachBool(func(index int, val bool) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectBool uses the specified collector function to collect a value -// for each of the bools in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - - arr := v.MustBoolSlice() - collected := make([]interface{}, len(arr)) - - v.EachBool(func(index int, val bool) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Str (string and []string) - -------------------------------------------------- -*/ - -// Str gets the value as a string, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Str(optionalDefault ...string) string { - if s, ok := v.data.(string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return "" -} - -// MustStr gets the value as a string. -// -// Panics if the object is not a string. 
-func (v *Value) MustStr() string { - return v.data.(string) -} - -// StrSlice gets the value as a []string, returns the optionalDefault -// value or nil if the value is not a []string. -func (v *Value) StrSlice(optionalDefault ...[]string) []string { - if s, ok := v.data.([]string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustStrSlice gets the value as a []string. -// -// Panics if the object is not a []string. -func (v *Value) MustStrSlice() []string { - return v.data.([]string) -} - -// IsStr gets whether the object contained is a string or not. -func (v *Value) IsStr() bool { - _, ok := v.data.(string) - return ok -} - -// IsStrSlice gets whether the object contained is a []string or not. -func (v *Value) IsStrSlice() bool { - _, ok := v.data.([]string) - return ok -} - -// EachStr calls the specified callback for each object -// in the []string. -// -// Panics if the object is the wrong type. -func (v *Value) EachStr(callback func(int, string) bool) *Value { - - for index, val := range v.MustStrSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereStr uses the specified decider function to select items -// from the []string. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereStr(decider func(int, string) bool) *Value { - - var selected []string - - v.EachStr(func(index int, val string) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupStr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]string. -func (v *Value) GroupStr(grouper func(int, string) string) *Value { - - groups := make(map[string][]string) - - v.EachStr(func(index int, val string) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]string, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceStr uses the specified function to replace each strings -// by iterating each item. The data in the returned result will be a -// []string containing the replaced items. -func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - - arr := v.MustStrSlice() - replaced := make([]string, len(arr)) - - v.EachStr(func(index int, val string) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectStr uses the specified collector function to collect a value -// for each of the strings in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - - arr := v.MustStrSlice() - collected := make([]interface{}, len(arr)) - - v.EachStr(func(index int, val string) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int (int and []int) - -------------------------------------------------- -*/ - -// Int gets the value as a int, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) Int(optionalDefault ...int) int { - if s, ok := v.data.(int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt gets the value as a int. -// -// Panics if the object is not a int. -func (v *Value) MustInt() int { - return v.data.(int) -} - -// IntSlice gets the value as a []int, returns the optionalDefault -// value or nil if the value is not a []int. -func (v *Value) IntSlice(optionalDefault ...[]int) []int { - if s, ok := v.data.([]int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustIntSlice gets the value as a []int. -// -// Panics if the object is not a []int. -func (v *Value) MustIntSlice() []int { - return v.data.([]int) -} - -// IsInt gets whether the object contained is a int or not. -func (v *Value) IsInt() bool { - _, ok := v.data.(int) - return ok -} - -// IsIntSlice gets whether the object contained is a []int or not. -func (v *Value) IsIntSlice() bool { - _, ok := v.data.([]int) - return ok -} - -// EachInt calls the specified callback for each object -// in the []int. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt(callback func(int, int) bool) *Value { - - for index, val := range v.MustIntSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt uses the specified decider function to select items -// from the []int. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt(decider func(int, int) bool) *Value { - - var selected []int - - v.EachInt(func(index int, val int) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int. -func (v *Value) GroupInt(grouper func(int, int) string) *Value { - - groups := make(map[string][]int) - - v.EachInt(func(index int, val int) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt uses the specified function to replace each ints -// by iterating each item. The data in the returned result will be a -// []int containing the replaced items. -func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - - arr := v.MustIntSlice() - replaced := make([]int, len(arr)) - - v.EachInt(func(index int, val int) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt uses the specified collector function to collect a value -// for each of the ints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - - arr := v.MustIntSlice() - collected := make([]interface{}, len(arr)) - - v.EachInt(func(index int, val int) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int8 (int8 and []int8) - -------------------------------------------------- -*/ - -// Int8 gets the value as a int8, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) Int8(optionalDefault ...int8) int8 { - if s, ok := v.data.(int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt8 gets the value as a int8. -// -// Panics if the object is not a int8. -func (v *Value) MustInt8() int8 { - return v.data.(int8) -} - -// Int8Slice gets the value as a []int8, returns the optionalDefault -// value or nil if the value is not a []int8. -func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { - if s, ok := v.data.([]int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt8Slice gets the value as a []int8. -// -// Panics if the object is not a []int8. -func (v *Value) MustInt8Slice() []int8 { - return v.data.([]int8) -} - -// IsInt8 gets whether the object contained is a int8 or not. -func (v *Value) IsInt8() bool { - _, ok := v.data.(int8) - return ok -} - -// IsInt8Slice gets whether the object contained is a []int8 or not. -func (v *Value) IsInt8Slice() bool { - _, ok := v.data.([]int8) - return ok -} - -// EachInt8 calls the specified callback for each object -// in the []int8. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - - for index, val := range v.MustInt8Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt8 uses the specified decider function to select items -// from the []int8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - - var selected []int8 - - v.EachInt8(func(index int, val int8) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int8. -func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - - groups := make(map[string][]int8) - - v.EachInt8(func(index int, val int8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt8 uses the specified function to replace each int8s -// by iterating each item. The data in the returned result will be a -// []int8 containing the replaced items. -func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - - arr := v.MustInt8Slice() - replaced := make([]int8, len(arr)) - - v.EachInt8(func(index int, val int8) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt8 uses the specified collector function to collect a value -// for each of the int8s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - - arr := v.MustInt8Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt8(func(index int, val int8) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int16 (int16 and []int16) - -------------------------------------------------- -*/ - -// Int16 gets the value as a int16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int16(optionalDefault ...int16) int16 { - if s, ok := v.data.(int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt16 gets the value as a int16. -// -// Panics if the object is not a int16. -func (v *Value) MustInt16() int16 { - return v.data.(int16) -} - -// Int16Slice gets the value as a []int16, returns the optionalDefault -// value or nil if the value is not a []int16. -func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { - if s, ok := v.data.([]int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt16Slice gets the value as a []int16. -// -// Panics if the object is not a []int16. -func (v *Value) MustInt16Slice() []int16 { - return v.data.([]int16) -} - -// IsInt16 gets whether the object contained is a int16 or not. -func (v *Value) IsInt16() bool { - _, ok := v.data.(int16) - return ok -} - -// IsInt16Slice gets whether the object contained is a []int16 or not. -func (v *Value) IsInt16Slice() bool { - _, ok := v.data.([]int16) - return ok -} - -// EachInt16 calls the specified callback for each object -// in the []int16. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - - for index, val := range v.MustInt16Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt16 uses the specified decider function to select items -// from the []int16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - - var selected []int16 - - v.EachInt16(func(index int, val int16) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int16. -func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - - groups := make(map[string][]int16) - - v.EachInt16(func(index int, val int16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt16 uses the specified function to replace each int16s -// by iterating each item. The data in the returned result will be a -// []int16 containing the replaced items. 
-func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - - arr := v.MustInt16Slice() - replaced := make([]int16, len(arr)) - - v.EachInt16(func(index int, val int16) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt16 uses the specified collector function to collect a value -// for each of the int16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - - arr := v.MustInt16Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt16(func(index int, val int16) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int32 (int32 and []int32) - -------------------------------------------------- -*/ - -// Int32 gets the value as a int32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int32(optionalDefault ...int32) int32 { - if s, ok := v.data.(int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt32 gets the value as a int32. -// -// Panics if the object is not a int32. -func (v *Value) MustInt32() int32 { - return v.data.(int32) -} - -// Int32Slice gets the value as a []int32, returns the optionalDefault -// value or nil if the value is not a []int32. -func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { - if s, ok := v.data.([]int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt32Slice gets the value as a []int32. -// -// Panics if the object is not a []int32. -func (v *Value) MustInt32Slice() []int32 { - return v.data.([]int32) -} - -// IsInt32 gets whether the object contained is a int32 or not. -func (v *Value) IsInt32() bool { - _, ok := v.data.(int32) - return ok -} - -// IsInt32Slice gets whether the object contained is a []int32 or not. -func (v *Value) IsInt32Slice() bool { - _, ok := v.data.([]int32) - return ok -} - -// EachInt32 calls the specified callback for each object -// in the []int32. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - - for index, val := range v.MustInt32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt32 uses the specified decider function to select items -// from the []int32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - - var selected []int32 - - v.EachInt32(func(index int, val int32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int32. 
-func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - - groups := make(map[string][]int32) - - v.EachInt32(func(index int, val int32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt32 uses the specified function to replace each int32s -// by iterating each item. The data in the returned result will be a -// []int32 containing the replaced items. -func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - - arr := v.MustInt32Slice() - replaced := make([]int32, len(arr)) - - v.EachInt32(func(index int, val int32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt32 uses the specified collector function to collect a value -// for each of the int32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - - arr := v.MustInt32Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt32(func(index int, val int32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int64 (int64 and []int64) - -------------------------------------------------- -*/ - -// Int64 gets the value as a int64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int64(optionalDefault ...int64) int64 { - if s, ok := v.data.(int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt64 gets the value as a int64. -// -// Panics if the object is not a int64. -func (v *Value) MustInt64() int64 { - return v.data.(int64) -} - -// Int64Slice gets the value as a []int64, returns the optionalDefault -// value or nil if the value is not a []int64. -func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { - if s, ok := v.data.([]int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt64Slice gets the value as a []int64. -// -// Panics if the object is not a []int64. -func (v *Value) MustInt64Slice() []int64 { - return v.data.([]int64) -} - -// IsInt64 gets whether the object contained is a int64 or not. -func (v *Value) IsInt64() bool { - _, ok := v.data.(int64) - return ok -} - -// IsInt64Slice gets whether the object contained is a []int64 or not. -func (v *Value) IsInt64Slice() bool { - _, ok := v.data.([]int64) - return ok -} - -// EachInt64 calls the specified callback for each object -// in the []int64. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - - for index, val := range v.MustInt64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt64 uses the specified decider function to select items -// from the []int64. The object contained in the result will contain -// only the selected items. 
-func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - - var selected []int64 - - v.EachInt64(func(index int, val int64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int64. -func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - - groups := make(map[string][]int64) - - v.EachInt64(func(index int, val int64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt64 uses the specified function to replace each int64s -// by iterating each item. The data in the returned result will be a -// []int64 containing the replaced items. -func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - - arr := v.MustInt64Slice() - replaced := make([]int64, len(arr)) - - v.EachInt64(func(index int, val int64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt64 uses the specified collector function to collect a value -// for each of the int64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - - arr := v.MustInt64Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt64(func(index int, val int64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint (uint and []uint) - -------------------------------------------------- -*/ - -// Uint gets the value as a uint, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint(optionalDefault ...uint) uint { - if s, ok := v.data.(uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint gets the value as a uint. -// -// Panics if the object is not a uint. -func (v *Value) MustUint() uint { - return v.data.(uint) -} - -// UintSlice gets the value as a []uint, returns the optionalDefault -// value or nil if the value is not a []uint. -func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { - if s, ok := v.data.([]uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintSlice gets the value as a []uint. -// -// Panics if the object is not a []uint. -func (v *Value) MustUintSlice() []uint { - return v.data.([]uint) -} - -// IsUint gets whether the object contained is a uint or not. -func (v *Value) IsUint() bool { - _, ok := v.data.(uint) - return ok -} - -// IsUintSlice gets whether the object contained is a []uint or not. -func (v *Value) IsUintSlice() bool { - _, ok := v.data.([]uint) - return ok -} - -// EachUint calls the specified callback for each object -// in the []uint. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint(callback func(int, uint) bool) *Value { - - for index, val := range v.MustUintSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint uses the specified decider function to select items -// from the []uint. 
The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - - var selected []uint - - v.EachUint(func(index int, val uint) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint. -func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - - groups := make(map[string][]uint) - - v.EachUint(func(index int, val uint) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint uses the specified function to replace each uints -// by iterating each item. The data in the returned result will be a -// []uint containing the replaced items. -func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - - arr := v.MustUintSlice() - replaced := make([]uint, len(arr)) - - v.EachUint(func(index int, val uint) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint uses the specified collector function to collect a value -// for each of the uints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - - arr := v.MustUintSlice() - collected := make([]interface{}, len(arr)) - - v.EachUint(func(index int, val uint) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint8 (uint8 and []uint8) - -------------------------------------------------- -*/ - -// Uint8 gets the value as a uint8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint8(optionalDefault ...uint8) uint8 { - if s, ok := v.data.(uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint8 gets the value as a uint8. -// -// Panics if the object is not a uint8. -func (v *Value) MustUint8() uint8 { - return v.data.(uint8) -} - -// Uint8Slice gets the value as a []uint8, returns the optionalDefault -// value or nil if the value is not a []uint8. -func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { - if s, ok := v.data.([]uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint8Slice gets the value as a []uint8. -// -// Panics if the object is not a []uint8. -func (v *Value) MustUint8Slice() []uint8 { - return v.data.([]uint8) -} - -// IsUint8 gets whether the object contained is a uint8 or not. -func (v *Value) IsUint8() bool { - _, ok := v.data.(uint8) - return ok -} - -// IsUint8Slice gets whether the object contained is a []uint8 or not. -func (v *Value) IsUint8Slice() bool { - _, ok := v.data.([]uint8) - return ok -} - -// EachUint8 calls the specified callback for each object -// in the []uint8. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - - for index, val := range v.MustUint8Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint8 uses the specified decider function to select items -// from the []uint8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - - var selected []uint8 - - v.EachUint8(func(index int, val uint8) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint8. -func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - - groups := make(map[string][]uint8) - - v.EachUint8(func(index int, val uint8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint8 uses the specified function to replace each uint8s -// by iterating each item. The data in the returned result will be a -// []uint8 containing the replaced items. -func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - - arr := v.MustUint8Slice() - replaced := make([]uint8, len(arr)) - - v.EachUint8(func(index int, val uint8) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint8 uses the specified collector function to collect a value -// for each of the uint8s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - - arr := v.MustUint8Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint8(func(index int, val uint8) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint16 (uint16 and []uint16) - -------------------------------------------------- -*/ - -// Uint16 gets the value as a uint16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint16(optionalDefault ...uint16) uint16 { - if s, ok := v.data.(uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint16 gets the value as a uint16. -// -// Panics if the object is not a uint16. -func (v *Value) MustUint16() uint16 { - return v.data.(uint16) -} - -// Uint16Slice gets the value as a []uint16, returns the optionalDefault -// value or nil if the value is not a []uint16. -func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { - if s, ok := v.data.([]uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint16Slice gets the value as a []uint16. -// -// Panics if the object is not a []uint16. -func (v *Value) MustUint16Slice() []uint16 { - return v.data.([]uint16) -} - -// IsUint16 gets whether the object contained is a uint16 or not. -func (v *Value) IsUint16() bool { - _, ok := v.data.(uint16) - return ok -} - -// IsUint16Slice gets whether the object contained is a []uint16 or not. 
-func (v *Value) IsUint16Slice() bool { - _, ok := v.data.([]uint16) - return ok -} - -// EachUint16 calls the specified callback for each object -// in the []uint16. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - - for index, val := range v.MustUint16Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint16 uses the specified decider function to select items -// from the []uint16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - - var selected []uint16 - - v.EachUint16(func(index int, val uint16) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint16. -func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - - groups := make(map[string][]uint16) - - v.EachUint16(func(index int, val uint16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint16 uses the specified function to replace each uint16s -// by iterating each item. The data in the returned result will be a -// []uint16 containing the replaced items. -func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - - arr := v.MustUint16Slice() - replaced := make([]uint16, len(arr)) - - v.EachUint16(func(index int, val uint16) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint16 uses the specified collector function to collect a value -// for each of the uint16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - - arr := v.MustUint16Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint16(func(index int, val uint16) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint32 (uint32 and []uint32) - -------------------------------------------------- -*/ - -// Uint32 gets the value as a uint32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint32(optionalDefault ...uint32) uint32 { - if s, ok := v.data.(uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint32 gets the value as a uint32. -// -// Panics if the object is not a uint32. -func (v *Value) MustUint32() uint32 { - return v.data.(uint32) -} - -// Uint32Slice gets the value as a []uint32, returns the optionalDefault -// value or nil if the value is not a []uint32. -func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { - if s, ok := v.data.([]uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint32Slice gets the value as a []uint32. -// -// Panics if the object is not a []uint32. 
-func (v *Value) MustUint32Slice() []uint32 { - return v.data.([]uint32) -} - -// IsUint32 gets whether the object contained is a uint32 or not. -func (v *Value) IsUint32() bool { - _, ok := v.data.(uint32) - return ok -} - -// IsUint32Slice gets whether the object contained is a []uint32 or not. -func (v *Value) IsUint32Slice() bool { - _, ok := v.data.([]uint32) - return ok -} - -// EachUint32 calls the specified callback for each object -// in the []uint32. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - - for index, val := range v.MustUint32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint32 uses the specified decider function to select items -// from the []uint32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - - var selected []uint32 - - v.EachUint32(func(index int, val uint32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint32. -func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - - groups := make(map[string][]uint32) - - v.EachUint32(func(index int, val uint32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint32 uses the specified function to replace each uint32s -// by iterating each item. The data in the returned result will be a -// []uint32 containing the replaced items. -func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - - arr := v.MustUint32Slice() - replaced := make([]uint32, len(arr)) - - v.EachUint32(func(index int, val uint32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint32 uses the specified collector function to collect a value -// for each of the uint32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - - arr := v.MustUint32Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint32(func(index int, val uint32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint64 (uint64 and []uint64) - -------------------------------------------------- -*/ - -// Uint64 gets the value as a uint64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint64(optionalDefault ...uint64) uint64 { - if s, ok := v.data.(uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint64 gets the value as a uint64. -// -// Panics if the object is not a uint64. -func (v *Value) MustUint64() uint64 { - return v.data.(uint64) -} - -// Uint64Slice gets the value as a []uint64, returns the optionalDefault -// value or nil if the value is not a []uint64. 
-func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { - if s, ok := v.data.([]uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint64Slice gets the value as a []uint64. -// -// Panics if the object is not a []uint64. -func (v *Value) MustUint64Slice() []uint64 { - return v.data.([]uint64) -} - -// IsUint64 gets whether the object contained is a uint64 or not. -func (v *Value) IsUint64() bool { - _, ok := v.data.(uint64) - return ok -} - -// IsUint64Slice gets whether the object contained is a []uint64 or not. -func (v *Value) IsUint64Slice() bool { - _, ok := v.data.([]uint64) - return ok -} - -// EachUint64 calls the specified callback for each object -// in the []uint64. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - - for index, val := range v.MustUint64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint64 uses the specified decider function to select items -// from the []uint64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - - var selected []uint64 - - v.EachUint64(func(index int, val uint64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint64. -func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - - groups := make(map[string][]uint64) - - v.EachUint64(func(index int, val uint64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint64 uses the specified function to replace each uint64s -// by iterating each item. The data in the returned result will be a -// []uint64 containing the replaced items. -func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - - arr := v.MustUint64Slice() - replaced := make([]uint64, len(arr)) - - v.EachUint64(func(index int, val uint64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint64 uses the specified collector function to collect a value -// for each of the uint64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - - arr := v.MustUint64Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint64(func(index int, val uint64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uintptr (uintptr and []uintptr) - -------------------------------------------------- -*/ - -// Uintptr gets the value as a uintptr, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { - if s, ok := v.data.(uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUintptr gets the value as a uintptr. -// -// Panics if the object is not a uintptr. 
-func (v *Value) MustUintptr() uintptr { - return v.data.(uintptr) -} - -// UintptrSlice gets the value as a []uintptr, returns the optionalDefault -// value or nil if the value is not a []uintptr. -func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { - if s, ok := v.data.([]uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintptrSlice gets the value as a []uintptr. -// -// Panics if the object is not a []uintptr. -func (v *Value) MustUintptrSlice() []uintptr { - return v.data.([]uintptr) -} - -// IsUintptr gets whether the object contained is a uintptr or not. -func (v *Value) IsUintptr() bool { - _, ok := v.data.(uintptr) - return ok -} - -// IsUintptrSlice gets whether the object contained is a []uintptr or not. -func (v *Value) IsUintptrSlice() bool { - _, ok := v.data.([]uintptr) - return ok -} - -// EachUintptr calls the specified callback for each object -// in the []uintptr. -// -// Panics if the object is the wrong type. -func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - - for index, val := range v.MustUintptrSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUintptr uses the specified decider function to select items -// from the []uintptr. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - - var selected []uintptr - - v.EachUintptr(func(index int, val uintptr) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUintptr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uintptr. -func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - - groups := make(map[string][]uintptr) - - v.EachUintptr(func(index int, val uintptr) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uintptr, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUintptr uses the specified function to replace each uintptrs -// by iterating each item. The data in the returned result will be a -// []uintptr containing the replaced items. -func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - - arr := v.MustUintptrSlice() - replaced := make([]uintptr, len(arr)) - - v.EachUintptr(func(index int, val uintptr) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUintptr uses the specified collector function to collect a value -// for each of the uintptrs in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - - arr := v.MustUintptrSlice() - collected := make([]interface{}, len(arr)) - - v.EachUintptr(func(index int, val uintptr) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Float32 (float32 and []float32) - -------------------------------------------------- -*/ - -// Float32 gets the value as a float32, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) Float32(optionalDefault ...float32) float32 { - if s, ok := v.data.(float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat32 gets the value as a float32. -// -// Panics if the object is not a float32. -func (v *Value) MustFloat32() float32 { - return v.data.(float32) -} - -// Float32Slice gets the value as a []float32, returns the optionalDefault -// value or nil if the value is not a []float32. -func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { - if s, ok := v.data.([]float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat32Slice gets the value as a []float32. -// -// Panics if the object is not a []float32. -func (v *Value) MustFloat32Slice() []float32 { - return v.data.([]float32) -} - -// IsFloat32 gets whether the object contained is a float32 or not. -func (v *Value) IsFloat32() bool { - _, ok := v.data.(float32) - return ok -} - -// IsFloat32Slice gets whether the object contained is a []float32 or not. -func (v *Value) IsFloat32Slice() bool { - _, ok := v.data.([]float32) - return ok -} - -// EachFloat32 calls the specified callback for each object -// in the []float32. -// -// Panics if the object is the wrong type. -func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - - for index, val := range v.MustFloat32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereFloat32 uses the specified decider function to select items -// from the []float32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - - var selected []float32 - - v.EachFloat32(func(index int, val float32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupFloat32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float32. -func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - - groups := make(map[string][]float32) - - v.EachFloat32(func(index int, val float32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceFloat32 uses the specified function to replace each float32s -// by iterating each item. The data in the returned result will be a -// []float32 containing the replaced items. -func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - - arr := v.MustFloat32Slice() - replaced := make([]float32, len(arr)) - - v.EachFloat32(func(index int, val float32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectFloat32 uses the specified collector function to collect a value -// for each of the float32s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - - arr := v.MustFloat32Slice() - collected := make([]interface{}, len(arr)) - - v.EachFloat32(func(index int, val float32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Float64 (float64 and []float64) - -------------------------------------------------- -*/ - -// Float64 gets the value as a float64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float64(optionalDefault ...float64) float64 { - if s, ok := v.data.(float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat64 gets the value as a float64. -// -// Panics if the object is not a float64. -func (v *Value) MustFloat64() float64 { - return v.data.(float64) -} - -// Float64Slice gets the value as a []float64, returns the optionalDefault -// value or nil if the value is not a []float64. -func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { - if s, ok := v.data.([]float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat64Slice gets the value as a []float64. -// -// Panics if the object is not a []float64. -func (v *Value) MustFloat64Slice() []float64 { - return v.data.([]float64) -} - -// IsFloat64 gets whether the object contained is a float64 or not. -func (v *Value) IsFloat64() bool { - _, ok := v.data.(float64) - return ok -} - -// IsFloat64Slice gets whether the object contained is a []float64 or not. -func (v *Value) IsFloat64Slice() bool { - _, ok := v.data.([]float64) - return ok -} - -// EachFloat64 calls the specified callback for each object -// in the []float64. -// -// Panics if the object is the wrong type. -func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - - for index, val := range v.MustFloat64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereFloat64 uses the specified decider function to select items -// from the []float64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - - var selected []float64 - - v.EachFloat64(func(index int, val float64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupFloat64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float64. -func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - - groups := make(map[string][]float64) - - v.EachFloat64(func(index int, val float64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceFloat64 uses the specified function to replace each float64s -// by iterating each item. The data in the returned result will be a -// []float64 containing the replaced items. 
-func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - - arr := v.MustFloat64Slice() - replaced := make([]float64, len(arr)) - - v.EachFloat64(func(index int, val float64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectFloat64 uses the specified collector function to collect a value -// for each of the float64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - - arr := v.MustFloat64Slice() - collected := make([]interface{}, len(arr)) - - v.EachFloat64(func(index int, val float64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Complex64 (complex64 and []complex64) - -------------------------------------------------- -*/ - -// Complex64 gets the value as a complex64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex64(optionalDefault ...complex64) complex64 { - if s, ok := v.data.(complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex64 gets the value as a complex64. -// -// Panics if the object is not a complex64. -func (v *Value) MustComplex64() complex64 { - return v.data.(complex64) -} - -// Complex64Slice gets the value as a []complex64, returns the optionalDefault -// value or nil if the value is not a []complex64. -func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { - if s, ok := v.data.([]complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex64Slice gets the value as a []complex64. -// -// Panics if the object is not a []complex64. -func (v *Value) MustComplex64Slice() []complex64 { - return v.data.([]complex64) -} - -// IsComplex64 gets whether the object contained is a complex64 or not. -func (v *Value) IsComplex64() bool { - _, ok := v.data.(complex64) - return ok -} - -// IsComplex64Slice gets whether the object contained is a []complex64 or not. -func (v *Value) IsComplex64Slice() bool { - _, ok := v.data.([]complex64) - return ok -} - -// EachComplex64 calls the specified callback for each object -// in the []complex64. -// -// Panics if the object is the wrong type. -func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - - for index, val := range v.MustComplex64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereComplex64 uses the specified decider function to select items -// from the []complex64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - - var selected []complex64 - - v.EachComplex64(func(index int, val complex64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupComplex64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex64. 
-func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - - groups := make(map[string][]complex64) - - v.EachComplex64(func(index int, val complex64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceComplex64 uses the specified function to replace each complex64s -// by iterating each item. The data in the returned result will be a -// []complex64 containing the replaced items. -func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - - arr := v.MustComplex64Slice() - replaced := make([]complex64, len(arr)) - - v.EachComplex64(func(index int, val complex64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectComplex64 uses the specified collector function to collect a value -// for each of the complex64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - - arr := v.MustComplex64Slice() - collected := make([]interface{}, len(arr)) - - v.EachComplex64(func(index int, val complex64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Complex128 (complex128 and []complex128) - -------------------------------------------------- -*/ - -// Complex128 gets the value as a complex128, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex128(optionalDefault ...complex128) complex128 { - if s, ok := v.data.(complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex128 gets the value as a complex128. -// -// Panics if the object is not a complex128. -func (v *Value) MustComplex128() complex128 { - return v.data.(complex128) -} - -// Complex128Slice gets the value as a []complex128, returns the optionalDefault -// value or nil if the value is not a []complex128. -func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { - if s, ok := v.data.([]complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex128Slice gets the value as a []complex128. -// -// Panics if the object is not a []complex128. -func (v *Value) MustComplex128Slice() []complex128 { - return v.data.([]complex128) -} - -// IsComplex128 gets whether the object contained is a complex128 or not. -func (v *Value) IsComplex128() bool { - _, ok := v.data.(complex128) - return ok -} - -// IsComplex128Slice gets whether the object contained is a []complex128 or not. -func (v *Value) IsComplex128Slice() bool { - _, ok := v.data.([]complex128) - return ok -} - -// EachComplex128 calls the specified callback for each object -// in the []complex128. -// -// Panics if the object is the wrong type. -func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - - for index, val := range v.MustComplex128Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereComplex128 uses the specified decider function to select items -// from the []complex128. The object contained in the result will contain -// only the selected items. 
-func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - - var selected []complex128 - - v.EachComplex128(func(index int, val complex128) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupComplex128 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex128. -func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - - groups := make(map[string][]complex128) - - v.EachComplex128(func(index int, val complex128) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex128, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceComplex128 uses the specified function to replace each complex128s -// by iterating each item. The data in the returned result will be a -// []complex128 containing the replaced items. -func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - - arr := v.MustComplex128Slice() - replaced := make([]complex128, len(arr)) - - v.EachComplex128(func(index int, val complex128) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectComplex128 uses the specified collector function to collect a value -// for each of the complex128s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - - arr := v.MustComplex128Slice() - collected := make([]interface{}, len(arr)) - - v.EachComplex128(func(index int, val complex128) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go deleted file mode 100644 index 7aaef06b..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -// Value provides methods for extracting interface{} data in various -// types. -type Value struct { - // data contains the raw data being managed by this Value - data interface{} -} - -// Data returns the raw data contained by this Value -func (v *Value) Data() interface{} { - return v.data -} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/LICENSE similarity index 51% rename from vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/LICENSE rename to vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/LICENSE index 545174c1..aff7d358 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/fsouza/go-dockerclient/LICENSE +++ b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/LICENSE @@ -1,18 +1,23 @@ -Copyright (c) 2013-2017, go-dockerclient authors +Copyright (c) 2015, Tim Heckman All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of linode-netint nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR diff --git a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/README.md b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/README.md new file mode 100644 index 00000000..38c794c8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/README.md @@ -0,0 +1,40 @@ +# go-flock +[![TravisCI Build Status](https://img.shields.io/travis/theckman/go-flock/master.svg?style=flat)](https://travis-ci.org/theckman/go-flock) +[![GoDoc](https://img.shields.io/badge/godoc-go--flock-blue.svg?style=flat)](https://godoc.org/github.com/theckman/go-flock) +[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/theckman/go-flock/blob/master/LICENSE) + +`flock` implements a thread-safe sync.Locker interface for file locking. It also +includes a non-blocking TryLock() function to allow locking without blocking execution. + +## License +`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. + +## Go Compatibility +This package makes use of the `context` package that was introduced in Go 1.7. As such, this +package has an implicit dependency on Go 1.7+. + +## Installation +``` +go get -u github.com/theckman/go-flock +``` + +## Usage +```Go +import "github.com/theckman/go-flock" + +fileLock := flock.NewFlock("/var/lock/go-lock.lock") + +locked, err := fileLock.TryLock() + +if err != nil { + // handle locking error +} + +if locked { + // do work + fileLock.Unlock() +} +``` + +For more detailed usage information take a look at the package API docs on +[GoDoc](https://godoc.org/github.com/theckman/go-flock). 
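The flock.go file added further down in this patch also exposes TryLockContext and TryRLockContext, which retry a non-blocking lock attempt at a fixed delay until the lock is acquired, an error occurs, or the context is cancelled. A minimal sketch of that usage, assuming only the signatures shown in the patch; the lock path and timings are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/theckman/go-flock"
)

func main() {
	fileLock := flock.NewFlock("/var/lock/go-lock.lock")

	// Give up after five seconds instead of blocking forever.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Retry every 100ms until the lock is acquired or the context expires.
	// On timeout the context's error (context.DeadlineExceeded) is returned.
	locked, err := fileLock.TryLockContext(ctx, 100*time.Millisecond)
	if err != nil {
		fmt.Println("could not acquire lock:", err)
		return
	}
	if locked {
		defer fileLock.Unlock()
		// do work while holding the exclusive lock
	}
}
```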
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/appveyor.yml b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/appveyor.yml new file mode 100644 index 00000000..2b2d603f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/appveyor.yml @@ -0,0 +1,25 @@ +version: '{build}' + +build: false +deploy: false + +clone_folder: 'c:\gopath\src\github.com\theckman\go-flock' + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.9.2' + +init: + - git config --global core.autocrlf input + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - set Path=c:\go\bin;c:\gopath\bin;%Path% + - go version + - go env + +test_script: + - go get -t ./... + - go test -v ./... diff --git a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock.go b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock.go new file mode 100644 index 00000000..867c765f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock.go @@ -0,0 +1,107 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// Package flock implements a thread-safe sync.Locker interface for file locking. +// It also includes a non-blocking TryLock() function to allow locking +// without blocking execution. +// +// Package flock is released under the BSD 3-Clause License. See the LICENSE file +// for more details. +// +// While using this library, remember that the locking behaviors are not +// guaranteed to be the same on each platform. For example, some UNIX-like +// operating systems will transparently convert a shared lock to an exclusive +// lock. If you Unlock() the flock from a location where you believe that you +// have the shared lock, you may accidently drop the exclusive lock. +package flock + +import ( + "context" + "os" + "sync" + "time" +) + +// Flock is the struct type to handle file locking. All fields are unexported, +// with access to some of the fields provided by getter methods (Path() and Locked()). +type Flock struct { + path string + m sync.RWMutex + fh *os.File + l bool + r bool +} + +// NewFlock is a function to return a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +func NewFlock(path string) *Flock { + return &Flock{path: path} +} + +// Path is a function to return the path as provided in NewFlock(). +func (f *Flock) Path() string { + return f.path +} + +// Locked is a function to return the current lock state (locked: true, unlocked: false). +func (f *Flock) Locked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.l +} + +// RLocked is a function to return the current read lock state (locked: true, unlocked: false). +func (f *Flock) RLocked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.r +} + +func (f *Flock) String() string { + return f.path +} + +// TryLockContext repeatedly tries to take an exclusive lock until one of the +// conditions is met: TryLock succeeds, TryLock fails with error, or Context +// Done channel is closed. 
+func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(f.TryLock, ctx, retryDelay) +} + +// TryRLockContext repeatedly tries to take a shared lock until one of the +// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(f.TryRLock, ctx, retryDelay) +} + +func tryCtx(fn func() (bool, error), ctx context.Context, retryDelay time.Duration) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + for { + if ok, err := fn(); ok || err != nil { + return ok, err + } + select { + case <-ctx.Done(): + return false, ctx.Err() + case <-time.After(retryDelay): + // try again + } + } +} + +func (f *Flock) setFh() error { + // open a new os.File instance + // create it if it doesn't exist, and open the file read-only. + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600)) + if err != nil { + return err + } + + // set the filehandle on the struct + f.fh = fh + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_unix.go new file mode 100644 index 00000000..a9ae0a89 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_unix.go @@ -0,0 +1,146 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build !windows + +package flock + +import ( + "syscall" +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, syscall.LOCK_EX) +} + +// RLock is a blocking call to try and take a ahred file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) lock(locked *bool, flag int) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + } + + if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { + return err + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. 
This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// syscall.LOCK_UN on the file and closes the file descriptor. It does not +// remove the file from disk. It's up to your application to do. +// +// Please note, if your shared lock became an exclusive lock this may +// unintentionally drop the exclusive lock if called by the consumer that +// believes they have a shared lock. Please see Lock() for more details. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, syscall.LOCK_EX) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) try(locked *bool, flag int) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + } + + err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB) + + switch err { + case syscall.EWOULDBLOCK: + return false, nil + case nil: + *locked = true + return true, nil + } + + return false, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_winapi.go b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_winapi.go new file mode 100644 index 00000000..fe405a25 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_winapi.go @@ -0,0 +1,76 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. 
+ +// +build windows + +package flock + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32, _ = syscall.LoadLibrary("kernel32.dll") + procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") + procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") +) + +const ( + winLockfileFailImmediately = 0x00000001 + winLockfileExclusiveLock = 0x00000002 + winLockfileSharedLock = 0x00000000 +) + +// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows +// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as: +// +// > The function requests an exclusive lock. Otherwise, it requests a shared +// > lock. +// +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + +func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procLockFileEx), + 6, + uintptr(handle), + uintptr(flags), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset))) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} + +func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procUnlockFileEx), + 5, + uintptr(handle), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset)), + 0) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_windows.go new file mode 100644 index 00000000..a0103f6d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/flock_windows.go @@ -0,0 +1,140 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +package flock + +import ( + "syscall" +) + +// ErrorLockViolation is the error code returned from the Windows syscall when a +// lock would block and you ask to fail immediately. +const ErrorLockViolation syscall.Errno = 0x21 // 33 + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, winLockfileExclusiveLock) +} + +// RLock is a blocking call to try and take a sahred file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. 
+func (f *Flock) RLock() error { + return f.lock(&f.r, winLockfileSharedLock) +} + +func (f *Flock) lock(locked *bool, flag uint32) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + } + + if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// UnlockFileEx() on the file and closes the file descriptor. It does not remove +// the file from disk. It's up to your application to do. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exlusive file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, winLockfileExclusiveLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being shared-locked. 
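The shared and exclusive semantics documented above can be exercised end to end with two independent Flock handles on the same path, much as two cooperating processes would hold them. This is a hedged sketch built only from the behavior described in the flock_unix.go and flock_windows.go comments; the path is illustrative and exact blocking behavior remains platform-dependent:

```go
package main

import (
	"fmt"

	"github.com/theckman/go-flock"
)

func main() {
	// Two independent handles on the same lock file. Each opens its own
	// file descriptor, so their locks interact through the OS.
	reader := flock.NewFlock("/tmp/example.lock")
	writer := flock.NewFlock("/tmp/example.lock")

	// A shared (read) lock succeeds while no exclusive lock is held.
	if ok, err := reader.TryRLock(); err != nil || !ok {
		fmt.Println("could not take shared lock:", ok, err)
		return
	}

	// While the shared lock is held, a non-blocking exclusive attempt from
	// the other handle reports false rather than blocking.
	if ok, _ := writer.TryLock(); !ok {
		fmt.Println("exclusive lock unavailable while a shared lock is held")
	}

	// Releasing the shared lock lets the exclusive lock go through.
	reader.Unlock()
	if ok, _ := writer.TryLock(); ok {
		fmt.Println("exclusive lock acquired")
		writer.Unlock()
	}
}
```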
+func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, winLockfileSharedLock) +} + +func (f *Flock) try(locked *bool, flag uint32) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + } + + _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) + + if errNo > 0 { + if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING { + return false, nil + } + + return false, errNo + } + + *locked = true + + return true, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/afpacket/afpacket.go b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/afpacket/afpacket.go index ed0c5ce7..fe622918 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/afpacket/afpacket.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/afpacket/afpacket.go @@ -16,14 +16,16 @@ package afpacket import ( "errors" "fmt" - "github.com/tsg/gopacket" - "github.com/tsg/gopacket/layers" - "github.com/tsg/gopacket/pcap" "net" "runtime" "sync" + "syscall" "time" "unsafe" + + "github.com/tsg/gopacket" + "github.com/tsg/gopacket/layers" + "github.com/tsg/gopacket/pcap" ) /* @@ -310,7 +312,13 @@ func (h *TPacket) pollForFirstPacket(hdr header) error { h.pollset.events = C.POLLIN h.pollset.revents = 0 timeout := C.int(h.opts.timeout / time.Millisecond) - _, err := C.poll(&h.pollset, 1, timeout) + n, err := C.poll(&h.pollset, 1, timeout) + if n == 0 { + /* propagate timeout when no packets are available + otherwise it will loop forever until a packet + is received. */ + return syscall.EINTR + } h.stats.Polls++ if err != nil { return err diff --git a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/layers/lldp.go b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/layers/lldp.go index c77bd981..613d3820 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/layers/lldp.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/layers/lldp.go @@ -9,6 +9,7 @@ package layers import ( "encoding/binary" "fmt" + "github.com/tsg/gopacket" ) @@ -737,7 +738,7 @@ func decodeLinkLayerDiscovery(data []byte, p gopacket.PacketBuilder) error { for len(vData) > 0 { nbit := vData[0] & 0x01 t := LLDPTLVType(vData[0] >> 1) - val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit<<8 + vData[1])} + val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit)<<8 + uint16(vData[1])} if val.Length > 0 { val.Value = vData[2 : val.Length+2] } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap.go b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap.go index f5612e6f..c869bcc1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap.go @@ -131,6 +131,10 @@ type Handle struct { // huge memory hit, so to handle that we store them here instead. pkthdr *C.struct_pcap_pkthdr buf_ptr *C.u_char + // This is required to poll the pcap handle for incoming packets, due to + // newer Linux kernels supporting TPACKET_V3 not starting the timeout until + // the first packet is received. 
+ packetPoller *packetPoll } // Stats contains statistics on how many packets were handled by a pcap handle, @@ -206,10 +210,14 @@ func OpenLive(device string, snaplen int32, promisc bool, timeout time.Duration) dev := C.CString(device) defer C.free(unsafe.Pointer(dev)) - p.cptr = C.pcap_open_live(dev, C.int(snaplen), pro, timeoutMillis(timeout), buf) + timeoutMs := timeoutMillis(timeout) + p.cptr = C.pcap_open_live(dev, C.int(snaplen), pro, timeoutMs, buf) if p.cptr == nil { return nil, errors.New(C.GoString(buf)) } + if !p.blockForever { + p.packetPoller = NewPacketPoll(p.cptr, timeoutMs) + } return p, nil } @@ -316,6 +324,9 @@ func (a activateError) Error() string { func (p *Handle) getNextBufPtrLocked(ci *gopacket.CaptureInfo) error { var result NextError for { + if !p.packetPoller.AwaitForPackets() { + return NextErrorTimeoutExpired + } result = NextError(C.pcap_next_ex(p.cptr, &p.pkthdr, &p.buf_ptr)) if p.blockForever && result == NextErrorTimeoutExpired { continue @@ -544,6 +555,11 @@ func findalladdresses(addresses *_Ctype_struct_pcap_addr) (retval []InterfaceAdd for curaddr := addresses; curaddr != nil; curaddr = (*_Ctype_struct_pcap_addr)(curaddr.next) { var a InterfaceAddress var err error + // In case of a tun device on Linux the link layer has no curaddr.addr. + // Do not crash trying to check the family type. + if curaddr.addr == nil { + continue + } if a.IP, err = sockaddr_to_IP((*syscall.RawSockaddr)(unsafe.Pointer(curaddr.addr))); err != nil { continue } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap_poll_common.go b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap_poll_common.go new file mode 100644 index 00000000..9d96f485 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap_poll_common.go @@ -0,0 +1,18 @@ +// +build !linux + +package pcap + +/* +#include +*/ +import "C" + +type packetPoll struct{} + +func NewPacketPoll(_ *C.pcap_t, _ C.int) *packetPoll { + return nil +} + +func (t *packetPoll) AwaitForPackets() bool { + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap_poll_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap_poll_linux.go new file mode 100644 index 00000000..30ec8de7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/tsg/gopacket/pcap/pcap_poll_linux.go @@ -0,0 +1,52 @@ +// +build linux + +package pcap + +/* +#include +#include +#include +*/ +import "C" +import "syscall" + +// packetPoll holds all the parameters required to use poll(2) on the pcap +// file descriptor. +type packetPoll struct { + pollfd C.struct_pollfd + timeout C.int +} + +func captureIsTPacketV3(fildes int) bool { + version, err := syscall.GetsockoptInt(fildes, syscall.SOL_PACKET, C.PACKET_VERSION) + return err == nil && version == C.TPACKET_V3 +} + +// NewPacketPoll returns a new packetPoller if the pcap handle requires it +// in order to timeout effectively when no packets are received. This is only +// necessary when TPACKET_V3 interface is used to receive packets. 
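For readers unfamiliar with the pattern, the readiness wait that NewPacketPoll and AwaitForPackets implement with C.poll can be sketched in pure Go with golang.org/x/sys/unix: block on the capture file descriptor until it becomes readable or the timeout elapses, then let the caller retry. This is an illustrative analogue under that assumption, not the vendored cgo implementation:

```go
// +build linux

package capture

import "golang.org/x/sys/unix"

// awaitReadable reports whether fd became readable within timeoutMs
// milliseconds. A false result with a nil error means the poll timed out
// and the caller should treat the read attempt as expired.
func awaitReadable(fd int, timeoutMs int) (bool, error) {
	fds := []unix.PollFd{{
		Fd:     int32(fd),
		Events: unix.POLLIN,
	}}

	n, err := unix.Poll(fds, timeoutMs)
	if err != nil {
		// Surface EINTR and friends; the caller decides whether to retry,
		// mirroring how the cgo version forwards poll errors.
		return false, err
	}
	return n > 0, nil
}
```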
+func NewPacketPoll(ptr *C.pcap_t, timeout C.int) *packetPoll { + fildes := C.pcap_fileno(ptr) + if !captureIsTPacketV3(int(fildes)) { + return nil + } + return &packetPoll{ + pollfd: C.struct_pollfd{ + fd: fildes, + events: C.POLLIN, + revents: 0, + }, + timeout: timeout, + } +} + +func (t *packetPoll) AwaitForPackets() bool { + if t != nil { + t.pollfd.revents = 0 + // block until the capture file descriptor is readable or a timeout + // happens. + n, err := C.poll(&t.pollfd, 1, t.timeout) + return err != nil || n != 0 + } + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/0gen.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/0gen.go new file mode 100644 index 00000000..da238013 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/0gen.go @@ -0,0 +1,4 @@ +package bin + +//go:generate mktmpl -f -o bin.generated.go bin.yml +//go:generate mktmpl -f -o bin.generated_test.go bin_test.yml diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/README.md b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/README.md new file mode 100644 index 00000000..45ba5c35 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/README.md @@ -0,0 +1,2 @@ +# go-bin +Support for encoding/decoding buffers using casts into go structures diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.generated.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.generated.go new file mode 100644 index 00000000..d0cc2854 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.generated.go @@ -0,0 +1,260 @@ +// This file has been generated from 'bin.yml', do not edit +package bin + +import "encoding/binary" + +// I8be wraps a byte array into a big endian encoded 8bit signed integer. +type I8be [1]byte + +// Len returns the number of bytes required to store the value. 
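The generated wrapper types below pair with CastStruct from cast.go, which appears later in this patch: a Go struct composed of these fixed-width fields can be overlaid on a byte buffer and read or written in place, with the endianness encoded in the field types. A hedged sketch using a hypothetical header layout; the layout and values are examples, not part of the vendored code:

```go
package main

import (
	"fmt"

	bin "github.com/urso/go-bin"
)

// demoHeader overlays an 8-byte big-endian header: version, flags, length, id.
// All fields are byte arrays, so the struct has no padding.
type demoHeader struct {
	Version bin.U8be
	Flags   bin.U8be
	Length  bin.U16be
	ID      bin.U32be
}

func main() {
	buf := []byte{0x01, 0x00, 0x00, 0x10, 0xde, 0xad, 0xbe, 0xef}

	// CastStruct points hdr at the buffer; nothing is copied.
	var hdr *demoHeader
	if err := bin.CastStruct(&hdr, buf); err != nil {
		panic(err)
	}

	fmt.Println(hdr.Version.Get(), hdr.Length.Get()) // 1 16
	fmt.Printf("%#x\n", hdr.ID.Get())                // 0xdeadbeef

	// Set writes back through the cast, mutating buf directly.
	hdr.Length.Set(32)
	fmt.Printf("%#x\n", buf[2:4]) // 0x0020
}
```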
+func (b *I8be) Len() int { return 1 } + +// Get returns the decoded value. +func (b *I8be) Get() int8 { + return int8(b[0]) +} + +// Set encodes a new value into the backing buffer: +func (b *I8be) Set(v int8) { + b[0] = byte(v) +} + +// I16be wraps a byte array into a big endian encoded 16bit signed integer. +type I16be [2]byte + +// Len returns the number of bytes required to store the value. +func (b *I16be) Len() int { return 2 } + +// Get returns the decoded value. +func (b *I16be) Get() int16 { + return int16(binary.BigEndian.Uint16(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *I16be) Set(v int16) { + binary.BigEndian.PutUint16(b[:], uint16(v)) +} + +// I32be wraps a byte array into a big endian encoded 32bit signed integer. +type I32be [4]byte + +// Len returns the number of bytes required to store the value. +func (b *I32be) Len() int { return 4 } + +// Get returns the decoded value. +func (b *I32be) Get() int32 { + return int32(binary.BigEndian.Uint32(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *I32be) Set(v int32) { + binary.BigEndian.PutUint32(b[:], uint32(v)) +} + +// I64be wraps a byte array into a big endian encoded 64bit signed integer. +type I64be [8]byte + +// Len returns the number of bytes required to store the value. +func (b *I64be) Len() int { return 8 } + +// Get returns the decoded value. +func (b *I64be) Get() int64 { + return int64(binary.BigEndian.Uint64(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *I64be) Set(v int64) { + binary.BigEndian.PutUint64(b[:], uint64(v)) +} + +// U8be wraps a byte array into a big endian encoded 8bit unsigned integer. +type U8be [1]byte + +// Len returns the number of bytes required to store the value. +func (b *U8be) Len() int { return 1 } + +// Get returns the decoded value. +func (b *U8be) Get() uint8 { + return uint8(b[0]) +} + +// Set encodes a new value into the backing buffer: +func (b *U8be) Set(v uint8) { + b[0] = byte(v) +} + +// U16be wraps a byte array into a big endian encoded 16bit unsigned integer. +type U16be [2]byte + +// Len returns the number of bytes required to store the value. +func (b *U16be) Len() int { return 2 } + +// Get returns the decoded value. +func (b *U16be) Get() uint16 { + return uint16(binary.BigEndian.Uint16(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *U16be) Set(v uint16) { + binary.BigEndian.PutUint16(b[:], uint16(v)) +} + +// U32be wraps a byte array into a big endian encoded 32bit unsigned integer. +type U32be [4]byte + +// Len returns the number of bytes required to store the value. +func (b *U32be) Len() int { return 4 } + +// Get returns the decoded value. +func (b *U32be) Get() uint32 { + return uint32(binary.BigEndian.Uint32(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *U32be) Set(v uint32) { + binary.BigEndian.PutUint32(b[:], uint32(v)) +} + +// U64be wraps a byte array into a big endian encoded 64bit unsigned integer. +type U64be [8]byte + +// Len returns the number of bytes required to store the value. +func (b *U64be) Len() int { return 8 } + +// Get returns the decoded value. +func (b *U64be) Get() uint64 { + return uint64(binary.BigEndian.Uint64(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *U64be) Set(v uint64) { + binary.BigEndian.PutUint64(b[:], uint64(v)) +} + +// I8le wraps a byte array into a little endian encoded 8bit signed integer. 
+type I8le [1]byte + +// Len returns the number of bytes required to store the value. +func (b *I8le) Len() int { return 1 } + +// Get returns the decoded value. +func (b *I8le) Get() int8 { + return int8(b[0]) +} + +// Set encodes a new value into the backing buffer: +func (b *I8le) Set(v int8) { + b[0] = byte(v) +} + +// I16le wraps a byte array into a little endian encoded 16bit signed integer. +type I16le [2]byte + +// Len returns the number of bytes required to store the value. +func (b *I16le) Len() int { return 2 } + +// Get returns the decoded value. +func (b *I16le) Get() int16 { + return int16(binary.LittleEndian.Uint16(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *I16le) Set(v int16) { + binary.LittleEndian.PutUint16(b[:], uint16(v)) +} + +// I32le wraps a byte array into a little endian encoded 32bit signed integer. +type I32le [4]byte + +// Len returns the number of bytes required to store the value. +func (b *I32le) Len() int { return 4 } + +// Get returns the decoded value. +func (b *I32le) Get() int32 { + return int32(binary.LittleEndian.Uint32(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *I32le) Set(v int32) { + binary.LittleEndian.PutUint32(b[:], uint32(v)) +} + +// I64le wraps a byte array into a little endian encoded 64bit signed integer. +type I64le [8]byte + +// Len returns the number of bytes required to store the value. +func (b *I64le) Len() int { return 8 } + +// Get returns the decoded value. +func (b *I64le) Get() int64 { + return int64(binary.LittleEndian.Uint64(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *I64le) Set(v int64) { + binary.LittleEndian.PutUint64(b[:], uint64(v)) +} + +// U8le wraps a byte array into a little endian encoded 8bit unsigned integer. +type U8le [1]byte + +// Len returns the number of bytes required to store the value. +func (b *U8le) Len() int { return 1 } + +// Get returns the decoded value. +func (b *U8le) Get() uint8 { + return uint8(b[0]) +} + +// Set encodes a new value into the backing buffer: +func (b *U8le) Set(v uint8) { + b[0] = byte(v) +} + +// U16le wraps a byte array into a little endian encoded 16bit unsigned integer. +type U16le [2]byte + +// Len returns the number of bytes required to store the value. +func (b *U16le) Len() int { return 2 } + +// Get returns the decoded value. +func (b *U16le) Get() uint16 { + return uint16(binary.LittleEndian.Uint16(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *U16le) Set(v uint16) { + binary.LittleEndian.PutUint16(b[:], uint16(v)) +} + +// U32le wraps a byte array into a little endian encoded 32bit unsigned integer. +type U32le [4]byte + +// Len returns the number of bytes required to store the value. +func (b *U32le) Len() int { return 4 } + +// Get returns the decoded value. +func (b *U32le) Get() uint32 { + return uint32(binary.LittleEndian.Uint32(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *U32le) Set(v uint32) { + binary.LittleEndian.PutUint32(b[:], uint32(v)) +} + +// U64le wraps a byte array into a little endian encoded 64bit unsigned integer. +type U64le [8]byte + +// Len returns the number of bytes required to store the value. +func (b *U64le) Len() int { return 8 } + +// Get returns the decoded value. 
+func (b *U64le) Get() uint64 { + return uint64(binary.LittleEndian.Uint64(b[:])) +} + +// Set encodes a new value into the backing buffer: +func (b *U64le) Set(v uint64) { + binary.LittleEndian.PutUint64(b[:], uint64(v)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.yml b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.yml new file mode 100644 index 00000000..416b225d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.yml @@ -0,0 +1,53 @@ +import: + - 'types.yml' + +main: | + package bin + + import "encoding/binary" + + {{ range $enc, $long := data.endianess }} + {{ range $signdness, $long := data.signdness }} + {{ range $k, $bits := data.bits }} + {{ invoke "makeBinType" "bits" $bits "name" $signdness "enc" $enc }} + {{ end }} + {{ end }} + {{ end }} + +# makeBinType(bits, name, enc) +templates.makeBinType: | + {{ $bits := .bits }} + {{ $len := div $bits 8 }} + {{ $name := .name }} + {{ $enc := .enc }} + {{ $endianess := index data.endianess $enc }} + {{ $inst := capitalize $endianess | printf "%vEndian" }} + {{ $signdness := index data.signdness $name }} + {{ $gotype := printf "%v%v" (index data.baseType $name) $bits }} + {{ $accessor := printf "Uint%v" $bits }} + + {{ $type := printf "%v%v%v" (capitalize $name) $bits $enc }} + + // {{ $type }} wraps a byte array into a {{ $endianess }} endian encoded {{ $bits }}bit {{ $signdness }} integer. + type {{ $type }} [{{ $len }}]byte + + // Len returns the number of bytes required to store the value. + func (b *{{ $type }}) Len() int { return {{ $len }} } + + // Get returns the decoded value. + func (b *{{ $type }}) Get() {{ $gotype }} { + {{- if (eq $bits 8) }} + return {{ $gotype }}(b[0]) + {{ else }} + return {{ $gotype }}(binary.{{ $inst }}.{{ $accessor }}(b[:])) + {{ end -}} + } + + // Set encodes a new value into the backing buffer: + func (b *{{ $type }}) Set(v {{ $gotype }}) { + {{- if (eq $bits 8) }} + b[0] = byte(v) + {{ else }} + binary.{{ $inst }}.Put{{ $accessor }}(b[:], uint{{ $bits }}(v)) + {{ end -}} + } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin_test.yml b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin_test.yml new file mode 100644 index 00000000..4da772af --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin_test.yml @@ -0,0 +1,53 @@ +import: + - 'types.yml' + +main: | + package bin + + func TestPrimitives(t *testing.T) { + {{ range $enc, $long := data.endianess }} + {{ range $signdness, $long := data.signdness }} + {{ range $k, $bits := data.bits }} + {{ invoke "makeBinType" "bits" $bits "name" $signdness "enc" $enc }} + {{ end }} + {{ end }} + {{ end }} + } + +templates.makeBinType: | + {{ $bits := .bits }} + {{ $len := div $bits 8 }} + {{ $name := .name }} + {{ $enc := .enc }} + {{ $endianess := index data.endianess $enc }} + {{ $inst := capitalize $endianess | printf "%vEndian" }} + {{ $signdness := index data.signdness $name }} + {{ $gotype := printf "%v%v" (index data.baseType $name) $bits }} + {{ $accessor := printf "Uint%v" $bits }} + + {{ $type := printf "%v%v%v" (capitalize $name) $bits $enc }} + + t.Run("{{ $gotype }} {{ $endianess }} endian", func(t *testing.T) { + var v {{ $type }} + err := quick.Check(func(in {{ $gotype }}) bool { + v.Set(in) + + // check raw contents correct encoding + tmp := make([]byte, v.Len()) + {{ if (eq $bits 8) }} + tmp[0] = byte(in) + {{ else }} + binary.{{ $inst }}.Put{{ $accessor }}(tmp, uint{{ $bits }}(in)) + 
{{ end }} + if !bytes.Equal(v[:], tmp) { + t.Error("encoding mismatch") + return false + } + + // check extracted value matches original value + return v.Get() == in + }, nil) + if err != nil { + t.Error(err) + } + }) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/cast.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/cast.go new file mode 100644 index 00000000..ac444ba5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/cast.go @@ -0,0 +1,53 @@ +package bin + +import ( + "errors" + "fmt" + "reflect" + "unsafe" +) + +var ( + errPtrPtrStructRequired = errors.New("pointer to pointer of go structure required") +) + +type emptyIfc struct { + typ, ptr unsafe.Pointer +} + +// UnsafeCastStruct casts a byte slice its contents into an arbitrary go-structure. +// The structure passed must be a pointer to a pointer of a struct to be casted too. +// +// If the input buffers length is 0, `to` will be set to nil. +// +// The operation is unsafe, as it does not validate the input value to be a +// pointer of a pointer, plus no length check is executed. +func UnsafeCastStruct(to interface{}, b []byte) { + ifc := (*emptyIfc)(unsafe.Pointer(&to)) + + if len(b) != 0 { + *(*uintptr)(ifc.ptr) = uintptr(unsafe.Pointer(&b[0])) + } else { + *(*uintptr)(ifc.ptr) = 0 + } +} + +// CastStruct casts a byte slice its contents into an arbitrary go-structure. +// The structure passed must be a pointer to a pointer of a structed to be casted too. +// An error is returned if the input type is invalid or the buffer is not big +// enough to hold the structure. +// If the input buffers length is 0, `to` will be set to nil. +func CastStruct(to interface{}, b []byte) error { + v := reflect.ValueOf(to) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Ptr { + return errPtrPtrStructRequired + } + + if bl, tl := len(b), int(v.Type().Size()); 0 < bl && bl < tl { + return fmt.Errorf("buffer of %v byte(s) can not be casted into structure requiring %v byte(s)", + bl, tl) + } + + UnsafeCastStruct(to, b) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/native_be.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/native_be.go new file mode 100644 index 00000000..9516267e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/native_be.go @@ -0,0 +1,18 @@ +// +build ppc64 mips64 mips s390x + +package bin + +import "encoding/binary" + +// Architecture native encoding +var NativeEndian = binary.BigEndian + +type I8 = I8be +type I16 = I16be +type I32 = I32be +type I64 = I64be + +type U8 = U8be +type U16 = U16be +type U32 = U32be +type U64 = U64be diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/native_le.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/native_le.go new file mode 100644 index 00000000..914cfced --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/native_le.go @@ -0,0 +1,18 @@ +// +build amd64 386 arm arm64 ppc64le mips64le mipsle + +package bin + +import "encoding/binary" + +// Architecture native encoding +var NativeEndian = binary.LittleEndian + +type I8 = I8le +type I16 = I16le +type I32 = I32le +type I64 = I64le + +type U8 = U8le +type U16 = U16le +type U32 = U32le +type U64 = U64le diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/types.yml b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/types.yml new file mode 100644 index 00000000..0ef2ce20 
--- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/types.yml @@ -0,0 +1,6 @@ +data: + bits: [8, 16, 32, 64] + endianess: {le: little, be: big} + signdness: {i: signed, u: unsigned} + baseType: {i: int, u: uint} + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/CHANGELOG.md deleted file mode 100644 index db614cb4..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/CHANGELOG.md +++ /dev/null @@ -1,28 +0,0 @@ -# Change Log -All notable changes to this project will be documented in this file. -This project adheres to [Semantic Versioning](http://semver.org/). - -## [Unreleased] - -### Added - -### Changed - -### Deprecated - -### Removed - -### Fixed - -## [0.0.2] - -### Added -- Add struct tag option ",omitempty". -- Add StringConvVisitor converting all primitive values to strings. -- Move and export object visitor into visitors package - -### Fixed -- Fix invalid pointer indirections in struct to array/map. - -[Unreleased]: https://github.com/elastic/go-ucfg/compare/v0.0.2...HEAD -[0.0.2]: https://github.com/elastic/go-ucfg/compare/v0.0.1...v0.0.2 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/buffer.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/buffer.go deleted file mode 100644 index 869810bc..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/gotype/buffer.go +++ /dev/null @@ -1,103 +0,0 @@ -package gotype - -import ( - "sync" - "unsafe" -) - -var alignment = unsafe.Alignof((*uint32)(nil)) - -var stridePool = sync.Pool{} - -type buffer struct { - strides []stride - strides0 [8]stride - i int - - preAlloc uintptr -} - -type stride struct { - raw []byte - pos uintptr -} - -func (b *buffer) init(alloc int) { - b.strides = b.strides0[:1] - b.preAlloc = uintptr(alloc) - b.i = 0 - - s := &b.strides[0] - s.raw = b.allocStride() - s.pos = 0 -} - -func (b *buffer) allocStride() []byte { - if bytesIfc := stridePool.Get(); bytesIfc != nil { - return bytesIfc.([]byte) - } - return make([]byte, b.preAlloc) -} - -func (b *buffer) alloc(sz int) unsafe.Pointer { - // align 'sz' to next for bytes after 'sz' - aligned := (((uintptr(sz) + (alignment - 1)) / alignment) * alignment) - total := aligned + alignment - - mem := b.doAlloc(total) - - szPtr := (*uint32)(unsafe.Pointer(&mem[aligned])) - *szPtr = uint32(total) - return unsafe.Pointer(&mem[0]) -} - -func (b *buffer) release() { - s := &b.strides[b.i] - if s.pos == 0 { - panic("release of unallocated memory") - } - - szPtr := (*uint32)(unsafe.Pointer(&s.raw[s.pos-alignment])) - sz := uintptr(*szPtr) - - s.pos -= sz - if s.pos == 0 && b.i > 0 { - // release (last) stride - stridePool.Put(s.raw) - s.raw = nil - b.strides = b.strides[:b.i] - b.i-- - } -} - -func (b *buffer) doAlloc(sz uintptr) []byte { - s := &b.strides[b.i] - space := uintptr(len(s.raw)) - s.pos - - if space < sz { - var bytes []byte - - if b.preAlloc < sz { - bytes = make([]byte, sz) - } else { - bytes = b.allocStride() - } - - b.strides = append(b.strides, stride{ - raw: bytes, - pos: sz, - }) - b.i++ - - return b.strides[b.i].raw[0:] - } - - start := s.pos - s.pos += sz - - mem := s.raw[start:s.pos] - for i := range mem { - mem[i] = 0 - } - return mem -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/internal/gen/gen_yaml.go 
b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/internal/gen/gen_yaml.go deleted file mode 100644 index ec3ad315..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/internal/gen/gen_yaml.go +++ /dev/null @@ -1,337 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "strings" - "text/template" - - "github.com/elastic/go-ucfg" - "github.com/elastic/go-ucfg/cfgutil" - "github.com/elastic/go-ucfg/yaml" - - "golang.org/x/tools/imports" -) - -var cfgOpts = []ucfg.Option{ - ucfg.PathSep("."), - ucfg.ResolveEnv, -} - -var datOpts = append([]ucfg.Option{ucfg.VarExp}, cfgOpts...) - -func main() { - if err := run(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func run() error { - to := flag.String("o", "", "write to") - format := flag.Bool("f", false, "format output using goimports") - dataFile := flag.String("d", "", "input data file for use to fill out") - - flag.Parse() - args := flag.Args() - if len(args) == 0 { - return errors.New("Missing input file") - } - - userData, err := loadData(*dataFile) - if err != nil { - return fmt.Errorf("Failed to read data file: %v", err) - } - - gen := struct { - Import []string - Templates map[string]string - Main string - }{} - if err = loadConfigInto(args[0], &gen); err != nil { - errPrint("Failed to load script template") - return err - } - - dat := struct { - Data *ucfg.Config - }{} - if err = loadConfigInto(args[0], &dat, ucfg.VarExp); err != nil { - errPrint("Failed to load script data") - return err - } - - var T *template.Template - D := cfgutil.NewCollector(nil, datOpts...) - var data map[string]interface{} - - var defaultFuncs = template.FuncMap{ - "data": func() map[string]interface{} { return data }, - "toLower": strings.ToLower, - "toUpper": strings.ToUpper, - "capitalize": strings.Title, - "isnil": func(v interface{}) bool { - return v == nil - }, - "default": func(D, v interface{}) interface{} { - if v == nil { - return D - } - return v - }, - "dict": makeDict, - "invoke": makeInvokeCommand(&T), // invoke another template with named parameters - } - - var td *ucfg.Config - T, td, err = loadTemplates(template.New("").Funcs(defaultFuncs), gen.Import) - if err := D.Add(td, err); err != nil { - errPrint("Failed to load imported template files") - return err - } - - if err := D.Add(dat.Data, nil); err != nil { - errPrint("Failed to merge template data with top-level script") - return err - } - - if err := D.Add(ucfg.NewFrom(userData, datOpts...)); err != nil { - errPrintf("Failed to merge user data") - return err - } - - if err := D.Config().Unpack(&data, datOpts...); err != nil { - errPrint("Failed to unpack data") - return err - } - - T, err = addTemplates(T, gen.Templates) - if err != nil { - return err - } - - var buf bytes.Buffer - header := fmt.Sprintf("// This file has been generated from '%v', do not edit\n", args[0]) - buf.WriteString(header) - T = T.New("master") - T, err = T.Parse(gen.Main) - if err != nil { - return fmt.Errorf("Parsing 'template' fields failed with %v", err) - } - - if err := T.Execute(&buf, data); err != nil { - return fmt.Errorf("executing template failed with %v", err) - } - - content := buf.Bytes() - if *format { - content, err = imports.Process(*to, content, nil) - if err != nil { - return fmt.Errorf("Applying goimports failed with: %v", err) - } - } - - if *to != "" { - return ioutil.WriteFile(*to, content, 0644) - } - - _, err = os.Stdout.Write(content) - return err -} - -func 
loadTemplates(T *template.Template, files []string) (*template.Template, *ucfg.Config, error) { - - /* - var childData []*ucfg.Config - var templatesData []*ucfg.Config - */ - - childData := cfgutil.NewCollector(nil, datOpts...) - templateData := cfgutil.NewCollector(nil, datOpts...) - - for _, file := range files { - gen := struct { - Import []string - Templates map[string]string - }{} - - dat := struct { - Data *ucfg.Config - }{} - - err := loadConfigInto(file, &gen) - if err != nil { - return nil, nil, err - } - - var D *ucfg.Config - T, D, err = loadTemplates(T, gen.Import) - if err != nil { - return nil, nil, err - } - - T, err = addTemplates(T, gen.Templates) - if err != nil { - return nil, nil, err - } - - err = loadConfigInto(file, &dat, ucfg.VarExp) - if err != nil { - errPrint("Failed to load data from: ", file) - return nil, nil, err - } - - childData.Add(D, nil) - templateData.Add(dat.Data, nil) - } - - if err := childData.Error(); err != nil { - errPrintf("Procesing file %v: failed to merge child data: %v", files, err) - return nil, nil, err - } - - if err := templateData.Error(); err != nil { - errPrintf("Procesing file %v: failed to merge template data: %v", files, err) - return nil, nil, err - } - - if err := childData.Add(templateData.Config(), templateData.Error()); err != nil { - errPrintf("Failed to combine template data: ", err) - return nil, nil, err - } - - return T, childData.Config(), nil -} - -func addTemplates(T *template.Template, templates map[string]string) (*template.Template, error) { - for name, content := range templates { - var err error - - T = T.New(name) - T, err = T.Parse(content) - if err != nil { - return nil, fmt.Errorf("failed to parse template %v: %v", name, err) - } - } - - return T, nil -} - -func loadConfig(file string, extraOpts ...ucfg.Option) (cfg *ucfg.Config, err error) { - opts := append(append([]ucfg.Option{}, extraOpts...), cfgOpts...) - cfg, err = yaml.NewConfigWithFile(file, opts...) - if err != nil { - err = fmt.Errorf("Failed to read file %v with: %v", file, err) - } - return -} - -func loadConfigInto(file string, to interface{}, extraOpts ...ucfg.Option) error { - cfg, err := loadConfig(file, extraOpts...) - if err == nil { - err = readConfig(cfg, to, extraOpts...) - } - return err -} - -func readConfig(cfg *ucfg.Config, to interface{}, extraOpts ...ucfg.Option) error { - opts := append(append([]ucfg.Option{}, extraOpts...), cfgOpts...) - if err := cfg.Unpack(to, opts...); err != nil { - return fmt.Errorf("Parsing template file failed with %v", err) - } - return nil -} - -func makeDict(values ...interface{}) (map[string]interface{}, error) { - if len(values)%2 != 0 { - return nil, errors.New("invalid dict call") - } - - dict := make(map[string]interface{}, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].(string) - if !ok { - return nil, errors.New("dict keys must be strings") - } - dict[key] = values[i+1] - } - return dict, nil -} - -func makeInvokeCommand(T **template.Template) func(string, ...interface{}) (string, error) { - return func(name string, values ...interface{}) (string, error) { - params, err := makeDict(values...) 
- if err != nil { - return "", err - } - - var buf bytes.Buffer - err = (*T).ExecuteTemplate(&buf, name, params) - return buf.String(), err - - } -} - -func loadData(file string) (map[string]interface{}, error) { - if file == "" { - return nil, nil - } - - meta := struct { - Entries map[string]struct { - Default string - Description string - } `config:",inline"` - }{} - - err := loadConfigInto(file, &meta, ucfg.VarExp) - if err != nil { - return nil, err - } - - reader := bufio.NewReader(os.Stdin) - - state := map[string]interface{}{} - for name, entry := range meta.Entries { - // parse default value - T, err := template.New("").Parse(entry.Default) - if err != nil { - return nil, fmt.Errorf("Failed to parse data entry %v: %v", name, err) - } - - var buf bytes.Buffer - if err := T.Execute(&buf, state); err != nil { - return nil, fmt.Errorf("Failed to evaluate data entry %v: %v", name, err) - } - - // ask user for input - defaultValue := buf.String() - fmt.Printf("%v\n%v [%v]: ", entry.Description, name, defaultValue) - value, err := reader.ReadString('\n') - if err != nil { - return nil, fmt.Errorf("Error waiting for user input: %v", err) - } - - value = strings.TrimSpace(value) - if value == "" { - value = defaultValue - } - - state[name] = value - } - - return state, nil -} - -func errPrint(msg ...interface{}) { - fmt.Fprintln(os.Stderr, msg...) -} - -func errPrintf(format string, msg ...interface{}) { - fmt.Fprintf(os.Stderr, format+"\n", msg...) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/cases.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/cases.go deleted file mode 100644 index 62ecd41b..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/cases.go +++ /dev/null @@ -1,216 +0,0 @@ -package sftest - -import structform "github.com/urso/go-structform" - -var Samples = concatSamples( - SamplesPrimitives, - SamplesArr, - SamplesObj, - SamplesCombinations, -) - -var SamplesPrimitives = []Recording{ - // simple primitives - {NilRec{}}, // "null" - {BoolRec{true}}, // "true" - {BoolRec{false}}, // "false" - {StringRec{"test"}}, - {StringRec{`test with " being special`}}, - {StringRec{""}}, - - // int types - {IntRec{8}}, - {IntRec{42}}, - {IntRec{100}}, - {IntRec{-90}}, - {IntRec{12345678}}, - {IntRec{-12345678}}, - {Int8Rec{8}}, - {Int8Rec{42}}, - {Int8Rec{100}}, - {Int8Rec{-90}}, - {Int16Rec{8}}, - {Int16Rec{42}}, - {Int16Rec{100}}, - {Int16Rec{-90}}, - {Int16Rec{500}}, - {Int16Rec{-500}}, - {Int32Rec{8}}, - {Int32Rec{42}}, - {Int32Rec{100}}, - {Int32Rec{-90}}, - {Int32Rec{500}}, - {Int32Rec{-500}}, - {Int32Rec{12345678}}, - {Int32Rec{-12345678}}, - {Int64Rec{8}}, - {Int64Rec{42}}, - {Int64Rec{100}}, - {Int64Rec{-90}}, - {Int64Rec{500}}, - {Int64Rec{-500}}, - {Int64Rec{123456781234}}, - {Int64Rec{-123456781234}}, - - // uint types - {UintRec{8}}, - {UintRec{42}}, - {UintRec{100}}, - {UintRec{12345678}}, - {Uint8Rec{8}}, - {Uint8Rec{42}}, - {Uint8Rec{100}}, - {ByteRec{8}}, - {ByteRec{42}}, - {ByteRec{100}}, - {Uint16Rec{8}}, - {Uint16Rec{42}}, - {Uint16Rec{100}}, - {Uint16Rec{500}}, - {Uint32Rec{8}}, - {Uint32Rec{42}}, - {Uint32Rec{100}}, - {Uint32Rec{500}}, - {Uint32Rec{12345678}}, - {Uint64Rec{8}}, - {Uint64Rec{42}}, - {Uint64Rec{100}}, - {Uint64Rec{500}}, - {Uint64Rec{123456781234}}, - - // float types - {Float32Rec{3.14}}, - {Float32Rec{-3.14}}, - {Float64Rec{3.14}}, - {Float64Rec{-3.14}}, -} - -var SamplesArr = []Recording{ - // empty arrays `[]` - Arr(0, 
structform.AnyType), - Arr(-1, structform.AnyType), - - // nested empty array `[[]]` - Arr(1, structform.AnyType, - Arr(0, structform.AnyType), - ), - Arr(-1, structform.AnyType, - Arr(0, structform.AnyType), - ), - Arr(-1, structform.AnyType, - Arr(-1, structform.AnyType), - ), - - // array with arbitrary values - Arr(-1, structform.AnyType, - NilRec{}, - BoolRec{true}, - BoolRec{false}, - IntRec{1}, - Int64Rec{12345678910}, - Float32Rec{3.14}, - Float64Rec{7e+09}, - StringRec{"test"}, - ), - Arr(2, structform.AnyType, - Int8Rec{1}, - BoolRec{true}, - ), - { - Int8ArrRec{[]int8{1, 2, 3}}, - }, - { - StringArrRec{[]string{"a", "b", "c"}}, - }, -} - -var SamplesObj = []Recording{ - // empty object '{}' - Obj(-1, structform.AnyType), - Obj(0, structform.AnyType), - - Obj(-1, structform.AnyType, - "a", NilRec{}, - ), - - // objects - Obj(-1, structform.AnyType, - "a", StringRec{"test"}), - Obj(1, structform.StringType, - "a", StringRec{"test"}), - Obj(-1, structform.AnyType, - "a", BoolRec{true}, - "b", BoolRec{false}, - ), - Obj(2, structform.AnyType, - "a", BoolRec{true}, - "b", BoolRec{false}, - ), - Obj(-1, structform.BoolType, - "a", BoolRec{true}, - "b", BoolRec{false}, - ), - Obj(2, structform.BoolType, - "a", BoolRec{true}, - "b", BoolRec{false}, - ), - Obj(-1, structform.AnyType, - "a", UintRec{1}, - "b", Float64Rec{3.14}, - "c", StringRec{"test"}, - "d", BoolRec{true}, - ), - - // typed objects - { - StringObjRec{map[string]string{ - "a": "test1", - "b": "test2", - "c": "test3", - }}, - }, - { - UintObjRec{map[string]uint{ - "a": 1, - "b": 2, - "c": 3, - }}, - }, -} - -var SamplesCombinations = []Recording{ - // objects in array - Arr(-1, structform.AnyType, - Obj(-1, structform.AnyType)), - Arr(1, structform.AnyType, - Obj(0, structform.AnyType)), - Arr(-1, structform.AnyType, - Obj(-1, structform.AnyType, - "a", IntRec{-1}, - ), - Obj(1, structform.UintType, - "a", UintRec{1}, - ), - ), - Arr(2, structform.AnyType, - Obj(-1, structform.AnyType, - "a", IntRec{-1}, - ), - Obj(1, structform.UintType, - "a", UintRec{1}, - ), - ), - - // array in object - Obj(-1, structform.AnyType, - "a", Arr(3, structform.IntType, - IntRec{1}, IntRec{2}, IntRec{3}), - ), - Obj(1, structform.AnyType, - "a", Arr(3, structform.IntType, - IntRec{1}, IntRec{2}, IntRec{3}), - ), - Obj(1, structform.AnyType, - "a", Int8ArrRec{[]int8{1, 2, 3}}, - ), -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/sftest.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/sftest.go deleted file mode 100644 index 53d81071..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/sftest.go +++ /dev/null @@ -1,312 +0,0 @@ -package sftest - -import ( - "encoding/json" - "errors" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - structform "github.com/urso/go-structform" -) - -type Recording []Record - -type Record interface { - Replay(v structform.ExtVisitor) error -} - -type NilRec struct{} -type BoolRec struct{ Value bool } -type StringRec struct{ Value string } -type IntRec struct{ Value int } -type Int8Rec struct{ Value int8 } -type Int16Rec struct{ Value int16 } -type Int32Rec struct{ Value int32 } -type Int64Rec struct{ Value int64 } -type UintRec struct{ Value uint } -type ByteRec struct{ Value byte } -type Uint8Rec struct{ Value uint8 } -type Uint16Rec struct{ Value uint16 } -type Uint32Rec struct{ Value uint32 } -type Uint64Rec struct{ Value uint64 } -type Float32Rec struct{ Value float32 } -type 
Float64Rec struct{ Value float64 } - -// extended (yet) non-recordible records -type Int8ArrRec struct{ Value []int8 } -type StringArrRec struct{ Value []string } -type StringObjRec struct{ Value map[string]string } -type UintObjRec struct{ Value map[string]uint } - -type ObjectStartRec struct { - Len int - T structform.BaseType -} -type ObjectFinishRec struct{} -type ObjectKeyRec struct{ Value string } - -type ArrayStartRec struct { - Len int - T structform.BaseType -} -type ArrayFinishRec struct{} - -func (NilRec) Replay(vs structform.ExtVisitor) error { return vs.OnNil() } -func (r BoolRec) Replay(vs structform.ExtVisitor) error { return vs.OnBool(r.Value) } -func (r StringRec) Replay(vs structform.ExtVisitor) error { return vs.OnString(r.Value) } -func (r IntRec) Replay(vs structform.ExtVisitor) error { return vs.OnInt(r.Value) } -func (r Int8Rec) Replay(vs structform.ExtVisitor) error { return vs.OnInt8(r.Value) } -func (r Int16Rec) Replay(vs structform.ExtVisitor) error { return vs.OnInt16(r.Value) } -func (r Int32Rec) Replay(vs structform.ExtVisitor) error { return vs.OnInt32(r.Value) } -func (r Int64Rec) Replay(vs structform.ExtVisitor) error { return vs.OnInt64(r.Value) } -func (r UintRec) Replay(vs structform.ExtVisitor) error { return vs.OnUint(r.Value) } -func (r ByteRec) Replay(vs structform.ExtVisitor) error { return vs.OnByte(r.Value) } -func (r Uint8Rec) Replay(vs structform.ExtVisitor) error { return vs.OnUint8(r.Value) } -func (r Uint16Rec) Replay(vs structform.ExtVisitor) error { return vs.OnUint16(r.Value) } -func (r Uint32Rec) Replay(vs structform.ExtVisitor) error { return vs.OnUint32(r.Value) } -func (r Uint64Rec) Replay(vs structform.ExtVisitor) error { return vs.OnUint64(r.Value) } -func (r Float32Rec) Replay(vs structform.ExtVisitor) error { return vs.OnFloat32(r.Value) } -func (r Float64Rec) Replay(vs structform.ExtVisitor) error { return vs.OnFloat64(r.Value) } -func (r ObjectStartRec) Replay(vs structform.ExtVisitor) error { return vs.OnObjectStart(r.Len, r.T) } -func (r ObjectFinishRec) Replay(vs structform.ExtVisitor) error { return vs.OnObjectFinished() } -func (r ObjectKeyRec) Replay(vs structform.ExtVisitor) error { return vs.OnKey(r.Value) } -func (r ArrayStartRec) Replay(vs structform.ExtVisitor) error { return vs.OnArrayStart(r.Len, r.T) } -func (r ArrayFinishRec) Replay(vs structform.ExtVisitor) error { return vs.OnArrayFinished() } - -func (r Int8ArrRec) Replay(vs structform.ExtVisitor) error { return vs.OnInt8Array(r.Value) } -func (r StringArrRec) Replay(vs structform.ExtVisitor) error { return vs.OnStringArray(r.Value) } -func (r UintObjRec) Replay(vs structform.ExtVisitor) error { return vs.OnUintObject(r.Value) } -func (r StringObjRec) Replay(vs structform.ExtVisitor) error { return vs.OnStringObject(r.Value) } - -func (rec *Recording) Replay(vs structform.Visitor) error { - evs := structform.EnsureExtVisitor(vs) - for _, r := range *rec { - if err := r.Replay(evs); err != nil { - return err - } - } - return nil -} - -func (r *Recording) add(v Record) error { - *r = append(*r, v) - return nil -} - -func (r *Recording) OnNil() error { return r.add(NilRec{}) } -func (r *Recording) OnBool(b bool) error { return r.add(BoolRec{b}) } -func (r *Recording) OnString(s string) error { return r.add(StringRec{s}) } -func (r *Recording) OnInt8(i int8) error { return r.add(Int8Rec{i}) } -func (r *Recording) OnInt16(i int16) error { return r.add(Int16Rec{i}) } -func (r *Recording) OnInt32(i int32) error { return r.add(Int32Rec{i}) } -func (r *Recording) 
OnInt64(i int64) error { return r.add(Int64Rec{i}) } -func (r *Recording) OnInt(i int) error { return r.add(IntRec{i}) } -func (r *Recording) OnUint8(i uint8) error { return r.add(Uint8Rec{i}) } -func (r *Recording) OnByte(i byte) error { return r.add(ByteRec{i}) } -func (r *Recording) OnUint16(i uint16) error { return r.add(Uint16Rec{i}) } -func (r *Recording) OnUint32(i uint32) error { return r.add(Uint32Rec{i}) } -func (r *Recording) OnUint64(i uint64) error { return r.add(Uint64Rec{i}) } -func (r *Recording) OnUint(i uint) error { return r.add(UintRec{i}) } -func (r *Recording) OnFloat32(i float32) error { return r.add(Float32Rec{i}) } -func (r *Recording) OnFloat64(i float64) error { return r.add(Float64Rec{i}) } - -func (r *Recording) OnArrayStart(len int, baseType structform.BaseType) error { - return r.add(ArrayStartRec{len, baseType}) -} -func (r *Recording) OnArrayFinished() error { - return r.add(ArrayFinishRec{}) -} - -func (r *Recording) OnObjectStart(len int, baseType structform.BaseType) error { - return r.add(ObjectStartRec{len, baseType}) -} -func (r *Recording) OnObjectFinished() error { - return r.add(ObjectFinishRec{}) -} -func (r *Recording) OnKey(s string) error { - return r.add(ObjectKeyRec{s}) -} - -func (r *Recording) expand() Recording { - var to Recording - r.Replay(&to) - return to -} - -func (r Recording) Assert(t *testing.T, expected Recording) { - exp, err := expected.ToJSON() - if err != nil { - t.Error("Assert (expected): ", err) - t.Logf(" recording: %#v", exp) - return - } - - act, err := r.ToJSON() - if err != nil { - t.Error("Assert (actual): ", err) - t.Logf(" recording: %#v", r) - return - } - - assert.Equal(t, exp, act) -} - -func (r Recording) ToJSON() (string, error) { - v, _, err := buildValue(r.expand()) - if err != nil { - return "", err - } - - b, err := json.MarshalIndent(v, "", " ") - if err != nil { - return "", err - } - - return string(b), nil -} - -type builder struct { - stack []interface{} - value interface{} -} - -func buildValue(rec Recording) (interface{}, Recording, error) { - if len(rec) == 0 { - return nil, nil, errors.New("empty recording") - } - - switch v := rec[0].(type) { - case NilRec: - return nil, rec[1:], nil - case BoolRec: - return v.Value, rec[1:], nil - case StringRec: - return v.Value, rec[1:], nil - case IntRec: - return v.Value, rec[1:], nil - case Int8Rec: - return v.Value, rec[1:], nil - case Int16Rec: - return v.Value, rec[1:], nil - case Int32Rec: - return v.Value, rec[1:], nil - case Int64Rec: - return v.Value, rec[1:], nil - case UintRec: - return v.Value, rec[1:], nil - case ByteRec: - return v.Value, rec[1:], nil - case Uint8Rec: - return v.Value, rec[1:], nil - case Uint16Rec: - return v.Value, rec[1:], nil - case Uint32Rec: - return v.Value, rec[1:], nil - case Uint64Rec: - return v.Value, rec[1:], nil - case Float32Rec: - return v.Value, rec[1:], nil - case Float64Rec: - return v.Value, rec[1:], nil - case ArrayStartRec: - return buildArray(rec[1:]) - case ObjectStartRec: - return buildObject(rec[1:]) - - default: - return nil, nil, fmt.Errorf("Invalid record entry: %v", v) - } -} - -func buildArray(rec Recording) (interface{}, Recording, error) { - a := []interface{}{} - - for len(rec) > 0 { - var ( - v interface{} - err error - ) - - if _, end := rec[0].(ArrayFinishRec); end { - return a, rec[1:], nil - } - - v, rec, err = buildValue(rec) - if err != nil { - return nil, nil, err - } - - a = append(a, v) - } - - return nil, nil, errors.New("missing array finish record") -} - -func buildObject(rec 
Recording) (interface{}, Recording, error) { - obj := map[string]interface{}{} - - for len(rec) > 0 { - var ( - key string - v interface{} - err error - ) - - switch v := rec[0].(type) { - case ObjectFinishRec: - return obj, rec[1:], nil - case ObjectKeyRec: - key = v.Value - } - - v, rec, err = buildValue(rec[1:]) - if err != nil { - return nil, nil, err - } - - obj[key] = v - } - - return nil, nil, errors.New("missing object finish record") -} - -func TestEncodeParseConsistent( - t *testing.T, - samples []Recording, - constr func() (structform.Visitor, func(structform.Visitor) error), -) { - for i, sample := range samples { - expected, err := sample.ToJSON() - if err != nil { - panic(err) - } - - t.Logf("test %v: %#v => %v", i, sample, expected) - - enc, dec := constr() - - err = sample.Replay(enc) - if err != nil { - t.Errorf("Failed to encode %#v with %v", sample, err) - return - } - - var target Recording - err = dec(&target) - if err != nil { - t.Errorf("Failed to decode %#v with %v", target, err) - t.Logf(" recording: %#v", target) - } - - target.Assert(t, sample) - } -} - -func concatSamples(recs ...[]Recording) []Recording { - var out []Recording - for _, r := range recs { - out = append(out, r...) - } - return out -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/util.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/util.go deleted file mode 100644 index 6352350e..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-structform/sftest/util.go +++ /dev/null @@ -1,46 +0,0 @@ -package sftest - -import structform "github.com/urso/go-structform" - -func Arr(l int, t structform.BaseType, elems ...interface{}) []Record { - a := []Record{ArrayStartRec{l, t}} - for _, elem := range elems { - switch v := elem.(type) { - case Record: - a = append(a, v) - case []Record: - a = append(a, v...) - case Recording: - a = append(a, v...) - default: - panic("invalid key type") - } - } - - return append(a, ArrayFinishRec{}) -} - -func Obj(l int, t structform.BaseType, kv ...interface{}) []Record { - if len(kv)%2 != 0 { - panic("invalid object") - } - - a := []Record{ObjectStartRec{l, t}} - for i := 0; i < len(kv); i += 2 { - k := kv[i].(string) - a = append(a, ObjectKeyRec{k}) - - switch v := kv[i+1].(type) { - case Record: - a = append(a, v) - case []Record: - a = append(a, v...) - case Recording: - a = append(a, v...) - default: - panic("invalid key type") - } - } - - return append(a, ObjectFinishRec{}) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
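Before the qcgen files below, a usage note on the go-bin package added above (cast.go plus the native_le.go/native_be.go aliases): CastStruct overlays a fixed-layout Go struct onto a raw byte buffer without copying. The following is a minimal sketch and not part of the patch; the packetHeader type, the example bytes, and the Get() accessors on the generated fixed-size types are illustrative assumptions, while CastStruct and the U8le/U16le/U32le types themselves come from the vendored code.

package main

import (
	"fmt"

	bin "github.com/urso/go-bin"
)

// packetHeader is a hypothetical 8-byte wire header; each field is one of the
// fixed-size, endianness-aware array types generated by go-bin.
type packetHeader struct {
	Version bin.U8le
	Flags   bin.U8le
	Length  bin.U16le
	Seq     bin.U32le
}

func main() {
	// version=1, flags=0x80, length=42 (little endian), seq=1 (little endian)
	buf := []byte{0x01, 0x80, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x00}

	// CastStruct repoints hdr at buf[0]; no bytes are copied.
	var hdr *packetHeader
	if err := bin.CastStruct(&hdr, buf); err != nil {
		panic(err)
	}

	// Get() is assumed to be the accessor generated for each field type.
	fmt.Println(hdr.Version.Get(), hdr.Flags.Get(), hdr.Length.Get(), hdr.Seq.Get())
}

Because CastStruct only repoints hdr at the buffer, the struct fields alias buf and remain valid only as long as buf does.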
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/README.md b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/README.md new file mode 100644 index 00000000..882062dd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/README.md @@ -0,0 +1,2 @@ +# qcgen +helpers for building go quick check generators from custom functions diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/helpers.go new file mode 100644 index 00000000..a9878211 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/helpers.go @@ -0,0 +1,32 @@ +package qcgen + +import "math/rand" + +func GenBool(rng *rand.Rand) bool { + return (rng.Uint64() & 1) == 1 +} + +func GenUint64(rng *rand.Rand) uint64 { + return rng.Uint64() +} + +func MakeGenUint64Range(min, max uint64) func(rng *rand.Rand) uint64 { + return func(rng *rand.Rand) uint64 { + return GenUint64Range(rng, min, max) + } +} + +func GenUint64Range(rng *rand.Rand, min, max uint64) uint64 { + delta := max - min + return min + (rng.Uint64() % delta) +} + +func MakeGenUintRange(min, max uint) func(rng *rand.Rand) uint { + return func(rng *rand.Rand) uint { + return GenUintRange(rng, min, max) + } +} + +func GenUintRange(rng *rand.Rand, min, max uint) uint { + return uint(GenUint64Range(rng, uint64(min), uint64(max))) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/qcgen.go b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/qcgen.go new file mode 100644 index 00000000..f7ec12ff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/urso/qcgen/qcgen.go @@ -0,0 +1,80 @@ +package qcgen + +import ( + "fmt" + "math/rand" + "reflect" + "testing/quick" +) + +type Generator struct { + arguments []userGen +} + +type userGen func(rng *rand.Rand, params []reflect.Value) reflect.Value + +var tRand = reflect.TypeOf((*rand.Rand)(nil)) + +// NewGenerator creates a new generator. Each function must implement +// `func(*rand.Rand) T`, with T being the custom type to be generated. +// The generators Gen methods selects the function to execute on +// matching return type. 
+func NewGenerator(testFn interface{}, fns ...interface{}) *Generator { + mapping := map[reflect.Type]reflect.Value{} + + for i, fn := range fns { + v := reflect.ValueOf(fn) + t := v.Type() + if t.Kind() != reflect.Func { + panic(fmt.Errorf("argument %v is no function", i)) + } + + if t.NumIn() != 1 || t.NumOut() != 1 { + panic(fmt.Errorf("argument %v must accept one argument and return one value", i)) + } + + tIn := t.In(0) + if tIn != tRand { + panic(fmt.Errorf("argument %v must accept *rand.Rand as input only", i)) + } + + mapping[t.Out(0)] = v + } + + fn := reflect.TypeOf(testFn) + argGen := make([]userGen, fn.NumIn()) + for i := range argGen { + tIn := fn.In(i) + if v, exists := mapping[tIn]; exists { + argGen[i] = makeUserGen(v) + } else { + argGen[i] = makeDefaultGen(tIn) + } + } + + return &Generator{argGen} +} + +func makeUserGen(fn reflect.Value) userGen { + return func(_ *rand.Rand, params []reflect.Value) reflect.Value { + out := fn.Call(params) + return out[0] + } +} + +func makeDefaultGen(t reflect.Type) userGen { + return func(rng *rand.Rand, _ []reflect.Value) reflect.Value { + out, ok := quick.Value(t, rng) + if !ok { + panic(fmt.Errorf("cannot create arbitrary value of type %s", t)) + } + return out + } +} + +func (g *Generator) Gen(args []reflect.Value, rng *rand.Rand) { + rngParam := []reflect.Value{reflect.ValueOf(rng)} + for i := range args { + args[i] = g.arguments[i](rng, rngParam) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 92944f3b..4933ac36 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -93,5 +93,13 @@ func ReadPassword(fd int) ([]byte, error) { windows.SetConsoleMode(windows.Handle(fd), old) }() - return readPasswordLine(os.NewFile(uintptr(fd), "stdin")) + var h windows.Handle + p, _ := windows.GetCurrentProcess() + if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil { + return nil, err + } + + f := os.NewFile(uintptr(h), "stdin") + defer f.Close() + return readPasswordLine(f) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/constants.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/constants.go index 2c8bbab7..b89ca352 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/constants.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/constants.go @@ -70,57 +70,60 @@ type Extension int // Extension functions available in the Linux kernel. const ( + // extOffset is the negative maximum number of instructions used + // to load instructions by overloading the K argument. + extOffset = -0x1000 // ExtLen returns the length of the packet. ExtLen Extension = 1 // ExtProto returns the packet's L3 protocol type. - ExtProto = 0 + ExtProto Extension = 0 // ExtType returns the packet's type (skb->pkt_type in the kernel) // // TODO: better documentation. How nice an API do we want to // provide for these esoteric extensions? - ExtType = 4 + ExtType Extension = 4 // ExtPayloadOffset returns the offset of the packet payload, or // the first protocol header that the kernel does not know how to // parse. 
- ExtPayloadOffset = 52 + ExtPayloadOffset Extension = 52 // ExtInterfaceIndex returns the index of the interface on which // the packet was received. - ExtInterfaceIndex = 8 + ExtInterfaceIndex Extension = 8 // ExtNetlinkAttr returns the netlink attribute of type X at // offset A. - ExtNetlinkAttr = 12 + ExtNetlinkAttr Extension = 12 // ExtNetlinkAttrNested returns the nested netlink attribute of // type X at offset A. - ExtNetlinkAttrNested = 16 + ExtNetlinkAttrNested Extension = 16 // ExtMark returns the packet's mark value. - ExtMark = 20 + ExtMark Extension = 20 // ExtQueue returns the packet's assigned hardware queue. - ExtQueue = 24 + ExtQueue Extension = 24 // ExtLinkLayerType returns the packet's hardware address type // (e.g. Ethernet, Infiniband). - ExtLinkLayerType = 28 + ExtLinkLayerType Extension = 28 // ExtRXHash returns the packets receive hash. // // TODO: figure out what this rxhash actually is. - ExtRXHash = 32 + ExtRXHash Extension = 32 // ExtCPUID returns the ID of the CPU processing the current // packet. - ExtCPUID = 36 + ExtCPUID Extension = 36 // ExtVLANTag returns the packet's VLAN tag. - ExtVLANTag = 44 + ExtVLANTag Extension = 44 // ExtVLANTagPresent returns non-zero if the packet has a VLAN // tag. // // TODO: I think this might be a lie: it reads bit 0x1000 of the // VLAN header, which changed meaning in recent revisions of the // spec - this extension may now return meaningless information. - ExtVLANTagPresent = 48 + ExtVLANTagPresent Extension = 48 // ExtVLANProto returns 0x8100 if the frame has a VLAN header, // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some // other value if no VLAN information is present. - ExtVLANProto = 60 + ExtVLANProto Extension = 60 // ExtRand returns a uniformly random uint32. - ExtRand = 56 + ExtRand Extension = 56 ) // The following gives names to various bit patterns used in opcode construction. 
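The typed Extension constants and extOffset above are consumed by the instructions.go changes that follow: LoadExtension assembles to an absolute load whose K field is offset by extOffset, Disassemble maps that K range back to a LoadExtension, and the new String methods print the instructions in assembler notation. A minimal round-trip sketch, assuming the package's existing top-level Assemble and Disassemble helpers (not shown in this hunk); the values in the comments are illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	prog := []bpf.Instruction{
		// Load the packet's VLAN tag via the Linux-specific extension.
		bpf.LoadExtension{Num: bpf.ExtVLANTag},
		// Return register A to the caller.
		bpf.RetA{},
	}

	raw, err := bpf.Assemble(prog)
	if err != nil {
		panic(err)
	}
	// The extension is encoded as an absolute load with K = extOffset + Num,
	// i.e. a value above 0xffffefff, which is how Disassemble recognises it.
	fmt.Printf("k=%#x\n", raw[0].K)

	insts, allKnown := bpf.Disassemble(raw)
	fmt.Println(allKnown) // expected: true
	for _, ins := range insts {
		fmt.Println(ins) // expected: "ld #vlan_tci", then "ret a"
	}
}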
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/instructions.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/instructions.go index 68ae6f54..3b4fd089 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/instructions.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/instructions.go @@ -57,6 +57,9 @@ func (ri RawInstruction) Disassemble() Instruction { } return LoadScratch{Dst: reg, N: int(ri.K)} case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } return LoadAbsolute{Size: sz, Off: ri.K} case opAddrModeIndirect: return LoadIndirect{Size: sz, Off: ri.K} @@ -104,6 +107,14 @@ func (ri RawInstruction) Disassemble() Instruction { case opJumpAlways: return Jump{Skip: ri.K} case opJumpEqual: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpNotEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } return JumpIf{ Cond: JumpEqual, Val: ri.K, @@ -111,6 +122,14 @@ func (ri RawInstruction) Disassemble() Instruction { SkipFalse: ri.Jf, } case opJumpGT: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessOrEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } return JumpIf{ Cond: JumpGreaterThan, Val: ri.K, @@ -118,6 +137,14 @@ func (ri RawInstruction) Disassemble() Instruction { SkipFalse: ri.Jf, } case opJumpGE: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessThan, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } return JumpIf{ Cond: JumpGreaterOrEqual, Val: ri.K, @@ -171,6 +198,18 @@ func (a LoadConstant) Assemble() (RawInstruction, error) { return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) } +// String returns the the instruction in assembler notation. +func (a LoadConstant) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld #%d", a.Val) + case RegX: + return fmt.Sprintf("ldx #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + // LoadScratch loads scratch[N] into register Dst. type LoadScratch struct { Dst Register @@ -185,6 +224,18 @@ func (a LoadScratch) Assemble() (RawInstruction, error) { return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) } +// String returns the the instruction in assembler notation. +func (a LoadScratch) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld M[%d]", a.N) + case RegX: + return fmt.Sprintf("ldx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + // LoadAbsolute loads packet[Off:Off+Size] as an integer value into // register A. type LoadAbsolute struct { @@ -197,6 +248,23 @@ func (a LoadAbsolute) Assemble() (RawInstruction, error) { return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) } +// String returns the the instruction in assembler notation. +func (a LoadAbsolute) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [%d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [%d]", a.Off) + case 4: // word + if a.Off > extOffset+0xffffffff { + return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() + } + return fmt.Sprintf("ld [%d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + // LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value // into register A. 
type LoadIndirect struct { @@ -209,6 +277,20 @@ func (a LoadIndirect) Assemble() (RawInstruction, error) { return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) } +// String returns the the instruction in assembler notation. +func (a LoadIndirect) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [x + %d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [x + %d]", a.Off) + case 4: // word + return fmt.Sprintf("ld [x + %d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + // LoadMemShift multiplies the first 4 bits of the byte at packet[Off] // by 4 and stores the result in register X. // @@ -224,6 +306,11 @@ func (a LoadMemShift) Assemble() (RawInstruction, error) { return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) } +// String returns the the instruction in assembler notation. +func (a LoadMemShift) String() string { + return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) +} + // LoadExtension invokes a linux-specific extension and stores the // result in register A. type LoadExtension struct { @@ -235,7 +322,47 @@ func (a LoadExtension) Assemble() (RawInstruction, error) { if a.Num == ExtLen { return assembleLoad(RegA, 4, opAddrModePacketLen, 0) } - return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(-0x1000+a.Num)) + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) +} + +// String returns the the instruction in assembler notation. +func (a LoadExtension) String() string { + switch a.Num { + case ExtLen: + return "ld #len" + case ExtProto: + return "ld #proto" + case ExtType: + return "ld #type" + case ExtPayloadOffset: + return "ld #poff" + case ExtInterfaceIndex: + return "ld #ifidx" + case ExtNetlinkAttr: + return "ld #nla" + case ExtNetlinkAttrNested: + return "ld #nlan" + case ExtMark: + return "ld #mark" + case ExtQueue: + return "ld #queue" + case ExtLinkLayerType: + return "ld #hatype" + case ExtRXHash: + return "ld #rxhash" + case ExtCPUID: + return "ld #cpu" + case ExtVLANTag: + return "ld #vlan_tci" + case ExtVLANTagPresent: + return "ld #vlan_avail" + case ExtVLANProto: + return "ld #vlan_tpid" + case ExtRand: + return "ld #rand" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } } // StoreScratch stores register Src into scratch[N]. @@ -265,6 +392,18 @@ func (a StoreScratch) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a StoreScratch) String() string { + switch a.Src { + case RegA: + return fmt.Sprintf("st M[%d]", a.N) + case RegX: + return fmt.Sprintf("stx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + // ALUOpConstant executes A = A Val. type ALUOpConstant struct { Op ALUOp @@ -279,6 +418,34 @@ func (a ALUOpConstant) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. 
+func (a ALUOpConstant) String() string { + switch a.Op { + case ALUOpAdd: + return fmt.Sprintf("add #%d", a.Val) + case ALUOpSub: + return fmt.Sprintf("sub #%d", a.Val) + case ALUOpMul: + return fmt.Sprintf("mul #%d", a.Val) + case ALUOpDiv: + return fmt.Sprintf("div #%d", a.Val) + case ALUOpMod: + return fmt.Sprintf("mod #%d", a.Val) + case ALUOpAnd: + return fmt.Sprintf("and #%d", a.Val) + case ALUOpOr: + return fmt.Sprintf("or #%d", a.Val) + case ALUOpXor: + return fmt.Sprintf("xor #%d", a.Val) + case ALUOpShiftLeft: + return fmt.Sprintf("lsh #%d", a.Val) + case ALUOpShiftRight: + return fmt.Sprintf("rsh #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + // ALUOpX executes A = A X type ALUOpX struct { Op ALUOp @@ -291,6 +458,34 @@ func (a ALUOpX) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a ALUOpX) String() string { + switch a.Op { + case ALUOpAdd: + return "add x" + case ALUOpSub: + return "sub x" + case ALUOpMul: + return "mul x" + case ALUOpDiv: + return "div x" + case ALUOpMod: + return "mod x" + case ALUOpAnd: + return "and x" + case ALUOpOr: + return "or x" + case ALUOpXor: + return "xor x" + case ALUOpShiftLeft: + return "lsh x" + case ALUOpShiftRight: + return "rsh x" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + // NegateA executes A = -A. type NegateA struct{} @@ -301,6 +496,11 @@ func (a NegateA) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a NegateA) String() string { + return fmt.Sprintf("neg") +} + // Jump skips the following Skip instructions in the program. type Jump struct { Skip uint32 @@ -314,6 +514,11 @@ func (a Jump) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a Jump) String() string { + return fmt.Sprintf("ja %d", a.Skip) +} + // JumpIf skips the following Skip instructions in the program if A // Val is true. type JumpIf struct { @@ -361,6 +566,51 @@ func (a JumpIf) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. 
+func (a JumpIf) String() string { + switch a.Cond { + // K == A + case JumpEqual: + return conditionalJump(a, "jeq", "jneq") + // K != A + case JumpNotEqual: + return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue) + // K > A + case JumpGreaterThan: + return conditionalJump(a, "jgt", "jle") + // K < A + case JumpLessThan: + return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue) + // K >= A + case JumpGreaterOrEqual: + return conditionalJump(a, "jge", "jlt") + // K <= A + case JumpLessOrEqual: + return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue) + // K & A != 0 + case JumpBitsSet: + if a.SkipFalse > 0 { + return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse) + } + return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue) + // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips + case JumpBitsNotSet: + return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String() + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string { + if inst.SkipTrue > 0 { + if inst.SkipFalse > 0 { + return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse) + } + return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue) + } + return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse) +} + // RetA exits the BPF program, returning the value of register A. type RetA struct{} @@ -371,6 +621,11 @@ func (a RetA) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a RetA) String() string { + return fmt.Sprintf("ret a") +} + // RetConstant exits the BPF program, returning a constant value. type RetConstant struct { Val uint32 @@ -384,6 +639,11 @@ func (a RetConstant) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a RetConstant) String() string { + return fmt.Sprintf("ret #%d", a.Val) +} + // TXA copies the value of register X to register A. type TXA struct{} @@ -394,6 +654,11 @@ func (a TXA) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a TXA) String() string { + return fmt.Sprintf("txa") +} + // TAX copies the value of register A to register X. type TAX struct{} @@ -404,6 +669,11 @@ func (a TAX) Assemble() (RawInstruction, error) { }, nil } +// String returns the the instruction in assembler notation. +func (a TAX) String() string { + return fmt.Sprintf("tax") +} + func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { var ( cls uint16 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/setter.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/setter.go new file mode 100644 index 00000000..43e35f0a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/bpf/setter.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Setter is a type which can attach a compiled BPF filter to itself. 
+type Setter interface { + SetBPF(filter []RawInstruction) error +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/context.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/context.go index f143ed6a..d3681ab4 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/context.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/context.go @@ -36,103 +36,6 @@ // Contexts. package context // import "golang.org/x/net/context" -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. 
- // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming @@ -149,8 +52,3 @@ func Background() Context { func TODO() Context { return todo } - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/go19.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 00000000..d88bd1db --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/pre_go19.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 00000000..b105f80b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. 
+ // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/ciphers.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 00000000..698860b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +// A list of the possible cipher suite ids. Taken from +// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + 
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA 
+ cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + 
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + 
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D 
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/client_conn_pool.go index 547e238a..bdf5652b 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -53,13 +53,13 @@ const ( ) func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { - if req.Close && dialOnMiss { + if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. - cc, err := p.t.dialClientConn(addr) + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) if err != nil { return nil, err } - cc.singleUse = true return cc, nil } p.mu.Lock() @@ -104,7 +104,8 @@ func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { // run in its own goroutine. func (c *dialCall) dial(addr string) { - c.res, c.err = c.p.t.dialClientConn(addr) + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) close(c.done) c.p.mu.Lock() @@ -246,7 +247,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { } // noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS +// which never dials. We let the HTTP/1.1 client dial and use its TLS // connection instead. 
 type noDialClientConnPool struct{ *clientConnPool }
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/configure_transport.go
index 4f720f53..b65fc6d4 100644
--- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/configure_transport.go
+++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/configure_transport.go
@@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
 }
 
 // registerHTTPSProtocol calls Transport.RegisterProtocol but
-// convering panics into errors.
+// converting panics into errors.
 func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
 	defer func() {
 		if e := recover(); e != nil {
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/databuffer.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/databuffer.go
new file mode 100644
index 00000000..a3067f8d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/databuffer.go
@@ -0,0 +1,146 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+)
+
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var (
+	dataChunkSizeClasses = []int{
+		1 << 10,
+		2 << 10,
+		4 << 10,
+		8 << 10,
+		16 << 10,
+	}
+	dataChunkPools = [...]sync.Pool{
+		{New: func() interface{} { return make([]byte, 1<<10) }},
+		{New: func() interface{} { return make([]byte, 2<<10) }},
+		{New: func() interface{} { return make([]byte, 4<<10) }},
+		{New: func() interface{} { return make([]byte, 8<<10) }},
+		{New: func() interface{} { return make([]byte, 16<<10) }},
+	}
+)
+
+func getDataBufferChunk(size int64) []byte {
+	i := 0
+	for ; i < len(dataChunkSizeClasses)-1; i++ {
+		if size <= int64(dataChunkSizeClasses[i]) {
+			break
+		}
+	}
+	return dataChunkPools[i].Get().([]byte)
+}
+
+func putDataBufferChunk(p []byte) {
+	for i, n := range dataChunkSizeClasses {
+		if len(p) == n {
+			dataChunkPools[i].Put(p)
+			return
+		}
+	}
+	panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+}
+
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type dataBuffer struct {
+	chunks   [][]byte
+	r        int   // next byte to read is chunks[0][r]
+	w        int   // next byte to write is chunks[len(chunks)-1][w]
+	size     int   // total buffered bytes
+	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *dataBuffer) Read(p []byte) (int, error) {
+	if b.size == 0 {
+		return 0, errReadEmpty
+	}
+	var ntotal int
+	for len(p) > 0 && b.size > 0 {
+		readFrom := b.bytesFromFirstChunk()
+		n := copy(p, readFrom)
+		p = p[n:]
+		ntotal += n
+		b.r += n
+		b.size -= n
+		// If the first chunk has been consumed, advance to the next chunk.
+		if b.r == len(b.chunks[0]) {
+			putDataBufferChunk(b.chunks[0])
+			end := len(b.chunks) - 1
+			copy(b.chunks[:end], b.chunks[1:])
+			b.chunks[end] = nil
+			b.chunks = b.chunks[:end]
+			b.r = 0
+		}
+	}
+	return ntotal, nil
+}
+
+func (b *dataBuffer) bytesFromFirstChunk() []byte {
+	if len(b.chunks) == 1 {
+		return b.chunks[0][b.r:b.w]
+	}
+	return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *dataBuffer) Len() int {
+	return b.size
+}
+
+// Write appends p to the buffer.
+func (b *dataBuffer) Write(p []byte) (int, error) {
+	ntotal := len(p)
+	for len(p) > 0 {
+		// If the last chunk is empty, allocate a new chunk. Try to allocate
+		// enough to fully copy p plus any additional bytes we expect to
+		// receive. However, this may allocate less than len(p).
+		want := int64(len(p))
+		if b.expected > want {
+			want = b.expected
+		}
+		chunk := b.lastChunkOrAlloc(want)
+		n := copy(chunk[b.w:], p)
+		p = p[n:]
+		b.w += n
+		b.size += n
+		b.expected -= int64(n)
+	}
+	return ntotal, nil
+}
+
+func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
+	if len(b.chunks) != 0 {
+		last := b.chunks[len(b.chunks)-1]
+		if b.w < len(last) {
+			return last
+		}
+	}
+	chunk := getDataBufferChunk(want)
+	b.chunks = append(b.chunks, chunk)
+	b.w = 0
+	return chunk
+}
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/errors.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/errors.go
index 71a4e290..71f2c463 100644
--- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/errors.go
+++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/errors.go
@@ -64,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error:
 type StreamError struct {
 	StreamID uint32
 	Code     ErrCode
+	Cause    error // optional additional detail
+}
+
+func streamError(id uint32, code ErrCode) StreamError {
+	return StreamError{StreamID: id, Code: code}
 }
 
 func (e StreamError) Error() string {
+	if e.Cause != nil {
+		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+	}
 	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
 }
 
@@ -79,13 +87,16 @@ type goAwayFlowError struct{}
 
 func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
 
-// connErrorReason wraps a ConnectionError with an informative error about why it occurs.
-
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
 // Errors of this type are only returned by the frame parser functions
-// and converted into ConnectionError(ErrCodeProtocol).
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
type connError struct { - Code ErrCode - Reason string + Code ErrCode // the ConnectionError error code + Reason string // additional reason } func (e connError) Error() string { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0b..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/frame.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/frame.go index 981d407a..3b148907 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/frame.go @@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, @@ -312,15 +312,19 @@ type Framer struct { MaxHeaderListSize uint32 // TODO: track which type of frame & with which flags was sent - // last. Then return an error (unless AllowIllegalWrites) if + // last. Then return an error (unless AllowIllegalWrites) if // we're in the middle of a header block and a // non-Continuation or Continuation on a different stream is // attempted to be written. 
- logReads bool + logReads, logWrites bool - debugFramer *Framer // only use for logging written writes - debugFramerBuf *bytes.Buffer + debugFramer *Framer // only use for logging written writes + debugFramerBuf *bytes.Buffer + debugReadLoggerf func(string, ...interface{}) + debugWriteLoggerf func(string, ...interface{}) + + frameCache *frameCache // nil if frames aren't reused (default) } func (fr *Framer) maxHeaderListSize() uint32 { @@ -355,7 +359,7 @@ func (f *Framer) endWrite() error { byte(length>>16), byte(length>>8), byte(length)) - if logFrameWrites { + if f.logWrites { f.logWrite() } @@ -378,10 +382,10 @@ func (f *Framer) logWrite() { f.debugFramerBuf.Write(f.wbuf) fr, err := f.debugFramer.ReadFrame() if err != nil { - log.Printf("http2: Framer %p: failed to decode just-written frame", f) + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) return } - log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) } func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } @@ -396,12 +400,36 @@ const ( maxFrameSize = 1<<24 - 1 ) +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. +func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + // NewFramer returns a Framer that writes frames to w and reads them from r. func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ - w: w, - r: r, - logReads: logFrameReads, + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, } fr.getReadBuf = func(size uint32) []byte { if cap(fr.readBuf) >= int(size) { @@ -472,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } - f, err := typeFrameParser(fh.Type)(fh, payload) + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) @@ -483,7 +511,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fr.logReads { - log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { return fr.readMetaFrame(f.(*HeadersFrame)) @@ -560,7 +588,7 @@ func (f *DataFrame) Data() []byte { return f.data } -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier @@ -569,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // PROTOCOL_ERROR. 
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } - f := &DataFrame{ - FrameHeader: fh, - } + f := fc.getDataFrame() + f.FrameHeader = fh + var padSize byte if fh.Flags.Has(FlagDataPadded) { var err error @@ -594,6 +622,8 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { var ( errStreamID = errors.New("invalid stream ID") errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") ) func validStreamIDOrZero(streamID uint32) bool { @@ -607,18 +637,51 @@ func validStreamID(streamID uint32) bool { // WriteData writes a DATA frame. // // It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - // TODO: ignoring padding for now. will add when somebody cares. + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteData writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } + } var flags Flags if endStream { flags |= FlagDataEndStream } + if pad != nil { + flags |= FlagDataPadded + } f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...) return f.endWrite() } @@ -632,10 +695,10 @@ type SettingsFrame struct { p []byte } -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame MUST be empty. Receipt of a // SETTINGS frame with the ACK flag set and a length // field value other than 0 MUST be treated as a // connection error (Section 5.4.1) of type @@ -644,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { } if fh.StreamID != 0 { // SETTINGS frames always apply to a connection, - // never a single stream. The stream identifier for a + // never a single stream. The stream identifier for a // SETTINGS frame MUST be zero (0x0). If an endpoint // receives a SETTINGS frame whose stream identifier // field is anything other than 0x0, the endpoint MUST @@ -714,7 +777,7 @@ func (f *Framer) WriteSettings(settings ...Setting) error { return f.endWrite() } -// WriteSettings writes an empty SETTINGS frame with the ACK bit set. 
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. @@ -734,7 +797,7 @@ type PingFrame struct { func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } -func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -774,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte { return f.debugData } -func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } @@ -814,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte { return f.p } -func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } @@ -825,7 +888,7 @@ type WindowUpdateFrame struct { Increment uint32 // never read with high bit set } -func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -840,7 +903,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } - return nil, StreamError{fh.StreamID, ErrCodeProtocol} + return nil, streamError(fh.StreamID, ErrCodeProtocol) } return &WindowUpdateFrame{ FrameHeader: fh, @@ -890,12 +953,12 @@ func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // HEADERS frames MUST be associated with a stream. If a HEADERS frame // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. @@ -921,7 +984,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { } } if len(p)-int(padLength) <= 0 { - return nil, StreamError{fh.StreamID, ErrCodeProtocol} + return nil, streamError(fh.StreamID, ErrCodeProtocol) } hf.headerFragBuf = p[:len(p)-int(padLength)] return hf, nil @@ -1017,7 +1080,7 @@ type PriorityParam struct { Exclusive bool // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per + // set together with StreamDep, or neither should be set. Per // the spec, "Add one to the value to obtain a weight between // 1 and 256." 
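Editor's note (illustrative, not part of the vendored diff): the WriteDataPadded helper added earlier in this file rejects padding longer than 255 bytes and, unless AllowIllegalWrites is set, padding that is not all zeros. A small sketch of a padded write under those rules; writePaddedData and its clamping behaviour are hypothetical:

package sketch

import "golang.org/x/net/http2"

// writePaddedData sends body on streamID with padLen zero bytes of padding,
// clamping padLen to the 255-byte limit enforced by WriteDataPadded.
func writePaddedData(fr *http2.Framer, streamID uint32, body []byte, padLen int) error {
	if padLen > 255 {
		padLen = 255 // longer padding would be rejected (errPadLength)
	}
	pad := make([]byte, padLen) // zero-filled, as the spec requires when sending
	return fr.WriteDataPadded(streamID, true, body, pad)
}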
Weight uint8 @@ -1027,7 +1090,7 @@ func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } -func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } @@ -1074,7 +1137,7 @@ type RSTStreamFrame struct { ErrCode ErrCode } -func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -1104,7 +1167,7 @@ type ContinuationFrame struct { headerFragBuf []byte } -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } @@ -1154,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } @@ -1396,6 +1459,9 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { hdec.SetEmitEnabled(true) hdec.SetMaxStringLength(fr.maxHeaderStringLen()) hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && fr.logReads { + fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } if !httplex.ValidHeaderFieldValue(hf.Value) { invalid = headerFieldValueError(hf.Value) } @@ -1454,11 +1520,17 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } if invalid != nil { fr.errDetail = invalid - return nil, StreamError{mh.StreamID, ErrCodeProtocol} + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} } if err := mh.checkPseudos(); err != nil { fr.errDetail = err - return nil, StreamError{mh.StreamID, ErrCodeProtocol} + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} } return mh, nil } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go16.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go16.go index 2b72855f..00b2e9e3 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go16.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go16.go @@ -7,7 +7,6 @@ package http2 import ( - "crypto/tls" "net/http" "time" ) @@ -15,29 +14,3 @@ import ( func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { return t1.ExpectContinueTimeout } - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. 
-func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go17.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go17.go index 730319dd..47b7fae0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go17.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go17.go @@ -39,6 +39,13 @@ type clientTrace httptrace.ClientTrace func reqContext(r *http.Request) context.Context { return r.Context() } +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } func traceGotConn(req *http.Request, cc *ClientConn) { @@ -92,3 +99,8 @@ func requestTrace(req *http.Request) *clientTrace { trace := httptrace.ContextClientTrace(req.Context()) return (*clientTrace)(trace) } + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + return cc.ping(ctx) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go17_not18.go new file mode 100644 index 00000000..b4c52ece --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go17_not18.go @@ -0,0 +1,36 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
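Editor's note (illustrative, not part of the vendored diff): the go17.go hunk above exports ClientConn.Ping for Go 1.7+. A hedged health-check sketch; cc is assumed to come from elsewhere (for example a custom ClientConnPool), since the package does not hand out ClientConns directly:

package sketch

import (
	"context"
	"time"

	"golang.org/x/net/http2"
)

// healthCheck sends an HTTP/2 PING and waits for the ack, bounded by a deadline.
func healthCheck(ctx context.Context, cc *http2.ClientConn) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	return cc.Ping(ctx)
}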
+ +// +build go1.7,!go1.8 + +package http2 + +import "crypto/tls" + +// temporary copy of Go 1.7's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go18.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go18.go new file mode 100644 index 00000000..4f30d228 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go18.go @@ -0,0 +1,56 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "crypto/tls" + "io" + "net/http" +) + +func cloneTLSConfig(c *tls.Config) *tls.Config { + c2 := c.Clone() + c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 + return c2 +} + +var _ http.Pusher = (*responseWriter)(nil) + +// Push implements http.Pusher. +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + internalOpts := pushOptions{} + if opts != nil { + internalOpts.Method = opts.Method + internalOpts.Header = opts.Header + } + return w.push(target, internalOpts) +} + +func configureServer18(h1 *http.Server, h2 *Server) error { + if h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil && panicValue != http.ErrAbortHandler +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return req.GetBody +} + +func reqBodyIsNoBody(body io.ReadCloser) bool { + return body == http.NoBody +} + +func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go19.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go19.go new file mode 100644 index 00000000..38124ba5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/go19.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
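Editor's note (illustrative, not part of the vendored diff): go18.go above makes the server's responseWriter satisfy the standard http.Pusher interface. A handler can therefore attempt a push through the type assertion below; the asset path is made up for the example:

package sketch

import (
	"fmt"
	"net/http"
)

func indexHandler(w http.ResponseWriter, r *http.Request) {
	if p, ok := w.(http.Pusher); ok {
		// Best effort: Push fails (e.g. http.ErrNotSupported) over HTTP/1.x
		// or when the client has disabled server push.
		_ = p.Push("/static/app.css", nil)
	}
	fmt.Fprintln(w, "<html>...</html>")
}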
+ +// +build go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + s.RegisterOnShutdown(conf.state.startGracefulShutdown) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/encode.go index f9bb0339..54726c2a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/encode.go @@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder { tableSizeUpdate: false, w: w, } + e.dynTab.table.init() e.dynTab.setMaxSize(initialHeaderTableSize) return e } // WriteField encodes f into a single Write to e's underlying Writer. // This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. +// if necessary. If produced, it is done before encoding f. func (e *Encoder) WriteField(f HeaderField) error { e.buf = e.buf[:0] @@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error { // only name matches, i points to that index and nameValueMatch // becomes false. func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - for idx, hf := range staticTable { - if !constantTimeStringCompare(hf.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(idx + 1) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(hf.Value, f.Value) { - continue - } - i = uint64(idx + 1) - nameValueMatch = true - return + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true } - j, nameValueMatch := e.dynTab.search(f) + j, nameValueMatch := e.dynTab.table.search(f) if nameValueMatch || (i == 0 && j != 0) { - i = j + uint64(len(staticTable)) + return j + uint64(staticTable.len()), nameValueMatch } - return + + return i, false } // SetMaxDynamicTableSize changes the dynamic header table size to v. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/hpack.go index 8aa197ad..176644ac 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -57,11 +57,11 @@ func (hf HeaderField) String() string { return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) } -// Size returns the size of an entry per RFC 7540 section 5.2. +// Size returns the size of an entry per RFC 7541 section 4.1. func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's + // its entries. The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's // length in octets (see Section 5.2), plus 32. 
The size of // an entry is calculated using the length of the name and @@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod emit: emitFunc, emitEnabled: true, } + d.dynTab.table.init() d.dynTab.allowedMaxSize = maxDynamicTableSize d.dynTab.setMaxSize(maxDynamicTableSize) return d @@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // ents is the FIFO described at // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 + table headerFieldTable + size uint32 // in bytes maxSize uint32 // current maxSize allowedMaxSize uint32 // maxSize may go up to this, inclusive } @@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) { dt.evict() } -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. - func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) + dt.table.addEntry(f) dt.size += f.Size() dt.evict() } -// If we're too big, evict old stuff (front of the slice) +// If we're too big, evict old stuff. func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].Size() - dt.ents = dt.ents[1:] - } - - // Shift slice contents down if we evicted things. - if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ } -} - -// constantTimeStringCompare compares string a and b in a constant -// time manner. -func constantTimeStringCompare(a, b string) bool { - if len(a) != len(b) { - return false - } - - c := byte(0) - - for i := 0; i < len(a); i++ { - c |= a[i] ^ b[i] - } - - return c == 0 -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. If there is name match or name/value match, i is the -// index of that entry (1-based). If both name and value match, -// nameValueMatch becomes true. -func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if !constantTimeStringCompare(ent.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(ent.Value, f.Value) { - continue - } - i = uint64(l - j) - nameValueMatch = true - return - } - return + dt.table.evictOldest(n) } func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() } func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { + // See Section 2.3.3. 
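Editor's note (illustrative, not part of the vendored diff): the table accounting above charges len(name)+len(value)+32 octets per entry, per RFC 7541 Section 4.1. A quick check with the exported hpack API:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	hf := hpack.HeaderField{Name: "cache-control", Value: "no-cache"}
	fmt.Println(hf.Size()) // 13 + 8 + 32 = 53 octets counted against the dynamic-table budget
}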
+ if i == 0 { return } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } if i > uint64(d.maxTableIndex()) { return } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true } // Decode decodes an entire block. @@ -307,7 +255,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) { err = d.parseHeaderFieldRepr() if err == errNeedMore { // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string + // get too large. All the varint and string // reading code earlier should already catch // overlong things and return ErrStringLength, // but keep this as a last resort. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/tables.go index b9283a02..a66cfbea 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/hpack/tables.go @@ -4,73 +4,200 @@ package hpack -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} +import ( + "fmt" +) + +// headerFieldTable implements a list of HeaderFields. +// This is used to implement the static and dynamic tables. +type headerFieldTable struct { + // For static tables, entries are never evicted. + // + // For dynamic tables, entries are evicted from ents[0] and added to the end. + // Each entry has a unique id that starts at one and increments for each + // entry that is added. This unique id is stable across evictions, meaning + // it can be used as a pointer to a specific entry. As in hpack, unique ids + // are 1-based. The unique id for ents[k] is k + evictCount + 1. + // + // Zero is not a valid unique id. + // + // evictCount should not overflow in any remotely practical situation. In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. 
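Editor's note (illustrative, not part of the vendored diff): headerFieldTable above separates stable entry ids (assigned once, never reused) from 1-based HPACK indices, which for dynamic tables count from the newest entry. The arithmetic in idToIndex is easy to lose track of, so here is a standalone mirror with plain integers; dynIndex is a hypothetical name:

package sketch

// dynIndex mirrors headerFieldTable.idToIndex for the dynamic-table case:
// k is the slot in ents (0 = oldest entry), while the HPACK index counts
// from the newest entry.
func dynIndex(id, evictCount, tableLen uint64) uint64 {
	k := id - evictCount - 1
	return tableLen - k
}

// With evictCount == 3 and two live entries (ids 4 and 5):
//   dynIndex(5, 3, 2) == 1  // newest entry
//   dynIndex(4, 3, 2) == 2  // oldest entry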
+func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. +func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - 
pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t } var huffmanCodes = [256]uint32{ diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/http2.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/http2.go index 0173aed6..d565f40e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/http2.go @@ -13,7 +13,8 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. -package http2 +// +package http2 // import "golang.org/x/net/http2" import ( "bufio" @@ -35,6 +36,7 @@ var ( VerboseLogs bool logFrameWrites bool logFrameReads bool + inTests bool ) func init() { @@ -76,13 +78,23 @@ var ( type streamState int +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. 
const ( stateIdle streamState = iota stateOpen stateHalfClosedLocal stateHalfClosedRemote - stateResvLocal - stateResvRemote stateClosed ) @@ -91,8 +103,6 @@ var stateName = [...]string{ stateOpen: "Open", stateHalfClosedLocal: "HalfClosedLocal", stateHalfClosedRemote: "HalfClosedRemote", - stateResvLocal: "ResvLocal", - stateResvRemote: "ResvRemote", stateClosed: "Closed", } @@ -252,14 +262,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter { return &bufferedWriter{w: w} } +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + var bufWriterPool = sync.Pool{ New: func() interface{} { - // TODO: pick something better? this is a bit under - // (3 x typical 1500 byte MTU) at least. - return bufio.NewWriterSize(nil, 4<<10) + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) }, } +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) @@ -342,10 +365,27 @@ func (s *sorter) Keys(h http.Header) []string { } func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owners, so + // Our sorter works on s.v, which sorter owns, so // stash it away while we sort the user's buffer. save := s.v s.v = ss sort.Sort(s) s.v = save } + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go16.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go16.go index efd2e128..508cebcc 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go16.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go16.go @@ -7,7 +7,6 @@ package http2 import ( - "crypto/tls" "net/http" "time" ) @@ -20,27 +19,3 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { return 0 } - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. 
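Editor's note (illustrative, not part of the vendored diff): the validPseudoPath helper added at the end of the http2.go hunk above accepts only rooted paths and the literal "*" used by OPTIONS. Copied here with sample inputs for reference:

package sketch

func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/') || v == "*"
}

// validPseudoPath("/index.html") == true
// validPseudoPath("*")           == true  (OPTIONS)
// validPseudoPath("")            == false
// validPseudoPath("index.html")  == false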
- // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go17.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go17.go index 28df0c16..140434a7 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go17.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go17.go @@ -7,11 +7,16 @@ package http2 import ( + "crypto/tls" "net" "net/http" + "time" ) -type contextContext interface{} +type contextContext interface { + Done() <-chan struct{} + Err() error +} type fakeContext struct{} @@ -49,3 +54,34 @@ func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { func requestWithContext(req *http.Request, ctx contextContext) *http.Request { return req } + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go18.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 00000000..6f8d3f86 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go19.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 00000000..5ae07726 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/pipe.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/pipe.go index 53b7a1da..a6140099 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/pipe.go @@ -10,13 +10,13 @@ import ( "sync" ) -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like // io.Pipe except there are no PipeReader/PipeWriter halves, and the // underlying buffer is an interface. (io.Pipe is always unbuffered) type pipe struct { mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading err error // read error once empty. non-nil means closed. breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error @@ -32,6 +32,9 @@ type pipeBuffer interface { func (p *pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() + if p.b == nil { + return 0 + } return p.b.Len() } @@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { if p.breakErr != nil { return 0, p.breakErr } - if p.b.Len() > 0 { + if p.b != nil && p.b.Len() > 0 { return p.b.Read(d) } if p.err != nil { @@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { p.readFn() // e.g. copy trailers p.readFn = nil // not sticky like p.err } + p.b = nil return 0, p.err } p.c.Wait() @@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil { return 0, errClosedPipeWrite } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } return p.b.Write(d) } @@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) { return } p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } *dst = err p.closeDoneLocked() } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/server.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/server.go index f368738f..eae143dd 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/server.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/server.go @@ -2,17 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TODO: replace all <-sc.doneServing with reads from the stream's cw -// instead, and make sure that on close we close all open -// streams. then remove doneServing? - -// TODO: re-audit GOAWAY support. Consider each incoming frame type and -// whether it should be ignored during graceful shutdown. - -// TODO: disconnect idle clients. GFE seems to do 4 minutes. make -// configurable? or maximum number of idle clients and remove the -// oldest? - // TODO: turn off the serve goroutine when idle, so // an idle conn only has the readFrames goroutine active. (which could // also be optimized probably to pin less memory in crypto/tls). This @@ -44,6 +33,7 @@ import ( "fmt" "io" "log" + "math" "net" "net/http" "net/textproto" @@ -114,6 +104,47 @@ type Server struct { // PermitProhibitedCipherSuites, if true, permits the use of // cipher suites prohibited by the HTTP/2 spec. 
PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. + state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 } func (s *Server) maxReadFrameSize() uint32 { @@ -130,15 +161,59 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. // // ConfigureServer must be called before s begins serving. func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } if conf == nil { conf = new(Server) } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if err := configureServer18(s, conf); err != nil { + return err + } + if err := configureServer19(s, conf); err != nil { + return err + } if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) @@ -183,9 +258,6 @@ func ConfigureServer(s *http.Server, conf *Server) error { if !haveNPN { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) } - // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers - // to switch to "h2". 
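Editor's note (illustrative, not part of the vendored diff): the new Server fields above (IdleTimeout, MaxUploadBufferPerConnection, MaxUploadBufferPerStream, NewWriteScheduler) are picked up by ConfigureServer. A hedged configuration sketch; the sizes and timeout are arbitrary example values:

package sketch

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func newTLSServer(mux http.Handler) (*http.Server, error) {
	srv := &http.Server{Addr: ":8443", Handler: mux}
	h2 := &http2.Server{
		IdleTimeout:                  5 * time.Minute, // idle connections eventually get a GOAWAY
		MaxUploadBufferPerConnection: 1 << 20,         // initial connection-level receive window
		MaxUploadBufferPerStream:     1 << 20,         // initial per-stream receive window
		NewWriteScheduler:            func() http2.WriteScheduler { return http2.NewRandomWriteScheduler() },
	}
	return srv, http2.ConfigureServer(srv, h2)
}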
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} @@ -200,7 +272,6 @@ func ConfigureServer(s *http.Server, conf *Server) error { }) } s.TLSNextProto[NextProtoTLS] = protoHandler - s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. return nil } @@ -254,29 +325,50 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { defer cancel() sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan frameWriteMsg, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - advMaxStreams: s.maxConcurrentStreams(), - writeSched: writeScheduler{ - maxFrameSize: initialMaxFrameSize, - }, - initialWindowSize: initialWindowSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() } + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. 
sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) @@ -356,45 +448,52 @@ type serverConn struct { handler http.Handler baseCtx contextContext framer *Framer - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan frameWriteMsg // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - testHookCh chan func(int) // code to run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string + writeSched WriteScheduler // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curOpenStreams uint32 // client's number of open streams - maxStreamID uint32 // max ever seen - streams map[uint32]*stream - initialWindowSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - writeSched writeScheduler - inGoAway bool // we've started to or sent GOAWAY - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used - freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once } func (sc *serverConn) maxHeaderListSize() uint32 { @@ -409,6 +508,11 @@ func (sc *serverConn) maxHeaderListSize() uint32 { return uint32(n + typicalHeaders*perFieldOverhead) } +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + // stream represents a stream. This is the minimal metadata needed by // the serve goroutine. Most of the actual stream state is owned by // the http.Handler's goroutine in the responseWriter. Because the @@ -434,11 +538,10 @@ type stream struct { numTrailerValues int64 weight uint8 state streamState - sentReset bool // only true once detached from streams map - gotReset bool // only true once detacted from streams map - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - reqBuf []byte + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -453,7 +556,7 @@ func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { func (sc *serverConn) state(streamID uint32) (streamState, *stream) { sc.serveG.check() - // http://http2.github.io/http2-spec/#rfc.section.5.1 + // http://tools.ietf.org/html/rfc7540#section-5.1 if st, ok := sc.streams[streamID]; ok { return st.state, st } @@ -463,8 +566,14 @@ func (sc *serverConn) state(streamID uint32) (streamState, *stream) { // a client sends a HEADERS frame on stream 7 without ever sending a // frame on stream 5, then stream 5 transitions to the "closed" // state when the first frame for stream 7 is sent or received." 
- if streamID <= sc.maxStreamID { - return stateClosed, nil + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } } return stateIdle, nil } @@ -603,17 +712,17 @@ func (sc *serverConn) readFrames() { // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. type frameWriteResult struct { - wm frameWriteMsg // what was written (or attempted) - err error // result of the writeFrame call + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call } // writeFrameAsync runs in its own goroutine and writes a single frame // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. -func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { - err := wm.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wm, err} +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} } func (sc *serverConn) closeAllStreamsOnConnClose() { @@ -657,40 +766,53 @@ func (sc *serverConn) serve() { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } - sc.writeFrame(frameWriteMsg{ + sc.writeFrame(FrameWriteRequest{ write: writeSettings{ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) sc.unackedSettings++ + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. + // "StateNew" state. We can't go directly to idle, though. // Active means we read some data and anticipate a request. We'll // do another Active when we get a HEADERS frame. 
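Editor's note (illustrative, not part of the vendored diff): serverConn.state above now tracks the high-water mark separately for odd (client-initiated) and even (pushed) stream IDs when deciding whether a stream absent from the map is implicitly closed. A plain-value mirror of that rule; implicitlyClosed is a hypothetical name:

package sketch

// implicitlyClosed reports whether a stream that is absent from the streams
// map must be treated as closed because a later stream of the same parity
// has already been seen (RFC 7540 Section 5.1.1).
func implicitlyClosed(streamID, maxClientStreamID, maxPushPromiseID uint32) bool {
	if streamID%2 == 1 {
		return streamID <= maxClientStreamID // client-initiated streams are odd
	}
	return streamID <= maxPushPromiseID // pushed streams are even
}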
sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.NewTimer(firstSettingsTimeout) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + loopNum := 0 for { loopNum++ select { - case wm := <-sc.wantWriteFrameCh: - sc.writeFrame(wm) + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: @@ -698,24 +820,75 @@ func (sc *serverConn) serve() { return } res.readMore() - if settingsTimer.C != nil { + if settingsTimer != nil { settingsTimer.Stop() - settingsTimer.C = nil + settingsTimer = nil } case m := <-sc.bodyReadCh: sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { return - case fn := <-sc.testHookCh: - fn(loopNum) } } } +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + // readPreface reads the ClientPreface greeting from the peer // or returns an error on timeout or an invalid greeting. 
func (sc *serverConn) readPreface() error { @@ -760,7 +933,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea ch := errChanPool.Get().(chan error) writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(frameWriteMsg{ + err := sc.writeFrameFromHandler(FrameWriteRequest{ write: writeArg, stream: stream, done: ch, @@ -796,17 +969,17 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return err } -// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts // if the connection has gone away. // // This must not be run from the serve goroutine itself, else it might // deadlock writing to sc.wantWriteFrameCh (which is only mildly // buffered and is read by serve itself). If you're on the serve // goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { sc.serveG.checkNotOn() // NOT select { - case sc.wantWriteFrameCh <- wm: + case sc.wantWriteFrameCh <- wr: return nil case <-sc.doneServing: // Serve loop is gone. @@ -823,55 +996,103 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { // make it onto the wire // // If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wm frameWriteMsg) { +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { sc.serveG.check() + // If true, wr will not be written and wr.done will not be signaled. var ignoreWrite bool + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + // Don't send a 100-continue response if we've already sent headers. // See golang.org/issue/14030. - switch wm.write.(type) { + switch wr.write.(type) { case *writeResHeaders: - wm.stream.wroteHeaders = true + wr.stream.wroteHeaders = true case write100ContinueHeadersFrame: - if wm.stream.wroteHeaders { + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. 
+ if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } ignoreWrite = true } } if !ignoreWrite { - sc.writeSched.add(wm) + sc.writeSched.Push(wr) } sc.scheduleFrameWrite() } -// startFrameWrite starts a goroutine to write wm (in a separate +// startFrameWrite starts a goroutine to write wr (in a separate // goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wm. -func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { sc.serveG.check() if sc.writingFrame { panic("internal error: can only be writing one frame at a time") } - st := wm.stream + st := wr.stream if st != nil { switch st.state { case stateHalfClosedLocal: - panic("internal error: attempt to send frame on half-closed-local stream") - case stateClosed: - if st.sentReset || st.gotReset { - // Skip this frame. - sc.scheduleFrameWrite() - return + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. (We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) } - panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return } } sc.writingFrame = true sc.needsFrameFlush = true - go sc.writeFrameAsync(wm) + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } } // errHandlerPanicked is the error given to any callers blocked in a read from @@ -887,27 +1108,12 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { panic("internal error: expected to be already writing a frame") } sc.writingFrame = false + sc.writingFrameAsync = false - wm := res.wm - st := wm.stream - - closeStream := endsStream(wm.write) - - if _, ok := wm.write.(handlerPanicRST); ok { - sc.closeStream(st, errHandlerPanicked) - } - - // Reply (if requested) to the blocked ServeHTTP goroutine. - if ch := wm.done; ch != nil { - select { - case ch <- res.err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) - } - } - wm.write = nil // prevent use (assume it's tainted after wm.done send) + wr := res.wr - if closeStream { + if writeEndsStream(wr.write) { + st := wr.stream if st == nil { panic("internal error: expecting non-nil stream") } @@ -916,19 +1122,37 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // Here we would go to stateHalfClosedLocal in // theory, but since our handler is done and // the net/http package provides no mechanism - // for finishing writing to a ResponseWriter - // while still reading data (see possible TODO - // at top of this file), we go into closed - // state here anyway, after telling the peer - // we're hanging up on them. 
- st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream - errCancel := StreamError{st.id, ErrCodeCancel} - sc.resetStream(errCancel) + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) case stateHalfClosedRemote: sc.closeStream(st, errHandlerComplete) } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } } + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + sc.scheduleFrameWrite() } @@ -946,47 +1170,77 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // flush the write buffer. func (sc *serverConn) scheduleFrameWrite() { sc.serveG.check() - if sc.writingFrame { - return - } - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(frameWriteMsg{ - write: &writeGoAway{ - maxStreamID: sc.maxStreamID, - code: sc.goAwayCode, - }, - }) + if sc.writingFrame || sc.inFrameScheduleLoop { return } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) - return - } - if !sc.inGoAway { - if wm, ok := sc.writeSched.take(); ok { - sc.startFrameWrite(wm) - return + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break } - if sc.needsFrameFlush { - sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - return - } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. 
+func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAwayIn(ErrCodeNo, 0) } func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() - if sc.inGoAway { - return - } + var forceCloseIn time.Duration if code != ErrCodeNo { - sc.shutDownIn(250 * time.Millisecond) + forceCloseIn = 250 * time.Millisecond } else { // TODO: configurable - sc.shutDownIn(1 * time.Second) + forceCloseIn = 1 * time.Second + } + sc.goAwayIn(code, forceCloseIn) +} + +func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { + sc.serveG.check() + if sc.inGoAway { + return + } + if forceCloseIn != 0 { + sc.shutDownIn(forceCloseIn) } sc.inGoAway = true sc.needToSendGoAway = true @@ -996,16 +1250,14 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { sc.serveG.check() - sc.writeFrame(frameWriteMsg{write: se}) + sc.writeFrame(FrameWriteRequest{write: se}) if st, ok := sc.streams[se.StreamID]; ok { - st.sentReset = true - sc.closeStream(st, se) + st.resetQueued = true } } @@ -1090,6 +1342,8 @@ func (sc *serverConn) processFrame(f Frame) error { return sc.processResetStream(f) case *PriorityFrame: return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) case *PushPromiseFrame: // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. @@ -1115,7 +1369,10 @@ func (sc *serverConn) processPing(f *PingFrame) error { // PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } - sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) return nil } @@ -1123,7 +1380,14 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { sc.serveG.check() switch { case f.StreamID != 0: // stream-level flow control - st := sc.streams[f.StreamID] + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } if st == nil { // "WINDOW_UPDATE can be sent by a peer that has sent a // frame bearing the END_STREAM flag. 
This means that a @@ -1133,7 +1397,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { return nil } if !st.flow.add(int32(f.Increment)) { - return StreamError{f.StreamID, ErrCodeFlowControl} + return streamError(f.StreamID, ErrCodeFlowControl) } default: // connection-level flow control if !sc.flow.add(int32(f.Increment)) { @@ -1157,9 +1421,8 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { return ConnectionError(ErrCodeProtocol) } if st != nil { - st.gotReset = true st.cancelCtx() - sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) } return nil } @@ -1170,28 +1433,33 @@ func (sc *serverConn) closeStream(st *stream, err error) { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed - sc.curOpenStreams-- - if sc.curOpenStreams == 0 { - sc.setConnState(http.StateIdle) + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- } delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + p.CloseWithError(err) } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.forgetStream(st.id) - if st.reqBuf != nil { - // Stash this request body buffer (64k) away for reuse - // by a future POST/PUT/etc. - // - // TODO(bradfitz): share on the server? sync.Pool? - // Server requires locks and might hurt contention. - // sync.Pool might work, or might be worse, depending - // on goroutine CPU migrations. (get and put on - // separate CPUs). Maybe a mix of strategies. But - // this is an easy win for now. - sc.freeRequestBodyBuf = st.reqBuf - } + sc.writeSched.CloseStream(st.id) } func (sc *serverConn) processSettings(f *SettingsFrame) error { @@ -1233,7 +1501,7 @@ func (sc *serverConn) processSetting(s Setting) error { case SettingInitialWindowSize: return sc.processSettingInitialWindowSize(s.Val) case SettingMaxFrameSize: - sc.writeSched.maxFrameSize = s.Val + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val default: @@ -1258,9 +1526,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { // adjust the size of all stream flow control windows that it // maintains by the difference between the new value and the // old value." 
- old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative for _, st := range sc.streams { if !st.flow.add(growth) { // 6.9.2 Initial Flow Control Window Size @@ -1277,43 +1545,82 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + // "If a DATA frame is received whose stream is not in "open" // or "half closed (local)" state, the recipient MUST respond // with a stream error (Section 5.4.2) of type STREAM_CLOSED." id := f.Header().StreamID - st, ok := sc.streams[id] - if !ok || st.state != stateOpen || st.gotTrailerHeader { + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that // the http.Handler returned, so it's done reading & // done writing). Try to stop the client from sending // more DATA. - return StreamError{id, ErrCodeStreamClosed} + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) } if st.body == nil { panic("internal error: should have a body in this state") } - data := f.Data() // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return StreamError{id, ErrCodeStreamClosed} + return streamError(id, ErrCodeStreamClosed) } - if len(data) > 0 { + if f.Length > 0 { // Check whether the client has flow control quota. - if int(st.inflow.available()) < len(data) { - return StreamError{id, ErrCodeFlowControl} + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) } - st.inflow.take(int32(len(data))) - wrote, err := st.body.Write(data) - if err != nil { - return StreamError{id, ErrCodeStreamClosed} + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) } - if wrote != len(data) { - panic("internal error: bad Writer") + + // Return any padded flow control now, since we won't + // refund it later on body reads. 
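To make the padding refund just below concrete (illustrative numbers, not taken from the patch): a DATA frame carrying 1000 bytes of application data plus 23 bytes of padding arrives with f.Length == 1024, because the frame length counts the data, the padding, and the one-byte Pad Length field, while len(data) is only 1000. Body reads by the handler will eventually return flow control for those 1000 bytes, but nothing would ever refund the remaining 24 bytes, so the code that follows credits f.Length - len(data) = 24 back to both the connection-level and the stream-level windows immediately.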
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) } - st.bodyBytes += int64(len(data)) } if f.StreamEnded() { st.endStream() @@ -1321,6 +1628,25 @@ func (sc *serverConn) processData(f *DataFrame) error { return nil } +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + // endStream closes a Request.Body's pipe. It is called when a DATA // frame says a request body is over (or after trailers). func (st *stream) endStream() { @@ -1348,14 +1674,20 @@ func (st *stream) copyTrailersToHandlerRequest() { } } +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() - id := f.Header().StreamID + id := f.StreamID if sc.inGoAway { // Ignore. return nil } - // http://http2.github.io/http2-spec/#rfc.section.5.1.1 + // http://tools.ietf.org/html/rfc7540#section-5.1.1 // Streams initiated by a client MUST use odd-numbered stream // identifiers. [...] An endpoint that receives an unexpected // stream identifier MUST respond with a connection error @@ -1367,8 +1699,12 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // send a trailer for an open one. If we already have a stream // open, let it process its own HEADERS frame (trailers at this // point, if it's valid). - st := sc.streams[f.Header().StreamID] - if st != nil { + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } return st.processTrailerHeaders(f) } @@ -1377,54 +1713,45 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // endpoint has opened or reserved. [...] An endpoint that // receives an unexpected stream identifier MUST respond with // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. 
- if id <= sc.maxStreamID { + if id <= sc.maxClientStreamID { return ConnectionError(ErrCodeProtocol) } - sc.maxStreamID = id + sc.maxClientStreamID = id - ctx, cancelCtx := contextWithCancel(sc.baseCtx) - st = &stream{ - sc: sc, - id: id, - state: stateOpen, - ctx: ctx, - cancelCtx: cancelCtx, - } - if f.StreamEnded() { - st.state = stateHalfClosedRemote + if sc.idleTimer != nil { + sc.idleTimer.Stop() } - st.cw.Init() - - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings - sc.streams[id] = st - if f.HasPriority() { - adjustStreamPriority(sc.streams, st.id, f.Priority) - } - sc.curOpenStreams++ - if sc.curOpenStreams == 1 { - sc.setConnState(http.StateActive) - } - if sc.curOpenStreams > sc.advMaxStreams { - // "Endpoints MUST NOT exceed the limit set by their - // peer. An endpoint that receives a HEADERS frame - // that causes their advertised concurrent stream - // limit to be exceeded MUST treat this as a stream - // error (Section 5.4.2) of type PROTOCOL_ERROR or - // REFUSED_STREAM." + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { if sc.unackedSettings == 0 { // They should know better. - return StreamError{st.id, ErrCodeProtocol} + return streamError(id, ErrCodeProtocol) } // Assume it's a network race, where they just haven't // received our last SETTINGS update. But actually // this can't happen yet, because we don't yet provide // a way for users to adjust server parameters at // runtime. - return StreamError{st.id, ErrCodeRefusedStream} + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) } rw, req, err := sc.newWriterAndRequest(st, f) @@ -1442,10 +1769,21 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { if f.Truncated { // Their header list was too long. Send a 431 error. handler = handleHeaderListTooLong - } else if err := checkValidHTTP2Request(req); err != nil { + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { handler = new400Handler(err) } + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. 
+ if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + go sc.runHandler(rw, req, handler) return nil } @@ -1458,11 +1796,11 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { } st.gotTrailerHeader = true if !f.StreamEnded() { - return StreamError{st.id, ErrCodeProtocol} + return streamError(st.id, ErrCodeProtocol) } if len(f.PseudoFields()) > 0 { - return StreamError{st.id, ErrCodeProtocol} + return streamError(st.id, ErrCodeProtocol) } if st.trailer != nil { for _, hf := range f.RegularFields() { @@ -1471,7 +1809,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. - return StreamError{st.id, ErrCodeProtocol} + return streamError(st.id, ErrCodeProtocol) } st.trailer[key] = append(st.trailer[key], hf.Value) } @@ -1480,62 +1818,81 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { return nil } +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. + return streamError(streamID, ErrCodeProtocol) + } + return nil +} + func (sc *serverConn) processPriority(f *PriorityFrame) error { - adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) return nil } -func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { - st, ok := streams[streamID] - if !ok { - // TODO: not quite correct (this streamID might - // already exist in the dep tree, but be closed), but - // close enough for now. - return +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") } - st.weight = priority.Weight - parent := streams[priority.StreamDep] // might be nil - if parent == st { - // if client tries to set this stream to be the parent of itself - // ignore and keep going - return + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } - // section 5.3.3: If a stream is made dependent on one of its - // own dependencies, the formerly dependent stream is first - // moved to be dependent on the reprioritized stream's previous - // parent. The moved dependency retains its weight. 
- for piter := parent; piter != nil; piter = piter.parent { - if piter == st { - parent.parent = st.parent - break - } + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ } - st.parent = parent - if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { - for _, openStream := range streams { - if openStream != st && openStream.parent == st.parent { - openStream.parent = st - } - } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) } + + return st } func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - method := f.PseudoValue("method") - path := f.PseudoValue("path") - scheme := f.PseudoValue("scheme") - authority := f.PseudoValue("authority") + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } - isConnect := method == "CONNECT" + isConnect := rp.method == "CONNECT" if isConnect { - if path != "" || scheme != "" || authority == "" { - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } - } else if method == "" || path == "" || - (scheme != "https" && scheme != "http") { + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -1546,40 +1903,66 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } bodyOpen := !f.StreamEnded() - if method == "HEAD" && bodyOpen { + if rp.method == "HEAD" && bodyOpen { // HEAD requests can't have bodies - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } - var tlsState *tls.ConnectionState // nil if not scheme https - if scheme == "https" { - tlsState = sc.tlsState + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") } - header := make(http.Header) - for _, hf := range f.RegularFields() { - header.Add(sc.canonicalHeader(hf.Name), hf.Value) + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() - if authority == "" { - authority = header.Get("Host") + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState } - needsContinue := 
header.Get("Expect") == "100-continue" + + needsContinue := rp.header.Get("Expect") == "100-continue" if needsContinue { - header.Del("Expect") + rp.header.Del("Expect") } // Merge Cookie headers into one "; "-delimited value. - if cookies := header["Cookie"]; len(cookies) > 1 { - header.Set("Cookie", strings.Join(cookies, "; ")) + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) } // Setup Trailers var trailer http.Header - for _, v := range header["Trailer"] { + for _, v := range rp.header["Trailer"] { for _, key := range strings.Split(v, ",") { key = http.CanonicalHeaderKey(strings.TrimSpace(key)) switch key { @@ -1594,57 +1977,42 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } } } - delete(header, "Trailer") + delete(rp.header, "Trailer") - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } var url_ *url.URL var requestURI string - if isConnect { - url_ = &url.URL{Host: authority} - requestURI = authority // mimic HTTP/1 server behavior + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior } else { var err error - url_, err = url.ParseRequestURI(path) + url_, err = url.ParseRequestURI(rp.path) if err != nil { - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + return nil, nil, streamError(st.id, ErrCodeProtocol) } - requestURI = path + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, } req := &http.Request{ - Method: method, + Method: rp.method, URL: url_, RemoteAddr: sc.remoteAddrStr, - Header: header, + Header: rp.header, RequestURI: requestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: authority, + Host: rp.authority, Body: body, Trailer: trailer, } req = requestWithContext(req, st.ctx) - if bodyOpen { - // Disabled, per golang.org/issue/14960: - // st.reqBuf = sc.getRequestBodyBuf() - // TODO: remove this 64k of garbage per request (again, but without a data race): - buf := make([]byte, initialWindowSize) - - body.pipe = &pipe{ - b: &fixedBuffer{buf: buf}, - } - - if vv, ok := header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - } rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw @@ -1660,15 +2028,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -func (sc *serverConn) getRequestBodyBuf() []byte { - sc.serveG.check() - if buf := sc.freeRequestBodyBuf; buf != nil { - sc.freeRequestBodyBuf = nil - return buf - } - return make([]byte, initialWindowSize) -} - // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true @@ -1676,15 +2035,17 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler rw.rws.stream.cancelCtx() if didPanic { e := recover() - // Same as net/http: - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.writeFrameFromHandler(frameWriteMsg{ + sc.writeFrameFromHandler(FrameWriteRequest{ write: handlerPanicRST{rw.rws.stream.id}, stream: rw.rws.stream, }) - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } return } rw.handlerDone() @@ -1715,7 +2076,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // mutates it. errc = errChanPool.Get().(chan error) } - if err := sc.writeFrameFromHandler(frameWriteMsg{ + if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, stream: st, done: errc, @@ -1738,7 +2099,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // called from handler goroutines. func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(frameWriteMsg{ + sc.writeFrameFromHandler(FrameWriteRequest{ write: write100ContinueHeadersFrame{st.id}, stream: st, }) @@ -1754,11 +2115,13 @@ type bodyReadMsg struct { // called from handler goroutines. // Notes that the handler for the given stream ID read n bytes of its body // and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { sc.serveG.checkNotOn() // NOT on - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } } } @@ -1801,7 +2164,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { if st != nil { streamID = st.id } - sc.writeFrame(frameWriteMsg{ + sc.writeFrame(FrameWriteRequest{ write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, stream: st, }) @@ -1816,16 +2179,19 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { } } +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. type requestBody struct { stream *stream conn *serverConn - closed bool + closed bool // for use by Close only + sawEOF bool // for use by Read only pipe *pipe // non-nil if we have a HTTP entity message body needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { - if b.pipe != nil { + if b.pipe != nil && !b.closed { b.pipe.BreakWithError(errClosedBody) } b.closed = true @@ -1837,18 +2203,22 @@ func (b *requestBody) Read(p []byte) (n int, err error) { b.needsContinue = false b.conn.write100ContinueHeaders(b.stream) } - if b.pipe == nil { + if b.pipe == nil || b.sawEOF { return 0, io.EOF } n, err = b.pipe.Read(p) - if n > 0 { - b.conn.noteBodyReadFromHandler(b.stream, n) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return } + b.conn.noteBodyReadFromHandler(b.stream, n, err) return } -// responseWriter is the http.ResponseWriter implementation. 
It's -// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The // responseWriterState pointer inside is zeroed at the end of a // request (in handlerDone) and calls on the responseWriter thereafter // simply crash (caller's mistake), but the much larger responseWriterState @@ -1882,6 +2252,7 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -1963,6 +2334,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { + rws.dirty = true return 0, err } if endStream { @@ -1984,6 +2356,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true return 0, err } } @@ -1995,6 +2368,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) + if err != nil { + rws.dirty = true + } return len(p), err } return len(p), nil @@ -2022,7 +2398,7 @@ const TrailerPrefix = "Trailer:" // says you SHOULD (but not must) predeclare any trailers in the // header, the official ResponseWriter rules said trailers in Go must // be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the +// map to mean both Headers and Trailers. When it's time to write the // Trailers, we pick out the fields of Headers that were declared as // trailers. That worked for a while, until we found the first major // user of Trailers in the wild: gRPC (using them only over http2), @@ -2081,8 +2457,9 @@ func (w *responseWriter) CloseNotify() <-chan bool { if ch == nil { ch = make(chan bool, 1) rws.closeNotifierCh = ch + cw := rws.stream.cw go func() { - rws.stream.cw.Wait() // wait for close + cw.Wait() // wait for close ch <- true }() } @@ -2133,7 +2510,7 @@ func cloneHeader(h http.Header) http.Header { // // * Handler calls w.Write or w.WriteString -> // * -> rws.bw (*bufio.Writer) -> -// * (Handler migth call Flush) +// * (Handler might call Flush) // * -> chunkWriter{rws} // * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk (most of the magic; see comment there) @@ -2172,10 +2549,213 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws + dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - responseWriterStatePool.Put(rws) + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. 
+var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. + switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := &startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.serveMsgCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg *startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiaed. 
+ msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. + if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. + // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) } // foreachHeaderElement splits v according to the "#rule" construction @@ -2205,16 +2785,16 @@ var connHeaders = []string{ "Upgrade", } -// checkValidHTTP2Request checks whether req is a valid HTTP/2 request, +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, // per RFC 7540 Section 8.1.2.2. // The returned error is reported to users. 
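The push plumbing added above (responseWriter.push plus startPush) is what backs net/http's Pusher interface on Go 1.8 and later; the adapter that exposes it lives in the package's Go 1.8 build files, outside this hunk. A hedged sketch of a handler exercising that path — the handler name, asset path, response body, and certificate filenames are invented for illustration:

package main

import (
	"io"
	"log"
	"net/http"
)

// assetsHandler pushes a stylesheet before writing the page that needs it.
// Push errors (e.g. http.ErrNotSupported when the peer disabled push, or the
// package's ErrRecursivePush on a pushed stream) are advisory: the page is
// still served normally.
func assetsHandler(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		opts := &http.PushOptions{
			Method: "GET",
			Header: http.Header{"Accept-Encoding": r.Header["Accept-Encoding"]},
		}
		if err := pusher.Push("/static/app.css", opts); err != nil {
			log.Printf("http2 push failed: %v", err)
		}
	}
	io.WriteString(w, `<html><head><link rel="stylesheet" href="/static/app.css"></head><body>ok</body></html>`)
}

func main() {
	http.HandleFunc("/", assetsHandler)
	// Server push only works over HTTP/2, which net/http negotiates on TLS;
	// cert.pem and key.pem are placeholders.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}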
-func checkValidHTTP2Request(req *http.Request) error { - for _, h := range connHeaders { - if _, ok := req.Header[h]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", h) +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) } } - te := req.Header["Te"] + te := h["Te"] if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) } @@ -2261,3 +2841,17 @@ var badTrailer = map[string]bool{ "Transfer-Encoding": true, "Www-Authenticate": true, } + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/transport.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/transport.go index fb8dd997..b6a5c7dd 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/transport.go @@ -10,12 +10,14 @@ import ( "bufio" "bytes" "compress/gzip" + "crypto/rand" "crypto/tls" "errors" "fmt" "io" "io/ioutil" "log" + "math" "net" "net/http" "sort" @@ -25,6 +27,7 @@ import ( "time" "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" "golang.org/x/net/lex/httplex" ) @@ -148,27 +151,32 @@ type ClientConn struct { readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - - // Settings from peer: + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) maxFrameSize uint32 maxConcurrentStreams uint32 initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write 
error that has occurred @@ -183,6 +191,7 @@ type clientStream struct { ID uint32 resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu requestedGzip bool on100 func() // optional code to run if get a 100 continue response @@ -191,6 +200,7 @@ type clientStream struct { bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read readErr error // sticky read error; owned by transportResponseBody.Read stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu peerReset chan struct{} // closed on peer reset resetErr error // populated before peerReset is closed @@ -218,15 +228,26 @@ func (cs *clientStream) awaitRequestCancel(req *http.Request) { } select { case <-req.Cancel: + cs.cancelStream() cs.bufPipe.CloseWithError(errRequestCanceled) - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) case <-ctx.Done(): + cs.cancelStream() cs.bufPipe.CloseWithError(ctx.Err()) - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) case <-cs.done: } } +func (cs *clientStream) cancelStream() { + cs.cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cs.cc.mu.Unlock() + + if !didReset { + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } +} + // checkResetOrDone reports any error sent in a RST_STREAM frame by the // server, or errStreamClosed if the stream is complete. func (cs *clientStream) checkResetOrDone() error { @@ -283,14 +304,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) // and returns a host:port. The port 443 is added if needed. func authorityAddr(scheme string, authority string) (addr string) { - if _, _, err := net.SplitHostPort(authority); err == nil { - return authority + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a } - port := "443" - if scheme == "http" { - port = "80" + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port } - return net.JoinHostPort(authority, port) + return net.JoinHostPort(host, port) } // RoundTripOpt is like RoundTrip, but takes options. @@ -308,8 +337,10 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } traceGotConn(req, cc) res, err := cc.RoundTrip(req) - if shouldRetryRequest(req, err) { - continue + if err != nil { + if req, err = shouldRetryRequest(req, err); err == nil { + continue + } } if err != nil { t.vlogf("RoundTrip failure: %v", err) @@ -331,15 +362,44 @@ func (t *Transport) CloseIdleConnections() { var ( errClientConnClosed = errors.New("http2: client conn is closed") errClientConnUnusable = errors.New("http2: client conn not usable") + + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") ) -func shouldRetryRequest(req *http.Request, err error) bool { - // TODO: retry GET requests (no bodies) more aggressively, if shutdown - // before response. 
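The retry path introduced just below (shouldRetryRequest returning errClientConnGotGoAwayAfterSomeReqBody) can only replay a request whose body is re-obtainable, which is why its error message tells callers to define Request.GetBody. A hedged sketch of a client request that stays replayable across a graceful GOAWAY — the URL and payload are placeholders, and on Go 1.8+ http.NewRequest already populates GetBody for *bytes.Reader, *bytes.Buffer, and *strings.Reader bodies, so the explicit assignment is shown mainly for other reader types:

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	req, err := http.NewRequest("POST", "https://example.com/ingest", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	// Make the body replayable so the HTTP/2 transport can retry the request
	// if the server starts a graceful shutdown (GOAWAY) after part of the
	// body has already been written.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(ioutil.Discard, resp.Body)
}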
- return err == errClientConnUnusable +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { + switch err { + default: + return nil, err + case errClientConnUnusable, errClientConnGotGoAway: + return req, nil + case errClientConnGotGoAwayAfterSomeReqBody: + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. + getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } } -func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -348,13 +408,13 @@ func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { if err != nil { return nil, err } - return t.NewClientConn(tconn) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { cfg := new(tls.Config) if t.TLSClientConfig != nil { - *cfg = *t.TLSClientConfig + *cfg = *cloneTLSConfig(t.TLSClientConfig) } if !strSliceContains(cfg.NextProtos, NextProtoTLS) { cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) @@ -409,14 +469,10 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - if VerboseLogs { - t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) - } - if _, err := c.Write(clientPreface); err != nil { - t.vlogf("client preface write error: %v", err) - return nil, err - } + return t.newClientConn(c, false) +} +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -426,7 +482,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { initialWindowSize: 65535, // spec default maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + cc.cond = sync.NewCond(&cc.mu) cc.flow.add(int32(initialWindowSize)) @@ -454,6 +521,8 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + + cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) 
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) cc.inflow.add(transportDefaultConnFlow + initialWindowSize) @@ -462,33 +531,6 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return nil, cc.werr } - // Read the obligatory SETTINGS frame - f, err := cc.fr.ReadFrame() - if err != nil { - return nil, err - } - sf, ok := f.(*SettingsFrame) - if !ok { - return nil, fmt.Errorf("expected settings frame, got: %T", f) - } - cc.fr.WriteSettingsAck() - cc.bw.Flush() - - sf.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? - t.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - go cc.readLoop() return cc, nil } @@ -507,6 +549,15 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { if old != nil && old.ErrCode != ErrCodeNo { cc.goAway.ErrCode = old.ErrCode } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } } func (cc *ClientConn) CanTakeNewRequest() bool { @@ -521,7 +572,17 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { } return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < 2147483647 + cc.nextStreamID < math.MaxInt32 +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() } func (cc *ClientConn) closeIfIdle() { @@ -531,9 +592,13 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } cc.tconn.Close() } @@ -593,8 +658,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { } if len(keys) > 0 { sort.Strings(keys) - // TODO: could do better allocation-wise here, but trailers are rare, - // so being lazy for now. return strings.Join(keys, ","), nil } return "", nil @@ -616,48 +679,37 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { // Certain headers are special-cased as okay but not transmitted later. 
func checkConnHeaders(req *http.Request) error { if v := req.Header.Get("Upgrade"); v != "" { - return errors.New("http2: invalid Upgrade request header") + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) } - if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { - return errors.New("http2: invalid Transfer-Encoding request header") + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } - if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { - return errors.New("http2: invalid Connection request header") + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil } -func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) { - body = req.Body - if body == nil { - return nil, 0 +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return 0 } if req.ContentLength != 0 { - return req.Body, req.ContentLength - } - - // We have a body but a zero content length. Test to see if - // it's actually zero or just unset. - var buf [1]byte - n, rerr := io.ReadFull(body, buf[:]) - if rerr != nil && rerr != io.EOF { - return errorReader{rerr}, -1 + return req.ContentLength } - if n == 1 { - // Oh, guess there is data in this Body Reader after all. - // The ContentLength field just wasn't set. - // Stich the Body back together again, re-attaching our - // consumed byte. - return io.MultiReader(bytes.NewReader(buf[:]), body), -1 - } - // Body is actually zero bytes. - return nil, 0 + return -1 } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { if err := checkConnHeaders(req); err != nil { return nil, err } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } trailers, err := commaSeparatedTrailers(req) if err != nil { @@ -665,9 +717,6 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { } hasTrailers := trailers != "" - body, contentLen := bodyAndLength(req) - hasBody := body != nil - cc.mu.Lock() cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { @@ -675,6 +724,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return nil, errClientConnUnusable } + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? var requestedGzip bool if !cc.t.disableCompression() && @@ -747,30 +800,41 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { bodyWritten := false ctx := reqContext(req) + handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. 
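// Illustrative sketch restating the contract of actualContentLength above:
// 0 means "definitely no body", -1 means "body of unknown length", and any
// other value is the caller-supplied ContentLength; hasBody then becomes
// simply contentLen != 0.
package sketch

import "net/http"

func contentLengthOf(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0 // no body at all
	}
	if req.ContentLength != 0 {
		return req.ContentLength // explicit length (may be > 0)
	}
	return -1 // body present, length unknown
}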
full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + if re.err == errClientConnGotGoAway { + cc.mu.Lock() + if cs.startedWrite { + re.err = errClientConnGotGoAwayAfterSomeReqBody + } + cc.mu.Unlock() + } + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + } + for { select { case re := <-readLoopResCh: - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil + return handleReadLoopResponse(re) case <-respHeaderTimer: cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { @@ -804,6 +868,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // forgetStreamID. return nil, cs.resetErr case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } if err != nil { return nil, err } @@ -908,10 +978,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( err = cc.fr.WriteData(cs.ID, sentEnd, data) if err == nil { // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or opt-out? - // Use some heuristic on the body type? Nagel-like timers? - // Based on 'n'? Only last chunk of this for loop, unless flow control - // tokens are low? For now, always: + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. err = cc.bw.Flush() } cc.wmu.Unlock() @@ -921,28 +992,33 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( } } + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + defer cc.mu.Unlock() + trls = cc.encodeTrailers(req) + } + cc.wmu.Lock() - if !sentEnd { - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls = cc.encodeTrailers(req) - cc.mu.Unlock() - } + defer cc.wmu.Unlock() - // Avoid forgetting to send an END_STREAM if the encoded - // trailers are 0 bytes. Both results produce and END_STREAM. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. 
+ if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) } if ferr := cc.bw.Flush(); ferr != nil && err == nil { err = ferr } - cc.wmu.Unlock() - return err } @@ -995,6 +1071,26 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if host == "" { host = req.URL.Host } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } // Check for any invalid headers and return an error before we // potentially pollute our hpack state. (We want to be able to @@ -1018,8 +1114,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail cc.writeHeader(":authority", host) cc.writeHeader(":method", req.Method) if req.Method != "CONNECT" { - cc.writeHeader(":path", req.URL.RequestURI()) - cc.writeHeader(":scheme", "https") + cc.writeHeader(":path", path) + cc.writeHeader(":scheme", req.URL.Scheme) } if trailers != "" { cc.writeHeader("trailer", trailers) @@ -1146,6 +1242,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { if andRemove && cs != nil && !cc.closed { cc.lastActive = time.Now() delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } close(cs.done) cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } @@ -1188,27 +1287,37 @@ func (e GoAwayError) Error() string { e.LastStreamID, e.ErrCode, e.DebugData) } +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + func (rl *clientConnReadLoop) cleanup() { cc := rl.cc defer cc.tconn.Close() defer cc.t.connPool().MarkDead(cc) defer close(cc.readerDone) + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + // Close any response bodies if the server closes prematurely. // TODO: also do this if we've written the headers but not // gotten a response yet. 
err := cc.readerErr cc.mu.Lock() - if err == io.EOF { - if cc.goAway != nil { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else { - err = io.ErrUnexpectedEOF + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF } for _, cs := range rl.activeRes { cs.bufPipe.CloseWithError(err) @@ -1228,15 +1337,20 @@ func (rl *clientConnReadLoop) cleanup() { func (rl *clientConnReadLoop) run() error { cc := rl.cc rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a reply + gotReply := false // ever saw a HEADERS reply + gotSettings := false for { f, err := cc.fr.ReadFrame() if err != nil { - cc.vlogf("Transport readFrame error: (%T) %v", err, err) + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { - rl.endStreamError(cs, cc.fr.errDetail) + cs.cc.writeStreamReset(cs.ID, se.Code, err) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) } continue } else if err != nil { @@ -1245,6 +1359,13 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport received %s", summarizeFrame(f)) } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { @@ -1273,6 +1394,9 @@ func (rl *clientConnReadLoop) run() error { cc.logf("Transport: unhandled response frame type %T", f) } if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } return err } if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { @@ -1404,8 +1528,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return res, nil } - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} cs.bytesRemain = res.ContentLength res.Body = transportResponseBody{cs} go cs.awaitRequestCancel(cs.req) @@ -1522,10 +1645,28 @@ var errClosedResponseBody = errors.New("http2: response body closed") func (b transportResponseBody) Close() error { cs := b.cs - if cs.bufPipe.Err() != io.EOF { - // TODO: write test for this - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. 
+ if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() } + cs.bufPipe.BreakWithError(errClosedResponseBody) return nil } @@ -1533,6 +1674,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() if cs == nil { cc.mu.Lock() neverSent := cc.nextStreamID @@ -1546,28 +1688,67 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { // TODO: be stricter here? only silently ignore things which // we canceled, but not things which were closed normally // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } return nil } - if data := f.Data(); len(data) > 0 { - if cs.bufPipe.b == nil { - // Data frame after it's already closed? - cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil } - // Check connection-level flow control. cc.mu.Lock() - if cs.inflow.available() >= int32(len(data)) { - cs.inflow.take(int32(len(data))) + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) } else { cc.mu.Unlock() return ConnectionError(ErrCodeFlowControl) } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. 
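// Illustrative usage note for the transportResponseBody.Close change above:
// closing a response body early now resets the stream and refunds the unread
// bytes to the connection-level window, so abandoning a large response no
// longer starves other streams on the same connection. Hypothetical helper,
// assuming an HTTP/2-capable client.
package sketch

import "net/http"

func statusOnly(c *http.Client, url string) (int, error) {
	resp, err := c.Get(url)
	if err != nil {
		return 0, err
	}
	// Close without reading: with this patch the unread portion is handed
	// back to the connection flow-control window via WINDOW_UPDATE.
	resp.Body.Close()
	return resp.StatusCode, nil
}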
+ didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } cc.mu.Unlock() - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } } } @@ -1593,9 +1774,14 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { } cs.bufPipe.closeWithErrorAndCode(err, code) delete(rl.activeRes, cs.ID) - if cs.req.Close || cs.req.Header.Get("Connection") == "close" { + if isConnectionCloseRequest(cs.req) { rl.closeWhenIdle = true } + + select { + case cs.resc <- resAndError{err: err}: + default: + } } func (cs *clientStream) copyTrailers() { @@ -1623,18 +1809,39 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() - return f.ForeachSetting(func(s Setting) error { + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val case SettingInitialWindowSize: - // TODO: error if this is too large. + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } - // TODO: adjust flow control of still-open + // Adjust flow control of currently-open // frames by the difference of the old initial // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + cc.initialWindowSize = s.Val default: // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. @@ -1642,6 +1849,16 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { } return nil }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr } func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { @@ -1678,7 +1895,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { // which closes this, so there // isn't a race. default: - err := StreamError{cs.ID, f.ErrCode} + err := streamError(cs.ID, f.ErrCode) cs.resetErr = err close(cs.peerReset) cs.bufPipe.CloseWithError(err) @@ -1688,10 +1905,56 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { return nil } +// Ping sends a PING frame to the server and waits for the ack. 
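// Illustrative sketch of the refund arithmetic in processData above: padding
// is always returned to the connection window immediately, and if the stream
// was already reset locally the data bytes are returned too (to the
// connection only, since the stream will never read them).
package sketch

func flowRefund(frameLength, dataLen int32, didReset bool) (conn, stream int32) {
	var refund int32
	if pad := frameLength - dataLen; pad > 0 {
		refund += pad // padding is never surfaced to the body reader
	}
	if didReset {
		refund += dataLen // data will never be read either
	}
	conn = refund
	if !didReset {
		stream = refund // keep the per-stream window in step as well
	}
	return conn, stream
}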
+// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) ping(ctx contextContext) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } return nil } cc := rl.cc @@ -1715,8 +1978,10 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { } func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: do something with err? send it as a debug frame to the peer? - // But that's only in GOAWAY. Invent a new frame type? Is there one already? + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) cc.bw.Flush() @@ -1811,6 +2076,9 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body resc := make(chan error, 1) s.resc = resc s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() resc <- cs.writeRequestBody(body, cs.req.Body) } s.delay = t.expectContinueTimeout() @@ -1866,3 +2134,9 @@ func (s bodyWriterState) scheduleBodyWrite() { s.timer.Reset(s.delay) } } + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/write.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/write.go index 27ef0dd4..6b0dfae3 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/write.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/write.go @@ -9,6 +9,7 @@ import ( "fmt" "log" "net/http" + "net/url" "time" "golang.org/x/net/http2/hpack" @@ -18,6 +19,11 @@ import ( // writeFramer is implemented by any type that is used to write frames. type writeFramer interface { writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool } // writeContext is the interface needed by the various frame writer @@ -39,9 +45,10 @@ type writeContext interface { HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) } -// endsStream reports whether the given frame writer w will locally -// close the stream. 
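// Illustrative sketch of the ping bookkeeping introduced above: register a
// channel keyed by a random 8-byte payload, send the PING, then wait for the
// ack or the context. sendPing is an invented stand-in for writing the frame.
package sketch

import (
	"context"
	"crypto/rand"
	"sync"
)

type pingTracker struct {
	mu       sync.Mutex
	pending  map[[8]byte]chan struct{}
	sendPing func(payload [8]byte) error // e.g. write a PING frame and flush
}

func (p *pingTracker) ping(ctx context.Context) error {
	var payload [8]byte
	if _, err := rand.Read(payload[:]); err != nil {
		return err
	}
	ack := make(chan struct{})
	p.mu.Lock()
	if p.pending == nil {
		p.pending = make(map[[8]byte]chan struct{})
	}
	p.pending[payload] = ack
	p.mu.Unlock()

	if err := p.sendPing(payload); err != nil {
		return err
	}
	select {
	case <-ack:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// handleAck is what a read loop would call for a PING frame with the ACK
// flag set, mirroring processPing above.
func (p *pingTracker) handleAck(payload [8]byte) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.pending[payload]; ok {
		close(c)
		delete(p.pending, payload)
	}
}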
-func endsStream(w writeFramer) bool { +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { switch v := w.(type) { case *writeData: return v.endStream @@ -51,7 +58,7 @@ func endsStream(w writeFramer) bool { // This can only happen if the caller reuses w after it's // been intentionally nil'ed out to prevent use. Keep this // here to catch future refactoring breaking it. - panic("endsStream called on nil writeFramer") + panic("writeEndsStream called on nil writeFramer") } return false } @@ -62,8 +69,16 @@ func (flushFrameWriter) writeFrame(ctx writeContext) error { return ctx.Flush() } +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + type writeSettings []Setting +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + func (s writeSettings) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettings([]Setting(s)...) } @@ -83,6 +98,8 @@ func (p *writeGoAway) writeFrame(ctx writeContext) error { return err } +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + type writeData struct { streamID uint32 p []byte @@ -97,6 +114,10 @@ func (w *writeData) writeFrame(ctx writeContext) error { return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) } +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + // handlerPanicRST is the message sent from handler goroutines when // the handler panics. type handlerPanicRST struct { @@ -107,22 +128,57 @@ func (hp handlerPanicRST) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) } +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + func (se StreamError) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) } +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { return ctx.Framer().WritePing(true, w.pf.Data) } +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + type writeSettingsAck struct{} func (writeSettingsAck) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettingsAck() } +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
+ const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames // for HTTP response headers or trailers from a server handler. type writeResHeaders struct { @@ -144,6 +200,17 @@ func encKV(enc *hpack.Encoder, k, v string) { enc.WriteField(hpack.HeaderField{Name: k, Value: v}) } +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // uppper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + func (w *writeResHeaders) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() @@ -169,39 +236,69 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error { panic("unexpected empty hpack") } - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. - const maxFrameSize = 16384 + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - endHeaders := len(headerBlock) == 0 - var err error - if first { - first = false - err = ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: endHeaders, - }) - } else { - err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) - } - if err != nil { - return err - } +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
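// Illustrative sketch of the fragment-splitting pattern factored out into
// splitHeaderBlock above: walk a header block in fixed-size pieces and tell
// the callback which piece is first and which is last, so it can emit a
// HEADERS frame followed by CONTINUATION frames.
package main

import "fmt"

func splitBlock(block []byte, maxFrag int, fn func(frag []byte, first, last bool) error) error {
	first := true
	for len(block) > 0 {
		frag := block
		if len(frag) > maxFrag {
			frag = frag[:maxFrag]
		}
		block = block[len(frag):]
		if err := fn(frag, first, len(block) == 0); err != nil {
			return err
		}
		first = false
	}
	return nil
}

func main() {
	block := make([]byte, 40000)
	_ = splitBlock(block, 16384, func(frag []byte, first, last bool) error {
		fmt.Println(len(frag), first, last) // 16384 true false / 16384 false false / 7232 false true
		return nil
	})
}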
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) } - return nil } type write100ContinueHeadersFrame struct { @@ -220,15 +317,24 @@ func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { }) } +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + type writeWindowUpdate struct { streamID uint32 // or 0 for conn-level n uint32 } +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) } +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only only if k is in keys. func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { if keys == nil { sorter := sorterPool.Get().(*sorter) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched.go index c24316ce..4fe30730 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched.go @@ -6,14 +6,53 @@ package http2 import "fmt" -// frameWriteMsg is a request to write a frame. -type frameWriteMsg struct { +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. + // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. 
The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { // write is the interface value that does the writing, once the - // writeScheduler (below) has decided to select this frame - // to write. The write functions are all defined in write.go. + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. write writeFramer - stream *stream // used for prioritization. nil for non-stream frames. + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream // done, if non-nil, must be a buffered channel with space for // 1 message and is sent the return value from write (or an @@ -21,263 +60,183 @@ type frameWriteMsg struct { done chan error } -// for debugging only: -func (wm frameWriteMsg) String() string { - var streamID uint32 - if wm.stream != nil { - streamID = wm.stream.id - } - var des string - if s, ok := wm.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wm.write) - } - return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) -} - -// writeScheduler tracks pending frames to write, priorities, and decides -// the next one to use. It is not thread-safe. -type writeScheduler struct { - // zero are frames not associated with a specific stream. - // They're sent before any stream-specific freams. - zero writeQueue - - // maxFrameSize is the maximum size of a DATA frame - // we'll write. Must be non-zero and between 16K-16M. - maxFrameSize uint32 - - // sq contains the stream-specific queues, keyed by stream ID. - // when a stream is idle, it's deleted from the map. - sq map[uint32]*writeQueue - - // canSend is a slice of memory that's reused between frame - // scheduling decisions to hold the list of writeQueues (from sq) - // which have enough flow control data to send. After canSend is - // built, the best is selected. - canSend []*writeQueue - - // pool of empty queues for reuse. 
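// Illustrative sketch: a deliberately naive FIFO implementation of the
// exported WriteScheduler interface above, only to show the shape of the
// contract. It ignores priorities, does not discard queued frames on
// CloseStream, and does not respect flow control via
// FrameWriteRequest.Consume, so it is illustration only, not something to
// plug into a real server.
package sketch

import "golang.org/x/net/http2"

type fifoScheduler struct {
	q []http2.FrameWriteRequest
}

func newFIFOScheduler() http2.WriteScheduler { return &fifoScheduler{} }

func (s *fifoScheduler) OpenStream(streamID uint32, _ http2.OpenStreamOptions) {}
func (s *fifoScheduler) CloseStream(streamID uint32)                           {}
func (s *fifoScheduler) AdjustStream(streamID uint32, _ http2.PriorityParam)   {}

func (s *fifoScheduler) Push(wr http2.FrameWriteRequest) { s.q = append(s.q, wr) }

func (s *fifoScheduler) Pop() (http2.FrameWriteRequest, bool) {
	if len(s.q) == 0 {
		return http2.FrameWriteRequest{}, false
	}
	wr := s.q[0]
	s.q = s.q[1:]
	return wr, true
}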
- queuePool []*writeQueue -} - -func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { - if len(q.s) != 0 { - panic("queue must be empty") - } - ws.queuePool = append(ws.queuePool, q) -} - -func (ws *writeScheduler) getEmptyQueue() *writeQueue { - ln := len(ws.queuePool) - if ln == 0 { - return new(writeQueue) - } - q := ws.queuePool[ln-1] - ws.queuePool = ws.queuePool[:ln-1] - return q -} - -func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } - -func (ws *writeScheduler) add(wm frameWriteMsg) { - st := wm.stream - if st == nil { - ws.zero.push(wm) - } else { - ws.streamQueue(st.id).push(wm) - } -} - -func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { - if q, ok := ws.sq[streamID]; ok { - return q - } - if ws.sq == nil { - ws.sq = make(map[uint32]*writeQueue) - } - q := ws.getEmptyQueue() - ws.sq[streamID] = q - return q -} - -// take returns the most important frame to write and removes it from the scheduler. -// It is illegal to call this if the scheduler is empty or if there are no connection-level -// flow control bytes available. -func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { - if ws.maxFrameSize == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - - // If there any frames not associated with streams, prefer those first. - // These are usually SETTINGS, etc. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - if len(ws.sq) == 0 { - return - } - - // Next, prioritize frames on streams that aren't DATA frames (no cost). - for id, q := range ws.sq { - if q.firstIsNoCost() { - return ws.takeFrom(id, q) +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID } - } - - // Now, all that remains are DATA frames with non-zero bytes to - // send. So pick the best one. - if len(ws.canSend) != 0 { - panic("should be empty") - } - for _, q := range ws.sq { - if n := ws.streamWritableBytes(q); n > 0 { - ws.canSend = append(ws.canSend, q) - } - } - if len(ws.canSend) == 0 { - return - } - defer ws.zeroCanSend() - - // TODO: find the best queue - q := ws.canSend[0] - - return ws.takeFrom(q.streamID(), q) -} - -// zeroCanSend is defered from take. -func (ws *writeScheduler) zeroCanSend() { - for i := range ws.canSend { - ws.canSend[i] = nil - } - ws.canSend = ws.canSend[:0] -} - -// streamWritableBytes returns the number of DATA bytes we could write -// from the given queue's stream, if this stream/queue were -// selected. It is an error to call this if q's head isn't a -// *writeData. 
-func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { - wm := q.head() - ret := wm.stream.flow.available() // max we can write - if ret == 0 { return 0 } - if int32(ws.maxFrameSize) < ret { - ret = int32(ws.maxFrameSize) - } - if ret == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - wd := wm.write.(*writeData) - if len(wd.p) < int(ret) { - ret = int32(len(wd.p)) - } - return ret -} - -func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { - wm = q.head() - // If the first item in this queue costs flow control tokens - // and we don't have enough, write as much as we can. - if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { - allowed := wm.stream.flow.available() // max we can write - if allowed == 0 { - // No quota available. Caller can try the next stream. - return frameWriteMsg{}, false + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. +func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, } - if int32(ws.maxFrameSize) < allowed { - allowed = int32(ws.maxFrameSize) - } - // TODO: further restrict the allowed size, because even if - // the peer says it's okay to write 16MB data frames, we might - // want to write smaller ones to properly weight competing - // streams' priorities. - - if len(wd.p) > int(allowed) { - wm.stream.flow.take(allowed) - chunk := wd.p[:allowed] - wd.p = wd.p[allowed:] - // Make up a new write message of a valid size, rather - // than shifting one off the queue. 
- return frameWriteMsg{ - stream: wm.stream, - write: &writeData{ - streamID: wd.streamID, - p: chunk, - // even if the original had endStream set, there - // arebytes remaining because len(wd.p) > allowed, - // so we know endStream is false: - endStream: false, - }, - // our caller is blocking on the final DATA frame, not - // these intermediates, so no need to wait: - done: nil, - }, true + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, } - wm.stream.flow.take(int32(len(wd.p))) + return consumed, rest, 2 } - q.shift() - if q.empty() { - ws.putEmptyQueue(q) - delete(ws.sq, id) + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) } - return wm, true + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) } -func (ws *writeScheduler) forgetStream(id uint32) { - q, ok := ws.sq[id] - if !ok { +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { return } - delete(ws.sq, id) - - // But keep it for others later. - for i := range q.s { - q.s[i] = frameWriteMsg{} + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) } - q.s = q.s[:0] - ws.putEmptyQueue(q) + wr.write = nil // prevent use (assume it's tainted after wr.done send) } +// writeQueue is used by implementations of WriteScheduler. type writeQueue struct { - s []frameWriteMsg + s []FrameWriteRequest } -// streamID returns the stream ID for a non-empty stream-specific queue. -func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } - func (q *writeQueue) empty() bool { return len(q.s) == 0 } -func (q *writeQueue) push(wm frameWriteMsg) { - q.s = append(q.s, wm) +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) } -// head returns the next item that would be removed by shift. -func (q *writeQueue) head() frameWriteMsg { +func (q *writeQueue) shift() FrameWriteRequest { if len(q.s) == 0 { panic("invalid use of queue") } - return q.s[0] + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr } -func (q *writeQueue) shift() frameWriteMsg { +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { if len(q.s) == 0 { - panic("invalid use of queue") + return FrameWriteRequest{}, false } - wm := q.s[0] - // TODO: less copy-happy queue. 
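// Illustrative sketch of the splitting rule inside FrameWriteRequest.Consume
// above: the bytes that may go out now are capped by the caller's limit, the
// stream's flow-control budget, and the peer's maximum frame size; the
// remainder becomes the "rest" request that stays queued.
package main

import "fmt"

func splitData(p []byte, limit, streamFlow, maxFrameSize int32) (now, rest []byte) {
	allowed := limit
	if streamFlow < allowed {
		allowed = streamFlow
	}
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}
	if allowed <= 0 {
		return nil, p // nothing can be sent yet
	}
	if int32(len(p)) <= allowed {
		return p, nil // the whole frame fits
	}
	return p[:allowed], p[allowed:]
}

func main() {
	p := make([]byte, 50000)
	now, rest := splitData(p, 1<<30, 65535, 16384)
	fmt.Println(len(now), len(rest)) // 16384 33616: capped by maxFrameSize
}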
- copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = frameWriteMsg{} - q.s = q.s[:len(q.s)-1] - return wm + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) } -func (q *writeQueue) firstIsNoCost() bool { - if df, ok := q.s[0].write.(*writeData); ok { - return len(df.p) == 0 +// get returns an empty writeQueue. +func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) } - return true + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 00000000..848fed6e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. 
With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. +func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). 
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. + w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. 
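// Illustrative sketch of the sibling ordering used by
// sortPriorityNodeSiblings above: among ready children, prefer the subtree
// that has sent the fewest bytes relative to its weight, falling back to raw
// weight when nothing has been sent yet.
package main

import "fmt"

type sibling struct {
	weight       uint8 // actual weight is weight+1, per RFC 7540
	subtreeBytes int64
}

func goesFirst(a, b sibling) bool {
	wa, ba := float64(a.weight)+1, float64(a.subtreeBytes)
	wb, bb := float64(b.weight)+1, float64(b.subtreeBytes)
	if ba == 0 && bb == 0 {
		return wa >= wb // nothing sent yet: heavier weight first
	}
	if bb == 0 {
		return false // b has sent nothing, so b should go first
	}
	return ba/bb <= wa/wb // compare bytes sent in proportion to weight
}

func main() {
	a := sibling{weight: 255, subtreeBytes: 4096} // weight 256
	b := sibling{weight: 15, subtreeBytes: 1024}  // weight 16
	// a has sent 4x the bytes of b but carries 16x the weight, so a still wins.
	fmt.Println(goesFirst(a, b)) // true
}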
In both cases, + // streams are assigned a default weight of 16." + parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 00000000..36d7919f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. 
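	// Frames pushed with StreamID() == 0 (e.g. SETTINGS, PING) were queued in ws.zero by Push.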
+ if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. + for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/echo.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/echo.go index dd551811..e6f15efd 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/echo.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/echo.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/helper.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/helper.go deleted file mode 100644 index 6c4e633b..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/helper.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import ( - "encoding/binary" - "unsafe" -) - -var ( - // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. - freebsdVersion uint32 - - nativeEndian binary.ByteOrder -) - -func init() { - i := uint32(1) - b := (*[4]byte)(unsafe.Pointer(&i)) - if b[0] == 1 { - nativeEndian = binary.LittleEndian - } else { - nativeEndian = binary.BigEndian - } -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv4.go index 729ddc97..ffc66ed4 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv4.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv4.go @@ -9,9 +9,14 @@ import ( "net" "runtime" + "golang.org/x/net/internal/socket" "golang.org/x/net/ipv4" ) +// freebsdVersion is set in sys_freebsd.go. +// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. +var freebsdVersion uint32 + // ParseIPv4Header parses b as an IPv4 header of ICMP error message // invoking packet, which is contained in ICMP error message. func ParseIPv4Header(b []byte) (*ipv4.Header, error) { @@ -36,12 +41,12 @@ func ParseIPv4Header(b []byte) (*ipv4.Header, error) { } switch runtime.GOOS { case "darwin": - h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) case "freebsd": if freebsdVersion >= 1000000 { h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) } else { - h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) } default: h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv6.go index 58eaa77d..2e8cfeb1 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv6.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/ipv6.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
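Stepping back to the two write schedulers added above (writesched_priority.go and writesched_random.go): the sketch below shows how they are typically exercised and wired into a server. It is a minimal illustration only; it assumes the exported constructors, OpenStreamOptions and PriorityParam from this diff, plus the Server.NewWriteScheduler hook and ConfigureServer helper from golang.org/x/net/http2 at this revision (not shown in these hunks). The listen address, handler and certificate paths are placeholders.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Exercise the priority tree API directly.
	ws := http2.NewPriorityWriteScheduler(nil) // nil selects the default PriorityWriteSchedulerConfig
	ws.OpenStream(1, http2.OpenStreamOptions{})
	ws.OpenStream(3, http2.OpenStreamOptions{})
	// Make stream 3 the exclusive dependent of stream 1 (RFC 7540, Section 5.3.3).
	ws.AdjustStream(3, http2.PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true})
	ws.CloseStream(3)
	fmt.Println("priority tree exercised")

	// Wire a scheduler into an HTTP/2 server. Assumption: the
	// Server.NewWriteScheduler hook exists at this revision of x/net/http2;
	// when set, it overrides the library's default scheduler choice.
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewRandomWriteScheduler() // ignore client-signalled priorities
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}

Returning NewRandomWriteScheduler() from the hook opts out of RFC 7540 prioritization entirely, at the cost of arbitrary ordering between ready streams.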
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/listen_posix.go index b9f26079..7fac4f96 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/listen_posix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/listen_posix.go @@ -65,22 +65,24 @@ func ListenPacket(network, address string) (*PacketConn, error) { if err != nil { return nil, os.NewSyscallError("socket", err) } - defer syscall.Close(s) if runtime.GOOS == "darwin" && family == syscall.AF_INET { if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + syscall.Close(s) return nil, os.NewSyscallError("setsockopt", err) } } sa, err := sockaddr(family, address) if err != nil { + syscall.Close(s) return nil, err } if err := syscall.Bind(s, sa); err != nil { + syscall.Close(s) return nil, os.NewSyscallError("bind", err) } f := os.NewFile(uintptr(s), "datagram-oriented icmp") - defer f.Close() c, cerr = net.FilePacketConn(f) + f.Close() default: c, cerr = net.ListenPacket(network, address) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/message.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/message.go index 42d6df2c..81140b0d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/message.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/message.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -24,6 +24,8 @@ import ( "golang.org/x/net/ipv6" ) +// BUG(mikio): This package is not implemented on NaCl and Plan 9. + var ( errMessageTooShort = errors.New("message too short") errHeaderTooShort = errors.New("header too short") diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/messagebody.go index 2121a17b..2463730a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/messagebody.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/icmp/messagebody.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/idna.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 00000000..346fe442 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,732 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. 
+// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). 
See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". 
If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see http://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + var isBidi bool + if p.mapping != nil { + s, isBidi, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // TODO: allow for a quick check of the tables data. + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. 
+ if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil +} + +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. + if !norm.NFC.IsNormalString(s) { + return s, false, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. 
+ var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. 
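	// Walk the string rune by rune via the trie; every rune of a decoded
	// A-label must fall in the valid or deviation category, otherwise the
	// label is rejected with error code V6.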
+ for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/punycode.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 00000000..02c7d59a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. 
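The exported entry points to the code above are ToASCII/ToUnicode and the Lookup, Display and Registration profiles defined in idna.go; the Punycode internals below are only reached through them. The following minimal usage sketch is illustrative: the expected outputs are taken from the ToASCII/ToUnicode doc comments above, while the custom-profile example and the mixed-case hostname are assumptions chosen for demonstration.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Package-level helpers wrap the permissive Punycode profile.
	a, err := idna.ToASCII("bücher.example.com")
	fmt.Println(a, err) // expected: xn--bcher-kva.example.com <nil> (per the ToASCII doc comment)

	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	fmt.Println(u, err) // expected: bücher.example.com <nil>

	// Lookup applies the UTS #46 mapping and the validation of Section 5 of
	// RFC 5891, as configured in idna.go above.
	fmt.Println(idna.Lookup.ToASCII("Bücher.example.com"))

	// Profiles can also be assembled from options; later options override
	// earlier ones.
	p := idna.New(idna.MapForLookup(), idna.VerifyDNSLength(true), idna.BidiRule())
	fmt.Println(p.ToASCII("bücher.example.com"))
}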
+ +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
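// For illustration, and consistent with the ToASCII example in idna.go above:
// encode("xn--", "bücher") would produce "xn--bcher-kva", while
// decode("bcher-kva") would recover "bücher".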
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/tables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 00000000..f910b269 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4557 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/trie.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 00000000..c4ef847e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. 
A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/trieval.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 00000000..7a8cf889 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. 
+ + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/iana/const.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/iana/const.go index 3438a27c..c9df24d9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/iana/const.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/iana/const.go @@ -4,7 +4,7 @@ // Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). package iana // import "golang.org/x/net/internal/iana" -// Differentiated Services Field Codepoints (DSCP), Updated: 2013-06-25 +// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 const ( DiffServCS0 = 0x0 // CS0 DiffServCS1 = 0x20 // CS1 @@ -26,7 +26,7 @@ const ( DiffServAF41 = 0x88 // AF41 DiffServAF42 = 0x90 // AF42 DiffServAF43 = 0x98 // AF43 - DiffServEFPHB = 0xb8 // EF PHB + DiffServEF = 0xb8 // EF DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT ) @@ -38,7 +38,7 @@ const ( CongestionExperienced = 0x3 // CE (Congestion Experienced) ) -// Protocol Numbers, Updated: 2015-10-06 +// Protocol Numbers, Updated: 2016-06-22 const ( ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/iana/gen.go deleted file mode 100644 index 2d8c07ca..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/iana/gen.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates internet protocol constants and tables by -// reading IANA protocol registries. 
-package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "strconv" - "strings" -) - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "http://www.iana.org/assignments/dscp-registry/dscp-registry.xml", - parseDSCPRegistry, - }, - { - "http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", - parseTOSTCByte, - }, - { - "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", - parseProtocolNumbers, - }, -} - -func main() { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") - fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") - fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) - os.Exit(1) - } - if err := r.parse(&bb, resp.Body); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile("const.go", b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func parseDSCPRegistry(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var dr dscpRegistry - if err := dec.Decode(&dr); err != nil { - return err - } - drs := dr.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) - fmt.Fprintf(w, "const (\n") - for _, dr := range drs { - fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) - fmt.Fprintf(w, "// %s\n", dr.OrigName) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type dscpRegistry struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Note string `xml:"note"` - RegTitle string `xml:"registry>title"` - PoolRecords []struct { - Name string `xml:"name"` - Space string `xml:"space"` - } `xml:"registry>record"` - Records []struct { - Name string `xml:"name"` - Space string `xml:"space"` - } `xml:"registry>registry>record"` -} - -type canonDSCPRecord struct { - OrigName string - Name string - Value int -} - -func (drr *dscpRegistry) escape() []canonDSCPRecord { - drs := make([]canonDSCPRecord, len(drr.Records)) - sr := strings.NewReplacer( - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, dr := range drr.Records { - s := strings.TrimSpace(dr.Name) - drs[i].OrigName = s - drs[i].Name = sr.Replace(s) - n, err := strconv.ParseUint(dr.Space, 2, 8) - if err != nil { - continue - } - drs[i].Value = int(n) << 2 - } - return drs -} - -func parseTOSTCByte(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var ttb tosTCByte - if err := dec.Decode(&ttb); err != nil { - return err - } - trs := ttb.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) - fmt.Fprintf(w, "const (\n") - for _, tr := range trs { - fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) - fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type tosTCByte struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Note string `xml:"note"` 
- RegTitle string `xml:"registry>title"` - Records []struct { - Binary string `xml:"binary"` - Keyword string `xml:"keyword"` - } `xml:"registry>record"` -} - -type canonTOSTCByteRecord struct { - OrigKeyword string - Keyword string - Value int -} - -func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { - trs := make([]canonTOSTCByteRecord, len(ttb.Records)) - sr := strings.NewReplacer( - "Capable", "", - "(", "", - ")", "", - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, tr := range ttb.Records { - s := strings.TrimSpace(tr.Keyword) - trs[i].OrigKeyword = s - ss := strings.Split(s, " ") - if len(ss) > 1 { - trs[i].Keyword = strings.Join(ss[1:], " ") - } else { - trs[i].Keyword = ss[0] - } - trs[i].Keyword = sr.Replace(trs[i].Keyword) - n, err := strconv.ParseUint(tr.Binary, 2, 8) - if err != nil { - continue - } - trs[i].Value = int(n) - } - return trs -} - -func parseProtocolNumbers(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var pn protocolNumbers - if err := dec.Decode(&pn); err != nil { - return err - } - prs := pn.escape() - prs = append([]canonProtocolRecord{{ - Name: "IP", - Descr: "IPv4 encapsulation, pseudo protocol number", - Value: 0, - }}, prs...) - fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) - s := pr.Descr - if s == "" { - s = pr.OrigName - } - fmt.Fprintf(w, "// %s\n", s) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type protocolNumbers struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - RegTitle string `xml:"registry>title"` - Note string `xml:"registry>note"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - Descr string `xml:"description"` - } `xml:"registry>record"` -} - -type canonProtocolRecord struct { - OrigName string - Name string - Descr string - Value int -} - -func (pn *protocolNumbers) escape() []canonProtocolRecord { - prs := make([]canonProtocolRecord, len(pn.Records)) - sr := strings.NewReplacer( - "-in-", "in", - "-within-", "within", - "-over-", "over", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range pn.Records { - if strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "deprecated") { - continue - } - prs[i].OrigName = pr.Name - s := strings.TrimSpace(pr.Name) - switch pr.Name { - case "ISIS over IPv4": - prs[i].Name = "ISIS" - case "manet": - prs[i].Name = "MANET" - default: - prs[i].Name = sr.Replace(s) - } - ss := strings.Split(pr.Descr, "\n") - for i := range ss { - ss[i] = strings.TrimSpace(ss[i]) - } - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 00000000..1eb07d26 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 00000000..d1d0c2de --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 00000000..bac66811 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 00000000..63f0534f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 00000000..7dedd430 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 00000000..a4e71226 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 00000000..93dff918 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 00000000..6a6379a8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 00000000..05d6082d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 00000000..afb34ad5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 00000000..8d17a40c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 00000000..c87d2a93 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 00000000..2e80a9cb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 00000000..3c42ea7a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 00000000..5567afc8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 00000000..b8c87b72 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 00000000..5a38798c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 00000000..a7a5987c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 00000000..610fc4f3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 00000000..71a69e25 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 00000000..6465b207 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 00000000..64e81733 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 00000000..d6871d55 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. 
+func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 00000000..499164a3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 00000000..b21d2e64 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 00000000..f78832aa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !linux + +package socket + +import "errors" + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 00000000..96733cbe --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_stub.go new file mode 100644 index 00000000..d2add1a0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/rawconn_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/reflect.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/reflect.go new file mode 100644 index 00000000..bb179f11 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/reflect.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "reflect" + "runtime" +) + +// A Conn represents a raw connection. +type Conn struct { + c net.Conn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + return &Conn{c: c}, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + s, err := socketOf(c.c) + if err != nil { + return 0, err + } + n, err := getsockopt(s, o.Level, o.Name, b) + return n, os.NewSyscallError("getsockopt", err) +} + +func (o *Option) set(c *Conn, b []byte) error { + s, err := socketOf(c.c) + if err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) +} + +func socketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + v := reflect.ValueOf(c) + switch e := v.Elem(); e.Kind() { + case reflect.Struct: + fd := e.FieldByName("conn").FieldByName("fd") + switch e := fd.Elem(); e.Kind() { + case reflect.Struct: + sysfd := e.FieldByName("sysfd") + if runtime.GOOS == "windows" { + return uintptr(sysfd.Uint()), nil + } + return uintptr(sysfd.Int()), nil + } + } + } + return 0, errors.New("invalid type") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 00000000..5f9730e6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "unsafe" +) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. 
+func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. +func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. 
+func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. +func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. 
+// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. +func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 00000000..4f0eead1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. + NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 00000000..f13e14ff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package socket + +import "errors" + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 00000000..f723fa36 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd openbsd + +package socket + +import "unsafe" + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 00000000..b17d223b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
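The roundup helper in sys.go and the control-message length helpers in socket.go implement the same arithmetic as the C CMSG_* macros: each length is padded up to the kernel's alignment, which probeProtocolStack reports (typically the platform pointer size). Below is a standalone sketch of that arithmetic, not part of the vendored package, assuming the linux/amd64 values kernelAlign = 8 and sizeofCmsghdr = 16 (0x10 in the generated zsys_linux_amd64.go further down):

package main

import "fmt"

const (
	kernelAlign   = 8  // pointer size on linux/amd64
	sizeofCmsghdr = 16 // sizeofCmsghdr = 0x10 in zsys_linux_amd64.go
)

// roundup mirrors the helper in sys.go: pad l up to a multiple of kernelAlign.
func roundup(l int) int {
	return (l + kernelAlign - 1) &^ (kernelAlign - 1)
}

// controlMessageSpace mirrors ControlMessageSpace in socket.go: an aligned
// header followed by the data, itself rounded up to the alignment boundary.
func controlMessageSpace(dataLen int) int {
	return roundup(sizeofCmsghdr) + roundup(dataLen)
}

func main() {
	fmt.Println(roundup(13))            // 16: 13 padded to the next multiple of 8
	fmt.Println(controlMessageSpace(4)) // 24: 16-byte header + 4 bytes of data padded to 8
}

With these values a 4-byte payload, for example an integer socket option carried as ancillary data, occupies 24 bytes of control-message space, which matches what CMSG_SPACE(4) reports on linux/amd64.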
+ +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 00000000..b17d223b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 00000000..1559521e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 00000000..235b2cc0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
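Like the other syscall wrappers in this package, recvmmsg and sendmmsg above pass the raw syscall.Errno through errnoErr, which lives in the package's error_*.go files and is not part of this diff. A minimal sketch of that helper, assuming the usual pattern of returning nil for a zero errno and reusing pre-allocated errors for the most common failure codes:

package socketsketch

import "syscall"

// Pre-allocated errors for errnos that occur frequently, so the hot path
// does not allocate a new error value on every failed call.
var (
	errEAGAIN error = syscall.EAGAIN
	errEINVAL error = syscall.EINVAL
	errENOENT error = syscall.ENOENT
)

// errnoErr converts a raw errno into an error, returning nil when the
// system call succeeded (errno == 0).
func errnoErr(errno syscall.Errno) error {
	switch errno {
	case 0:
		return nil
	case syscall.EAGAIN:
		return errEAGAIN
	case syscall.EINVAL:
		return errEINVAL
	case syscall.ENOENT:
		return errENOENT
	}
	return errno
}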
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/thunk_linux_386.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_386.s similarity index 59% rename from vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/thunk_linux_386.s rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_386.s index daa78bc0..93e7d75e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/thunk_linux_386.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -2,7 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.2 +#include "textflag.h" -TEXT ·socketcall(SB),4,$0-36 +TEXT ·socketcall(SB),NOSPLIT,$0-36 JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 00000000..9decee2e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 00000000..d753b436 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 00000000..b6708943 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 00000000..9c0d7401 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 00000000..071a4aba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 00000000..071a4aba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 00000000..9c0d7401 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 00000000..21c1e3f0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 00000000..21c1e3f0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 00000000..327979ef --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 00000000..06d75628 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 00000000..431851c1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 00000000..dc130c27 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,168 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An 
ipv6ZoneCache represents a cache holding partial network +// interface information. It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +func (zc *ipv6ZoneCache) update(ift []net.Interface) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if zc.lastFetched.After(now.Add(-60 * time.Second)) { + return + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } +} + +func (zc *ipv6ZoneCache) name(zone int) string { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + name, ok := zoneCache.toName[zone] + if !ok { + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + index, ok := zoneCache.toIndex[zone] + if !ok { + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 00000000..cced74e6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
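The marshalSockaddr and parseInetAddr functions in sys_posix.go above read and write the raw sockaddr layout directly: on Linux-style platforms the address family sits in native byte order at bytes 0-1, the port in network (big-endian) order at bytes 2-3, and the IPv4 address at bytes 4-7 (IPv6 addresses at 8-23, with the zone index at 24-27). A simplified, standalone sketch of the IPv4 case, using binary.LittleEndian as a stand-in for NativeEndian on a little-endian host:

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

const afInet = 0x2 // sysAF_INET on Linux

// encodeSockaddrInet lays out a *net.UDPAddr the way marshalSockaddr does
// for AF_INET on Linux: family, big-endian port, then the 4-byte address.
func encodeSockaddrInet(a *net.UDPAddr) []byte {
	b := make([]byte, 16) // sizeofSockaddrInet
	binary.LittleEndian.PutUint16(b[:2], afInet)
	binary.BigEndian.PutUint16(b[2:4], uint16(a.Port))
	copy(b[4:8], a.IP.To4())
	return b
}

func main() {
	b := encodeSockaddrInet(&net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 53})
	fmt.Printf("% x\n", b) // 02 00 00 35 c0 00 02 01 followed by zero padding
}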
+ +package socket + +import ( + "errors" + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 00000000..a18ac5ed --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 00000000..d9f06d00 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errors.New("not implemented") +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errors.New("not implemented") +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errors.New("not implemented") +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 00000000..18eba308 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 00000000..54a470eb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
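The getsockopt and setsockopt wrappers in sys_unix.go above are what Option.Get and Option.Set from socket.go ultimately call. Since the package is internal to golang.org/x/net and is normally reached through the ipv4 and ipv6 packages, the following is only a hypothetical caller-side sketch of that path, reading the 4-byte SO_RCVBUF option on a TCP connection (the dial target is a placeholder):

package main

import (
	"fmt"
	"log"
	"net"
	"syscall"

	"golang.org/x/net/internal/socket"
)

func main() {
	c, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Wrap the net.Conn so the socket package can reach its descriptor.
	cc, err := socket.NewConn(c)
	if err != nil {
		log.Fatal(err)
	}

	// SO_RCVBUF is a 4-byte integer option at the socket level; GetInt
	// routes through Option.get and then the platform getsockopt wrapper.
	opt := socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}
	n, err := opt.GetInt(cc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("receive buffer size:", n)
}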
+ +package socket + +import ( + "errors" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x17 + + sysSOCK_RAW = 0x3 +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 00000000..26f8feff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 00000000..e2987f7d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len 
uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 00000000..26f8feff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 00000000..e2987f7d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 00000000..c582abd5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen 
int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 00000000..04a24886 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 00000000..35c7cb9c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 00000000..04a24886 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 
0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 00000000..43020693 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 00000000..1502f6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 00000000..43020693 --- /dev/null +++ 
b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 00000000..1502f6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 00000000..43020693 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + 
sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 00000000..1502f6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 00000000..1502f6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 00000000..43020693 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr 
struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 00000000..1502f6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 00000000..1502f6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 00000000..1502f6c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs 
defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 00000000..db60491f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 00000000..2a1a7998 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,68 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git 
a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 00000000..206ea2d1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 00000000..1c836361 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 00000000..a6c0bf46 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + 
sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 00000000..1c836361 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 00000000..327c6329 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_RAW = 0x4 +) + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/batch.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 00000000..b4454992 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,191 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. 
+// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. 
+// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/bpfopt_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/bpfopt_linux.go deleted file mode 100644 index f2d00b4c..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/bpfopt_linux.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4 - -import ( - "os" - "unsafe" - - "golang.org/x/net/bpf" -) - -// SetBPF attaches a BPF program to the connection. -// -// Only supported on Linux. -func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { - fd, err := c.sysfd() - if err != nil { - return err - } - prog := sysSockFProg{ - Len: uint16(len(filter)), - Filter: (*sysSockFilter)(unsafe.Pointer(&filter[0])), - } - return os.NewSyscallError("setsockopt", setsockopt(fd, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog)))) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/bpfopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/bpfopt_stub.go deleted file mode 100644 index c4a8481f..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/bpfopt_stub.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !linux - -package ipv4 - -import "golang.org/x/net/bpf" - -// SetBPF attaches a BPF program to the connection. -// -// Only supported on Linux. 
-func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control.go index 8cadfd7f..a2b02ca9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,6 +8,9 @@ import ( "fmt" "net" "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) type rawOpt struct { @@ -51,6 +54,77 @@ func (cm *ControlMessage) String() string { return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) } +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + // Ancillary data socket options const ( ctlTTL = iota // header field diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_bsd.go index 33d8bc8b..77e7ad5b 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_bsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
+// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,26 +12,26 @@ import ( "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshalDst(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_RECVDSTADDR - m.SetLen(syscall.CmsgLen(net.IPv4len)) - return b[syscall.CmsgSpace(net.IPv4len):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) } func parseDst(cm *ControlMessage, b []byte) { - cm.Dst = b[:net.IPv4len] + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) } func marshalInterface(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_RECVIF - m.SetLen(syscall.CmsgLen(syscall.SizeofSockaddrDatalink)) - return b[syscall.CmsgSpace(syscall.SizeofSockaddrDatalink):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) } func parseInterface(cm *ControlMessage, b []byte) { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_pktinfo.go index 444782f3..425338f3 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_pktinfo.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -2,24 +2,23 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin linux +// +build darwin linux solaris package ipv4 import ( - "syscall" + "net" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_PKTINFO - m.SetLen(syscall.CmsgLen(sysSizeofInetPktinfo)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) if cm != nil { - pi := (*sysInetPktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) if ip := cm.Src.To4(); ip != nil { copy(pi.Spec_dst[:], ip) } @@ -27,11 +26,14 @@ func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { pi.setIfindex(cm.IfIndex) } } - return b[syscall.CmsgSpace(sysSizeofInetPktinfo):] + return m.Next(sizeofInetPktinfo) } func parsePacketInfo(cm *ControlMessage, b []byte) { - pi := (*sysInetPktinfo)(unsafe.Pointer(&b[0])) + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) cm.IfIndex = int(pi.Ifindex) - cm.Dst = pi.Addr[:] + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_stub.go index 4d850719..5a2f7d8d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_stub.go @@ -1,23 +1,13 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { - return errOpNoSupport -} - -func newControlMessage(opt *rawOpt) []byte { - return nil -} +import "golang.org/x/net/internal/socket" -func parseControlMessage(b []byte) (*ControlMessage, error) { - return nil, errOpNoSupport -} - -func marshalControlMessage(cm *ControlMessage) []byte { - return nil +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_unix.go index 3000c52e..e1ae8167 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_unix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_unix.go @@ -1,24 +1,23 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 import ( - "os" - "syscall" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { opt.Lock() defer opt.Unlock() - if cf&FlagTTL != 0 && sockOpts[ssoReceiveTTL].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveTTL], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -27,9 +26,9 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagTTL) } } - if sockOpts[ssoPacketInfo].name > 0 { + if so, ok := sockOpts[ssoPacketInfo]; ok { if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { - if err := setInt(fd, &sockOpts[ssoPacketInfo], boolint(on)); err != nil { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -39,8 +38,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { } } } else { - if cf&FlagDst != 0 && sockOpts[ssoReceiveDst].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveDst], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -49,8 +48,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagDst) } } - if cf&FlagInterface != 0 && sockOpts[ssoReceiveInterface].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveInterface], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -63,100 +62,10 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { return nil } -func newControlMessage(opt *rawOpt) (oob []byte) { - opt.RLock() - var l int - if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlTTL].length) - } - if 
ctlOpts[ctlPacketInfo].name > 0 { - if opt.isset(FlagSrc | FlagDst | FlagInterface) { - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - } else { - if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlDst].length) - } - if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlInterface].length) - } - } - if l > 0 { - oob = make([]byte, l) - b := oob - if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { - b = ctlOpts[ctlTTL].marshal(b, nil) - } - if ctlOpts[ctlPacketInfo].name > 0 { - if opt.isset(FlagSrc | FlagDst | FlagInterface) { - b = ctlOpts[ctlPacketInfo].marshal(b, nil) - } - } else { - if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { - b = ctlOpts[ctlDst].marshal(b, nil) - } - if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { - b = ctlOpts[ctlInterface].marshal(b, nil) - } - } - } - opt.RUnlock() - return -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - if len(b) == 0 { - return nil, nil - } - cmsgs, err := syscall.ParseSocketControlMessage(b) - if err != nil { - return nil, os.NewSyscallError("parse socket control message", err) - } - cm := &ControlMessage{} - for _, m := range cmsgs { - if m.Header.Level != iana.ProtocolIP { - continue - } - switch int(m.Header.Type) { - case ctlOpts[ctlTTL].name: - ctlOpts[ctlTTL].parse(cm, m.Data[:]) - case ctlOpts[ctlDst].name: - ctlOpts[ctlDst].parse(cm, m.Data[:]) - case ctlOpts[ctlInterface].name: - ctlOpts[ctlInterface].parse(cm, m.Data[:]) - case ctlOpts[ctlPacketInfo].name: - ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) - } - } - return cm, nil -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - if cm == nil { - return nil - } - var l int - pktinfo := false - if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { - pktinfo = true - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - if l > 0 { - oob = make([]byte, l) - b := oob - if pktinfo { - b = ctlOpts[ctlPacketInfo].marshal(b, cm) - } - } - return -} - func marshalTTL(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_RECVTTL - m.SetLen(syscall.CmsgLen(1)) - return b[syscall.CmsgSpace(1):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) } func parseTTL(cm *ControlMessage, b []byte) { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_windows.go index 800f6377..ce55c664 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/control_windows.go @@ -1,27 +1,16 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package ipv4 -import "syscall" +import ( + "syscall" -func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { - // TODO(mikio): implement this - return syscall.EWINDOWS -} - -func newControlMessage(opt *rawOpt) []byte { - // TODO(mikio): implement this - return nil -} + "golang.org/x/net/internal/socket" +) -func parseControlMessage(b []byte) (*ControlMessage, error) { +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this - return nil, syscall.EWINDOWS -} - -func marshalControlMessage(cm *ControlMessage) []byte { - // TODO(mikio): implement this - return nil + return syscall.EWINDOWS } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_darwin.go deleted file mode 100644 index 731d56a7..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_darwin.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_STRIPHDR = C.IP_STRIPHDR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sysSockaddrStorage C.struct_sockaddr_storage - -type sysSockaddrInet C.struct_sockaddr_in - -type sysInetPktinfo C.struct_in_pktinfo - -type sysIPMreq C.struct_ip_mreq - -type sysIPMreqn C.struct_ip_mreqn - -type sysIPMreqSource C.struct_ip_mreq_source - -type sysGroupReq C.struct_group_req - -type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_dragonfly.go 
deleted file mode 100644 index 08e3b855..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_dragonfly.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sysSizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type sysIPMreq C.struct_ip_mreq diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_freebsd.go deleted file mode 100644 index f12ca327..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_freebsd.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_SENDSRCADDR = C.IP_SENDSRCADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_ONESBCAST = C.IP_ONESBCAST - sysIP_BINDANY = C.IP_BINDANY - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_MINTTL = C.IP_MINTTL - sysIP_DONTFRAG = C.IP_DONTFRAG - sysIP_RECVTOS = C.IP_RECVTOS - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in - - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sysSockaddrStorage C.struct_sockaddr_storage - -type sysSockaddrInet C.struct_sockaddr_in - -type sysIPMreq 
C.struct_ip_mreq - -type sysIPMreqn C.struct_ip_mreqn - -type sysIPMreqSource C.struct_ip_mreq_source - -type sysGroupReq C.struct_group_req - -type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_linux.go deleted file mode 100644 index c4042eb6..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_linux.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_PKTOPTIONS = C.IP_PKTOPTIONS - sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER - sysIP_RECVERR = C.IP_RECVERR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_RECVTOS = C.IP_RECVTOS - sysIP_MTU = C.IP_MTU - sysIP_FREEBIND = C.IP_FREEBIND - sysIP_TRANSPARENT = C.IP_TRANSPARENT - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR - sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR - sysIP_MINTTL = C.IP_MINTTL - sysIP_NODEFRAG = C.IP_NODEFRAG - sysIP_UNICAST_IF = C.IP_UNICAST_IF - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_MSFILTER = C.IP_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL - - //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT - //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT - //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO - //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE - //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE - //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT - - sysICMP_FILTER = C.ICMP_FILTER - - sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE - sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL - sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP - sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 - sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS - sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING - - sysSOL_SOCKET = C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo - sysSizeofSockExtendedErr = C.sizeof_struct_sock_extended_err - - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - 
sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sysSizeofICMPFilter = C.sizeof_struct_icmp_filter -) - -type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sysSockaddrInet C.struct_sockaddr_in - -type sysInetPktinfo C.struct_in_pktinfo - -type sysSockExtendedErr C.struct_sock_extended_err - -type sysIPMreq C.struct_ip_mreq - -type sysIPMreqn C.struct_ip_mreqn - -type sysIPMreqSource C.struct_ip_mreq_source - -type sysGroupReq C.struct_group_req - -type sysGroupSourceReq C.struct_group_source_req - -type sysICMPFilter C.struct_icmp_filter - -type sysSockFProg C.struct_sock_fprog - -type sysSockFilter C.struct_sock_filter diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_netbsd.go deleted file mode 100644 index 8642354f..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_netbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sysSizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type sysIPMreq C.struct_ip_mreq diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_openbsd.go deleted file mode 100644 index 8642354f..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_openbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sysSizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type sysIPMreq C.struct_ip_mreq diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_solaris.go deleted file mode 100644 index bb74afa4..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/defs_solaris.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVSLLA = C.IP_RECVSLLA - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_NEXTHOP = C.IP_NEXTHOP - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - sysIP_DONTFRAG = C.IP_DONTFRAG - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC - sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL - sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - - sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source -) - -type sysInetPktinfo C.struct_in_pktinfo - -type sysIPMreq C.struct_ip_mreq - -type sysIPMreqSource C.struct_ip_mreq_source diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt.go similarity index 70% rename from vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt_posix.go rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt.go index 103c4f6d..54d77d5f 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt_posix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -1,14 +1,14 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv4 import ( "net" "syscall" + + "golang.org/x/net/bpf" ) // MulticastTTL returns the time-to-live field value for outgoing @@ -17,11 +17,11 @@ func (c *dgramOpt) MulticastTTL() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoMulticastTTL]) + return so.GetInt(c.Conn) } // SetMulticastTTL sets the time-to-live field value for future @@ -30,11 +30,11 @@ func (c *dgramOpt) SetMulticastTTL(ttl int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastTTL], ttl) + return so.SetInt(c.Conn, ttl) } // MulticastInterface returns the default interface for multicast @@ -43,11 +43,11 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport } - return getInterface(fd, &sockOpts[ssoMulticastInterface]) + return so.getMulticastInterface(c.Conn) } // SetMulticastInterface sets the default interface for future @@ -56,11 +56,11 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport } - return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) + return so.setMulticastInterface(c.Conn, ifi) } // MulticastLoopback reports whether transmitted multicast packets @@ -69,11 +69,11 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { return false, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return false, err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport } - on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + on, err := so.GetInt(c.Conn) if err != nil { return false, err } @@ -86,11 +86,11 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) + return so.SetInt(c.Conn, boolint(on)) } // JoinGroup joins the group address group on the interface ifi. 
@@ -106,15 +106,15 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // LeaveGroup leaves the group address group on the interface ifi @@ -124,15 +124,15 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // JoinSourceSpecificGroup joins the source-specific group comprising @@ -145,9 +145,9 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -157,7 +157,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // LeaveSourceSpecificGroup leaves the source-specific group on the @@ -166,9 +166,9 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -178,7 +178,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // ExcludeSourceSpecificGroup excludes the source-specific group from @@ -188,9 +188,9 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -200,7 +200,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // IncludeSourceSpecificGroup includes the excluded source-specific @@ -209,9 +209,9 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -221,7 +221,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // ICMPFilter 
returns an ICMP filter. @@ -230,11 +230,11 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport } - return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) + return so.getICMPFilter(c.Conn) } // SetICMPFilter deploys the ICMP filter. @@ -243,9 +243,23 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport } - return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) + return so.setBPF(c.Conn, filter) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt_stub.go deleted file mode 100644 index b74df693..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/dgramopt_stub.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv4 - -import "net" - -// MulticastTTL returns the time-to-live field value for outgoing -// multicast packets. -func (c *dgramOpt) MulticastTTL() (int, error) { - return 0, errOpNoSupport -} - -// SetMulticastTTL sets the time-to-live field value for future -// outgoing multicast packets. -func (c *dgramOpt) SetMulticastTTL(ttl int) error { - return errOpNoSupport -} - -// MulticastInterface returns the default interface for multicast -// packet transmissions. -func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { - return nil, errOpNoSupport -} - -// SetMulticastInterface sets the default interface for future -// multicast packet transmissions. -func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { - return errOpNoSupport -} - -// MulticastLoopback reports whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) MulticastLoopback() (bool, error) { - return false, errOpNoSupport -} - -// SetMulticastLoopback sets whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) SetMulticastLoopback(on bool) error { - return errOpNoSupport -} - -// JoinGroup joins the group address group on the interface ifi. -// By default all sources that can cast data to group are accepted. -// It's possible to mute and unmute data transmission from a specific -// source by using ExcludeSourceSpecificGroup and -// IncludeSourceSpecificGroup. -// JoinGroup uses the system assigned multicast interface when ifi is -// nil, although this is not recommended because the assignment -// depends on platforms and sometimes it might require routing -// configuration. -func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// LeaveGroup leaves the group address group on the interface ifi -// regardless of whether the group is any-source group or -// source-specific group. 
-func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// JoinSourceSpecificGroup joins the source-specific group comprising -// group and source on the interface ifi. -// JoinSourceSpecificGroup uses the system assigned multicast -// interface when ifi is nil, although this is not recommended because -// the assignment depends on platforms and sometimes it might require -// routing configuration. -func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// LeaveSourceSpecificGroup leaves the source-specific group on the -// interface ifi. -func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// ExcludeSourceSpecificGroup excludes the source-specific group from -// the already joined any-source groups by JoinGroup on the interface -// ifi. -func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// IncludeSourceSpecificGroup includes the excluded source-specific -// group by ExcludeSourceSpecificGroup again on the interface ifi. -func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// ICMPFilter returns an ICMP filter. -// Currently only Linux supports this. -func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -// SetICMPFilter deploys the ICMP filter. -// Currently only Linux supports this. -func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/doc.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/doc.go index 9a79badf..b43935a5 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/doc.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/doc.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -21,10 +21,10 @@ // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections -// that use the IPv4 transport. When a single TCP connection carrying +// that use the IPv4 transport. When a single TCP connection carrying // a data flow of multiple packets needs to indicate the flow is -// important, ipv4.Conn is used to set the type-of-service field on -// the IPv4 header for each packet. +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. // // ln, err := net.Listen("tcp4", "0.0.0.0:1024") // if err != nil { @@ -56,7 +56,7 @@ // // The options for multicasting are available for net.UDPConn and // net.IPconn which are created as network connections that use the -// IPv4 transport. A few network facilities must be prepared before +// IPv4 transport. A few network facilities must be prepared before // you begin multicasting, at a minimum joining network interfaces and // multicast groups. // @@ -80,7 +80,7 @@ // defer c.Close() // // Second, the application joins multicast groups, starts listening to -// the groups on the specified network interfaces. Note that the +// the groups on the specified network interfaces. 
Note that the // service port for transport layer protocol does not matter with this // operation as joining groups affects only network and link layer // protocols, such as IPv4 and Ethernet. @@ -94,10 +94,10 @@ // } // // The application might set per packet control message transmissions -// between the protocol stack within the kernel. When the application +// between the protocol stack within the kernel. When the application // needs a destination address on an incoming packet, -// SetControlMessage of ipv4.PacketConn is used to enable control -// message transmissons. +// SetControlMessage of PacketConn is used to enable control message +// transmissions. // // if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { // // error handling @@ -145,7 +145,7 @@ // More multicasting // // An application that uses PacketConn or RawConn may join multiple -// multicast groups. For example, a UDP listener with port 1024 might +// multicast groups. For example, a UDP listener with port 1024 might // join two different groups across over two different network // interfaces by using: // @@ -166,7 +166,7 @@ // } // // It is possible for multiple UDP listeners that listen on the same -// UDP port to join the same multicast group. The net package will +// UDP port to join the same multicast group. The net package will // provide a socket that listens to a wildcard address with reusable // UDP port when an appropriate multicast address prefix is passed to // the net.ListenPacket or net.ListenUDP. @@ -240,3 +240,5 @@ // In the fallback case, ExcludeSourceSpecificGroup and // IncludeSourceSpecificGroup may return an error. package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/endpoint.go index bc45bf05..2ab87736 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/endpoint.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/endpoint.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,8 +8,15 @@ import ( "net" "syscall" "time" + + "golang.org/x/net/internal/socket" ) +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + // A Conn represents a network endpoint that uses the IPv4 transport. // It is used to control basic IP-level socket options such as TOS and // TTL. @@ -18,21 +25,22 @@ type Conn struct { } type genericOpt struct { - net.Conn + *socket.Conn } func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } // NewConn returns a new Conn. func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) return &Conn{ - genericOpt: genericOpt{Conn: c}, + genericOpt: genericOpt{Conn: cc}, } } // A PacketConn represents a packet network endpoint that uses the -// IPv4 transport. It is used to control several IP-level socket -// options including multicasting. It also provides datagram based +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. 
It also provides datagram based // network I/O methods specific to the IPv4 and higher layer protocols // such as UDP. type PacketConn struct { @@ -42,21 +50,17 @@ type PacketConn struct { } type dgramOpt struct { - net.PacketConn + *socket.Conn } -func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // SetControlMessage sets the per packet IP-level socket options. func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { return syscall.EINVAL } - fd, err := c.payloadHandler.sysfd() - if err != nil { - return err - } - return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } // SetDeadline sets the read and write deadlines associated with the @@ -97,22 +101,18 @@ func (c *PacketConn) Close() error { // NewPacketConn returns a new PacketConn using c as its underlying // transport. func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) p := &PacketConn{ - genericOpt: genericOpt{Conn: c.(net.Conn)}, - dgramOpt: dgramOpt{PacketConn: c}, - payloadHandler: payloadHandler{PacketConn: c}, - } - if _, ok := c.(*net.IPConn); ok && sockOpts[ssoStripHeader].name > 0 { - if fd, err := p.payloadHandler.sysfd(); err == nil { - setInt(fd, &sockOpts[ssoStripHeader], boolint(true)) - } + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, } return p } // A RawConn represents a packet network endpoint that uses the IPv4 -// transport. It is used to control several IP-level socket options -// including IPv4 header manipulation. It also provides datagram +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram // based network I/O methods specific to the IPv4 and higher layer // protocols that handle IPv4 datagram directly such as OSPF, GRE. type RawConn struct { @@ -126,11 +126,7 @@ func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.packetHandler.ok() { return syscall.EINVAL } - fd, err := c.packetHandler.sysfd() - if err != nil { - return err - } - return setControlMessage(fd, &c.packetHandler.rawOpt, cf, on) + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) } // SetDeadline sets the read and write deadlines associated with the @@ -139,7 +135,7 @@ func (c *RawConn) SetDeadline(t time.Time) error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.SetDeadline(t) + return c.packetHandler.IPConn.SetDeadline(t) } // SetReadDeadline sets the read deadline associated with the @@ -148,7 +144,7 @@ func (c *RawConn) SetReadDeadline(t time.Time) error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.SetReadDeadline(t) + return c.packetHandler.IPConn.SetReadDeadline(t) } // SetWriteDeadline sets the write deadline associated with the @@ -157,7 +153,7 @@ func (c *RawConn) SetWriteDeadline(t time.Time) error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.SetWriteDeadline(t) + return c.packetHandler.IPConn.SetWriteDeadline(t) } // Close closes the endpoint. 
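The endpoint changes above replace the reflection-based sysfd lookups with golang.org/x/net/internal/socket, but the exported constructors and option methods keep their shape. A minimal sketch of the call pattern they back, assuming an illustrative interface name and group address, with error handling trimmed:

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        // Assumed group address; any IPv4 multicast address works the same way.
        group := net.IPv4(224, 0, 0, 250)

        c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()

        // NewPacketConn now wraps the conn in a socket.Conn internally.
        p := ipv4.NewPacketConn(c)

        ifi, err := net.InterfaceByName("eth0") // assumed interface name
        if err != nil {
            log.Fatal(err)
        }
        // JoinGroup resolves ssoJoinGroup from the sockOpts table and
        // calls so.setGroup on the wrapped socket.Conn.
        if err := p.JoinGroup(ifi, &net.UDPAddr{IP: group}); err != nil {
            log.Fatal(err)
        }
        // Ask for per-packet destination and interface information.
        if err := p.SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
            log.Fatal(err)
        }
    }
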
@@ -165,22 +161,26 @@ func (c *RawConn) Close() error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.Close() + return c.packetHandler.IPConn.Close() } // NewRawConn returns a new RawConn using c as its underlying // transport. func NewRawConn(c net.PacketConn) (*RawConn, error) { - r := &RawConn{ - genericOpt: genericOpt{Conn: c.(net.Conn)}, - dgramOpt: dgramOpt{PacketConn: c}, - packetHandler: packetHandler{c: c.(*net.IPConn)}, - } - fd, err := r.packetHandler.sysfd() + cc, err := socket.NewConn(c.(net.Conn)) if err != nil { return nil, err } - if err := setInt(fd, &sockOpts[ssoHeaderPrepend], boolint(true)); err != nil { + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errOpNoSupport + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { return nil, err } return r, nil diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/gen.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/gen.go deleted file mode 100644 index cbe70327..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/gen.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - // The ipv4 package still supports go1.2, and so we need to - // take care of additional platforms in go1.3 and above for - // working with go1.2. 
- switch { - case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": - b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv4\n"), 1) - case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"): - b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv4\n"), 1) - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", - parseICMPv4Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") - fmt.Fprintf(&bb, "package ipv4\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv4Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var icp icmpv4Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigDescr) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) - } - fmt.Fprintf(w, "}\n") - return nil -} - -type icmpv4Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv4ParamRecord struct { - OrigDescr string - Descr string - Value int -} - -func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { - id := -1 - for i, r := range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Descr, "Reserved") || - 
strings.Contains(pr.Descr, "Unassigned") || - strings.Contains(pr.Descr, "Deprecated") || - strings.Contains(pr.Descr, "Experiment") || - strings.Contains(pr.Descr, "experiment") { - continue - } - ss := strings.Split(pr.Descr, "\n") - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - s := strings.TrimSpace(prs[i].Descr) - prs[i].OrigDescr = s - prs[i].Descr = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt.go similarity index 61% rename from vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt_posix.go rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt.go index fefa0be3..119bf841 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt_posix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt.go @@ -1,9 +1,7 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv4 import "syscall" @@ -13,11 +11,11 @@ func (c *genericOpt) TOS() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoTOS]) + return so.GetInt(c.Conn) } // SetTOS sets the type-of-service field value for future outgoing @@ -26,11 +24,11 @@ func (c *genericOpt) SetTOS(tos int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoTOS] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoTOS], tos) + return so.SetInt(c.Conn, tos) } // TTL returns the time-to-live field value for outgoing packets. @@ -38,11 +36,11 @@ func (c *genericOpt) TTL() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoTTL]) + return so.GetInt(c.Conn) } // SetTTL sets the time-to-live field value for future outgoing @@ -51,9 +49,9 @@ func (c *genericOpt) SetTTL(ttl int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoTTL] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoTTL], ttl) + return so.SetInt(c.Conn, ttl) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt_stub.go deleted file mode 100644 index 1817badb..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/genericopt_stub.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv4 - -// TOS returns the type-of-service field value for outgoing packets. -func (c *genericOpt) TOS() (int, error) { - return 0, errOpNoSupport -} - -// SetTOS sets the type-of-service field value for future outgoing -// packets. 
-func (c *genericOpt) SetTOS(tos int) error { - return errOpNoSupport -} - -// TTL returns the time-to-live field value for outgoing packets. -func (c *genericOpt) TTL() (int, error) { - return 0, errOpNoSupport -} - -// SetTTL sets the time-to-live field value for future outgoing -// packets. -func (c *genericOpt) SetTTL(ttl int) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/header.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/header.go index 363d9c21..8bb0f0f4 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/header.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/header.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -10,6 +10,8 @@ import ( "net" "runtime" "syscall" + + "golang.org/x/net/internal/socket" ) const ( @@ -49,7 +51,7 @@ func (h *Header) String() string { return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) } -// Marshal returns the binary encoding of the IPv4 header h. +// Marshal returns the binary encoding of h. func (h *Header) Marshal() ([]byte, error) { if h == nil { return nil, syscall.EINVAL @@ -63,9 +65,17 @@ func (h *Header) Marshal() ([]byte, error) { b[1] = byte(h.TOS) flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) switch runtime.GOOS { - case "darwin", "dragonfly", "freebsd", "netbsd": - nativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) - nativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } default: binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) @@ -88,45 +98,62 @@ func (h *Header) Marshal() ([]byte, error) { return b, nil } -// ParseHeader parses b as an IPv4 header. -func ParseHeader(b []byte) (*Header, error) { - if len(b) < HeaderLen { - return nil, errHeaderTooShort +// Parse parses b as an IPv4 header and sotres the result in h. 
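Header.Parse is new in this revision: unlike ParseHeader, it fills a caller-supplied Header so a receive loop can reuse one allocation, and ParseHeader is rewritten on top of it. A small, self-contained round trip (addresses and payload size are placeholders):

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        h := &ipv4.Header{
            Version:  ipv4.Version,
            Len:      ipv4.HeaderLen,
            TOS:      0xc0,
            TotalLen: ipv4.HeaderLen + 8, // header plus an assumed 8-byte payload
            TTL:      64,
            Protocol: 1, // ICMP
            Src:      net.IPv4(192, 0, 2, 1),
            Dst:      net.IPv4(192, 0, 2, 2),
        }
        b, err := h.Marshal()
        if err != nil {
            log.Fatal(err)
        }

        // Parse fills an existing Header; ParseHeader remains the allocating form.
        var got ipv4.Header
        if err := got.Parse(b); err != nil {
            log.Fatal(err)
        }
        log.Printf("src=%v dst=%v ttl=%d", got.Src, got.Dst, got.TTL)
    }

Marshal and Parse apply matching byte-order handling per GOOS (including the FreeBSD 11 change above), so a same-platform round trip is stable.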
+func (h *Header) Parse(b []byte) error { + if h == nil || len(b) < HeaderLen { + return errHeaderTooShort } hdrlen := int(b[0]&0x0f) << 2 if hdrlen > len(b) { - return nil, errBufferTooShort - } - h := &Header{ - Version: int(b[0] >> 4), - Len: hdrlen, - TOS: int(b[1]), - ID: int(binary.BigEndian.Uint16(b[4:6])), - TTL: int(b[8]), - Protocol: int(b[9]), - Checksum: int(binary.BigEndian.Uint16(b[10:12])), - Src: net.IPv4(b[12], b[13], b[14], b[15]), - Dst: net.IPv4(b[16], b[17], b[18], b[19]), + return errBufferTooShort } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) switch runtime.GOOS { case "darwin", "dragonfly", "netbsd": - h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + hdrlen - h.FragOff = int(nativeEndian.Uint16(b[6:8])) + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) case "freebsd": - h.TotalLen = int(nativeEndian.Uint16(b[2:4])) - if freebsdVersion < 1000000 { - h.TotalLen += hdrlen + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) } - h.FragOff = int(nativeEndian.Uint16(b[6:8])) default: h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) } h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 h.FragOff = h.FragOff & 0x1fff - if hdrlen-HeaderLen > 0 { - h.Options = make([]byte, hdrlen-HeaderLen) - copy(h.Options, b[HeaderLen:]) + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err } return h, nil } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper.go index acecfd0d..a5052e32 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper.go @@ -1,14 +1,12 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv4 import ( - "encoding/binary" "errors" "net" - "unsafe" ) var ( @@ -23,20 +21,8 @@ var ( // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. 
freebsdVersion uint32 - - nativeEndian binary.ByteOrder ) -func init() { - i := uint32(1) - b := (*[4]byte)(unsafe.Pointer(&i)) - if b[0] == 1 { - nativeEndian = binary.LittleEndian - } else { - nativeEndian = binary.BigEndian - } -} - func boolint(b bool) int { if b { return 1 @@ -57,3 +43,21 @@ func netAddrToIP4(a net.Addr) net.IP { } return nil } + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_stub.go deleted file mode 100644 index dc2120cf..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_stub.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv4 - -func (c *genericOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *dgramOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *payloadHandler) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *packetHandler) sysfd() (int, error) { - return 0, errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_unix.go deleted file mode 100644 index 345ca7dc..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_unix.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv4 - -import ( - "net" - "reflect" -) - -func (c *genericOpt) sysfd() (int, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return 0, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (int, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return 0, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (int, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func (c *packetHandler) sysfd() (int, error) { - return sysfd(c.c) -} - -func sysfd(c net.Conn) (int, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - netfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := netfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return int(fd.Int()), nil - } - } - return 0, errInvalidConnType -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_windows.go deleted file mode 100644 index 322b2a5e..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/helper_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4 - -import ( - "net" - "reflect" - "syscall" -) - -func (c *genericOpt) sysfd() (syscall.Handle, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (syscall.Handle, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (syscall.Handle, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func (c *packetHandler) sysfd() (syscall.Handle, error) { - return sysfd(c.c) -} - -func sysfd(c net.Conn) (syscall.Handle, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - netfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := netfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return syscall.Handle(fd.Uint()), nil - } - } - return syscall.InvalidHandle, errInvalidConnType -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp.go index dbd05cff..9902bb3d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -26,12 +26,12 @@ func (typ ICMPType) Protocol() int { // packets. The filter belongs to a packet delivery path on a host and // it cannot interact with forwarding packets or tunnel-outer packets. // -// Note: RFC 2460 defines a reasonable role model and it works not +// Note: RFC 8200 defines a reasonable role model and it works not // only for IPv6 but IPv4. A node means a device that implements IP. // A router means a node that forwards IP packets not explicitly // addressed to itself, and a host means a node that is not a router. 
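The ICMPFilter changes that follow only rename the embedded platform struct (sysICMPFilter becomes icmpFilter); the exported type and the PacketConn methods that carry it are unchanged. A sketch of typical use, noting that raw ICMP sockets need elevated privileges and that SetICMPFilter only takes effect where the kernel supports ICMP_FILTER (Linux):

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // requires raw-socket privileges
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        p := ipv4.NewPacketConn(c)

        var f ipv4.ICMPFilter
        f.SetAll(true)                   // start by blocking every ICMP type
        f.Accept(ipv4.ICMPTypeEchoReply) // then allow just echo replies
        // On platforms without ICMP_FILTER support this returns an
        // "operation not supported" error.
        if err := p.SetICMPFilter(&f); err != nil {
            log.Fatal(err)
        }
    }
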
type ICMPFilter struct { - sysICMPFilter + icmpFilter } // Accept accepts incoming ICMP packets including the type field value diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_linux.go index c9122533..6e1c5c80 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_linux.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -4,15 +4,15 @@ package ipv4 -func (f *sysICMPFilter) accept(typ ICMPType) { +func (f *icmpFilter) accept(typ ICMPType) { f.Data &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPFilter) block(typ ICMPType) { +func (f *icmpFilter) block(typ ICMPType) { f.Data |= 1 << (uint32(typ) & 31) } -func (f *sysICMPFilter) setAll(block bool) { +func (f *icmpFilter) setAll(block bool) { if block { f.Data = 1<<32 - 1 } else { @@ -20,6 +20,6 @@ func (f *sysICMPFilter) setAll(block bool) { } } -func (f *sysICMPFilter) willBlock(typ ICMPType) bool { +func (f *icmpFilter) willBlock(typ ICMPType) bool { return f.Data&(1<<(uint32(typ)&31)) != 0 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_stub.go index 9ee9b6a3..21bb29ab 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -6,20 +6,20 @@ package ipv4 -const sysSizeofICMPFilter = 0x0 +const sizeofICMPFilter = 0x0 -type sysICMPFilter struct { +type icmpFilter struct { } -func (f *sysICMPFilter) accept(typ ICMPType) { +func (f *icmpFilter) accept(typ ICMPType) { } -func (f *sysICMPFilter) block(typ ICMPType) { +func (f *icmpFilter) block(typ ICMPType) { } -func (f *sysICMPFilter) setAll(block bool) { +func (f *icmpFilter) setAll(block bool) { } -func (f *sysICMPFilter) willBlock(typ ICMPType) bool { +func (f *icmpFilter) willBlock(typ ICMPType) bool { return false } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet.go index 09864314..f00f5b05 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,42 +7,30 @@ package ipv4 import ( "net" "syscall" + + "golang.org/x/net/internal/socket" ) +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + // A packetHandler represents the IPv4 datagram handler. type packetHandler struct { - c *net.IPConn + *net.IPConn + *socket.Conn rawOpt } -func (c *packetHandler) ok() bool { return c != nil && c.c != nil } +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } // ReadFrom reads an IPv4 datagram from the endpoint c, copying the -// datagram into b. It returns the received datagram as the IPv4 +// datagram into b. It returns the received datagram as the IPv4 // header h, the payload p and the control message cm. 
func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { if !c.ok() { return nil, nil, nil, syscall.EINVAL } - oob := newControlMessage(&c.rawOpt) - n, oobn, _, src, err := c.c.ReadMsgIP(b, oob) - if err != nil { - return nil, nil, nil, err - } - var hs []byte - if hs, p, err = slicePacket(b[:n]); err != nil { - return nil, nil, nil, err - } - if h, err = ParseHeader(hs); err != nil { - return nil, nil, nil, err - } - if cm, err = parseControlMessage(oob[:oobn]); err != nil { - return nil, nil, nil, err - } - if src != nil && cm != nil { - cm.Src = src.IP - } - return + return c.readFrom(b) } func slicePacket(b []byte) (h, p []byte, err error) { @@ -54,14 +42,14 @@ func slicePacket(b []byte) (h, p []byte, err error) { } // WriteTo writes an IPv4 datagram through the endpoint c, copying the -// datagram from the IPv4 header h and the payload p. The control +// datagram from the IPv4 header h and the payload p. The control // message cm allows the datagram path and the outgoing interface to be -// specified. Currently only Darwin and Linux support this. The cm +// specified. Currently only Darwin and Linux support this. The cm // may be nil if control of the outgoing datagram is not required. // // The IPv4 header h must contain appropriate fields that include: // -// Version = ipv4.Version +// Version = // Len = // TOS = // TotalLen = @@ -77,21 +65,5 @@ func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { if !c.ok() { return syscall.EINVAL } - oob := marshalControlMessage(cm) - wh, err := h.Marshal() - if err != nil { - return err - } - dst := &net.IPAddr{} - if cm != nil { - if ip := cm.Dst.To4(); ip != nil { - dst.IP = ip - } - } - if dst.IP == nil { - dst.IP = h.Dst - } - wh = append(wh, p...) - _, _, err = c.c.WriteMsgIP(wh, oob, dst) - return err + return c.writeTo(h, p, cm) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet_go1_8.go new file mode 100644 index 00000000..b47d1868 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet_go1_8.go @@ -0,0 +1,56 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4 + +import "net" + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + n, nn, _, src, err := c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if nn > 0 { + cm = new(ControlMessage) + if err := cm.Parse(oob[:nn]); err != nil { + return nil, nil, nil, err + } + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + oob := cm.Marshal() + wh, err := h.Marshal() + if err != nil { + return err + } + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) 
+ _, _, err = c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet_go1_9.go new file mode 100644 index 00000000..082c36d7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/packet_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload.go index d7698cbd..f95f811a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload.go @@ -1,15 +1,23 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv4 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. // A payloadHandler represents the IPv4 datagram payload handler. 
type payloadHandler struct { net.PacketConn + *socket.Conn rawOpt } -func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg.go index d358fc3a..3f06d760 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -1,8 +1,8 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !plan9,!solaris,!windows +// +build !nacl,!plan9,!windows package ipv4 @@ -12,70 +12,25 @@ import ( ) // ReadFrom reads a payload of the received IPv4 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { return 0, nil, nil, syscall.EINVAL } - oob := newControlMessage(&c.rawOpt) - var oobn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - if sockOpts[ssoStripHeader].name > 0 { - if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { - return 0, nil, nil, err - } - } else { - nb := make([]byte, maxHeaderLen+len(b)) - if n, oobn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { - return 0, nil, nil, err - } - hdrlen := int(nb[0]&0x0f) << 2 - copy(b, nb[hdrlen:]) - n -= hdrlen - } - default: - return 0, nil, nil, errInvalidConnType - } - if cm, err = parseControlMessage(oob[:oobn]); err != nil { - return 0, nil, nil, err - } - if cm != nil { - cm.Src = netAddrToIP4(src) - } - return + return c.readFrom(b) } // WriteTo writes a payload of the IPv4 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. The control message cm allows +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows // the datagram path and the outgoing interface to be specified. -// Currently only Darwin and Linux support this. The cm may be nil if +// Currently only Darwin and Linux support this. The cm may be nil if // control of the outgoing datagram is not required. 
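The payload handler now carries a *socket.Conn alongside the net.PacketConn, and ReadFrom/WriteTo delegate to build-tagged readFrom/writeTo helpers, but their exported signatures and ControlMessage semantics are unchanged. A sketch of the receive/echo pattern they back (port and buffer size are arbitrary; error handling trimmed):

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        c, err := net.ListenPacket("udp4", "0.0.0.0:1024") // assumed port
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        p := ipv4.NewPacketConn(c)
        if err := p.SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
            log.Println(err) // not every platform supports these flags
        }

        b := make([]byte, 1500)
        for {
            n, cm, src, err := p.ReadFrom(b)
            if err != nil {
                log.Fatal(err)
            }
            // cm is nil when control messages are unavailable.
            var wcm *ipv4.ControlMessage
            if cm != nil {
                // Pin the reply to the interface the request arrived on.
                wcm = &ipv4.ControlMessage{IfIndex: cm.IfIndex}
            }
            if _, err := p.WriteTo(b[:n], wcm, src); err != nil {
                log.Fatal(err)
            }
        }
    }
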
func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { return 0, syscall.EINVAL } - oob := marshalControlMessage(cm) - if dst == nil { - return 0, errMissingAddress - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, errInvalidConnType - } - if err != nil { - return 0, err - } - return + return c.writeTo(b, cm, dst) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go new file mode 100644 index 00000000..d26ccd90 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + nb := make([]byte, maxHeaderLen+len(b)) + if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go new file mode 100644 index 00000000..2f193118 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index d128c9c2..3926de70 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -1,8 +1,8 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build plan9 solaris windows +// +build nacl plan9 windows package ipv4 @@ -12,7 +12,7 @@ import ( ) // ReadFrom reads a payload of the received IPv4 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { @@ -26,10 +26,10 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. } // WriteTo writes a payload of the IPv4 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. The control message cm allows +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows // the datagram path and the outgoing interface to be specified. -// Currently only Darwin and Linux support this. 
The cm may be nil if +// Currently only Darwin and Linux support this. The cm may be nil if // control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt.go index ace37d30..22e90c03 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt.go @@ -4,6 +4,8 @@ package ipv4 +import "golang.org/x/net/internal/socket" + // Sticky socket options const ( ssoTOS = iota // header field for unicast packet @@ -24,16 +26,12 @@ const ( ssoLeaveSourceGroup // source-specific multicast ssoBlockSourceGroup // any-source or source-specific multicast ssoUnblockSourceGroup // any-source or source-specific multicast - ssoMax + ssoAttachFilter // attach BPF for filtering inbound traffic ) // Sticky socket option value types const ( - ssoTypeByte = iota + 1 - ssoTypeInt - ssoTypeInterface - ssoTypeICMPFilter - ssoTypeIPMreq + ssoTypeIPMreq = iota + 1 ssoTypeIPMreqn ssoTypeGroupReq ssoTypeGroupSourceReq @@ -41,6 +39,6 @@ const ( // A sockOpt represents a binding for sticky socket option. type sockOpt struct { - name int // option name, must be equal or greater than 1 - typ int // option value type, must be equal or greater than 1 + socket.Option + typ int // hint for option value type; optional } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go deleted file mode 100644 index 45551528..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!windows - -package ipv4 - -import "net" - -func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport -} - -func getsockoptInterface(fd, name int) (*net.Interface, error) { - return nil, errOpNoSupport -} - -func setsockoptInterface(fd, name int, ifi *net.Interface) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go deleted file mode 100644 index 7b5c3290..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd netbsd openbsd - -package ipv4 - -import ( - "net" - "os" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { - mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} - if err := setIPMreqInterface(&mreq, ifi); err != nil { - return err - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreq), sysSizeofIPMreq)) -} - -func getsockoptInterface(fd, name int) (*net.Interface, error) { - var b [4]byte - l := uint32(4) - if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setsockoptInterface(fd, name int, ifi *net.Interface) error { - ip, err := netInterfaceToIP4(ifi) - if err != nil { - return err - } - var b [4]byte - copy(b[:], ip) - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), uint32(4))) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go deleted file mode 100644 index 431930df..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4 - -import ( - "net" - "os" - "syscall" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func setsockoptIPMreq(fd syscall.Handle, name int, ifi *net.Interface, grp net.IP) error { - mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} - if err := setIPMreqInterface(&mreq, ifi); err != nil { - return err - } - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&mreq)), int32(sysSizeofIPMreq))) -} - -func getsockoptInterface(fd syscall.Handle, name int) (*net.Interface, error) { - var b [4]byte - l := int32(4) - if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setsockoptInterface(fd syscall.Handle, name int, ifi *net.Interface) error { - ip, err := netInterfaceToIP4(ifi) - if err != nil { - return err - } - var b [4]byte - copy(b[:], ip) - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), 4)) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go deleted file mode 100644 index 332f403e..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !darwin,!freebsd,!linux,!windows - -package ipv4 - -import "net" - -func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { - return nil, errOpNoSupport -} - -func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 00000000..e96955bc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go deleted file mode 100644 index 85465244..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !darwin,!freebsd,!linux - -package ipv4 - -import "net" - -func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport -} - -func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go deleted file mode 100644 index 0a672b6a..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd linux - -package ipv4 - -import ( - "net" - "os" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -var freebsd32o64 bool - -func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { - var gr sysGroupReq - if ifi != nil { - gr.Interface = uint32(ifi.Index) - } - gr.setGroup(grp) - var p unsafe.Pointer - var l uint32 - if freebsd32o64 { - var d [sysSizeofGroupReq + 4]byte - s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupReq + 4 - } else { - p = unsafe.Pointer(&gr) - l = sysSizeofGroupReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) -} - -func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { - var gsr sysGroupSourceReq - if ifi != nil { - gsr.Interface = uint32(ifi.Index) - } - gsr.setSourceGroup(grp, src) - var p unsafe.Pointer - var l uint32 - if freebsd32o64 { - var d [sysSizeofGroupSourceReq + 4]byte - s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupSourceReq + 4 - } else { - p = unsafe.Pointer(&gsr) - l = sysSizeofGroupSourceReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_stub.go index 9d19f5df..23249b78 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -1,11 +1,42 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 -func setInt(fd int, opt *sockOpt, v int) error { +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { return errOpNoSupport } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_unix.go deleted file mode 100644 index f7acc6b9..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_unix.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv4 - -import ( - "net" - "os" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func getInt(fd int, opt *sockOpt) (int, error) { - if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { - return 0, errOpNoSupport - } - var i int32 - var b byte - p := unsafe.Pointer(&i) - l := uint32(4) - if opt.typ == ssoTypeByte { - p = unsafe.Pointer(&b) - l = 1 - } - if err := getsockopt(fd, iana.ProtocolIP, opt.name, p, &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - if opt.typ == ssoTypeByte { - return int(b), nil - } - return int(i), nil -} - -func setInt(fd int, opt *sockOpt, v int) error { - if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { - return errOpNoSupport - } - i := int32(v) - var b byte - p := unsafe.Pointer(&i) - l := uint32(4) - if opt.typ == ssoTypeByte { - b = byte(v) - p = unsafe.Pointer(&b) - l = 1 - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, opt.name, p, l)) -} - -func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 { - return nil, errOpNoSupport - } - switch opt.typ { - case ssoTypeInterface: - return getsockoptInterface(fd, opt.name) - case ssoTypeIPMreqn: - return getsockoptIPMreqn(fd, opt.name) - default: - return nil, errOpNoSupport - } -} - -func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 { - return errOpNoSupport - } - switch opt.typ { - case ssoTypeInterface: - return setsockoptInterface(fd, opt.name, ifi) - case ssoTypeIPMreqn: - return setsockoptIPMreqn(fd, opt.name, ifi, nil) - default: - return errOpNoSupport - } -} - -func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { - if opt.name < 1 || opt.typ != ssoTypeICMPFilter { - return nil, errOpNoSupport - } - var f ICMPFilter - l := uint32(sysSizeofICMPFilter) - if err := getsockopt(fd, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), &l); err != nil 
{ - return nil, os.NewSyscallError("getsockopt", err) - } - return &f, nil -} - -func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { - if opt.name < 1 || opt.typ != ssoTypeICMPFilter { - return errOpNoSupport - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), sysSizeofICMPFilter)) -} - -func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - if opt.name < 1 { - return errOpNoSupport - } - switch opt.typ { - case ssoTypeIPMreq: - return setsockoptIPMreq(fd, opt.name, ifi, grp) - case ssoTypeIPMreqn: - return setsockoptIPMreqn(fd, opt.name, ifi, grp) - case ssoTypeGroupReq: - return setsockoptGroupReq(fd, opt.name, ifi, grp) - default: - return errOpNoSupport - } -} - -func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { - return errOpNoSupport - } - return setsockoptGroupSourceReq(fd, opt.name, ifi, grp, src) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_windows.go deleted file mode 100644 index c4c2441e..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_windows.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4 - -import ( - "net" - "os" - "syscall" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { - if opt.name < 1 || opt.typ != ssoTypeInt { - return 0, errOpNoSupport - } - var i int32 - l := int32(4) - if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - return int(i), nil -} - -func setInt(fd syscall.Handle, opt *sockOpt, v int) error { - if opt.name < 1 || opt.typ != ssoTypeInt { - return errOpNoSupport - } - i := int32(v) - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) -} - -func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return nil, errOpNoSupport - } - return getsockoptInterface(fd, opt.name) -} - -func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return errOpNoSupport - } - return setsockoptInterface(fd, opt.name, ifi) -} - -func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { - return errOpNoSupport -} - -func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - if opt.name < 1 || opt.typ != ssoTypeIPMreq { - return errOpNoSupport - } - return setsockoptIPMreq(fd, opt.name, ifi, grp) -} - -func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - // TODO(mikio): implement this - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreq.go similarity index 57% rename from 
vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreq.go index 4a6aa78e..0388cba0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -1,14 +1,50 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd windows +// +build darwin dragonfly freebsd netbsd openbsd solaris windows package ipv4 -import "net" +import ( + "net" + "unsafe" -func setIPMreqInterface(mreq *sysIPMreq, ifi *net.Interface) error { + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { if ifi == nil { return nil } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 00000000..f3919208 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreqn.go similarity index 50% rename from vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreqn.go index 1f2b9a14..1f24f69f 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -8,18 +8,17 @@ package ipv4 import ( "net" - "os" "unsafe" - "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { - var mreqn sysIPMreqn - l := uint32(sysSizeofIPMreqn) - if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) if mreqn.Ifindex == 0 { return nil, nil } @@ -30,13 +29,14 @@ func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { return ifi, nil } -func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { - var mreqn sysIPMreqn +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn if ifi != nil { mreqn.Ifindex = int32(ifi.Index) } if grp != nil { mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), sysSizeofIPMreqn)) + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go similarity index 51% rename from vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go index 7732e49f..0711d3d7 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -4,14 +4,18 @@ // +build !darwin,!freebsd,!linux -package ipv6 +package ipv4 -import "net" +import ( + "net" -func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport } -func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { return errOpNoSupport } diff 
--git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 00000000..9f30b730 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 00000000..9a213209 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bsd.go index 203033db..58256dd9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -2,13 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build dragonfly netbsd +// +build netbsd openbsd package ipv4 import ( "net" "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( @@ -18,17 +21,17 @@ var ( ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, } ) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_darwin.go index b5f5bd51..e8fb1916 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -6,8 +6,13 @@ package ipv4 import ( "net" + "strconv" + "strings" "syscall" "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( @@ -17,80 +22,72 @@ var ( ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoStripHeader: {sysIP_STRIPHDR, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, 
+ ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, } ) func init() { // Seems like kern.osreldate is veiled on latest OS X. We use // kern.osrelease instead. - osver, err := syscall.Sysctl("kern.osrelease") + s, err := syscall.Sysctl("kern.osrelease") if err != nil { return } - var i int - for i = range osver { - if osver[i] == '.' { - break - } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return } // The IP_PKTINFO and protocol-independent multicast API were - // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like - // those features require OS X 10.8 (Darwin 12.0.0) and above. + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. // See http://support.apple.com/kb/HT1633. 
- if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { - ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO - ctlOpts[ctlPacketInfo].length = sysSizeofInetPktinfo - ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo - ctlOpts[ctlPacketInfo].parse = parsePacketInfo - sockOpts[ssoPacketInfo].name = sysIP_RECVPKTINFO - sockOpts[ssoPacketInfo].typ = ssoTypeInt - sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn - sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP - sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq - sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP - sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq - sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP - sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP - sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE - sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE - sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return } + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}} + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} } -func (pi *sysInetPktinfo) setIfindex(i int) { +func (pi *inetPktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) - sa = 
(*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) - sa.Len = sysSizeofSockaddrInet + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], src) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 00000000..859764f3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_freebsd.go index 163ff9a7..b8003245 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -10,6 +10,9 @@ import ( "strings" "syscall" "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( @@ -19,29 +22,29 @@ var ( ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: 
{sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, } ) func init() { freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") if freebsdVersion >= 1000000 { - sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} } if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { archs, _ := syscall.Sysctl("kern.supported_archs") @@ -54,20 +57,20 @@ func init() { } } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) - sa.Len = sysSizeofSockaddrInet +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) - sa.Len = sysSizeofSockaddrInet +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) - sa.Len = sysSizeofSockaddrInet + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], src) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_linux.go 
b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_linux.go index 73e0d462..60defe13 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_linux.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -8,48 +8,52 @@ import ( "net" "syscall" "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( ctlOpts = [ctlMax]ctlOpt{ ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, - ctlPacketInfo: {sysIP_PKTINFO, sysSizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeIPMreqn}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoPacketInfo: {sysIP_PKTINFO, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoICMPFilter: {sysICMP_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, } ) -func (pi *sysInetPktinfo) 
setIfindex(i int) { +func (pi *inetPktinfo) setIfindex(i int) { pi.Ifindex = int32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) sa.Family = syscall.AF_INET copy(sa.Addr[:], src) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_openbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_openbsd.go deleted file mode 100644 index d78083a2..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_openbsd.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4 - -import ( - "net" - "syscall" -) - -var ( - ctlOpts = [ctlMax]ctlOpt{ - ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, - ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, - ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, - } - - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeByte}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, - } -) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 00000000..832fef1e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 00000000..ae5704e7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 00000000..e6b7623d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_stub.go index c8e55cbc..4f076473 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{} + sockOpts = map[int]*sockOpt{} ) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_windows.go index 466489fe..b0913d53 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -4,6 +4,11 @@ package ipv4 +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + const ( // See ws2tcpip.h. 
sysIP_OPTIONS = 0x1 @@ -20,22 +25,22 @@ const ( sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 sysIP_PKTINFO = 0x13 - sysSizeofInetPktinfo = 0x8 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqSource = 0xc + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc ) -type sysInetPktinfo struct { +type inetPktinfo struct { Addr [4]byte Ifindex int32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte Interface [4]byte } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte Sourceaddr [4]byte Interface [4]byte @@ -45,17 +50,18 @@ type sysIPMreqSource struct { var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, } ) -func (pi *sysInetPktinfo) setIfindex(i int) { +func (pi *inetPktinfo) setIfindex(i int) { pi.Ifindex = int32(i) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/syscall_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/syscall_linux_386.go deleted file mode 100644 index 07a3a282..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/syscall_linux_386.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4 - -import ( - "syscall" - "unsafe" -) - -const ( - sysGETSOCKOPT = 0xf - sysSETSOCKOPT = 0xe -) - -func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { - if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { - if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/syscall_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/syscall_unix.go deleted file mode 100644 index 88a41b0c..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/syscall_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!386 netbsd openbsd - -package ipv4 - -import ( - "syscall" - "unsafe" -) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/thunk_linux_386.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/thunk_linux_386.s deleted file mode 100644 index daa78bc0..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/thunk_linux_386.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.2 - -TEXT ·socketcall(SB),4,$0-36 - JMP syscall·socketcall(SB) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_darwin.go index 087c6390..c07cc883 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -37,18 +37,18 @@ const ( sysMCAST_BLOCK_SOURCE = 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ -64,35 +64,35 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex uint32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [128]byte } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [128]byte Pad_cgo_1 [128]byte diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go index f5c9ccec..c4365e9e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_dragonfly.go -// +build dragonfly - package ipv4 const ( @@ -24,10 +22,10 @@ const ( sysIP_ADD_MEMBERSHIP = 0xc sysIP_DROP_MEMBERSHIP = 0xd - sysSizeofIPMreq = 0x8 + sizeofIPMreq = 0x8 ) -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go index 6fd67e1e..8c4aec94 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -38,17 +38,17 @@ const ( sysMCAST_BLOCK_SOURCE = 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + 
sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ -64,30 +64,30 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go index ebac6d79..4b10b7c5 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -38,17 +38,17 @@ const ( sysMCAST_BLOCK_SOURCE = 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ -64,32 +64,32 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go index ebac6d79..4b10b7c5 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -38,17 +38,17 @@ const ( sysMCAST_BLOCK_SOURCE 
= 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ -64,32 +64,32 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_386.go index 3733152a..c0260f0c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -58,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 - - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 - - sysSizeofICMPFilter = 0x4 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -100,45 +102,45 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group 
sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [2]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go index afa45190..9c967eaa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -58,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 - - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 - - sysSizeofICMPFilter = 0x4 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -100,47 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go index 3733152a..c0260f0c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ 
-58,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 - - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 - - sysSizeofICMPFilter = 0x4 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -100,45 +102,45 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [2]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go index 129a20ac..9c967eaa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,arm64 - package ipv4 const ( @@ -60,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPFilter = 0x4 - sysSizeofICMPFilter = 0x4 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type 
sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -102,47 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 00000000..c0260f0c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + 
X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go index 7ed9368f..9c967eaa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64 - package ipv4 const ( @@ -60,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPFilter = 0x4 - sysSizeofICMPFilter = 0x4 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -102,47 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type 
sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go index 19fadae6..9c967eaa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64le - package ipv4 const ( @@ -60,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPFilter = 0x4 - sysSizeofICMPFilter = 0x4 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -102,47 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 00000000..c0260f0c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + 
sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go index 15426bee..f65bd9a7 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc - package ipv4 const ( @@ -60,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + 
sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofICMPFilter = 0x4 - sysSizeofICMPFilter = 0x4 + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]uint8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -102,45 +102,45 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [2]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go index beaadd5f..9c967eaa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64 - package ipv4 const ( @@ -60,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPFilter = 0x4 - sysSizeofICMPFilter = 0x4 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -102,47 +102,47 @@ 
type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go index 0eb26230..9c967eaa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64le - package ipv4 const ( @@ -60,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPFilter = 0x4 - sysSizeofICMPFilter = 0x4 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -102,47 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type 
sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go index 90fe99eb..9c967eaa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,s390x - package ipv4 const ( @@ -60,39 +58,41 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPFilter = 0x4 - sysSizeofICMPFilter = 0x4 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -102,47 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_netbsd.go index 8a440eb6..fd3624d9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_netbsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -21,10 +21,10 @@ const ( sysIP_ADD_MEMBERSHIP = 0xc sysIP_DROP_MEMBERSHIP = 0xd - sysSizeofIPMreq = 0x8 + sizeofIPMreq = 0x8 ) -type sysIPMreq struct { +type 
ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_openbsd.go index fd522b57..12f36be7 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -21,10 +21,10 @@ const ( sysIP_ADD_MEMBERSHIP = 0xc sysIP_DROP_MEMBERSHIP = 0xd - sysSizeofIPMreq = 0x8 + sizeofIPMreq = 0x8 ) -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_solaris.go index d7c23349..0a3875cc 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_solaris.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -1,30 +1,20 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_solaris.go -// +build solaris - package ipv4 const ( - sysIP_OPTIONS = 0x1 - sysIP_HDRINCL = 0x2 - sysIP_TOS = 0x3 - sysIP_TTL = 0x4 - sysIP_RECVOPTS = 0x5 - sysIP_RECVRETOPTS = 0x6 - sysIP_RECVDSTADDR = 0x7 - sysIP_RETOPTS = 0x8 - sysIP_RECVIF = 0x9 - sysIP_RECVSLLA = 0xa - sysIP_RECVTTL = 0xb - sysIP_NEXTHOP = 0x19 - sysIP_PKTINFO = 0x1a - sysIP_RECVPKTINFO = 0x1a - sysIP_DONTFRAG = 0x1b - sysIP_BOUND_IF = 0x41 - sysIP_UNSPEC_SRC = 0x42 - sysIP_BROADCAST_TTL = 0x43 - sysIP_DHCPINIT_IF = 0x45 + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb sysIP_MULTICAST_IF = 0x10 sysIP_MULTICAST_TTL = 0x11 @@ -35,26 +25,76 @@ const ( sysIP_UNBLOCK_SOURCE = 0x16 sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 - sysSizeofInetPktinfo = 0xc + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqSource = 0xc + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 ) -type sysInetPktinfo struct { +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { Ifindex uint32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + 
Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/batch.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 00000000..4f5fe683 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/bpfopt_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/bpfopt_linux.go deleted file mode 100644 index 066ef203..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/bpfopt_linux.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6 - -import ( - "os" - "unsafe" - - "golang.org/x/net/bpf" -) - -// SetBPF attaches a BPF program to the connection. -// -// Only supported on Linux. -func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { - fd, err := c.sysfd() - if err != nil { - return err - } - prog := sysSockFProg{ - Len: uint16(len(filter)), - Filter: (*sysSockFilter)(unsafe.Pointer(&filter[0])), - } - return os.NewSyscallError("setsockopt", setsockopt(fd, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog)))) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/bpfopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/bpfopt_stub.go deleted file mode 100644 index 2e4de5f0..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/bpfopt_stub.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !linux - -package ipv6 - -import "golang.org/x/net/bpf" - -// SetBPF attaches a BPF program to the connection. -// -// Only supported on Linux. -func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control.go index b7362aae..2da64441 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,10 +8,13 @@ import ( "fmt" "net" "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) // Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the -// former still support RFC 2292 only. Please be aware that almost +// former still support RFC 2292 only. Please be aware that almost // all protocol implementations prohibit using a combination of RFC // 2292 and RFC 3542 for some practical reasons. 
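For orientation, the batch I/O API vendored in by the new ipv6/batch.go above (the Message alias plus ReadBatch and WriteBatch) can be exercised roughly as follows. This is a minimal sketch against a loopback UDP socket, assuming Go 1.9 or later for the Message type alias; it is not part of the vendored diff itself.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	// A loopback UDP socket, wrapped so the ipv6 batch methods are available.
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv6.NewPacketConn(c)

	// Send one message to ourselves; on Linux WriteBatch is backed by sendmmsg.
	wm := []ipv6.Message{{
		Buffers: [][]byte{[]byte("hello")},
		Addr:    c.LocalAddr(),
	}}
	if _, err := p.WriteBatch(wm, 0); err != nil {
		log.Fatal(err)
	}

	// Read it back; on non-Linux platforms ReadBatch falls back to a single recvmsg.
	rm := []ipv6.Message{{Buffers: [][]byte{make([]byte, 1500)}}}
	n, err := p.ReadBatch(rm, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d message(s), %d bytes from %v", n, rm[0].N, rm[0].Addr)
}
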
@@ -66,6 +69,105 @@ func (cm *ControlMessage) String() string { return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) } +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + // Ancillary data socket options const ( ctlTrafficClass = iota // header field diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go index 80ec2e2f..9fd9eb15 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,31 +7,26 @@ package ipv6 import ( - "syscall" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_2292HOPLIMIT - m.SetLen(syscall.CmsgLen(4)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) if cm != nil { - data := b[syscall.CmsgLen(0):] - nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) } - return b[syscall.CmsgSpace(4):] + return m.Next(4) } func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_2292PKTINFO - m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) if cm != nil { - pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { copy(pi.Addr[:], ip) } @@ -39,17 +34,15 @@ func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { pi.setIfindex(cm.IfIndex) } } - return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] + return m.Next(sizeofInet6Pktinfo) } func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_2292NEXTHOP - m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) if cm != nil { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) sa.setSockaddr(cm.NextHop, cm.IfIndex) } - return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] + return m.Next(sizeofSockaddrInet6) } diff --git 
a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index f344d16d..eec529c2 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -1,57 +1,50 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 import ( - "syscall" + "net" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_TCLASS - m.SetLen(syscall.CmsgLen(4)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) if cm != nil { - data := b[syscall.CmsgLen(0):] - nativeEndian.PutUint32(data[:4], uint32(cm.TrafficClass)) + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) } - return b[syscall.CmsgSpace(4):] + return m.Next(4) } func parseTrafficClass(cm *ControlMessage, b []byte) { - cm.TrafficClass = int(nativeEndian.Uint32(b[:4])) + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) } func marshalHopLimit(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_HOPLIMIT - m.SetLen(syscall.CmsgLen(4)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) if cm != nil { - data := b[syscall.CmsgLen(0):] - nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) } - return b[syscall.CmsgSpace(4):] + return m.Next(4) } func parseHopLimit(cm *ControlMessage, b []byte) { - cm.HopLimit = int(nativeEndian.Uint32(b[:4])) + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) } func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_PKTINFO - m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) if cm != nil { - pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { copy(pi.Addr[:], ip) } @@ -59,41 +52,43 @@ func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { pi.setIfindex(cm.IfIndex) } } - return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] + return m.Next(sizeofInet6Pktinfo) } func parsePacketInfo(cm *ControlMessage, b []byte) { - pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[0])) - cm.Dst = pi.Addr[:] + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) cm.IfIndex = int(pi.Ifindex) } func marshalNextHop(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_NEXTHOP - m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + m := socket.ControlMessage(b) + 
m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) if cm != nil { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) sa.setSockaddr(cm.NextHop, cm.IfIndex) } - return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] + return m.Next(sizeofSockaddrInet6) } func parseNextHop(cm *ControlMessage, b []byte) { } func marshalPathMTU(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_PATHMTU - m.SetLen(syscall.CmsgLen(sysSizeofIPv6Mtuinfo)) - return b[syscall.CmsgSpace(sysSizeofIPv6Mtuinfo):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) } func parsePathMTU(cm *ControlMessage, b []byte) { - mi := (*sysIPv6Mtuinfo)(unsafe.Pointer(&b[0])) - cm.Dst = mi.Addr.Addr[:] + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) cm.IfIndex = int(mi.Addr.Scope_id) cm.MTU = int(mi.Mtu) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_stub.go index 2fecf7e5..a045f28f 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_stub.go @@ -1,23 +1,13 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { - return errOpNoSupport -} - -func newControlMessage(opt *rawOpt) (oob []byte) { - return nil -} +import "golang.org/x/net/internal/socket" -func parseControlMessage(b []byte) (*ControlMessage, error) { - return nil, errOpNoSupport -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - return nil +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_unix.go index 2af5beb4..66515060 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_unix.go @@ -1,23 +1,18 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 -import ( - "os" - "syscall" +import "golang.org/x/net/internal/socket" - "golang.org/x/net/internal/iana" -) - -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { opt.Lock() defer opt.Unlock() - if cf&FlagTrafficClass != 0 && sockOpts[ssoReceiveTrafficClass].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveTrafficClass], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -26,8 +21,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagTrafficClass) } } - if cf&FlagHopLimit != 0 && sockOpts[ssoReceiveHopLimit].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveHopLimit], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -36,8 +31,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagHopLimit) } } - if cf&flagPacketInfo != 0 && sockOpts[ssoReceivePacketInfo].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceivePacketInfo], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -46,8 +41,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(cf & flagPacketInfo) } } - if cf&FlagPathMTU != 0 && sockOpts[ssoReceivePathMTU].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceivePathMTU], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -58,109 +53,3 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { } return nil } - -func newControlMessage(opt *rawOpt) (oob []byte) { - opt.RLock() - var l int - if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) - } - if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) - } - if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlPathMTU].length) - } - if l > 0 { - oob = make([]byte, l) - b := oob - if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { - b = ctlOpts[ctlTrafficClass].marshal(b, nil) - } - if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { - b = ctlOpts[ctlHopLimit].marshal(b, nil) - } - if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { - b = ctlOpts[ctlPacketInfo].marshal(b, nil) - } - if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { - b = ctlOpts[ctlPathMTU].marshal(b, nil) - } - } - opt.RUnlock() - return -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - if len(b) == 0 { - return nil, nil - } - cmsgs, err := syscall.ParseSocketControlMessage(b) - if err != nil { - return nil, os.NewSyscallError("parse socket control message", err) - } - cm := &ControlMessage{} - 
for _, m := range cmsgs { - if m.Header.Level != iana.ProtocolIPv6 { - continue - } - switch int(m.Header.Type) { - case ctlOpts[ctlTrafficClass].name: - ctlOpts[ctlTrafficClass].parse(cm, m.Data[:]) - case ctlOpts[ctlHopLimit].name: - ctlOpts[ctlHopLimit].parse(cm, m.Data[:]) - case ctlOpts[ctlPacketInfo].name: - ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) - case ctlOpts[ctlPathMTU].name: - ctlOpts[ctlPathMTU].parse(cm, m.Data[:]) - } - } - return cm, nil -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - if cm == nil { - return - } - var l int - tclass := false - if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { - tclass = true - l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) - } - hoplimit := false - if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { - hoplimit = true - l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) - } - pktinfo := false - if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { - pktinfo = true - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - nexthop := false - if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { - nexthop = true - l += syscall.CmsgSpace(ctlOpts[ctlNextHop].length) - } - if l > 0 { - oob = make([]byte, l) - b := oob - if tclass { - b = ctlOpts[ctlTrafficClass].marshal(b, cm) - } - if hoplimit { - b = ctlOpts[ctlHopLimit].marshal(b, cm) - } - if pktinfo { - b = ctlOpts[ctlPacketInfo].marshal(b, cm) - } - if nexthop { - b = ctlOpts[ctlNextHop].marshal(b, cm) - } - } - return -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_windows.go index 72fdc1b0..ef2563b3 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/control_windows.go @@ -1,27 +1,16 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv6 -import "syscall" +import ( + "syscall" -func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { - // TODO(mikio): implement this - return syscall.EWINDOWS -} - -func newControlMessage(opt *rawOpt) (oob []byte) { - // TODO(mikio): implement this - return nil -} + "golang.org/x/net/internal/socket" +) -func parseControlMessage(b []byte) (*ControlMessage, error) { +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this - return nil, syscall.EWINDOWS -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - // TODO(mikio): implement this - return nil + return syscall.EWINDOWS } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_darwin.go deleted file mode 100644 index 4c7f476a..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_darwin.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#define __APPLE_USE_RFC_3542 -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sysSockaddrStorage C.struct_sockaddr_storage - -type sysSockaddrInet6 C.struct_sockaddr_in6 - -type sysInet6Pktinfo C.struct_in6_pktinfo - -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo - -type sysIPv6Mreq C.struct_ipv6_mreq - -type sysICMPv6Filter C.struct_icmp6_filter - -type sysGroupReq C.struct_group_req - -type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_dragonfly.go deleted file mode 100644 index c72487ce..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_dragonfly.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sysSockaddrInet6 C.struct_sockaddr_in6 - -type sysInet6Pktinfo C.struct_in6_pktinfo - -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo - -type sysIPv6Mreq C.struct_ipv6_mreq - -type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_freebsd.go deleted file mode 100644 index de199ec6..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_freebsd.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_BINDANY = C.IPV6_BINDANY - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sysSockaddrStorage C.struct_sockaddr_storage - -type sysSockaddrInet6 C.struct_sockaddr_in6 - -type sysInet6Pktinfo C.struct_in6_pktinfo - -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo - -type sysIPv6Mreq C.struct_ipv6_mreq - -type sysGroupReq C.struct_group_req - -type sysGroupSourceReq C.struct_group_source_req - -type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_linux.go deleted file mode 100644 index 664305d8..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIPV6_ADDRFORM = C.IPV6_ADDRFORM - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_FLOWINFO = C.IPV6_FLOWINFO - - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP - sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT - sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER - sysIPV6_MTU = C.IPV6_MTU - sysIPV6_RECVERR = C.IPV6_RECVERR - sysIPV6_V6ONLY = C.IPV6_V6ONLY - sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST - sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST - - //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT - //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT - //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO - //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE - //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE - //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT - - sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR - sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES - - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - - sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT - - sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR - sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR - sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT - sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF - - sysICMPV6_FILTER = C.ICMPV6_FILTER - - sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK - sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS - sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS - sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY - - sysSOL_SOCKET = 
C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req - - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sysSockaddrInet6 C.struct_sockaddr_in6 - -type sysInet6Pktinfo C.struct_in6_pktinfo - -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo - -type sysIPv6FlowlabelReq C.struct_in6_flowlabel_req - -type sysIPv6Mreq C.struct_ipv6_mreq - -type sysGroupReq C.struct_group_req - -type sysGroupSourceReq C.struct_group_source_req - -type sysICMPv6Filter C.struct_icmp6_filter - -type sysSockFProg C.struct_sock_fprog - -type sysSockFilter C.struct_sock_filter diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_netbsd.go deleted file mode 100644 index 7bd09e8e..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_netbsd.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sysSockaddrInet6 C.struct_sockaddr_in6 - -type sysInet6Pktinfo C.struct_in6_pktinfo - -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo - -type sysIPv6Mreq C.struct_ipv6_mreq - -type 
sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_openbsd.go deleted file mode 100644 index 6796d9b2..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_openbsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL - sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL - sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL - sysIPSEC6_OUTSA = C.IPSEC6_OUTSA - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_PIPEX = C.IPV6_PIPEX - - sysIPV6_RTABLE = C.IPV6_RTABLE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sysSockaddrInet6 C.struct_sockaddr_in6 - -type sysInet6Pktinfo C.struct_in6_pktinfo - -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo - -type sysIPv6Mreq C.struct_ipv6_mreq - -type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_solaris.go deleted file mode 100644 index 972b1712..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/defs_solaris.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - - sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_SEC_OPT = C.IPV6_SEC_OPT - sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - - sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK - sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT - sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK - sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT - sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK - sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT - - sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK - - sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC - - sysICMP6_FILTER = C.ICMP6_FILTER - - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sysSockaddrInet6 C.struct_sockaddr_in6 - -type sysInet6Pktinfo C.struct_in6_pktinfo - -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo - -type sysIPv6Mreq C.struct_ipv6_mreq - -type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt.go similarity index 69% rename from vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt_posix.go rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt.go index 93ff2f1a..703dafe8 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt_posix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -1,14 +1,14 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv6 import ( "net" "syscall" + + "golang.org/x/net/bpf" ) // MulticastHopLimit returns the hop limit field value for outgoing @@ -17,11 +17,11 @@ func (c *dgramOpt) MulticastHopLimit() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoMulticastHopLimit]) + return so.GetInt(c.Conn) } // SetMulticastHopLimit sets the hop limit field value for future @@ -30,11 +30,11 @@ func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastHopLimit], hoplim) + return so.SetInt(c.Conn, hoplim) } // MulticastInterface returns the default interface for multicast @@ -43,11 +43,11 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport } - return getInterface(fd, &sockOpts[ssoMulticastInterface]) + return so.getMulticastInterface(c.Conn) } // SetMulticastInterface sets the default interface for future @@ -56,11 +56,11 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport } - return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) + return so.setMulticastInterface(c.Conn, ifi) } // MulticastLoopback reports whether transmitted multicast packets @@ -69,11 +69,11 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { return false, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return false, err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport } - on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + on, err := so.GetInt(c.Conn) if err != nil { return false, err } @@ -86,11 +86,11 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) + return so.SetInt(c.Conn, boolint(on)) } // JoinGroup joins the group address group on the interface ifi. 
@@ -106,15 +106,15 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // LeaveGroup leaves the group address group on the interface ifi @@ -124,15 +124,15 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // JoinSourceSpecificGroup joins the source-specific group comprising @@ -145,9 +145,9 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { @@ -157,7 +157,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // LeaveSourceSpecificGroup leaves the source-specific group on the @@ -166,9 +166,9 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { @@ -178,7 +178,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // ExcludeSourceSpecificGroup excludes the source-specific group from @@ -188,9 +188,9 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { @@ -200,7 +200,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // IncludeSourceSpecificGroup includes the excluded source-specific @@ -209,9 +209,9 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { @@ -221,22 +221,22 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // Checksum 
reports whether the kernel will compute, store or verify a -// checksum for both incoming and outgoing packets. If on is true, it +// checksum for both incoming and outgoing packets. If on is true, it // returns an offset in bytes into the data of where the checksum // field is located. func (c *dgramOpt) Checksum() (on bool, offset int, err error) { if !c.ok() { return false, 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return false, 0, err + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errOpNoSupport } - offset, err = getInt(fd, &sockOpts[ssoChecksum]) + offset, err = so.GetInt(c.Conn) if err != nil { return false, 0, err } @@ -246,21 +246,21 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { return true, offset, nil } -// SetChecksum enables the kernel checksum processing. If on is ture, +// SetChecksum enables the kernel checksum processing. If on is ture, // the offset should be an offset in bytes into the data of where the // checksum field is located. func (c *dgramOpt) SetChecksum(on bool, offset int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoChecksum] + if !ok { + return errOpNoSupport } if !on { offset = -1 } - return setInt(fd, &sockOpts[ssoChecksum], offset) + return so.SetInt(c.Conn, offset) } // ICMPFilter returns an ICMP filter. @@ -268,11 +268,11 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport } - return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) + return so.getICMPFilter(c.Conn) } // SetICMPFilter deploys the ICMP filter. @@ -280,9 +280,23 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport } - return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) + return so.setBPF(c.Conn, filter) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt_stub.go deleted file mode 100644 index fb067fb2..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/dgramopt_stub.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv6 - -import "net" - -// MulticastHopLimit returns the hop limit field value for outgoing -// multicast packets. -func (c *dgramOpt) MulticastHopLimit() (int, error) { - return 0, errOpNoSupport -} - -// SetMulticastHopLimit sets the hop limit field value for future -// outgoing multicast packets. -func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { - return errOpNoSupport -} - -// MulticastInterface returns the default interface for multicast -// packet transmissions. 
-func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { - return nil, errOpNoSupport -} - -// SetMulticastInterface sets the default interface for future -// multicast packet transmissions. -func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { - return errOpNoSupport -} - -// MulticastLoopback reports whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) MulticastLoopback() (bool, error) { - return false, errOpNoSupport -} - -// SetMulticastLoopback sets whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) SetMulticastLoopback(on bool) error { - return errOpNoSupport -} - -// JoinGroup joins the group address group on the interface ifi. -// By default all sources that can cast data to group are accepted. -// It's possible to mute and unmute data transmission from a specific -// source by using ExcludeSourceSpecificGroup and -// IncludeSourceSpecificGroup. -// JoinGroup uses the system assigned multicast interface when ifi is -// nil, although this is not recommended because the assignment -// depends on platforms and sometimes it might require routing -// configuration. -func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// LeaveGroup leaves the group address group on the interface ifi -// regardless of whether the group is any-source group or -// source-specific group. -func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// JoinSourceSpecificGroup joins the source-specific group comprising -// group and source on the interface ifi. -// JoinSourceSpecificGroup uses the system assigned multicast -// interface when ifi is nil, although this is not recommended because -// the assignment depends on platforms and sometimes it might require -// routing configuration. -func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// LeaveSourceSpecificGroup leaves the source-specific group on the -// interface ifi. -func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// ExcludeSourceSpecificGroup excludes the source-specific group from -// the already joined any-source groups by JoinGroup on the interface -// ifi. -func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// IncludeSourceSpecificGroup includes the excluded source-specific -// group by ExcludeSourceSpecificGroup again on the interface ifi. -func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// Checksum reports whether the kernel will compute, store or verify a -// checksum for both incoming and outgoing packets. If on is true, it -// returns an offset in bytes into the data of where the checksum -// field is located. -func (c *dgramOpt) Checksum() (on bool, offset int, err error) { - return false, 0, errOpNoSupport -} - -// SetChecksum enables the kernel checksum processing. If on is ture, -// the offset should be an offset in bytes into the data of where the -// checksum field is located. -func (c *dgramOpt) SetChecksum(on bool, offset int) error { - return errOpNoSupport -} - -// ICMPFilter returns an ICMP filter. 
-func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -// SetICMPFilter deploys the ICMP filter. -func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/doc.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/doc.go index dd13aa21..664a97de 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/doc.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/doc.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,23 +8,24 @@ // The package provides IP-level socket options that allow // manipulation of IPv6 facilities. // -// The IPv6 protocol is defined in RFC 2460. -// Basic and advanced socket interface extensions are defined in RFC -// 3493 and RFC 3542. -// Socket interface extensions for multicast source filters are -// defined in RFC 3678. +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. // MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. // Source-specific multicast is defined in RFC 4607. // +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// // // Unicasting // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections -// that use the IPv6 transport. When a single TCP connection carrying +// that use the IPv6 transport. When a single TCP connection carrying // a data flow of multiple packets needs to indicate the flow is -// important, ipv6.Conn is used to set the traffic class field on the -// IPv6 header for each packet. +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. // // ln, err := net.Listen("tcp6", "[::]:1024") // if err != nil { @@ -56,7 +57,7 @@ // // The options for multicasting are available for net.UDPConn and // net.IPconn which are created as network connections that use the -// IPv6 transport. A few network facilities must be prepared before +// IPv6 transport. A few network facilities must be prepared before // you begin multicasting, at a minimum joining network interfaces and // multicast groups. // @@ -80,7 +81,7 @@ // defer c.Close() // // Second, the application joins multicast groups, starts listening to -// the groups on the specified network interfaces. Note that the +// the groups on the specified network interfaces. Note that the // service port for transport layer protocol does not matter with this // operation as joining groups affects only network and link layer // protocols, such as IPv6 and Ethernet. @@ -94,10 +95,10 @@ // } // // The application might set per packet control message transmissions -// between the protocol stack within the kernel. When the application +// between the protocol stack within the kernel. When the application // needs a destination address on an incoming packet, -// SetControlMessage of ipv6.PacketConn is used to enable control -// message transmissons. +// SetControlMessage of PacketConn is used to enable control message +// transmissions. 
// // if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { // // error handling @@ -143,7 +144,7 @@ // More multicasting // // An application that uses PacketConn may join multiple multicast -// groups. For example, a UDP listener with port 1024 might join two +// groups. For example, a UDP listener with port 1024 might join two // different groups across over two different network interfaces by // using: // @@ -164,7 +165,7 @@ // } // // It is possible for multiple UDP listeners that listen on the same -// UDP port to join the same multicast group. The net package will +// UDP port to join the same multicast group. The net package will // provide a socket that listens to a wildcard address with reusable // UDP port when an appropriate multicast address prefix is passed to // the net.ListenPacket or net.ListenUDP. @@ -238,3 +239,5 @@ // In the fallback case, ExcludeSourceSpecificGroup and // IncludeSourceSpecificGroup may return an error. package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/endpoint.go index 966eaa89..0624c174 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/endpoint.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/endpoint.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,8 +8,15 @@ import ( "net" "syscall" "time" + + "golang.org/x/net/internal/socket" ) +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + // A Conn represents a network endpoint that uses IPv6 transport. // It allows to set basic IP-level socket options such as traffic // class and hop limit. @@ -18,7 +25,7 @@ type Conn struct { } type genericOpt struct { - net.Conn + *socket.Conn } func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } @@ -26,14 +33,14 @@ func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } // PathMTU returns a path MTU value for the destination associated // with the endpoint. func (c *Conn) PathMTU() (int, error) { - if !c.genericOpt.ok() { + if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.genericOpt.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errOpNoSupport } - _, mtu, err := getMTUInfo(fd, &sockOpts[ssoPathMTU]) + _, mtu, err := so.getMTUInfo(c.Conn) if err != nil { return 0, err } @@ -42,14 +49,15 @@ func (c *Conn) PathMTU() (int, error) { // NewConn returns a new Conn. func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) return &Conn{ - genericOpt: genericOpt{Conn: c}, + genericOpt: genericOpt{Conn: cc}, } } // A PacketConn represents a packet network endpoint that uses IPv6 -// transport. It is used to control several IP-level socket options -// including IPv6 header manipulation. It also provides datagram +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram // based network I/O methods specific to the IPv6 and higher layer // protocols such as OSPF, GRE, and UDP. 
type PacketConn struct { @@ -59,10 +67,10 @@ type PacketConn struct { } type dgramOpt struct { - net.PacketConn + *socket.Conn } -func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // SetControlMessage allows to receive the per packet basis IP-level // socket options. @@ -70,11 +78,7 @@ func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { return syscall.EINVAL } - fd, err := c.payloadHandler.sysfd() - if err != nil { - return err - } - return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } // SetDeadline sets the read and write deadlines associated with the @@ -115,9 +119,10 @@ func (c *PacketConn) Close() error { // NewPacketConn returns a new PacketConn using c as its underlying // transport. func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) return &PacketConn{ - genericOpt: genericOpt{Conn: c.(net.Conn)}, - dgramOpt: dgramOpt{PacketConn: c}, - payloadHandler: payloadHandler{PacketConn: c}, + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, } } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/gen.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/gen.go deleted file mode 100644 index 826e3ae2..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/gen.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - // The ipv6 package still supports go1.2, and so we need to - // take care of additional platforms in go1.3 and above for - // working with go1.2. 
- switch { - case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": - b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1) - case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"): - b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1) - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", - parseICMPv6Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") - fmt.Fprintf(&bb, "package ipv6\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv6Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var icp icmpv6Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigName) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) - } - fmt.Fprintf(w, "}\n") - return nil -} - -type icmpv6Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv6ParamRecord struct { - OrigName string - Name string - Value int -} - -func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { - id := -1 - for i, r := range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Name, "Reserved") || - 
strings.Contains(pr.Name, "Unassigned") || - strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "Experiment") || - strings.Contains(pr.Name, "experiment") { - continue - } - ss := strings.Split(pr.Name, "\n") - if len(ss) > 1 { - prs[i].Name = strings.Join(ss, " ") - } else { - prs[i].Name = ss[0] - } - s := strings.TrimSpace(prs[i].Name) - prs[i].OrigName = s - prs[i].Name = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt.go similarity index 61% rename from vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt_posix.go rename to vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt.go index dd77a016..e9dbc2e1 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt_posix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt.go @@ -1,9 +1,7 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv6 import "syscall" @@ -14,11 +12,11 @@ func (c *genericOpt) TrafficClass() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoTrafficClass]) + return so.GetInt(c.Conn) } // SetTrafficClass sets the traffic class field value for future @@ -27,11 +25,11 @@ func (c *genericOpt) SetTrafficClass(tclass int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoTrafficClass], tclass) + return so.SetInt(c.Conn, tclass) } // HopLimit returns the hop limit field value for outgoing packets. @@ -39,11 +37,11 @@ func (c *genericOpt) HopLimit() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoHopLimit]) + return so.GetInt(c.Conn) } // SetHopLimit sets the hop limit field value for future outgoing @@ -52,9 +50,9 @@ func (c *genericOpt) SetHopLimit(hoplim int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoHopLimit], hoplim) + return so.SetInt(c.Conn, hoplim) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt_stub.go deleted file mode 100644 index f5c37224..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/genericopt_stub.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv6 - -// TrafficClass returns the traffic class field value for outgoing -// packets. 
-func (c *genericOpt) TrafficClass() (int, error) { - return 0, errOpNoSupport -} - -// SetTrafficClass sets the traffic class field value for future -// outgoing packets. -func (c *genericOpt) SetTrafficClass(tclass int) error { - return errOpNoSupport -} - -// HopLimit returns the hop limit field value for outgoing packets. -func (c *genericOpt) HopLimit() (int, error) { - return 0, errOpNoSupport -} - -// SetHopLimit sets the hop limit field value for future outgoing -// packets. -func (c *genericOpt) SetHopLimit(hoplim int) error { - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper.go index 53b99990..25974013 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper.go @@ -1,14 +1,12 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv6 import ( - "encoding/binary" "errors" "net" - "unsafe" ) var ( @@ -17,20 +15,8 @@ var ( errInvalidConnType = errors.New("invalid conn type") errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") - - nativeEndian binary.ByteOrder ) -func init() { - i := uint32(1) - b := (*[4]byte)(unsafe.Pointer(&i)) - if b[0] == 1 { - nativeEndian = binary.LittleEndian - } else { - nativeEndian = binary.BigEndian - } -} - func boolint(b bool) int { if b { return 1 @@ -51,3 +37,21 @@ func netAddrToIP16(a net.Addr) net.IP { } return nil } + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_stub.go deleted file mode 100644 index 20354ab2..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_stub.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv6 - -func (c *genericOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *dgramOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *payloadHandler) sysfd() (int, error) { - return 0, errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_unix.go deleted file mode 100644 index 92868ed2..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_unix.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv6 - -import ( - "net" - "reflect" -) - -func (c *genericOpt) sysfd() (int, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return 0, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (int, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return 0, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (int, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func sysfd(c net.Conn) (int, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - nfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := nfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return int(fd.Int()), nil - } - } - return 0, errInvalidConnType -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_windows.go deleted file mode 100644 index 28c401b5..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/helper_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6 - -import ( - "net" - "reflect" - "syscall" -) - -func (c *genericOpt) sysfd() (syscall.Handle, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (syscall.Handle, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (syscall.Handle, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func sysfd(c net.Conn) (syscall.Handle, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - netfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := netfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return syscall.Handle(fd.Uint()), nil - } - } - return syscall.InvalidHandle, errInvalidConnType -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp.go index a2de65a0..b7f48e27 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,6 +6,9 @@ package ipv6 import "golang.org/x/net/internal/iana" +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + // An ICMPType represents a type of ICMP message. type ICMPType int @@ -26,12 +29,12 @@ func (typ ICMPType) Protocol() int { // packets. The filter belongs to a packet delivery path on a host and // it cannot interact with forwarding packets or tunnel-outer packets. // -// Note: RFC 2460 defines a reasonable role model. A node means a +// Note: RFC 8200 defines a reasonable role model. A node means a // device that implements IP. 
A router means a node that forwards IP // packets not explicitly addressed to itself, and a host means a node // that is not a router. type ICMPFilter struct { - sysICMPv6Filter + icmpv6Filter } // Accept accepts incoming ICMP packets including the type field value diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_bsd.go index 30e3ce42..e1a791de 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_bsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,15 +6,15 @@ package ipv6 -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) accept(typ ICMPType) { f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) setAll(block bool) { +func (f *icmpv6Filter) setAll(block bool) { for i := range f.Filt { if block { f.Filt[i] = 0 @@ -24,6 +24,6 @@ func (f *sysICMPv6Filter) setAll(block bool) { } } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_linux.go index a67ecf69..647f6b44 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_linux.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -1,18 +1,18 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv6 -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) accept(typ ICMPType) { f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { f.Data[typ>>5] |= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) setAll(block bool) { +func (f *icmpv6Filter) setAll(block bool) { for i := range f.Data { if block { f.Data[i] = 1<<32 - 1 @@ -22,6 +22,6 @@ func (f *sysICMPv6Filter) setAll(block bool) { } } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_solaris.go index a942f354..7c23bb1c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_solaris.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -1,24 +1,27 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build solaris - package ipv6 -func (f *sysICMPv6Filter) accept(typ ICMPType) { - // TODO(mikio): implement this +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) block(typ ICMPType) { - // TODO(mikio): implement this +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) setAll(block bool) { - // TODO(mikio): implement this +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { - // TODO(mikio): implement this - return false +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_stub.go index c1263eca..c4b9be6d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -1,23 +1,23 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 -type sysICMPv6Filter struct { +type icmpv6Filter struct { } -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) accept(typ ICMPType) { } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { } -func (f *sysICMPv6Filter) setAll(block bool) { +func (f *icmpv6Filter) setAll(block bool) { } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { return false } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_windows.go index 9dcfb810..443cd073 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -1,26 +1,22 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package ipv6 -type sysICMPv6Filter struct { +func (f *icmpv6Filter) accept(typ ICMPType) { // TODO(mikio): implement this } -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { // TODO(mikio): implement this } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) setAll(block bool) { // TODO(mikio): implement this } -func (f *sysICMPv6Filter) setAll(block bool) { - // TODO(mikio): implement this -} - -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { // TODO(mikio): implement this return false } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload.go index 529b20bc..a8197f16 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload.go @@ -1,15 +1,23 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv6 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. // A payloadHandler represents the IPv6 datagram payload handler. type payloadHandler struct { net.PacketConn + *socket.Conn rawOpt } -func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg.go index 8e90d324..4ee4b062 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,59 +12,24 @@ import ( ) // ReadFrom reads a payload of the received IPv6 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { return 0, nil, nil, syscall.EINVAL } - oob := newControlMessage(&c.rawOpt) - var oobn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { - return 0, nil, nil, err - } - default: - return 0, nil, nil, errInvalidConnType - } - if cm, err = parseControlMessage(oob[:oobn]); err != nil { - return 0, nil, nil, err - } - if cm != nil { - cm.Src = netAddrToIP16(src) - } - return + return c.readFrom(b) } // WriteTo writes a payload of the IPv6 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. 
The control message cm allows -// the IPv6 header fields and the datagram path to be specified. The +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The // cm may be nil if control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { return 0, syscall.EINVAL } - oob := marshalControlMessage(cm) - if dst == nil { - return 0, errMissingAddress - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, errInvalidConnType - } - if err != nil { - return 0, err - } - return + return c.writeTo(b, cm, dst) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go new file mode 100644 index 00000000..fdc6c399 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go new file mode 100644 index 00000000..8f6d02e2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index 499204d0..99a43542 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,7 +12,7 @@ import ( ) // ReadFrom reads a payload of the received IPv6 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { @@ -26,9 +26,9 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. } // WriteTo writes a payload of the IPv6 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. The control message cm allows -// the IPv6 header fields and the datagram path to be specified. The +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The // cm may be nil if control of the outgoing datagram is not required. 
func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt.go index f0cfc2f9..cc3907df 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt.go @@ -4,6 +4,8 @@ package ipv6 +import "golang.org/x/net/internal/socket" + // Sticky socket options const ( ssoTrafficClass = iota // header field for unicast packet, RFC 3542 @@ -24,23 +26,18 @@ const ( ssoLeaveSourceGroup // source-specific multicast ssoBlockSourceGroup // any-source or source-specific multicast ssoUnblockSourceGroup // any-source or source-specific multicast - ssoMax + ssoAttachFilter // attach BPF for filtering inbound traffic ) // Sticky socket option value types const ( - ssoTypeInt = iota + 1 - ssoTypeInterface - ssoTypeICMPFilter - ssoTypeMTUInfo - ssoTypeIPMreq + ssoTypeIPMreq = iota + 1 ssoTypeGroupReq ssoTypeGroupSourceReq ) // A sockOpt represents a binding for sticky socket option. type sockOpt struct { - level int // option level - name int // option name, must be equal or greater than 1 - typ int // option value type, must be equal or greater than 1 + socket.Option + typ int // hint for option value type; optional } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go deleted file mode 100644 index b7fd4fe6..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv6 - -import ( - "net" - "os" - "unsafe" -) - -func setsockoptIPMreq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - var mreq sysIPv6Mreq - copy(mreq.Multiaddr[:], grp) - if ifi != nil { - mreq.setIfindex(ifi.Index) - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mreq), sysSizeofIPv6Mreq)) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go deleted file mode 100644 index c03c7313..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6 - -import ( - "net" - "os" - "syscall" - "unsafe" -) - -func setsockoptIPMreq(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - var mreq sysIPv6Mreq - copy(mreq.Multiaddr[:], grp) - if ifi != nil { - mreq.setIfindex(ifi.Index) - } - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&mreq)), sysSizeofIPv6Mreq)) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_posix.go new file mode 100644 index 00000000..0eac86eb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errOpNoSupport + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go deleted file mode 100644 index a36a7e03..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin freebsd linux - -package ipv6 - -import ( - "net" - "os" - "unsafe" -) - -var freebsd32o64 bool - -func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - var gr sysGroupReq - if ifi != nil { - gr.Interface = uint32(ifi.Index) - } - gr.setGroup(grp) - var p unsafe.Pointer - var l uint32 - if freebsd32o64 { - var d [sysSizeofGroupReq + 4]byte - s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupReq + 4 - } else { - p = unsafe.Pointer(&gr) - l = sysSizeofGroupReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) -} - -func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - var gsr sysGroupSourceReq - if ifi != nil { - gsr.Interface = uint32(ifi.Index) - } - gsr.setSourceGroup(grp, src) - var p unsafe.Pointer - var l uint32 - if freebsd32o64 { - var d [sysSizeofGroupSourceReq + 4]byte - s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupSourceReq + 4 - } else { - p = unsafe.Pointer(&gsr) - l = sysSizeofGroupSourceReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_stub.go index b8dacfde..1f4a273e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -1,13 +1,46 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 -import "net" +import ( + "net" -func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { return nil, 0, errOpNoSupport } + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_unix.go deleted file mode 100644 index 7115b18e..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_unix.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv6 - -import ( - "net" - "os" - "unsafe" -) - -func getInt(fd int, opt *sockOpt) (int, error) { - if opt.name < 1 || opt.typ != ssoTypeInt { - return 0, errOpNoSupport - } - var i int32 - l := uint32(4) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - return int(i), nil -} - -func setInt(fd int, opt *sockOpt, v int) error { - if opt.name < 1 || opt.typ != ssoTypeInt { - return errOpNoSupport - } - i := int32(v) - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) -} - -func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return nil, errOpNoSupport - } - var i int32 - l := uint32(4) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - if i == 0 { - return nil, nil - } - ifi, err := net.InterfaceByIndex(int(i)) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return errOpNoSupport - } - var i int32 - if ifi != nil { - i = int32(ifi.Index) - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) -} - -func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { - if opt.name < 1 || opt.typ != ssoTypeICMPFilter { - return nil, errOpNoSupport - } - var f ICMPFilter - l := uint32(sysSizeofICMPv6Filter) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - return &f, nil -} - -func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { - if opt.name < 1 || opt.typ != ssoTypeICMPFilter { - return errOpNoSupport - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), sysSizeofICMPv6Filter)) -} - -func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { - if opt.name < 1 || opt.typ != ssoTypeMTUInfo { - return nil, 0, errOpNoSupport - } - var mi sysIPv6Mtuinfo - l := uint32(sysSizeofIPv6Mtuinfo) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mi), &l); err != nil { - return nil, 0, os.NewSyscallError("getsockopt", err) - } - if mi.Addr.Scope_id == 0 { - return nil, int(mi.Mtu), nil - } - ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) - if err != nil { - return nil, 0, err - } - return ifi, int(mi.Mtu), nil -} - -func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - if opt.name < 1 { - return errOpNoSupport - } - switch opt.typ { - case ssoTypeIPMreq: - return setsockoptIPMreq(fd, opt, ifi, grp) - case ssoTypeGroupReq: - return setsockoptGroupReq(fd, opt, ifi, grp) - default: - return errOpNoSupport - } -} - -func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { - return errOpNoSupport - } - return setsockoptGroupSourceReq(fd, opt, ifi, grp, src) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_windows.go deleted 
file mode 100644 index 32c73b72..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sockopt_windows.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6 - -import ( - "net" - "os" - "syscall" - "unsafe" -) - -func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { - if opt.name < 1 || opt.typ != ssoTypeInt { - return 0, errOpNoSupport - } - var i int32 - l := int32(4) - if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - return int(i), nil -} - -func setInt(fd syscall.Handle, opt *sockOpt, v int) error { - if opt.name < 1 || opt.typ != ssoTypeInt { - return errOpNoSupport - } - i := int32(v) - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) -} - -func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return nil, errOpNoSupport - } - var i int32 - l := int32(4) - if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - if i == 0 { - return nil, nil - } - ifi, err := net.InterfaceByIndex(int(i)) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return errOpNoSupport - } - var i int32 - if ifi != nil { - i = int32(ifi.Index) - } - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) -} - -func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { - return errOpNoSupport -} - -func getMTUInfo(fd syscall.Handle, opt *sockOpt) (*net.Interface, int, error) { - return nil, 0, errOpNoSupport -} - -func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - if opt.name < 1 || opt.typ != ssoTypeIPMreq { - return errOpNoSupport - } - return setsockoptIPMreq(fd, opt, ifi, grp) -} - -func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - // TODO(mikio): implement this - return errOpNoSupport -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 00000000..b0510c0b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 00000000..eece9618 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 00000000..b2dbcb2f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 00000000..676bea55 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bsd.go index 0ee43e6d..e416eaa1 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -11,46 +11,47 @@ import ( "syscall" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, - ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, - ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, } - sockOpts = [ssoMax]sockOpt{ - ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, - ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, - ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, - ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, } ) -func (sa *sysSockaddrInet6) setSockaddr(ip 
net.IP, i int) { - sa.Len = sysSizeofSockaddrInet6 +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi *sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = uint32(i) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_darwin.go index c263f08d..e3d04439 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,128 +6,101 @@ package ipv6 import ( "net" + "strconv" + "strings" "syscall" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( ctlOpts = [ctlMax]ctlOpt{ ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_2292PKTINFO, sysSizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, } - sockOpts = [ssoMax]sockOpt{ - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_2292PKTINFO, ssoTypeInt}, - ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, } ) func init() { // Seems like kern.osreldate is veiled on 
latest OS X. We use // kern.osrelease instead. - osver, err := syscall.Sysctl("kern.osrelease") + s, err := syscall.Sysctl("kern.osrelease") if err != nil { return } - var i int - for i = range osver { - if osver[i] == '.' { - break - } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return } // The IP_PKTINFO and protocol-independent multicast API were - // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like - // those features require OS X 10.8 (Darwin 12.0.0) and above. + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. // See http://support.apple.com/kb/HT1633. - if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { - ctlOpts[ctlTrafficClass].name = sysIPV6_TCLASS - ctlOpts[ctlTrafficClass].length = 4 - ctlOpts[ctlTrafficClass].marshal = marshalTrafficClass - ctlOpts[ctlTrafficClass].parse = parseTrafficClass - ctlOpts[ctlHopLimit].name = sysIPV6_HOPLIMIT - ctlOpts[ctlHopLimit].marshal = marshalHopLimit - ctlOpts[ctlPacketInfo].name = sysIPV6_PKTINFO - ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo - ctlOpts[ctlNextHop].name = sysIPV6_NEXTHOP - ctlOpts[ctlNextHop].length = sysSizeofSockaddrInet6 - ctlOpts[ctlNextHop].marshal = marshalNextHop - ctlOpts[ctlNextHop].parse = parseNextHop - ctlOpts[ctlPathMTU].name = sysIPV6_PATHMTU - ctlOpts[ctlPathMTU].length = sysSizeofIPv6Mtuinfo - ctlOpts[ctlPathMTU].marshal = marshalPathMTU - ctlOpts[ctlPathMTU].parse = parsePathMTU - sockOpts[ssoTrafficClass].level = iana.ProtocolIPv6 - sockOpts[ssoTrafficClass].name = sysIPV6_TCLASS - sockOpts[ssoTrafficClass].typ = ssoTypeInt - sockOpts[ssoReceiveTrafficClass].level = iana.ProtocolIPv6 - sockOpts[ssoReceiveTrafficClass].name = sysIPV6_RECVTCLASS - sockOpts[ssoReceiveTrafficClass].typ = ssoTypeInt - sockOpts[ssoReceiveHopLimit].name = sysIPV6_RECVHOPLIMIT - sockOpts[ssoReceivePacketInfo].name = sysIPV6_RECVPKTINFO - sockOpts[ssoReceivePathMTU].level = iana.ProtocolIPv6 - sockOpts[ssoReceivePathMTU].name = sysIPV6_RECVPATHMTU - sockOpts[ssoReceivePathMTU].typ = ssoTypeInt - sockOpts[ssoPathMTU].level = iana.ProtocolIPv6 - sockOpts[ssoPathMTU].name = sysIPV6_PATHMTU - sockOpts[ssoPathMTU].typ = ssoTypeMTUInfo - sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP - sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq - sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP - sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq - sockOpts[ssoJoinSourceGroup].level = iana.ProtocolIPv6 - sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP - sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoLeaveSourceGroup].level = iana.ProtocolIPv6 - sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP - sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoBlockSourceGroup].level = iana.ProtocolIPv6 - sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE - sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoUnblockSourceGroup].level = iana.ProtocolIPv6 - sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE - sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return } + ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass} + ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit} + ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo} + ctlOpts[ctlNextHop] = 
ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop} + ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU} + sockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}} + sockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}} + sockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}} + sockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}} + sockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}} + sockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} } -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { - sa.Len = sysSizeofSockaddrInet6 +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi *sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = uint32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet6 +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet6 +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) - sa.Len = sysSizeofSockaddrInet6 + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], src) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_freebsd.go 
b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_freebsd.go index 5527001f..e9349dc2 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,36 +12,37 @@ import ( "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, - ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, - ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, } - sockOpts = [ssoMax]sockOpt{ - ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, - ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, - ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, - ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = map[int]sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, } ) @@ -57,35 +58,35 @@ func init() { } } -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { - sa.Len = sysSizeofSockaddrInet6 +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi *sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = uint32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) - sa.Len = sysSizeofSockaddrInet6 +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) - sa.Len = sysSizeofSockaddrInet6 +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) - sa.Len = sysSizeofSockaddrInet6 + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], src) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_linux.go index fd7d5b18..bc218103 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_linux.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
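The hunks above replace the fixed [ssoMax]sockOpt arrays with map[int]*sockOpt entries that wrap a socket.Option carrying Level, Name and Len, so each platform file differs only in its table contents. Below is a minimal, self-contained sketch of that table-driven pattern using only the standard syscall package; the option IDs, the option struct and the setInt helper are illustrative names, not part of x/net, and the constants assume a Unix platform.

package main

import (
	"fmt"
	"syscall"
)

// option mirrors the Level/Name/Len triple the real tables carry.
type option struct {
	Level int // e.g. syscall.IPPROTO_IPV6
	Name  int // e.g. syscall.IPV6_UNICAST_HOPS
	Len   int // payload size in bytes; 4 for plain integer options
}

// Logical option IDs; the real package uses its sso* constants instead.
const (
	optHopLimit = iota
	optMulticastLoopback
)

// opts is the per-platform table: only its contents would change per OS.
var opts = map[int]*option{
	optHopLimit:          {Level: syscall.IPPROTO_IPV6, Name: syscall.IPV6_UNICAST_HOPS, Len: 4},
	optMulticastLoopback: {Level: syscall.IPPROTO_IPV6, Name: syscall.IPV6_MULTICAST_LOOP, Len: 4},
}

// setInt applies a 4-byte integer option to a socket file descriptor,
// failing cleanly when the option is absent from this platform's table.
func setInt(fd, id, v int) error {
	o, ok := opts[id]
	if !ok || o.Len != 4 {
		return fmt.Errorf("option %d not supported on this platform", id)
	}
	return syscall.SetsockoptInt(fd, o.Level, o.Name, v)
}

func main() {
	fd, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)
	if err := setInt(fd, optHopLimit, 64); err != nil {
		fmt.Println("setsockopt:", err)
	}
}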
@@ -10,63 +10,65 @@ import ( "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, - ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, } - sockOpts = [ssoMax]sockOpt{ - ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, - ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, - ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, - ssoChecksum: {iana.ProtocolReserved, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMPV6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: 
sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, } ) -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi *sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = int32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Ifindex = int32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) sa.Family = syscall.AF_INET6 copy(sa.Addr[:], src) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 00000000..d348b5f6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
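Both the darwin code above and the solaris file that follows locate the sockaddr embedded in a group request by adding a literal byte offset to the request's base pointer, because the cgo-generated struct on those platforms exposes that region only as anonymous padding. A hedged sketch of the same technique follows, assuming a simplified request layout with a named Group field so unsafe.Offsetof can stand in for the hard-coded constant:

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// sockaddrStorage and groupReq only loosely mirror the generated structs;
// they exist so the example compiles on its own.
type sockaddrStorage struct {
	Family uint16
	Data   [126]byte
}

type groupReq struct {
	Interface uint32
	Group     sockaddrStorage
}

func main() {
	var gr groupReq

	// Named-field form (as in sys_freebsd.go / sys_linux.go): take the
	// address of the embedded storage directly.
	sa := (*sockaddrStorage)(unsafe.Pointer(&gr.Group))
	sa.Family = uint16(syscall.AF_INET6)

	// Offset form (what sys_darwin.go / sys_solaris.go do with a literal
	// constant): base pointer plus the field's byte offset, converted back
	// in a single expression as the unsafe rules require.
	off := unsafe.Offsetof(gr.Group)
	sa2 := (*sockaddrStorage)(unsafe.Pointer(uintptr(unsafe.Pointer(&gr)) + off))
	fmt.Println(off, sa2.Family == sa.Family)
}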
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = 
(*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 00000000..add8ccc0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 00000000..581ee490 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_stub.go index ead0f4d1..b845388e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{} + sockOpts = map[int]*sockOpt{} ) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_windows.go index fda87573..fc36b018 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -9,6 +9,7 @@ import ( "syscall" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) const ( @@ -21,12 +22,14 @@ const ( sysIPV6_LEAVE_GROUP = 0xd sysIPV6_PKTINFO = 0x13 - sysSizeofSockaddrInet6 = 0x1c + sizeofSockaddrInet6 = 0x1c - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -34,30 +37,39 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{ - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, } ) -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = uint32(i) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/syscall_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/syscall_linux_386.go deleted file mode 100644 index 64a3c665..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/syscall_linux_386.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6 - -import ( - "syscall" - "unsafe" -) - -const ( - sysGETSOCKOPT = 0xf - sysSETSOCKOPT = 0xe -) - -func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { - if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { - if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/syscall_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/syscall_unix.go deleted file mode 100644 index 925fd2fb..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/syscall_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!386 netbsd openbsd - -package ipv6 - -import ( - "syscall" - "unsafe" -) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_darwin.go index cb044b03..6aab1dfa 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -71,19 +71,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -91,7 +91,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -100,31 +100,31 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* 
in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [128]byte } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [128]byte Pad_cgo_1 [128]byte diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go index 5a03ab73..d2de804d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_dragonfly.go -// +build dragonfly - package ipv6 const ( @@ -52,16 +50,16 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -70,21 +68,21 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go index 4ace96f0..919e572d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -62,19 +62,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -82,7 +82,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -91,32 +91,32 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct 
{ Interface uint32 - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go index 4a62c2d5..cb8141f9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -62,19 +62,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -82,7 +82,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -91,34 +91,34 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go index 4a62c2d5..cb8141f9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -62,19 +62,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -82,7 +82,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -91,34 +91,34 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { 
+type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_386.go index 36fccbb6..73aa8c6d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -87,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -113,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -134,33 +136,33 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [2]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go index 7461e7e0..b64f0157 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go 
@@ -87,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -113,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -134,35 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go index 36fccbb6..73aa8c6d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -87,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -113,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type 
sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -134,33 +136,33 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [2]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go index ed35f603..b64f0157 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,arm64 - package ipv6 const ( @@ -89,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPv6Filter = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -115,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -136,35 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter 
*sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 00000000..73aa8c6d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + 
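The sizeof* constants in these generated files are expected to equal unsafe.Sizeof of the matching structs. A small sanity-check sketch, re-declaring two of the types locally with the layout shown above (illustrative only; both structs are padding-free, so the check holds on any architecture):

package main

import (
	"fmt"
	"unsafe"
)

const (
	sizeofSockaddrInet6 = 0x1c // 28 bytes
	sizeofInet6Pktinfo  = 0x14 // 20 bytes
)

type sockaddrInet6 struct {
	Family   uint16
	Port     uint16
	Flowinfo uint32
	Addr     [16]byte
	Scope_id uint32
}

type inet6Pktinfo struct {
	Addr    [16]byte
	Ifindex int32
}

func main() {
	// Both comparisons print true: the constants match the struct layouts.
	fmt.Println(unsafe.Sizeof(sockaddrInet6{}) == sizeofSockaddrInet6)
	fmt.Println(unsafe.Sizeof(inet6Pktinfo{}) == sizeofInet6Pktinfo)
}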
+type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go index 141c8697..b64f0157 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64 - package ipv6 const ( @@ -89,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPv6Filter = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -115,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -136,35 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go index d50eb633..b64f0157 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64le - 
package ipv6 const ( @@ -89,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPv6Filter = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -115,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -136,35 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 00000000..73aa8c6d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 
0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go index 4c58ea67..c9bf6a87 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc - package ipv6 const ( @@ -89,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofICMPv6Filter = 0x20 - sysSizeofICMPv6Filter = 
0x20 + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]uint8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -115,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -136,33 +136,33 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [2]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go index c1d775f7..b64f0157 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64 - package ipv6 const ( @@ -89,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPv6Filter = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -115,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -136,35 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + 
Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go index e385fb7a..b64f0157 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64le - package ipv6 const ( @@ -89,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPv6Filter = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -115,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -136,35 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go index 28d69b1b..b64f0157 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go +++ 
b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,s390x - package ipv6 const ( @@ -89,25 +87,27 @@ const ( sysSOL_SOCKET = 0x1 sysSO_ATTACH_FILTER = 0x1a - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofICMPv6Filter = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -115,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -136,35 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } -type sysSockFProg struct { +type sockFProg struct { Len uint16 Pad_cgo_0 [6]byte - Filter *sysSockFilter + Filter *sockFilter } -type sysSockFilter struct { +type sockFilter struct { Code uint16 Jt uint8 Jf uint8 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_netbsd.go index d6ec88e3..bcada13b 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_netbsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -46,16 +46,16 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -64,21 +64,21 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface 
uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_openbsd.go index 3e080b78..86cf3c63 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -55,16 +55,16 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -73,21 +73,21 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_solaris.go index cdf00c25..cf1837dd 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_solaris.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_solaris.go -// +build solaris - package ipv6 const ( @@ -44,6 +42,13 @@ const ( sysIPV6_RECVDSTOPTS = 0x28 + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + sysIPV6_PREFER_SRC_HOME = 0x1 sysIPV6_PREFER_SRC_COA = 0x2 sysIPV6_PREFER_SRC_PUBLIC = 0x4 @@ -67,16 +72,26 @@ const ( sysICMP6_FILTER = 0x1 - sysSizeofSockaddrInet6 = 0x20 - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x24 + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -85,21 +100,32 @@ type sysSockaddrInet6 struct { X__sin6_src_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { X__icmp6_filt [8]uint32 } diff --git 
a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/lex/httplex/httplex.go index bd0ec24f..20f2b894 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/lex/httplex/httplex.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -10,8 +10,11 @@ package httplex import ( + "net" "strings" "unicode/utf8" + + "golang.org/x/net/idna" ) var isTokenTable = [127]bool{ @@ -310,3 +313,39 @@ func ValidHeaderFieldValue(v string) bool { } return true } + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/per_host.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/per_host.go index f540b196..242d5623 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/per_host.go @@ -9,7 +9,7 @@ import ( "strings" ) -// A PerHost directs connections to a default Dialer unless the hostname +// A PerHost directs connections to a default Dialer unless the host name // requested matches one of a number of exceptions. type PerHost struct { def, bypass Dialer @@ -76,7 +76,7 @@ func (p *PerHost) dialerForRequest(host string) Dialer { // AddFromString parses a string that contains comma-separated values // specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a hostname +// IP address, a CIDR range, a zone (*.example.com) or a host name // (localhost). A best effort is made to parse the string and errors are // ignored. func (p *PerHost) AddFromString(s string) { @@ -131,7 +131,7 @@ func (p *PerHost) AddZone(zone string) { p.bypassZones = append(p.bypassZones, zone) } -// AddHost specifies a hostname that will use the bypass proxy. +// AddHost specifies a host name that will use the bypass proxy. func (p *PerHost) AddHost(host string) { if strings.HasSuffix(host, ".") { host = host[:len(host)-1] diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/proxy.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/proxy.go index 78a8b7be..553ead7c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/proxy.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/proxy.go @@ -11,6 +11,7 @@ import ( "net" "net/url" "os" + "sync" ) // A Dialer is a means to establish a connection. @@ -27,7 +28,7 @@ type Auth struct { // FromEnvironment returns the dialer specified by the proxy related variables in // the environment. 
func FromEnvironment() Dialer { - allProxy := os.Getenv("all_proxy") + allProxy := allProxyEnv.Get() if len(allProxy) == 0 { return Direct } @@ -41,7 +42,7 @@ func FromEnvironment() Dialer { return Direct } - noProxy := os.Getenv("no_proxy") + noProxy := noProxyEnv.Get() if len(noProxy) == 0 { return proxy } @@ -92,3 +93,42 @@ func FromURL(u *url.URL, forward Dialer) (Dialer, error) { return nil, errors.New("proxy: unknown scheme: " + u.Scheme) } + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/socks5.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/socks5.go index 9b962823..2efec6e8 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/socks5.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/proxy/socks5.go @@ -72,24 +72,28 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { if err != nil { return nil, err } - closeConn := &conn - defer func() { - if closeConn != nil { - (*closeConn).Close() - } - }() + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} - host, portStr, err := net.SplitHostPort(addr) +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
+func (s *socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) if err != nil { - return nil, err + return err } port, err := strconv.Atoi(portStr) if err != nil { - return nil, errors.New("proxy: failed to parse port number: " + portStr) + return errors.New("proxy: failed to parse port number: " + portStr) } if port < 1 || port > 0xffff { - return nil, errors.New("proxy: port number out of range: " + portStr) + return errors.New("proxy: port number out of range: " + portStr) } // the size here is just an estimate @@ -103,17 +107,17 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { } if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if buf[0] != 5 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) } if buf[1] == 0xff { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") } if buf[1] == socks5AuthPassword { @@ -125,15 +129,15 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, s.password...) if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if buf[1] != 0 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") } } @@ -150,7 +154,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, ip...) 
} else { if len(host) > 255 { - return nil, errors.New("proxy: destination hostname too long: " + host) + return errors.New("proxy: destination host name too long: " + host) } buf = append(buf, socks5Domain) buf = append(buf, byte(len(host))) @@ -159,11 +163,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, byte(port>>8), byte(port)) if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } failure := "unknown error" @@ -172,7 +176,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { } if len(failure) > 0 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) } bytesToDiscard := 0 @@ -184,11 +188,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { case socks5Domain: _, err := io.ReadFull(conn, buf[:1]) if err != nil { - return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } bytesToDiscard = int(buf[0]) default: - return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) } if cap(buf) < bytesToDiscard { @@ -197,14 +201,13 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = buf[:bytesToDiscard] } if _, err := io.ReadFull(conn, buf); err != nil { - return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } // Also need to discard the port number if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } - closeConn = nil - return conn, nil + return nil } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/publicsuffix/gen.go deleted file mode 100644 index a2d49952..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/publicsuffix/gen.go +++ /dev/null @@ -1,713 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -// This program generates table.go and table_test.go based on the authoritative -// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat -// -// The version is derived from -// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat -// and a human-readable form is at -// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat -// -// To fetch a particular git revision, such as 5c70ccd250, pass -// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" -// and -version "an explicit version string". - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "sort" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // These sum of these four values must be no greater than 32. - nodesBitsChildren = 9 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - // These sum of these four values must be no greater than 32. - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -var ( - maxChildren int - maxTextOffset int - maxTextLength int - maxHi uint32 - maxLo uint32 -) - -func max(a, b int) int { - if a < b { - return b - } - return a -} - -func u32max(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 - numNodeType = 3 -) - -func nodeTypeStr(n int) string { - switch n { - case nodeTypeNormal: - return "+" - case nodeTypeException: - return "!" - case nodeTypeParentOnly: - return "o" - } - panic("unreachable") -} - -const ( - defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" - gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" -) - -var ( - labelEncoding = map[string]uint32{} - labelsList = []string{} - labelsMap = map[string]bool{} - rules = []string{} - - // validSuffixRE is used to check that the entries in the public suffix - // list are in canonical form (after Punycode encoding). Specifically, - // capital letters are not allowed. - validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - - shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) - dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) - - comments = flag.Bool("comments", false, "generate table.go comments, for debugging") - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. 
If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") -) - -func main() { - if err := main1(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main1() error { - flag.Parse() - if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { - return fmt.Errorf("not enough bits to encode the nodes table") - } - if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { - return fmt.Errorf("not enough bits to encode the children table") - } - if *version == "" { - if *url != defaultURL { - return fmt.Errorf("-version was not specified, and the -url is not the default one") - } - sha, date, err := gitCommit() - if err != nil { - return err - } - *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) - } - var r io.Reader = os.Stdin - if *url != "" { - res, err := http.Get(*url) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) - } - r = res.Body - defer res.Body.Close() - } - - var root node - icann := false - br := bufio.NewReader(r) - for { - s, err := br.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - s = strings.TrimSpace(s) - if strings.Contains(s, "BEGIN ICANN DOMAINS") { - icann = true - continue - } - if strings.Contains(s, "END ICANN DOMAINS") { - icann = false - continue - } - if s == "" || strings.HasPrefix(s, "//") { - continue - } - s, err = idna.ToASCII(s) - if err != nil { - return err - } - if !validSuffixRE.MatchString(s) { - return fmt.Errorf("bad publicsuffix.org list data: %q", s) - } - - if *subset { - switch { - case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): - case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): - case s == "ao" || strings.HasSuffix(s, ".ao"): - case s == "ar" || strings.HasSuffix(s, ".ar"): - case s == "arpa" || strings.HasSuffix(s, ".arpa"): - case s == "cy" || strings.HasSuffix(s, ".cy"): - case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): - case s == "jp": - case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): - case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): - case s == "om" || strings.HasSuffix(s, ".om"): - case s == "uk" || strings.HasSuffix(s, ".uk"): - case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): - case s == "tw" || strings.HasSuffix(s, ".tw"): - case s == "zw" || strings.HasSuffix(s, ".zw"): - case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): - // xn--p1ai is Russian-Cyrillic "рф". 
- default: - continue - } - } - - rules = append(rules, s) - - nt, wildcard := nodeTypeNormal, false - switch { - case strings.HasPrefix(s, "*."): - s, nt = s[2:], nodeTypeParentOnly - wildcard = true - case strings.HasPrefix(s, "!"): - s, nt = s[1:], nodeTypeException - } - labels := strings.Split(s, ".") - for n, i := &root, len(labels)-1; i >= 0; i-- { - label := labels[i] - n = n.child(label) - if i == 0 { - if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { - n.nodeType = nt - } - n.icann = n.icann && icann - n.wildcard = n.wildcard || wildcard - } - labelsMap[label] = true - } - } - labelsList = make([]string, 0, len(labelsMap)) - for label := range labelsMap { - labelsList = append(labelsList, label) - } - sort.Strings(labelsList) - - if err := generate(printReal, &root, "table.go"); err != nil { - return err - } - if err := generate(printTest, &root, "table_test.go"); err != nil { - return err - } - return nil -} - -func generate(p func(io.Writer, *node) error, root *node, filename string) error { - buf := new(bytes.Buffer) - if err := p(buf, root); err != nil { - return err - } - b, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - return ioutil.WriteFile(filename, b, 0644) -} - -func gitCommit() (sha, date string, retErr error) { - res, err := http.Get(gitCommitURL) - if err != nil { - return "", "", err - } - if res.StatusCode != http.StatusOK { - return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) - } - defer res.Body.Close() - b, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if m := shaRE.FindSubmatch(b); m != nil { - sha = string(m[1]) - } - if m := dateRE.FindSubmatch(b); m != nil { - date = string(m[1]) - } - if sha == "" || date == "" { - retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) - } - return sha, date, retErr -} - -func printTest(w io.Writer, n *node) error { - fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") - for _, rule := range rules { - fmt.Fprintf(w, "%q,\n", rule) - } - fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") - if err := n.walk(w, printNodeLabel); err != nil { - return err - } - fmt.Fprintf(w, "}\n") - return nil -} - -func printReal(w io.Writer, n *node) error { - const header = `// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = %q - -const ( - nodesBitsChildren = %d - nodesBitsICANN = %d - nodesBitsTextOffset = %d - nodesBitsTextLength = %d - - childrenBitsWildcard = %d - childrenBitsNodeType = %d - childrenBitsHi = %d - childrenBitsLo = %d -) - -const ( - nodeTypeNormal = %d - nodeTypeException = %d - nodeTypeParentOnly = %d -) - -// numTLD is the number of top level domains. 
-const numTLD = %d - -` - fmt.Fprintf(w, header, *version, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, - nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - - text := combineText(labelsList) - if text == "" { - return fmt.Errorf("internal error: makeText returned no text") - } - for _, label := range labelsList { - offset, length := strings.Index(text, label), len(label) - if offset < 0 { - return fmt.Errorf("internal error: could not find %q in text %q", label, text) - } - maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1< 64 { - n, plus = 64, " +" - } - fmt.Fprintf(w, "%q%s\n", text[:n], plus) - text = text[n:] - } - - if err := n.walk(w, assignIndexes); err != nil { - return err - } - - fmt.Fprintf(w, ` - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. -// -// If the table was generated with the -comments flag, there is a //-comment -// after each node's data. In it is the nodes-array indexes of the children, -// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] children index -// [%2d bits] ICANN bit -// [%2d bits] text index -// [%2d bits] text length -var nodes = [...]uint32{ -`, - 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) - if err := n.walk(w, printNode); err != nil { - return err - } - fmt.Fprintf(w, `} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] wildcard bit -// [%2d bits] node type -// [%2d bits] high nodes index (exclusive) of children -// [%2d bits] low nodes index (inclusive) of children -var children=[...]uint32{ -`, - 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) - for i, c := range childrenEncoding { - s := "---------------" - lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 - if *comments { - fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", - c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) - } else { - fmt.Fprintf(w, "0x%x,\n", c) - } - } - fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { - ss = ss[1:] - } - return ss -} - -// crush combines a list of strings, taking advantage of overlaps. It returns a -// single string that contains each input string as a substring. 
-func crush(ss []string) string { - maxLabelLen := 0 - for _, s := range ss { - if maxLabelLen < len(s) { - maxLabelLen = len(s) - } - } - - for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { - prefixes := makePrefixMap(ss, prefixLen) - for i, s := range ss { - if len(s) <= prefixLen { - continue - } - mergeLabel(ss, i, prefixLen, prefixes) - } - } - - return strings.Join(ss, "") -} - -// mergeLabel merges the label at ss[i] with the first available matching label -// in prefixMap, where the last "prefixLen" characters in ss[i] match the first -// "prefixLen" characters in the matching label. -// It will merge ss[i] repeatedly until no more matches are available. -// All matching labels merged into ss[i] are replaced by "". -func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { - s := ss[i] - suffix := s[len(s)-prefixLen:] - for _, j := range prefixes[suffix] { - // Empty strings mean "already used." Also avoid merging with self. - if ss[j] == "" || i == j { - continue - } - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", - prefixLen, i, j, ss[i], ss[j], suffix) - } - ss[i] += ss[j][prefixLen:] - ss[j] = "" - // ss[i] has a new suffix, so merge again if possible. - // Note: we only have to merge again at the same prefix length. Shorter - // prefix lengths will be handled in the next iteration of crush's for loop. - // Can there be matches for longer prefix lengths, introduced by the merge? - // I believe that any such matches would by necessity have been eliminated - // during substring removal or merged at a higher prefix length. For - // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" - // would yield "abcde", which could be merged with "bcdef." However, in - // practice "cde" would already have been elimintated by removeSubstrings. - mergeLabel(ss, i, prefixLen, prefixes) - return - } -} - -// prefixMap maps from a prefix to a list of strings containing that prefix. The -// list of strings is represented as indexes into a slice of strings stored -// elsewhere. -type prefixMap map[string][]int - -// makePrefixMap constructs a prefixMap from a slice of strings. -func makePrefixMap(ss []string, prefixLen int) prefixMap { - prefixes := make(prefixMap) - for i, s := range ss { - // We use < rather than <= because if a label matches on a prefix equal to - // its full length, that's actually a substring match handled by - // removeSubstrings. - if prefixLen < len(s) { - prefix := s[:prefixLen] - prefixes[prefix] = append(prefixes[prefix], i) - } - } - - return prefixes -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/publicsuffix/table.go index 00fa1ef5..50f070a9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/net/publicsuffix/table.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/net/publicsuffix/table.go @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's public_suffix_list.dat, git revision fb4a6bce72a86feaf6c38f0a43cd05baf97a9258 (2016-07-07T00:50:50Z)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision f47d806df99585862c8426c3e064a50eb5a278f5 (2017-06-14T11:49:01Z)" const ( nodesBitsChildren = 9 @@ -23,444 +23,453 @@ const ( ) // numTLD is the number of top level domains. -const numTLD = 1552 +const numTLD = 1549 // Text is the combined text of all labels. 
-const text = "biellaakesvuemieleccebieszczadygeyachimataipeigersundrangedalivo" + - "rnoddabievatmallorcafederationikonantanangerbifukagawalmartatesh" + - "inanomachintaijinfolldalomzansimagicasadelamonedatsunanjoetsuwan" + - "ouchikujogaszkoladbrokesamsclubindalorenskogliwicebihorologyusui" + - "sserveexchangebikedagestangeorgeorgiabilbaogakievenesamsunglobal" + - "ashovhachinohedmarkhangelskatowicebillustrationinohekinannestadr" + - "ivelandrobaknoluoktainaikawachinaganoharamcoalaheadjudaicable-mo" + - "dembetsukuintuitateyamabiomutashinainuyamanouchikuhokuryugasakit" + - "ashiobarabirdartcenterprisesakikonaircraftraeumtgeradealstahauge" + - "sundunloppacificaseihichisobetsuitairabirkenesoddtangenovaravenn" + - "agatorockartuzyuudmurtiabirthplacebjarkoyuzawabjerkreimdbalatino" + - "rdkappgafanpachigasakidsmynasperschlesisches3-sa-east-1bjugniezn" + - "ordre-landunsandvikcoromantovalle-d-aostatoilotenkawablockbuster" + - "nidupontariobloombergbauernrtatsunobloxcmsanfranciscofreakunemur" + - "orangeiseiyoichiropracticasertaishinomakikuchikuseikarugaulardal" + - "ottebluedaplierneuesangobmoattachmentsanjotattoolsztynsettlersan" + - "naninomiyakonojoshkar-olayangroupaleobmsannohelplfinancialottoko" + - "namegatakatorinvestmentsanokatsushikabeeldengeluidurbanamexhibit" + - "ionirasakis-a-candidatebmweirbnpparibaselburglobodoes-itverranza" + - "nquannefrankfurtaxihuanishiazais-a-catererbomloansantabarbarabon" + - "durhamburglogowfarmsteadvrcambridgestonewspaperbonnishigotsukiso" + - "fukushimaritimodenakanojohanamakinoharabookingloppenzaogashimada" + - "chicagoboatsantacruzsantafedextraspace-to-rentalstomakomaibarabo" + - "otsanukis-a-celticsfanishiharaboschaefflerdalouvreitgoryuzhno-sa" + - "khalinskatsuyamaseratis-a-chefarsundvrdnsfor-better-thandabostik" + - "aufenishiizunazukis-a-conservativefsncfdwgmbhartiffanybostonakij" + - "insekikogentingminakamichiharabotanicalgardenishikatakazakis-a-c" + - "padoval-daostavalleybotanicgardenishikatsuragithubusercontentjel" + - "dsundyndns-ipalermomasvuotnakatombetsupplybotanybouncemerckautok" + - "einobounty-fullensakerrypropertiesaotomeloyalistockholmestrandyn" + - "dns-mailowiczest-le-patrondheimperiaboutiquebecngmodellingmxfini" + - "tybozentsujiiebradescorporationishikawazukanazawabrandywinevalle" + - "ybrasiliabresciabrindisibenikebristolgapartmentsapodhalewismille" + - "rbritishcolumbialowiezaganishimerabroadcastleclercasinore-og-uvd" + - "alucaniabroadwaybroke-itjmaxxxjaworznobrokerbronnoysundyndns-off" + - "ice-on-the-webcampobassociatesapporobrothermesaverdeatnuorogersv" + - "palmspringsakerbrumunddaluccapitalonewhollandyndns-picsaratovall" + - "eaostavernishinomiyashironobrunelblagdenesnaaseralingenkainanaej" + - "rietisalatinabenoboribetsucksardegnamsosnowiecateringebudejjuedi" + - "schesapeakebayernurembergrimstadyndns-remotegildeskalmykiabrusse" + - "lsardiniabruxellesarlucernebryanskjervoyagebryneustarhubalestran" + - "dabergamoarekemrbuskerudinewhampshirechtrainingripebuzenishinoom" + - "otegotvalled-aostavropolitiendabuzzgorzeleccolognewmexicoldwarmi" + - "amiastaplesarpsborgriwataraidyndns-servercellikes-piedmontblanco" + - "meeresarufutsunomiyawakasaikaitakoenigrondarbwhalingrongabzhitom" + - "irkutskleppamperedchefashionishinoshimatta-varjjatjometlifeinsur" + - "ancecomputerhistoryofscience-fictioncomsecuritytacticsavonamssko" + - "ganeis-a-designerimarumorimachidacondoshichinohealthcareersaxoco" + - "nferenceconstructionconsuladoharuhrconsultanthropologyconsulting" + - 
"volluzerncontactoyosatoyokawacontemporaryarteducationalchikugojo" + - "medio-campidano-mediocampidanomediocontractorskenconventureshino" + - "desashibetsuikimobetsuliguriacookingchannelveruminamibosogndalvi" + - "vano-frankivskfhappoumuenchencoolkuszgradcooperaunitemasekhabaro" + - "vskhakassiacopenhagencyclopedichernihivanovosibirskydivingrosset" + - "ouchijiwadeloittevadsoccertificationissandnessjoenissayokoshibah" + - "ikariwanumataketomisatomobellevuelosangelesjaguarchitecturealtyc" + - "hyattorneyagawalbrzycharternopilawalesundyndns-wikinderoycorsica" + - "hcesuolocalhistorybnikahokutoeiheijis-a-doctoraycorvettenrightat" + - "homegoodsbschokoladencosenzakopanerairguardcostumedizinhistorisc" + - "hescholarshipschoolcouchpotatofrieschulezajskharkivgucciprianiig" + - "ataiwanairforcertmgretachikawakuyabukicks-assedichernivtsiciliac" + - "ouncilcouponschwarzgwangjuifminamidaitomangotembaixadacourseschw" + - "eizippodlasiellakasamatsudovre-eikercq-acranbrookuwanalyticscien" + - "cecentersciencehistorycreditcardcreditunioncremonashorokanaiecre" + - "wildlifedjejuegoshikiminokamoenairlinedre-eikercricketrzyncrimea" + - "crotonewportlligatewaycrownprovidercrscientistor-elvdalcruisescj" + - "ohnsoncryptonomichigangwoncuisinellahppiacenzamamibuilderscotlan" + - "dculturalcentertainmentoyotaris-a-financialadvisor-aurdalcuneocu" + - "pcakecxn--1ctwolominamatambovalledaostamayukis-a-geekgalaxycymru" + - "ovatoyotomiyazakis-a-greencyonabarussiacyouthdfcbankzjcbnlfieldf" + - "iguerestaurantoyotsukaidownloadfilateliafilminamiechizenfinalfin" + - "ancefineartscrappinguovdageaidnulsandoyfinlandfinnoyfirebaseappa" + - "raglidingushikamifuranoshiroomurafirenzefirestonextdirectoyouraf" + - "irmdaleirfjordfishingolffanserveftparisor-fronfitjarqhachiojiyah" + - "ikobeatservegame-serverisignfitnessettlementoystre-slidrettozawa" + - "fjalerflesbergxn--1lqs71dflickragerotikamakurazakinkobayashiksha" + - "cknetnedalflightservehalflifestyleflirumannortonsbergzlgfloginto" + - "gurafloraflorencefloridafloristanohatakaharulvikhmelnitskiyamasf" + - "jordenfloromskoguchikuzenflowerservehttparliamentozsdeflsmidthru" + - "heredstonexus-east-1flynnhubalsfjordishakotankarumaifarmerseinew" + - "yorkshirecreationaturbruksgymnaturhistorisches3-us-gov-west-1fnd" + - "foodnetworkshoppingfor-ourfor-someetranbyfor-theaterforexrothach" + - "irogatakamoriokamikitayamatotakadaforgotdnservehumourforli-cesen" + - "a-forlicesenaforlikescandyndns-at-workinggrouparmaforsaleirvikhm" + - "elnytskyivalleeaosteigenforsandasuoloftrani-andria-barletta-tran" + - "i-andriafortmissoulan-udefenseljordfortworthadanotaireserveirche" + - "rnovtsykkylvenetogakushimotoganewjerseyforuminamifuranofosneserv" + - "eminecraftraniandriabarlettatraniandriafotaruis-a-gurunzenfoxfor" + - "degreefreeboxostrowiechiryukyuragifudaigodoesntexistanbullensvan" + - "guardyndns-workisboringroundhandlingroznyfreemasonryfreiburgfrei" + - "ghtcmwilliamhillfreseniuscountryestateofdelawaredumbrellajollame" + - "ricanexpressexyzparocherkasyzrankoshigayaltaikis-a-hard-workerfr" + - "ibourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venez" + - "ia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriuliv" + - "e-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafri" + - "ulivgiuliafrlfroganservemp3utilitiesquarezzoologicalvinklein-add" + - "rammenuernbergdyniabcn-north-1kappleaseating-organicbcg12000emma" + - "fanconagawakayamadridvagsoyericsson-aptibleangaviikadenaamesjevu" + - "emielno-ip6frognfrolandfrom-akrehamnfrom-alfrom-arfrom-azwinbalt" + - 
"imore-og-romsdalimitedunetbankasaokamisatokamachippubetsubetsuga" + - "ruconnectarumizusawaukraanghkebinagisochildrensgardenasushiobara" + - "bruzzoologyeongbuk-uralsk12from-capetownnews-stagingfrom-collect" + - "ionfrom-ctranoyfrom-dchitachinakagawassamukawataricohdavvenjarga" + - "usdalukowhoswhokksundynnsasayamafrom-dell-ogliastrakhanawatchese" + - "rvep2parservepicservequakefrom-flanderservesarcasmatartanddesign" + - "from-gafrom-higashiagatsumagoirminamiiselectransportrapaniimimat" + - "akatsukis-a-hunterfrom-iafrom-idfrom-ilfrom-incheonfrom-kservice" + - "settsurfastlyfrom-kyotobetsumidatlantichitosetogitsuldaluroyfrom" + - "-lanbibaidarfrom-mansionsevastopolefrom-mdfrom-megurorostrowwlkp" + - "mgfrom-microsoftbankhvanylvenicefrom-mnfrom-mochizukirafrom-msev" + - "enassisicilyfrom-mtnfrom-nchloefrom-ndfrom-nefrom-nhktravelchann" + - "elfrom-njcpartis-a-knightravelersinsurancefrom-nminamiizukamiton" + - "dabayashiogamagoriziafrom-nvaolbia-tempio-olbiatempioolbialystok" + - "kemerovodkagoshimaizurubtsovskjakdnepropetrovskiervaapsteiermark" + - "labudhabikinokawabarthadselfipartnersewindmillfrom-nyfrom-ohkura" + - "from-oketohmanxn--1qqw23afrom-orfrom-paderbornfrom-pratohnoshooo" + - "shikamaishimofusartsfranziskanerdpolicefrom-rivnefrom-schoenbrun" + - "nfrom-sdnipropetrovskypescaravantaafrom-tnfrom-txn--2m4a15efrom-" + - "utazuerichardlillehammerfest-mon-blogueurovisionfrom-vaksdalfrom" + - "-vtrdfrom-wafrom-wielunnerfrom-wvareserveblogspotrentino-a-adige" + - "from-wyfrosinonefrostalowa-wolawafroyahabaghdadultrentino-aadige" + - "fstcgroupartshangrilangevagrarboretumbriamallamagentositelefonic" + - "aaarborteaches-yogasawaracingroks-theatreefujiiderafujikawaguchi" + - "konefujiminohtawaramotoineppugliafujinomiyadafujiokayamaoris-a-l" + - "andscaperugiafujisatoshonairportland-4-salernogatagajobojis-a-la" + - "wyerfujisawafujishiroishidakabiratoridellogliastraderfujitsuruga" + - "shimamateramodalenfujixeroxn--30rr7yfujiyoshidafukayabeardubaidu" + - "ckdnsdojoburgfukuchiyamadafukudominichocolatelevisionissedalutsk" + - "azimierz-dolnyfukuis-a-liberalfukumitsubishigakirkenesharis-a-li" + - "bertarianfukuokazakirovogradoyfukuroishikarikaturindalfukusakiry" + - "uohaebaruminamimakis-a-linux-useranishiaritabashikaoizumizakitau" + - "rayasudafukuyamagatakahashimamakisarazurewebsiteshikagamiishibuk" + - "awafunabashiriuchinadafunagatakahatakaishimoichinosekigaharafuna" + - "hashikamiamakusatsumasendaisennangonohejis-a-llamarylandfundacio" + - "fuoiskujukuriyamarburgfuosskoczowindowsharpartyfurnitureggio-cal" + - "abriafurubiraquarellebesbyglandfurudonostiafurukawairtelecityeat" + - "shawaiijimarugame-hostingfusodegaurafussaikishiwadafutabayamaguc" + - "hinomigawafutboldlygoingnowhere-for-moregontrailroadfuttsurugimi" + - "namiminowafvgfyis-a-musicianfylkesbiblackfridayfyresdalhannovarg" + - "gatrentino-alto-adigehanyuzenhapmirhareidsbergenharstadharvestce" + - "lebrationhasamarahasaminami-alpssells-itrentino-altoadigehashban" + - "ghasudahasura-appassagenshimokitayamahasvikmshimonitayanagivestb" + - "ytomaritimekeepinghatogayahoohatoyamazakitahatakanabeautydalhats" + - "ukaichikaiseis-a-painteractivegarsheis-a-patsfanhattfjelldalhaya" + - "shimamotobuildinghazuminobusellsyourhomeipassenger-associationhb" + - "oehringerikehelsinkitahiroshimarriottrentino-s-tirollagrigentomo" + - "logyhembygdsforbundhemneshimonosekikawahemsedalhepforgeherokussl" + - "dheroyhgtvaroyhigashichichibungotakadatinghigashihiroshimanehiga" + - "shiizumozakitakamiizumisanofidelityumenhigashikagawahigashikagur" + - 
"asoedahigashikawakitaaikitakatakanezawahigashikurumeiwamarshalls" + - "tatebankokonoehigashimatsushimarinehigashimatsuyamakitaakitadait" + - "oigawahigashimurayamalatvuopmidoris-a-personaltrainerhigashinaru" + - "sembokukitakyushuaiahigashinehigashiomihachimanchesterhigashiosa" + - "kasayamamotorcycleshimosuwalkis-a-photographerokuappaviancarboni" + - "a-iglesias-carboniaiglesiascarboniahigashishirakawamatakaokamiko" + - "aniikappulawyhigashisumiyoshikawaminamiaikitamidsundhigashitsuno" + - "tteroyhigashiurausukitamotosumitakaginankokubunjis-a-playerhigas" + - "hiyamatokoriyamanakakogawahigashiyodogawahigashiyoshinogaris-a-r" + - "epublicancerresearchaeologicaliforniahiraizumisatohobby-sitehira" + - "katashinagawahiranairtraffichonanbugattipschmidtre-gauldaluxuryh" + - "irarahiratsukagawahirayaitakarazukamiminershimotsukehistorichous" + - "eshimotsumahitachiomiyaginowaniihamatamakawajimarcheapfizerhitac" + - "hiotagooglecodespotrentino-stirolhitoyoshimifunehitradinghjartda" + - "lhjelmelandholeckobierzyceholidayhomelinuxn--32vp30hagebostadhom" + - "esecuritymaceratakasagopocznosegawahomesecuritypccwinnershinichi" + - "nanhomesenseminehomeunixn--3bst00minamiogunicomcastresistancehon" + - "dahonefosshinjournalismailillesandefjordhoneywellhongorgehonjyoi" + - "takasakitanakagusukumoduminamisanrikubetsupplieshinjukumanohorni" + - "ndalhorseoulminamitanehortendofinternetrentino-sud-tirolhotelesh" + - "inkamigotoyohashimototalhotmailhoyangerhoylandetroitskolobrzeger" + - "sundhumanitieshinshinotsurgeonshalloffamemergencyberlevagangavii" + - "kanonjis-a-rockstarachowicehurdalhurumajis-a-socialistmeindianap" + - "olis-a-bloggerhyllestadhyogoris-a-soxfanhyugawarahyundaiwafunehz" + - "choseirouterjgorajlchoyodobashichikashukujitawarajlljmpgfoggiajn" + - "jelenia-gorajoyokaichibahcavuotnagaraholtaleniwaizumiotsukumiyam" + - "azonawsadodgemologicallyngenvironmentalconservationjpmorganjpnch" + - "ristmasakikugawatchandclockazojprshioyamemorialjuniperjurkristia" + - "nsundkrodsheradkrokstadelvaldaostarostwodzislawioshirakofuelkrym" + - "inamiyamashirokawanabelgorodeokumatorinokumejimassa-carrara-mass" + - "acarraramassabunkyonanaoshimageandsoundandvisionkumenanyokkaichi" + - "rurgiens-dentistes-en-francekunisakis-an-anarchistoricalsocietyk" + - "unitachiarailwaykunitomigusukumamotoyamasoykunneppupharmacyshira" + - "nukaniepcekunstsammlungkunstunddesignkuokgrouphiladelphiaareadmy" + - "blogsitekureisenkurgankurobelaudibleborkdalvdalaskanittedallasal" + - "leasingleshiraois-an-artisteinkjerusalembroiderykurogimilitaryku" + - "roisoftwarendalenugkuromatsunais-an-engineeringkurotakikawasakis" + - "-an-entertainerkurskomitamamurakushirogawakustanais-bykusuperspo" + - "rtrentinoaadigekutchanelkutnokuzbassnillfjordkuzumakis-certified" + - "ekakudamatsuekvafjordkvalsundkvamfamberkeleykvanangenkvinesdalkv" + - "innheradkviteseidskogkvitsoykwpspiegelkyowariasahikawamitourismo" + - "lanciamitoyoakemiuramiyazustkarasjokommunemiyotamanomjondalenmlb" + - "fanmonmouthaibarakisosakitagawamonstermonticellombardiamondshira" + - "okanmakiwakunigamihamadamontrealestatefarmequipmentrentinoalto-a" + - "digemonza-brianzaporizhzheguris-into-animelbournemonza-e-della-b" + - "rianzaporizhzhiamonzabrianzapposhiratakahagivingmonzaebrianzapto" + - "kuyamatsunomonzaedellabrianzaramoparachutingmordoviajessheiminan" + - "omoriyamatsusakahoginozawaonsenmoriyoshiokamitsuemormoneymoroyam" + - "atsushigemortgagemoscowitdkomonomoseushistorymosjoenmoskeneshish" + - "ikuis-into-carshintomikasaharamosshisognemosvikomorotsukamisunag" + - 
"awamoviemovistargardmtpchromedicaltanissettaitogliattiresaskatch" + - "ewanggouvicenzamtranakatsugawamuenstermugithubcloudusercontentre" + - "ntinoaltoadigemuikamogawamukochikushinonsenergymulhouservebeermu" + - "ltichoicemunakatanemuncieszynmuosattemuphilatelymurmanskomvuxn--" + - "3ds443gmurotorcraftrentinos-tirolmusashimurayamatsuuramusashinoh" + - "aramuseetrentinostirolmuseumverenigingmutsuzawamutuellevangermyd" + - "robofagemydshisuifuettertdasnetzmyeffectrentinosud-tirolmyfritzm" + - "yftphilipsymykolaivbarcelonagasakijobserverdalimoliserniaurskog-" + - "holandroverhalla-speziaeroportalabamagasakishimabarackmaze12myme" + - "diapchryslermyokohamamatsudamypepsonyoursidedyn-o-saurecipesaro-" + - "urbino-pesarourbinopesaromalvikongsbergmypetshitaramamyphotoshib" + - "ahccavuotnagareyamakeupowiathletajimabariakepnord-odalpharmacien" + - "snasaarlandmypsxn--3e0b707emysecuritycamerakermyshopblockshizuku" + - "ishimogosenmytis-a-bookkeepermincommbankommunalforbundmyvnchungb" + - "ukazunopictureshizuokannamiharupiemontepilotshoujis-into-cartoon" + - "shinyoshitomiokaneyamaxunusualpersonpimientakinouepinkongsvinger" + - "pioneerpippupiszpittsburghofauskedsmokorsetagayasells-for-ufcfan" + - "piwatepizzapkoninjamisonplanetariuminnesotaketakayamatsumaebashi" + - "modateplantationplantshowaplatformintelligenceplaystationplazapl" + - "chungnamdalseidfjordynv6plombardyndns-blogdnsiskinkyknethnologyp" + - "lumbingovtrentinosued-tirolplusterpmnpodzonepohlpointtomskonskow" + - "olancashireggioemiliaromagnakasatsunais-a-techietis-a-studentalp" + - "oivronpokerpokrovskonsulatrobeepilepsydneypolkowicepoltavalle-ao" + - "stathellexusdecorativeartshowtimeteorapphotographysiopomorzeszow" + - "ithgoogleapisa-hockeynutrentinosuedtirolpordenonepornporsangerpo" + - "rsanguideltajimicrolightingporsgrunnanpoznanpraxis-a-bruinsfanpr" + - "dpreservationpresidioprgmrprimelhusgardenprincipeprivatizehealth" + - "insuranceprochowiceproductionshriramlidlugolekagaminogiessenebak" + - "keshibechambagriculturennebudapest-a-la-masionthewifiat-band-cam" + - "paniaprofbsbxn--1lqs03nprogressivegaskimitsubatamicadaquesienapl" + - "esigdalprojectrentoyonakagyokutoyakokamishihoronobeokaminoyamats" + - "uris-into-gamessinashikitchenpromombetsupportrevisohughesilkonyv" + - "elolpropertyprotectionprudentialpruszkowithyoutubeneventodayprze" + - "worskogptzpvtroandinosaurlandesimbirskooris-a-therapistoiapwchur" + - "chaseljeepostfoldnavyatkakamigaharapzqldqponqslgbtrogstadquicksy" + - "tesimple-urlqvchuvashiaspreadbettingspydebergsrlsrtromsojavald-a" + - "ostarnbergsrvdonskoseis-an-accountantshinshirostoragestordalstor" + - "enburgstorfjordstpetersburgstreamsterdamnserverbaniastudiostudyn" + - "dns-homeftpaccesslingstuff-4-salestufftoread-booksneslupskopervi" + - "komatsushimashikestuttgartrusteesurnadalsurreysusakis-not-certif" + - "iedogawarabikomaezakirunorthwesternmutualsusonosuzakanrasuzukanu" + - "mazurysuzukis-saveducatorahimeshimakanegasakindleikangersvalbard" + - "udinkakegawasveiosvelvikosherbrookegawasvizzeraswedenswidnicargo" + - "daddyndns-at-homednshomebuiltrvenneslaskerrylogisticsmolenskoryo" + - "lasiteswiebodzindianmarketingswiftcoveronaritakurashikis-slickom" + - "aganeswinoujscienceandhistoryswisshikis-uberleetrentino-sued-tir" + - "olvestnesokndalvestre-slidreamhostersolarssonvestre-totennishiaw" + - "akuravestvagoyvevelstadvibo-valentiavibovalentiavideovillaskoyab" + - "earalvahkihokumakogengerdalipayufuchukotkafjordvinnicarriervinny" + - "tsiavipsinaappiagetmyiphoenixn--3oq18vl8pn36avirginiavirtualvirt" + - 
"ueeldomeindustriesteambulancevirtuelvisakatakkoelnvistaprinterna" + - "tionalfirearmsologneviterboltrysiljan-mayenvivoldavladikavkazanv" + - "ladimirvladivostokaizukarasuyamazoevlogoipictetrentinosudtirolvo" + - "lkenkunderseaportulansnoasaitamatsukuris-leetrentino-sudtirolvol" + - "kswagentsolundbeckosaigawavologdanskoshunantokigawavolvolgogradv" + - "olyngdalvoronezhytomyrvossevangenvotevotingvotoyonezawavrnworse-" + - "thangglidingwowiwatsukiyonowtversaillesokanoyakagewritesthisblog" + - "sytewroclawloclawekostromahachijorpelandwtcirclegnicagliaridagaw" + - "alterwtfbx-oslodingenwuozuwwworldwzmiuwajimaxn--4gq48lf9jeonname" + - "rikawauexn--4it168dxn--4it797kotohiradomainsurehabmerxn--4pvxsol" + - "utionsirdalxn--54b7fta0cciticatholicheltenham-radio-openair-traf" + - "fic-controlleyxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilavi" + - "ationisshingugexn--5rtq34kotouraxn--5su34j936bgsgxn--5tzm5gxn--6" + - "btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilisationi" + - "yodogawaxn--80adxhksomaxn--80ao21axn--80aqecdr1axn--80asehdbarcl" + - "aycardstvedestrandiskstationatuurwetenschappenaumburgladelmenhor" + - "stalbans3-us-west-1xn--80aswgxn--80audnedalnxn--8ltr62kouhokutam" + - "akizunokunimilanoxn--8pvr4uxn--8y0a063axn--90a3academyactivedire" + - "ctoryazannakadomari-elasticbeanstalkounosunndalxn--90aishobaraom" + - "origuchiharagusabaerobaticketsaritsynologyeongnamegawakeisenbahn" + - "xn--90azhair-surveillancexn--9dbhblg6dietcimmobilienxn--9dbq2axn" + - "--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawaxn--asky-ira" + - "xn--aurskog-hland-jnbarclays3-us-west-2xn--avery-yuasakegawaxn--" + - "b-5gaxn--b4w605ferdxn--bck1b9a5dre4civilizationrwiiheyaizuwakama" + - "tsubushikusakadogawaxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg" + - "-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuura" + - "xn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptaobaokinawashirosa" + - "tobishimaintenancexn--blt-elaborxn--bmlo-graingerxn--bod-2naroyx" + - "n--brnny-wuaccident-investigationjukudoyamagadancebetsukubabia-g" + - "oracleaningatlantabusebastopologyeonggiehtavuoatnadexeterimo-i-r" + - "anagahamaroygardendoftheinternetflixilovecollegefantasyleaguerns" + - "eyxn--brnnysund-m8accident-preventionlineat-urlxn--brum-voagatun" + - "esnzxn--btsfjord-9zaxn--c1avgxn--c2br7gxn--c3s14misasaguris-gone" + - "xn--cck2b3barefootballangenoamishirasatochigiftsakuragawaustevol" + - "lavangenativeamericanantiques3-eu-central-1xn--cg4bkis-very-bada" + - "ddjamalborkangerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisawa" + - "xn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr6" + - "94bargainstitutelekommunikationaustdalindasiaustinnaturalhistory" + - "museumcentereportarnobrzegyptianaturalsciencesnaturelles3-eu-wes" + - "t-1xn--czrs0tunkoshimizumakiyosumydissentrentinoa-adigexn--czru2" + - "dxn--czrw28barreauctionaval-d-aosta-valleyonagoyaustraliaisondri" + - "odejaneirochestereviewskrakowebhoppdalaziobihirosakikamijimattel" + - "edatabaseballooningjesdalillyokosukareliancebinorilskariyakumold" + - "evennodessagaeroclubmedecincinnationwidealerhcloudcontrolledds3-" + - "external-1xn--d1acj3barrel-of-knowledgeologyonaguniversityoriika" + - "shibatakashimarylhurstjordalshalsenavigationavuotnakayamatsuzaki" + - "bigawaustrheimatunduhrennesoyokotebizenakamuratakahamaniwakurate" + - "xasdaburyatiaarpagefrontappagespeedmobilizerobiraetnagaivuotnaga" + - "okakyotambabydgoszczecinemailavagiske164xn--d1alfaromeoxn--d1atu" + - "rystykarasjohkamiokaminokawanishiaizubangexn--d5qv7z876civilwarm" + - 
"anagementkmaxxn--11b4c3dyroyrvikinguitarsassaris-a-democratmpana" + - "sonichelyabinskodjeffersonishiokoppegardyndns-weberlincolnishito" + - "sashimizunaminamiashigaraxn--davvenjrga-y4axn--djrs72d6uyxn--djt" + - "y4kouyamashikis-an-actorxn--dnna-grajewolterskluwerxn--drbak-wua" + - "xn--dyry-iraxn--e1a4claimsatxn--1ck2e1balsanagochihayaakasakawah" + - "araumalopolskanlandiscoveryokamikawanehonbetsurutaharaugustowada" + - "egubs3-ap-southeast-2xn--eckvdtc9dxn--efvn9somnarashinoxn--efvy8" + - "8hakatanotogawaxn--ehqz56nxn--elqq16hakodatexn--estv75gxn--eveni" + - "-0qa01gaxn--f6qx53axn--fct429kouzushimashikokuchuoxn--fhbeiarnxn" + - "--finny-yuaxn--fiq228c5hsooxn--fiq64barrell-of-knowledgeometre-e" + - "xperts-comptablesakuraibmditchyouripalaceu-1xn--fiqs8sopotromsak" + - "akinokiaxn--fiqz9sor-odalxn--fjord-lraxn--fjq720axn--fl-ziaxn--f" + - "lor-jraxn--flw351exn--fpcrj9c3dxn--frde-grandrapidsor-varangerxn" + - "--frna-woaraisaijosoyrovigorlicexn--frya-hraxn--fzc2c9e2clickddi" + - "elddanuorrikuzentakatajirissagamiharaxn--fzys8d69uvgmailxn--g2xx" + - "48clinichernigovernmentjxn--0trq7p7nnishiwakis-a-cubicle-slavell" + - "inowruzhgorodoyxn--gckr3f0fbxostrolekaluganskharkovallee-aostero" + - "yxn--gecrj9cliniquenoharaxn--ggaviika-8ya47hakonexn--gildeskl-g0" + - "axn--givuotna-8yandexn--3pxu8kosugexn--gjvik-wuaxn--gk3at1exn--g" + - "ls-elacaixaxn--gmq050is-very-evillagexn--gmqw5axn--h-2failxn--h1" + - "aeghakubankmpspacekitagatakasugais-a-nascarfanxn--h2brj9clintono" + - "shoesaudaxn--hbmer-xqaxn--hcesuolo-7ya35bashkiriauthordalandroid" + - "gcanonoichinomiyakehimejibestadigitalimanowarudagroks-thisamitsu" + - "kembuchikumagayagawakkanaibetsubamericanfamilydscloudappspotager" + - "epairbusantiquest-a-la-maisondre-landebusinessebyklefrakkestaddn" + - "skingjerdrumckinseyekaterinburgjerstadotsuruokamchatkameokameyam" + - "ashinatsukigatakamatsukawabogadocscbggfareastcoastaldefence-burg" + - "jemnes3-ap-northeast-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeast" + - "a-s4acctuscanyxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmi" + - "r-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn" + - "--imr513nxn--indery-fyaotsurgutsiracusaitoshimaxn--io0a7is-very-" + - "goodhandsonxn--j1aefermobilyxn--j1amhakuis-a-nurservebbshellaspe" + - "ziaxn--j6w193gxn--jlq61u9w7basilicataniautomotivecodynaliascoli-" + - "picenoipirangamvikarlsoyokozemersongdalenviknakaniikawatanaguram" + - "usementargets-itargi234xn--jlster-byaroslavlaanderenxn--jrpeland" + - "-54axn--jvr189misconfusedxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--k" + - "crx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--klt" + - "x9axn--klty5xn--42c2d9axn--koluokta-7ya57hakusandiegoodyearthaga" + - "khanamigawaxn--kprw13dxn--kpry57dxn--kpu716ferraraxn--kput3is-ve" + - "ry-nicexn--krager-gyasakaiminatoyonoxn--kranghke-b0axn--krdshera" + - "d-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-suedtirolxn--" + - "ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugis-very-sweetpepperxn--" + - "kvnangen-k0axn--l-1fairwindsorfoldxn--l1accentureklamborghiniiza" + - "xn--laheadju-7yasuokaratexn--langevg-jxaxn--lcvr32dxn--ldingen-q" + - "1axn--leagaviika-52basketballfinanzgorautoscanadaejeonbukarmoyom" + - "itanobninskarpaczeladz-1xn--lesund-huaxn--lgbbat1ad8jevnakershus" + - "cultureggiocalabriaxn--lgrd-poacoachampionshiphoptobamagazinebra" + - "skaunjargallupinbatochiokinoshimalselvendrellindesnesakyotanabel" + - "lunordlandivtasvuodnaharimamurogawawegroweibolzanordreisa-geekas" + - "hiharaveroykenglandiscountysvardolls3-external-2xn--lhppi-xqaxn-" + - 
"-linds-pramericanartushuissier-justicexn--lns-qlanxessorreisahay" + - "akawakamiichikawamisatottoris-lostre-toteneis-a-teacherkassymant" + - "echnologyxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liaclo" + - "thingujolsterxn--lten-granexn--lury-iraxn--mely-iraxn--merker-ku" + - "axn--mgb2ddesortlandxn--mgb9awbferrarittogoldpoint2thisayamanash" + - "iibadajozorahkkeravjudygarlandxn--mgba3a3ejtuvalle-daostavangerx" + - "n--mgba3a4f16axn--mgba3a4franamizuholdingsmileksvikozagawaxn--mg" + - "ba7c0bbn0axn--mgbaakc7dvferreroticapebretonamiasakuchinotsuchiur" + - "akawarszawashingtondclkhersonxn--mgbaam7a8haldenxn--mgbab2bdxn--" + - "mgbai9a5eva00batsfjordivttasvuotnakaiwamizawavocatanzaroweddingj" + - "ovikaruizawasnesoddenmarkets3-ap-northeast-2xn--mgbai9azgqp6jewe" + - "lryxn--mgbayh7gpaduaxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc0" + - "a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbi" + - "4ecexposedxn--mgbpl2fhskozakis-an-actresshintokushimaxn--mgbqly7" + - "c0a67fbcloudfrontdoorxn--mgbqly7cvafredrikstadtvsorumisakis-foun" + - "dationxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhausposts-and-" + - "telecommunicationsupdatelemarkashiwaravoues3-fips-us-gov-west-1x" + - "n--mgbx4cd0abbottuxfamilyxn--mix082fetsundxn--mix891fgunmarnarda" + - "lxn--mjndalen-64axn--mk0axinfinitis-with-thebandoomdnsaliascolip" + - "icenord-aurdalceshiojirishirifujiedaxn--mk1bu44cloudfunctionsauh" + - "eradxn--mkru45isleofmandalxn--mlatvuopmi-s4axn--mli-tlapyatigors" + - "kpnxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuhokkaidontexisteing" + - "eekppspbananarepublicartierxn--mosjen-eyatominamiawajikissmarter" + - "thanyouslivinghistoryxn--mot-tlaquilancasterxn--mre-og-romsdal-q" + - "qbbcartoonartdecoffeedbackashiwazakiyokawaraxastronomycdn77-secu" + - "rebungoonord-frontierepbodyndns-freebox-oskolegokasells-for-less" + - "3-ap-southeast-1xn--msy-ula0halsaintlouis-a-anarchistoireggio-em" + - "ilia-romagnakanotoddenxn--mtta-vrjjat-k7afamilycompanycntoyookan" + - "zakiwienxn--muost-0qaxn--mxtq1mishimatsumotofukexn--ngbc5azdxn--" + - "ngbe9e0axn--ngbrxn--45brj9circus-2xn--nit225krasnodarxn--nmesjev" + - "uemie-tcbajddarchaeologyxn--nnx388axn--nodexn--nqv7fs00emaxn--nr" + - "y-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservecounterstrik" + - "exn--nvuotna-hwaxn--nyqy26axn--o1achattanooganorfolkebiblegalloc" + - "us-1xn--o3cw4hammarfeastafricamagichofunatorientexpressaseboknow" + - "sitalluxembourgrpanamaxn--od0algxn--od0aq3bbtatamotorsalangenayo" + - "roceanographicsalondonetskasukabedzin-the-bandaioiraseeklogesura" + - "nceoceanographiqueu-2xn--ogbpf8flekkefjordxn--oppegrd-ixaxn--ost" + - "ery-fyatsukaratsuginamikatagamihoboleslawiecolonialwilliamsburgu" + - "lenxn--osyro-wuaxn--p1acfhvalerxn--p1aiwchoshibuyachiyodavvesiid" + - "azaifuefukihaborokunohealth-carereformitakeharaxn--pbt977colorad" + - "oplateaudioxn--pgbs0dhlxn--porsgu-sta26fidonnakamagayachtscrappe" + - "r-sitexn--pssu33lxn--pssy2uxn--q9jyb4columbusheyxn--qcka1pmcdona" + - "ldsouthcarolinazawaxn--qqqt11missilelxn--qxamurskiptveterinairea" + - "ltorlandxn--rady-iraxn--rdal-poaxn--rde-ularvikrasnoyarskomforba" + - "mblebtimnetz-2xn--rdy-0nabarixn--rennesy-v1axn--rhkkervju-01afla" + - "kstadaokagakibichuoxn--rholt-mragowoodsidexn--rhqv96gxn--rht27zx" + - "n--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--rland-uuaxn-" + - "-rlingen-mxaxn--rmskog-byatsushiroxn--rny31hamurakamigoriginshim" + - "okawaxn--rovu88bbvacationswatch-and-clockerxn--rros-granvindafjo" + - "rdxn--rskog-uuaxn--rst-0narutokyotangotpantheonsitextileitungsen" + - 
"xn--rsta-francaiseharaxn--ryken-vuaxn--ryrvik-byawaraxn--s-1fait" + - "heguardianxn--s9brj9communitysnesavannahgaxn--sandnessjen-ogbizh" + - "evskredirectmeldalxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-" + - "gratangenxn--skierv-utazaskvolloabathsbcomobaraxn--skjervy-v1axn" + - "--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narviikananporov" + - "noxn--slt-elabourxn--smla-hraxn--smna-gratis-a-bulls-fanxn--snas" + - "e-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr-au" + - "rdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbentleyu" + - "kuhashimojiinetatarstanflfanfshostrodawaraxn--srfold-byawatahama" + - "xn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--stj" + - "rdalshalsen-sqbeppubolognagasukeverbankasumigaurawa-mazowszexbox" + - "enapponazure-mobilevje-og-hornnesaltdalinkasuyakutiaxn--stre-tot" + - "en-zcbsouthwestfalenxn--t60b56axn--tckweatherchannelxn--tiq49xqy" + - "jewishartgalleryxn--tjme-hraxn--tn0agrinet-freaksowaxn--tnsberg-" + +const text = "bifukagawalterbihorologybikedagestangeorgeorgiaxasnesoddenmarkha" + + "ngelskjakdnepropetrovskiervaapsteiermarkaragandabruzzoologicalvi" + + "nklein-addrammenuernberggfarmerseine12bilbaogakidsmynasushiobara" + + "gusartsalangeninohekinannestadray-dnsiskinkyotobetsumidatlantica" + + "tholicheltenham-radio-opencraftranagatorodoybillustrationinomiya" + + "konojosoyrorosalondonetskarpaczeladzjavald-aostarnbergladegreevj" + + "e-og-hornnesaltdalimitedraydnsupdaternopilawabioceanographiquebi" + + "rdartcenterprisesakikuchikuseikarugamvikaruizawabirkenesoddtange" + + "novaraumalopolskanlandrivelandrobaknoluoktachikawakembuchikumaga" + + "yagawakkanaibetsubamericanfamilydscloudcontrolledekafjordrudunsa" + + "lvadordalibabalatinord-aurdalvdalaskanittedallasalleasinglesuran" + + "certmgretagajobojinzais-a-candidatebirthplacebjarkoybjerkreimbal" + + "sfjordgcahcesuolocus-1bjugnirasakis-a-catererblockbustermezlglas" + + "sassinationalheritagematsubarakawagoebloombergbauernishiazais-a-" + + "celticsfanishigoddabloxcmsalzburgliwicebluedancebmoattachmentsam" + + "egawabmsamnangerbmwegroweibolzanordkappgafanquannefrankfurtjmaxx" + + "xboxenapponazure-mobilebnpparibaselburglobalashovhachinohedmarka" + + "rumaifarmsteadupontariomutashinais-a-chefarsundurbanamexnethnolo" + + "gybnrweirbonnishiharabookinglobodoes-itvedestrandurhamburglogowf" + + "ashionishiizunazukis-a-conservativefsnillfjordvrcambridgestonexu" + + "s-2bootsamsclubindalimoliserniaboschaefflerdalindashorokanaiebos" + + "tikasaokaminokawanishiaizubangebostonakijinsekikogentingloppenza" + + "ogashimadachicagoboatsamsungmbhartiffanybotanicalgardenishikatak" + + "ayamatta-varjjatjometlifeinsurancebotanicgardenishikatsuragithub" + + "usercontentjxfinitybotanybouncemerckmsdnipropetrovskjervoyagebou" + + "nty-fullensakerrypropertiesandvikcoromantovalle-d-aostatic-acces" + + "sanfranciscofreakunemurorangeiseiyoichippubetsubetsugaruhrboutiq" + + "uebecngminakamichiharabozentsujiiebplacedogawarabikomaezakirunor" + + "dlandvrdnsangoppdalindesnesanjournalismailillesandefjordyndns-at" + + "-workinggroupaleobrandywinevalleybrasiliabresciabrindisibenikebr" + + "istoloslocalhistorybritishcolumbialowiezachpomorskienishikawazuk" + + "amitondabayashiogamagoriziabroadcastlegallocalhostrodawaravennag" + + "asukebroadwaybroke-itkmaxxjaworznowtvalled-aostavangerbrokerbron" + + "noysundyndns-blogdnsannanishimerabrothermesaverdeatnurembergmode" + + "nakasatsunais-a-cpadualstackspace-to-rentalstomakomaibarabrowser" + + 
"safetymarketsannohelplfinancialivornobrumunddalombardiamondsanok" + + "ashibatakashimaseratis-a-cubicle-slavellinotteroybrunelasticbean" + + "stalkashiharabrusselsantabarbarabruxellesantacruzsantafedjeffers" + + "onishinomiyashironobryanskleppalermomahachijorpelandyndns-freebo" + + "x-ostrowwlkpmgmxn--0trq7p7nnishinoomotegobrynewhollandyndns-home" + + "dnsanukis-a-democratmpalmspringsakerbuskerudinewmexicodyn-vpnplu" + + "sterbuzenishinoshimattelefonicarbonia-iglesias-carboniaiglesiasc" + + "arboniabuzzpamperedchefastlylbaltimore-og-romsdalwaysdatabasebal" + + "langenoamishirasatochigiessensiositelemarkarateu-1bwhalingrimsta" + + "dyndns-ipirangaulardalombardynamisches-dnsaotomemergencyachtsapo" + + "dlasiellaktyubinskiptveterinairealtorlandyndns-mailomzaporizhzhe" + + "guris-a-designerimarumorimachidabzhitomirumalselvendrellorenskog" + + "ripescaravantaacondoshichinohealth-carereformitakeharaconference" + + "constructionconsuladoesntexistanbullensvanguardyndns1consultanth" + + "ropologyconsultingvolluroycontactoyotsukaidownloadynnsaskatchewa" + + "ncontemporaryarteducationalchikugodoharuovatoyouracontractorsken" + + "conventureshinodesashibetsuikinderoycookingchannelblagdenesnaase" + + "ralingenkainanaejrietisalatinabenonichernivtsiciliacoolkuszczytn" + + "ore-og-uvdalutskasuyameldaluxembourgrpanamacooperaunitenrightath" + + "omeftpanasonichernovtsykkylvenetogakushimotoganewspapercopenhage" + + "ncyclopedichirurgiens-dentistes-en-francecorsicagliaridagawarsza" + + "washingtondclkaszubycorvettevadsoccertificationcosenzagancosidns" + + "dojoetsuwanouchikujogaszkoladbrokesassaris-a-huntercostumedio-ca" + + "mpidano-mediocampidanomediocouchpotatofriesatxn--11b4c3dynv6coun" + + "ciluxurycouponsaudacoursesauheradynvpnchiryukyuragifuchungbukhar" + + "acq-acranbrookuwanalyticsavannahgacreditcardyroyrvikingruecredit" + + "unioncremonashgabadaddjambyluzerncrewiiheyakagecricketrzyncrimea" + + "st-kazakhstanangercrotonextdirectoystre-slidrettozawacrownprovid" + + "ercrsvparaglidinguitarsaves-the-whalessandria-trani-barletta-and" + + "riatranibarlettaandriacruisesavonaplesaxocryptonomichigangwoncui" + + "sinellahppiacenzakopanerairguardiannakadomarinebraskaunjargalsac" + + "eoculturalcentertainmentozsdeltaitogliattiresbschokoladencuneocu" + + "pcakecxn--12c1fe0bradescorporationcyberlevagangaviikanonjis-a-kn" + + "ightpointtokaizukamikitayamatsuris-a-landscapercymrussiacyonabar" + + "ulvikatowicecyouthdfcbankatsushikabeeldengeluidfidonnakamurataji" + + "mibuildingulenfieldfiguerestaurantraniandriabarlettatraniandriaf" + + "ilateliafilegearthachiojiyahoofilminamidaitomangotsukisosakitaga" + + "wafinalfinancefineartschwarzgwangjuifminamiechizenfinlandfinnoyf" + + "irebaseapparisor-fronfirenzefirestonefirmdaleirvikaufenfishingol" + + "ffanschweizwildlifedorainfracloudfrontdoorfitjarmeniafitnessettl" + + "ementranoyfjalerflesbergunmarburguovdageaidnuslivinghistoryflick" + + "ragerotikakamigaharaflightsciencecentersciencehistoryflirflogint" + + "ogurafloraflorencefloridavvesiidazaifudaigojomedizinhistorisches" + + "cientistoragefloripaderbornfloristanohatakahamangyshlakasamatsud" + + "ontexisteingeekautokeinoflorogerscjohnsonflowerscotlandflynnhuba" + + "mblefrakkestadiscountysnes3-sa-east-1fndfoodnetworkshoppingushik" + + "amifuranortonsbergxn--12co0c3b4evalleaostatoilfor-ourfor-someetn" + + "edalfor-theaterforexrothachirogatakahatakaishimogosenforgotdnscr" + + "apper-siteforli-cesena-forlicesenaforlikescandynamic-dnscrapping" + + "forsaleitungsenforsandasuolodingenfortmissoulair-traffic-control" + + 
"leyfortworthadanosegawaforuminamifuranofosneserveftparliamentran" + + "sportransurlfotaruis-a-lawyerfoxfordedyn-ip24freeboxoservegame-s" + + "erversailleservehalflifestylefreemasonryfreetlservehttparmafreib" + + "urgfreightcminamiiselectrapaniimimatakatoris-a-liberalfresenius-" + + "3fribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-ve" + + "nezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriu" + + "live-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiulia" + + "friulivgiuliafrlfroganservehumourfrognfrolandfrom-akrehamnfrom-a" + + "lfrom-arqhadselfiparocherkasyno-dserveirchitachinakagawassamukaw" + + "ataricohdatsunanjoburgriwataraidyndns-office-on-the-webcampobass" + + "ociatesapporofrom-azfrom-capebretonamiastapleserveminecraftravel" + + "channelfrom-collectionfrom-ctravelersinsurancefrom-dchitosetogit" + + "suldalotenkawafrom-defenseljordfrom-flanderservemp3from-gausdalf" + + "rom-higashiagatsumagoizumizakirkeneservep2parservepicservequakef" + + "rom-iafrom-idfrom-ilfrom-incheonfrom-kservesarcasmatartanddesign" + + "from-kyowariasahikawafrom-lajollamericanexpressexyfrom-maniwakur" + + "atextileksvikazofrom-mdfrom-megurokunohealthcareerservicesettsur" + + "geonshalloffamemorialfrom-microsoftbankazunofrom-mnfrom-modellin" + + "gfrom-msevastopolefrom-mtnfrom-nchloefrom-ndfrom-nefrom-nhktrdfr" + + "om-njcbnlfrom-nminamiizukamisatokamachintaifun-dnsaliasdaburfrom" + + "-nvalledaostavernfrom-nyfrom-ohkurafrom-oketohmannorth-kazakhsta" + + "nfrom-orfrom-padovaksdalfrom-pratohnoshoooshikamaishimodatefrom-" + + "rivnefrom-schoenbrunnfrom-sdfrom-tnfrom-txn--1ck2e1bananarepubli" + + "caseihichisobetsuitainairforcechirealminamiawajikibmdiscoveryomb" + + "ondishakotanavigationavoiitatebayashiibahcavuotnagaraholtaleniwa" + + "izumiotsukumiyamazonawsadodgemologicallyngenvironmentalconservat" + + "ionavuotnaklodzkodairassnasabaerobaticketselinogradultashkentata" + + "motors3-ap-northeast-2from-utazuerichardlillehammerfeste-ipartis" + + "-a-libertarianfrom-val-daostavalleyfrom-vtrentino-a-adigefrom-wa" + + "from-wielunnerfrom-wvallee-aosteroyfrom-wyfrosinonefrostalowa-wo" + + "lawafroyahikobeardubaiduckdnsevenassisicilyfstcgroupartnersewill" + + "iamhillfujiiderafujikawaguchikonefujiminohtawaramotoineppubologn" + + "akanotoddenfujinomiyadafujiokayamansionsfranziskanerdpolicefujis" + + "atoshonairtelecityeatsharis-a-linux-useranishiaritabashijonawate" + + "fujisawafujishiroishidakabiratoridefinimakanegasakindlegokasells" + + "-for-lessharpartshawaiijimarugame-hostrolekameokameyamatotakadaf" + + "ujitsurugashimaritimekeepingfujixeroxn--1ctwolominamatakkokamino" + + "yamaxunusualpersonfujiyoshidafukayabeatshellaspeziafukuchiyamada" + + "fukudominichocolatemasekashiwazakiyosatokashikiyosemitefukuis-a-" + + "llamarylandfukumitsubishigakirovogradoyfukuokazakiryuohaebarumin" + + "amimakis-a-musicianfukuroishikarikaturindalfukusakisarazurewebsi" + + "teshikagamiishibukawafukuyamagatakaharustkanoyakumoldeloittexasc" + + "olipicenoipifonynysaarlandfunabashiriuchinadafunagatakahashimama" + + "kishiwadafunahashikamiamakusatsumasendaisennangonohejis-a-nascar" + + "fanfundaciofuoiskujukuriyamanxn--1lqs03nfuosskoczowinbarcelonaga" + + "sakijobserverisignieznord-frontiereviewskrakowedeployomitanobihi" + + "rosakikamijimastronomy-gatewaybomloans3-ap-south-1furnituredston" + + "efurubiraquarelleborkangerfurudonostiaarpartyfurukawairtrafficho" + + "funatoriginsurecifedexhibitionishiokoppegardyndns-picsardegnamss" + + "koganeis-a-doctorayfusodegaurafussaikisofukushimaoris-a-nurserve" + + 
"bbshimojis-a-painteractivegarsheis-a-patsfanfutabayamaguchinomig" + + "awafutboldlygoingnowhere-for-moregontrailroadfuttsurugimperiafut" + + "urehostingfuturemailingfvgfyis-a-personaltrainerfylkesbiblackfri" + + "dayfyresdalhangoutsystemscloudfunctionshimokawahannanmokuizumode" + + "rnhannotaireshimokitayamahanyuzenhapmirhareidsbergenharstadharve" + + "stcelebrationhasamarcheapassagenshimonitayanagitlaborhasaminami-" + + "alpssells-itrentino-aadigehashbanghasudahasura-appassenger-assoc" + + "iationhasvikddielddanuorrikuzentakataiwanairlinedre-eikerhatogay" + + "aitakamoriokalmykiahatoyamazakitahiroshimarnardalhatsukaichikais" + + "eis-a-republicancerresearchaeologicaliforniahattfjelldalhayashim" + + "amotobungotakadapliernewjerseyhazuminobusellsyourhomegoodshimono" + + "sekikawahboehringerikehelsinkitakamiizumisanofidelitysvardollshi" + + "mosuwalkis-a-rockstarachowicehembygdsforbundhemneshimotsukehemse" + + "dalhepforgeherokussldheroyhgtvalleeaosteigenhigashichichibunkyon" + + "anaoshimageandsoundandvisionhigashihiroshimanehigashiizumozakita" + + "katakanabeautydalhigashikagawahigashikagurasoedahigashikawakitaa" + + "ikitakyushuaiahigashikurumeiwamarriottrentino-alto-adigehigashim" + + "atsushimarshallstatebankfhappouhigashimatsuyamakitaakitadaitoiga" + + "wahigashimurayamamotorcycleshimotsumahigashinarusembokukitamidor" + + "is-a-socialistmein-vigorgehigashinehigashiomihachimanchesterhiga" + + "shiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiy" + + "oshikawaminamiaikitamotosumitakagildeskaliszhigashitsunotogawahi" + + "gashiurausukitanakagusukumoduminamiminowahigashiyamatokoriyamana" + + "shifteditchyouripaviancarrierhigashiyodogawahigashiyoshinogaris-" + + "a-soxfanhiraizumisatohobby-sitehirakatashinagawahiranais-a-stude" + + "ntalhirarahiratsukagawahirayaizuwakamatsubushikusakadogawahistor" + + "ichouseshinichinanhitachiomiyaginankokubunjis-a-teacherkassymant" + + "echnologyhitachiotagooglecodespotrentino-altoadigehitraeumtgerad" + + "elmenhorstalbanshinjournalistjohnhjartdalhjelmelandholeckobierzy" + + "ceholidayhomeipfizerhomelinkhakassiahomelinuxn--1lqs71dhomeoffic" + + "ehomesecuritymaceratakaokaluganskolevangerhomesecuritypccwindmil" + + "lhomesenseminehomeunixn--1qqw23ahondahoneywellbeingzonehongopocz" + + "northwesternmutualhonjyoitakarazukamakurazakitashiobarahornindal" + + "horseoulminamiogunicomcastresistancehortendofinternet-dnshinjuku" + + "manohospitalhoteleshinkamigotoyohashimotoshimahotmailhoyangerhoy" + + "landetroitskydivinghumanitieshinshinotsurgeryhurdalhurumajis-a-t" + + "echietis-a-therapistoiahyllestadhyogoris-an-accountantshinshiroh" + + "yugawarahyundaiwafunehzchoseiroumuenchenishitosashimizunaminamia" + + "shigarajfkhmelnitskiyamashikejgorajlchoyodobashichikashukujitawa" + + "rajlljmpharmacienshiojirishirifujiedajnjcpgfoggiajoyokaichibahcc" + + "avuotnagareyamalborkdalpha-myqnapcloudapplebesbyglandjpmorganjpn" + + "jprshioyanaizujuniperjurkoshimizumakis-an-engineeringkoshunantok" + + "igawakosugekotohiradomainshirakofuefukihaboromskoguchikuzenkotou" + + "rakouhokutamakis-an-entertainerkounosupplieshiranukamogawakouyam" + + "ashikokuchuokouzushimasoykozagawakozakis-bykpnkppspdnshiraois-ce" + + "rtifieducatorahimeshimamateramochizukirakrasnodarkredirectmelhus" + + "cultureggio-calabriakristiansandcatshiraokanagawakristiansundkro" + + "dsheradkrokstadelvaldaostarostwodzislawindowshiratakahagivestbyk" + + "ryminamisanrikubetsupportrentino-sued-tirolkumatorinokumejimasud" + + "akumenanyokkaichiropractichristmasakikugawatchandclockasukabedzi" + + 
"n-the-bandaikawachinaganoharamcoachampionshiphoptobishimaizurugb" + + "ydgoszczecinemakeupowiathletajimabariakeisenbahnishiwakis-a-fina" + + "ncialadvisor-aurdalottokonamegatakasugais-a-geekgalaxykunisakis-" + + "foundationkunitachiarailwaykunitomigusukumamotoyamassa-carrara-m" + + "assacarraramassabusinessebytomaritimobarakunneppulawykunstsammlu" + + "ngkunstunddesignkuokgrouphdkureggio-emilia-romagnakatsugawakurga" + + "nkurobelaudiblebtimnetzkurogimilanokuroisoftwarendalenugkuromats" + + "unais-gonekurotakikawasakis-into-animelbournekushirogawakustanai" + + "s-into-carshintomikasaharakusupplykutchanelkutnokuzumakis-into-c" + + "artoonshinyoshitomiokamitsuekvafjordkvalsundkvamfamberkeleykvana" + + "ngenkvinesdalkvinnheradkviteseidskogkvitsoykwpspiegelkzmissilewi" + + "smillermisugitokorozawamitourismolancastermitoyoakemiuramiyazumi" + + "yotamanomjondalenmlbfanmonmouthagebostadmonstermonticellolmontre" + + "alestatefarmequipmentrentino-suedtirolmonza-brianzaporizhzhiamon" + + "za-e-della-brianzapposhishikuis-not-certifiedunetbankharkovanylv" + + "enicemonzabrianzaptokuyamatsusakahoginowaniihamatamakawajimaphil" + + "adelphiaareadmyblogsitemonzaebrianzaramonzaedellabrianzamoonscal" + + "exusdecorativeartshisognemoparachutingmordoviajessheiminamitanem" + + "oriyamatsushigemoriyoshimilitarymormoneymoroyamatsuuramortgagemo" + + "scowinnershisuifuelveruminamiuonumatsumotofukemoseushistorymosjo" + + "enmoskeneshitaramamosshizukuishimofusaitamatsukuris-savedmosvikn" + + "x-serveronakatombetsunndalmoteginozawaonsenmoviemovistargardmtpc" + + "hromedicaltanissettairamtranbymuenstermugithubcloudusercontentre" + + "ntinoa-adigemuikamishihoronobeauxartsandcraftshizuokananporovigo" + + "tpantheonsitemukochikushinonsenergymulhouservebeermunakatanemunc" + + "ieszynmuosattemuphilatelymurmanskolobrzegersundmurotorcraftrenti" + + "noaadigemusashimurayamatsuzakis-slickhersonmusashinoharamuseetre" + + "ntinoalto-adigemuseumverenigingmusicargodaddynaliascoli-picenogi" + + "ftshoujis-uberleetrentino-stirolmutsuzawamy-vigorlicemy-wanggouv" + + "icenzamyactivedirectorymyasustor-elvdalmycdn77-securechtrainingm" + + "ydissentrentinoaltoadigemydrobofagemydshowamyeffectrentinos-tiro" + + "lmyfirewallonieruchomoscienceandindustrynmyfritzmyftpaccesshowti" + + "meteorapphilipsynology-diskstationmyfusionmyhome-serverrankoshig" + + "ayanagawamykolaivaporcloudmymailermymediapchryslermyokohamamatsu" + + "damypepsongdalenviknakanojohanamakinoharamypetshriramlidlugoleka" + + "gaminoduminamiyamashirokawanabelembroideryggeelvincklabudhabikin" + + "okawabarthagakhanamigawamyphotoshibajddarchaeologyeongnamegawalb" + + "rzycharternidmypsxn--30rr7ymysecuritycamerakermyshopblocksienara" + + "shinomytis-a-bookkeeperugiamyvnchungnamdalseidfjordyndns-remotew" + + "dyndns-serverdalouvreggioemiliaromagnakayamatsumaebashikshacknet" + + "oyookanmakiwakunigamidsundyndns-weberlincolnissandnessjoenissayo" + + "koshibahikariwanumatakazakis-a-greenissedalowiczest-le-patrondhe" + + "immobilienisshingugepicturesilkomaganepiemontepilotsimple-urlpim" + + "ientaketomisatolgapinkomakiyosumy-routerpioneerpippuphonefossigd" + + "alpiszpittsburghofauskedsmokorsetagayasells-for-unzenpiwatepizza" + + "pkomatsushimashikizunokunimihoboleslawiechristiansburgroks-thisa" + + "yamanobeokakudamatsueplanetariuminanoplantationplantsirdalplatfo" + + "rmshangrilanciaplaystationplazaplchurchaseljeepostfoldnavyplumbi" + + "ngopmnpodzonepohlpoivronpokerpokrovskomforbarclays3-us-gov-west-" + + "1politiendapolkowicepoltavalle-aostathellezajskommunalforbundpom" + + 
"orzeszowioslingpordenonepornporsangerporsanguidellogliastradingp" + + "orsgrunnanpoznanpraxis-a-bruinsfanprdpreservationpresidioprgmrpr" + + "imeloyalistockholmestrandprincipeprivatizehealthinsuranceprochow" + + "iceproductionslupskommuneprofbsbxn--12cfi8ixb8lvivano-frankivska" + + "tsuyamasfjordenprogressivegasiapromombetsurfbx-oscholarshipschoo" + + "lpropertyprotectionprotonetrentinosud-tirolprudentialpruszkowitd" + + "komonoprzeworskogptplusgardenpvtrentinosudtirolpwcirclegnicafede" + + "rationiyodogawapzqldqponqslgbtrentinosued-tirolquicksytesnoasait" + + "omobellevuelosangelesjaguarchitecturealtychyattorneyagawalesundq" + + "uipelementsokanazawaqvcircustomerstuff-4-salestufftoread-booksne" + + "solognestuttgartritonsusakis-very-evillagesusonosuzakaneyamazoes" + + "uzukaniepcesuzukis-very-goodhandsonsvalbardunloppacificitadelive" + + "rysveiosvelvikongsbergsvizzeraswedenswidnicartierswiebodzindiana" + + "polis-a-bloggerswiftcoversicherungswinoujscienceandhistoryswissh" + + "ikis-very-nicesynology-dsolundbeckomorotsukamiokamikoaniikappugl" + + "iatushuissier-justicetuvalle-daostaticsomatuxfamilytwmailvennesl" + + "askerrylogisticsomnaritakurashikis-very-badajozoravestfoldvestne" + + "soovestre-slidreamhostersopotrentinosuedtirolvestre-totennishiaw" + + "akuravestvagoyvevelstadvibo-valentiavibovalentiavideovillaskimit" + + "subatamicable-modembetsukuis-very-sweetpeppervinnicartoonartdeco" + + "ffeedbackplaneappspotagervinnytsiavipsinaappiagetmyiphoenixn--32" + + "vp30haibarakitahatakamatsukawavirginiavirtualvirtueeldomeindianm" + + "arketingvirtuelvisakegawavistaprinternationalfirearmsor-odalvite" + + "rboltrogstadvivoldavixn--3bst00minnesotaketakatsukis-into-gamess" + + "inatsukigatakasagotembaixadavlaanderenvladikavkazimierz-dolnyvla" + + "dimirvlogoipictetrentinostirolvolkswagentsor-varangervologdansko" + + "ninjamisonvolvolkenkundenvolyngdalvossevangenvotevotingvotoyonak" + + "agyokutoursorfoldwloclawekonskowolayangroupharmacyshirahamatonbe" + + "tsurnadalwmflabsorreisahayakawakamiichikawamisatotalworldworse-t" + + "handawowithgoogleapisa-hockeynutsiracusakatakinouewritesthisblog" + + "sytewroclawithyoutubeneventoeidsvollwtcitichernigovernmentoyonow" + + "tfbxoschulewuozuwwwiwatsukiyonowruzhgorodeowzmiuwajimaxn--45brj9" + + "civilaviationxn--45q11civilisationxn--4gbriminingxn--4it168dxn--" + + "4it797konyveloftrentino-sudtirolxn--4pvxs4allxn--54b7fta0ccivili" + + "zationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilwarmanageme" + + "ntoyosatoyakokonoexn--5rtq34kooris-an-anarchistoricalsocietyxn--" + + "5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986" + + "b3xlxn--7t0a264claimsarlucaniaxn--80adxhksortlandxn--80ao21axn--" + + "80aqecdr1axn--80asehdbarreauctionflfanfshostrowiecasertaipeiheij" + + "iiyamanouchikuhokuryugasakitaurayasudaukraanghkeymachineustarhub" + + "alsanagochihayaakasakawaharanzanpachigasakicks-assedicasadelamon" + + "edatingjemnes3-ap-southeast-2xn--80aswgxn--80audnedalnxn--8ltr62" + + "kopervikhmelnytskyivaolbia-tempio-olbiatempioolbialystokkepnogat" + + "aijis-an-actresshintokushimaxn--8pvr4uxn--8y0a063axn--90a3academ" + + "y-firewall-gatewayxn--90aishobaraomoriguchiharahkkeravjuedisches" + + "apeakebayernrtromsakakinokiaxn--90azhytomyrxn--9dbhblg6dietcimdb" + + "arrel-of-knowledgeologyonagoyaurskog-holandroverhalla-speziaerop" + + "ortalaheadjudaicaaarborteaches-yogasawaracingroks-theatree164xn-" + + "-9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexn--3d" + + "s443gxn--asky-iraxn--aurskog-hland-jnbarrell-of-knowledgeometre-" + + 
"experts-comptables3-us-west-1xn--avery-yuasakuhokkaidoomdnshome-" + + "webservercellikes-piedmontblancomeeresorumincommbankmpspbarclayc" + + "ards3-us-east-2xn--b-5gaxn--b4w605ferdxn--bck1b9a5dre4cldmailucc" + + "apitalonewportlligatoyotaris-a-gurulsandoyxn--bdddj-mrabdxn--bea" + + "ralvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7ax" + + "n--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bj" + + "ddar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-graingerxn--b" + + "od-2naroyxn--brnny-wuaccident-investigation-aptibleaseating-orga" + + "nicbcn-north-1xn--brnnysund-m8accident-prevention-webhopenairbus" + + "antiquest-a-la-maisondre-landebudapest-a-la-masionionjukudoyamag" + + "entositelekommunikationthewifiat-band-campaniaxn--brum-voagatrom" + + "sojampagefrontapphotographysioxn--btsfjord-9zaxn--c1avgxn--c2br7" + + "gxn--c3s14mintelligencexn--cck2b3barsyonlinewhampshirebungoonord" + + "-odalazioceanographics3-us-west-2xn--cg4bkis-with-thebandovre-ei" + + "kerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisakis-leetrentino" + + "-s-tirollagrigentomologyeongbukharkivgucciprianiigataishinomakim" + + "obetsuliguriaxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-g" + + "hc29axn--czr694bashkiriaustevollarvikarasjohkamiminers3-ca-centr" + + "al-1xn--czrs0trusteexn--czru2dxn--czrw28basilicataniaustinnatura" + + "lsciencesnaturelles3-eu-central-1xn--d1acj3basketballfinanzgorau" + + "straliaisondriodejaneirochesterepbodynathomebuiltatarantottoribe" + + "staddnskingjerdrumckinseyokosukanzakiwienaturbruksgymnaturhistor" + + "isches3-eu-west-1xn--d1alfaromeoxn--d1atrvarggatroandinosaureise" + + "nxn--d5qv7z876clickasumigaurawa-mazowszextraspacekitagatajirissa" + + "gamiharaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koryokamikawane" + + "honbetsurutaharaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry" + + "-iraxn--e1a4clinichernihivanovodkagoshimalvikashiwaraxn--eckvdtc" + + "9dxn--efvn9southcarolinazawaxn--efvy88hair-surveillancexn--ehqz5" + + "6nxn--elqq16hakatanoshiroomuraxn--estv75gxn--eveni-0qa01gaxn--f6" + + "qx53axn--fct429kosaigawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hso" + + "uthwestfalenxn--fiq64batodayonaguniversityoriikariyaltakasakiyok" + + "awaraustrheimatunduhrennesoyokoteastcoastaldefencebinagisochildr" + + "ensgardenatuurwetenschappenaumburgjerstadotsuruokakegawaetnagaha" + + "maroygardenebakkeshibechambagriculturennebudejjudygarlandd-dnsfo" + + "r-better-thanawawdev-myqnapcloudcontrolapplinzi234xn--fiqs8sowax" + + "n--fiqz9spjelkavikomvuxn--2m4a15exn--fjord-lraxn--fjq720axn--fl-" + + "ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde-grandrapidspread" + + "bettingxn--frna-woaraisaijotrysiljanxn--frya-hraxn--fzc2c9e2clin" + + "iquenoharaxn--fzys8d69uvgmailxn--g2xx48clintonoshoesarpsborgrond" + + "arxn--gckr3f0fedorapeopleirfjordxn--gecrj9clothingrongaxn--ggavi" + + "ika-8ya47hakodatexn--gildeskl-g0axn--givuotna-8yasakaiminatoyone" + + "zawaxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050isleofmand" + + "alxn--gmqw5axn--h-2failxn--h1aeghakonexn--h2brj9cnsarufutsunomiy" + + "awakasaikaitakoelnxn--h3cuzk1digitalxn--hbmer-xqaxn--hcesuolo-7y" + + "a35batsfjordivtasvuodnakaiwamizawauthordalandroiddnss3-eu-west-2" + + "xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4acctulangevagrarbo" + + "retumbriaxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqa" + + "xn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr" + + "513nxn--indery-fyasugissmarterthanyouxn--io0a7iwchoshibuyachiyod" + + "avvenjargapartmentsardiniaxn--j1aefedoraprojectrani-andria-barle" + + 
"tta-trani-andriaxn--j1amhakubaghdadxn--j6w193gxn--jlq61u9w7bauha" + + "usposts-and-telecommunicationsncfdivttasvuotnakamagayahababyklec" + + "lercasinordre-landiyoshiokaracoldwarmiamihamadautomotivecoalipay" + + "okozebinorfolkebibleikangereportateshinanomachimkentateyamagroce" + + "rybnikahokutobamaintenancebetsukubank12xn--jlster-byasuokanraxn-" + + "-jrpeland-54axn--jvr189misasaguris-lostre-toteneis-an-actorxn--k" + + "7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--kl" + + "bu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3e0b707exn--ko" + + "luokta-7ya57hakuis-a-photographerokuappasadenamsosnowiechonanbui" + + "lderschmidtre-gauldalottexn--kprw13dxn--kpry57dxn--kpu716fermoda" + + "lenxn--kput3ixn--krager-gyatomitamamuraxn--kranghke-b0axn--krdsh" + + "erad-m8axn--krehamn-dxaxn--krjohka-hwab49jeonnamerikawauexn--ksn" + + "es-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanumazuryxn--kvnangen-k0a" + + "xn--l-1fairwindspydebergxn--l1accentureklamborghiniizaxn--lahead" + + "ju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leaga" + + "viika-52bbcateringebugattipschlesisches3-website-ap-northeast-1x" + + "n--lesund-huaxn--lgbbat1ad8jetztrentino-sud-tirolxn--lgrd-poacnt" + + "oyotomiyazakis-a-hard-workerxn--lhppi-xqaxn--linds-pramericanart" + + "unesolutionsokndalxn--lns-qlansrlxn--loabt-0qaxn--lrdal-sraxn--l" + + "renskog-54axn--lt-liacolonialwilliamsburgrossetouchijiwadell-ogl" + + "iastraderxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--m" + + "erker-kuaxn--mgb2ddesrtrentoyokawaxn--mgb9awbferraraxn--mgba3a3e" + + "jtunkongsvingerxn--mgba3a4f16axn--mgba3a4franamizuholdingsmilelx" + + "n--mgba7c0bbn0axn--mgbaakc7dvferrarittogoldpoint2thisamitsukexn-" + + "-mgbaam7a8hakusandiegoodyearxn--mgbab2bdxn--mgbai9a5eva00bbtatto" + + "olsztynsettlers3-website-ap-southeast-1xn--mgbai9azgqp6jevnakers" + + "huscountryestateofdelawarezzoologyxn--mgbayh7gpagespeedmobilizer" + + "oxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzd" + + "oxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbi4ecexposedxn--mgbpl" + + "2fhskodjejuegoshikiminokamoenairportland-4-salernoboribetsucksrv" + + "areserveblogspotrevisohughesolarssonxn--mgbqly7c0a67fbcoloradopl" + + "ateaudioxn--mgbqly7cvafredrikstadtvstordalxn--mgbt3dhdxn--mgbtf8" + + "flatangerxn--mgbtx2bbvacationswatch-and-clockerhcloudns3-website" + + "-ap-southeast-2xn--mgbx4cd0abbotturystykannamifunexn--mix082ferr" + + "eroticanonoichinomiyakexn--mix891fetsundxn--mjndalen-64axn--mk0a" + + "xindustriesteambulancexn--mk1bu44columbusheyxn--mkru45ixn--mlatv" + + "uopmi-s4axn--mli-tlanxesstorehabmerxn--mlselv-iuaxn--moreke-juax" + + "n--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlapyatigorskypexn-" + + "-mre-og-romsdal-qqbentleyukinfinitintuitaxihuanhlfanhs3-website-" + + "eu-west-1xn--msy-ula0haldenxn--mtta-vrjjat-k7afamilycompanycommu" + + "nitysfjordyndns-wikinkobayashikaoirminamibosogndalucernexn--muos" + + "t-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--3oq18vl8" + + "pn36axn--nit225kosakaerodromegallupinbarefootballooningjovikarat" + + "suginamikatagamiharuconnectatsunobiraugustowadaegubs3-ap-southea" + + "st-1xn--nmesjevuemie-tcbalestrandabergamoarekexn--nnx388axn--nod" + + "exn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery" + + "-byaeservecounterstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattano" + + "oganordreisa-geekoseis-an-artisteinkjerusalemrxn--o3cw4halsaintl" + + "ouis-a-anarchistoiredumbrellanbibaidarxn--o3cyx2axn--od0algxn--o" + + "d0aq3beppublishproxyzgorzeleccolognewyorkshirecipesaro-urbino-pe" + + 
"sarourbinopesaromasvuotnaharimamurogawatches3-website-sa-east-1x" + + "n--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyawatahamaxn--osy" + + "ro-wuaxn--p1acfgujolsterxn--p1aixn--pbt977comobilyxn--pgbs0dhlxn" + + "--porsgu-sta26fhvalerxn--pssu33lxn--pssy2uxn--q9jyb4comparemarke" + + "rryhotelsasayamaxn--qcka1pmcdonaldstorfjordxn--qqqt11misconfused" + + "xn--qxamuneuestorjelenia-goraxn--rady-iraxn--rdal-poaxn--rde-ula" + + "quilancashireggiocalabriaxn--rdy-0nabarixn--rennesy-v1axn--rhkke" + + "rvju-01aflakstadaokagakibichuoxn--rholt-mragowoodsidexn--rhqv96g" + + "xn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--r" + + "land-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastafricap" + + "etownnews-stagingxn--rovu88bernuorockartuzyukuhashimoichinosekig" + + "aharautoscanadaejeonbukarasjokarasuyamarylhurstjordalshalsenaust" + + "dalavagiskebizenakaniikawatanaguramusementarnobrzegyptianaturalh" + + "istorymuseumcenterepaircraftarumizusawabogadocscbgdyniabkhaziama" + + "llamagazineat-url-o-g-i-nativeamericanantiques3-ap-northeast-1ka" + + "ppchizippodhaleangaviikadenadexetereit3l3p0rtargets-itargiving12" + + "000emmafanconagawakayamadridvagsoyericssonyoursidealerimo-i-rana" + + "amesjevuemielno-ip6xn--rros-granvindafjordxn--rskog-uuaxn--rst-0" + + "narutokyotangovtuscanyxn--rsta-francaiseharaxn--ryken-vuaxn--ryr" + + "vik-byaxn--s-1faithruherecreationxn--s9brj9compute-1xn--sandness" + + "jen-ogbizxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratangen" + + "xn--skierv-utazaskoyabearalvahkihokumakogengerdalcestpetersburgx" + + "n--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5" + + "narviikamisunagawaxn--slt-elabbvieeexn--smla-hraxn--smna-gratis-" + + "a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--snes-poaxn" + + "--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-" + + "varanger-ggbeskidyn-o-saurlandes3-website-us-east-1xn--srfold-by" + + "axn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--st" + + "jrdalshalsen-sqbestbuyshouses3-website-us-west-1xn--stre-toten-z" + + "cbstreamsterdamnserverbaniaxn--t60b56axn--tckweatherchannelxn--t" + + "iq49xqyjewelryxn--tjme-hraxn--tn0agrinet-freakstudioxn--tnsberg-" + "q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-" + - "zuaxn--tysvr-vraxn--uc0atversicherungxn--uc0ay4axn--uist22hangou" + - "tsystemscloudcontrolappasadenaklodzkodairaxn--uisz3gxn--unjrga-r" + - "tarantourspjelkavikosakaerodromegalsacechirealminamiuonumasudaxn" + - "--unup4yxn--uuwu58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--v" + - "ermgensberater-ctberndiyurihonjournalistjohnhlfanhsalvadordaliba" + - "baikaliszczytnorddalinzaiitatebayashijonawatexn--vermgensberatun" + - "g-pwbeskidynathomedepotenzachpomorskienikiiyamanobeauxartsandcra" + - "ftsalzburglassassinationalheritagematsubarakawagoexn--vestvgy-ix" + - "a6oxn--vg-yiabbvieeexn--vgan-qoaxn--vgsy-qoa0jfkomakiyosatokashi" + - "kiyosemitexn--vgu402comparemarkerryhotelsaves-the-whalessandria-" + - "trani-barletta-andriatranibarlettaandriaxn--vhquvestfoldxn--vler" + - "-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bestbu" + - "yshousesamegawaxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgb" + - "h1compute-1xn--wgbl6axn--xhq521betainaboxfusejnynysafetysfjordnp" + - "alanakhodkanagawaxn--xkc2al3hye2axn--xkc2dl3a5ee0hannanmokuizumo" + - "dernxn--y9a3aquariumisugitokorozawaxn--yer-znarvikristiansandcat" + - "shirahamatonbetsurgeryxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn" + - "--45q11citadeliveryggeelvinckchristiansburgruexn--ystre-slidre-u" + - 
"jbieidsvollipetskaszubyusuharaxn--zbx025dxn--zf0ao64axn--zf0avxn" + - "--4gbriminingxn--zfr164bielawallonieruchomoscienceandindustrynik" + - "koebenhavnikolaeventsamnangerxperiaxz" + "zuaxn--tysvr-vraxn--uc0atvaroyxn--uc0ay4axn--uist22hamurakamigor" + + "is-a-playerxn--uisz3gxn--unjrga-rtaobaokinawashirosatochiokinosh" + + "imalatvuopmiasakuchinotsuchiurakawakuyabukievenestudyndns-at-hom" + + "edepotenzamamicrolightingxn--unup4yxn--uuwu58axn--vads-jraxn--va" + + "rd-jraxn--vegrshei-c0axn--vermgensberater-ctbetainaboxfusejnyuri" + + "honjoyentgoryusuharaveroykenglandds3-external-1xn--vermgensberat" + + "ung-pwbieigersundnpalaceu-3utilitiesquare7xn--vestvgy-ixa6oxn--v" + + "g-yiabcgxn--vgan-qoaxn--vgsy-qoa0jewishartgalleryxn--vgu402compu" + + "terhistoryofscience-fictionxn--vhquvbargainstitutelevisionayorov" + + "nobninskarelianceu-2xn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadx" + + "n--vry-yla5gxn--vuq861bielawalmartjeldsundrangedalillyusuisserve" + + "exchangevents3-website-us-west-2xn--w4r85el8fhu5dnraxn--w4rs40lx" + + "n--wcvs22dxn--wgbh1comsecuritytacticsaseboknowsitallukowhoswhokk" + + "sundyndns-workisboringroundhandlingroznyxn--wgbl6axn--xhq521biel" + + "laakesvuemielecceverbankarlsoyuufcfanikinuyamashinashikitchenikk" + + "oebenhavnikolaevennodessagaeroclubmedecincinnationwidealstahauge" + + "sunderseaportsinfolldalabamagasakishimabarackmazerbaijan-mayendo" + + "ftheinternetflixilovecollegefantasyleaguernseyuzawavocatanzarowe" + + "ddingjesdalavangenaval-d-aosta-valleyolasitehimejibigawaskvolloa" + + "bathsbc66xn--xkc2al3hye2axn--xkc2dl3a5ee0hangglidingxn--y9a3aqua" + + "riumishimatsunoxn--yer-znarvikosherbrookegawaxn--yfro4i67oxn--yg" + + "arden-p1axn--ygbi2ammxn--3pxu8konsulatrobeepilepsydneyxn--ystre-" + + "slidre-ujbieszczadygeyachimataikikonaioirasebastopologyeonggieht" + + "avuoatnagaivuotnagaokakyotambabia-goracleaningatlantabuseekloges" + + "t-mon-blogueurovisionikonantankarmoyxn--zbx025dxn--zf0ao64axn--z" + + "f0avxn--42c2d9axn--zfr164bievatmallorcadaquesakurainvestmentsaky" + + "otanabellunorddalimanowarudavoues3-fips-us-gov-west-1xperiaxz" // nodes is the list of nodes. 
Each node is represented as a uint32, which // encodes the node's children, wildcard bit and node type (as an index into @@ -480,8055 +489,8268 @@ const text = "biellaakesvuemieleccebieszczadygeyachimataipeigersundrangedalivo" // [15 bits] text index // [ 6 bits] text length var nodes = [...]uint32{ - 0x27a003, - 0x328304, - 0x272406, - 0x36e2c3, - 0x36e2c6, - 0x3a6306, - 0x260483, - 0x206e44, - 0x345647, - 0x272048, - 0x1a00882, - 0x30abc7, - 0x355a09, - 0x2eb6ca, - 0x2eb6cb, - 0x22f803, - 0x28f606, - 0x232a05, - 0x1e00702, - 0x215f44, - 0x236483, - 0x278b45, - 0x2208ac2, - 0x330fc3, - 0x26cf584, - 0x328c05, - 0x2a014c2, - 0x378a0e, - 0x24d0c3, - 0x37e606, - 0x37e60b, - 0x2e01c42, + 0x31a403, + 0x284944, + 0x2dd106, + 0x3706c3, + 0x3706c6, + 0x398706, + 0x3a8103, + 0x2fe244, + 0x38e987, + 0x2dcd48, + 0x1a05702, + 0x316e87, + 0x35c789, + 0x2abb0a, + 0x2abb0b, + 0x22f383, + 0x287506, + 0x232dc5, + 0x1e021c2, + 0x2161c4, + 0x238743, + 0x26fc45, + 0x2214902, + 0x347743, + 0x266f744, + 0x33ddc5, + 0x2a04702, + 0x376b4e, + 0x24c4c3, + 0x38ae46, + 0x2e00142, + 0x2dd287, + 0x236f46, + 0x3209282, + 0x229d83, + 0x24d9c4, + 0x325e86, + 0x26c588, + 0x2761c6, + 0x2011c4, + 0x3600242, + 0x3335c9, + 0x20a1c7, + 0x351e86, + 0x330c89, + 0x298308, + 0x26e904, + 0x241ec6, + 0x222a46, + 0x3a022c2, + 0x26480f, + 0x20948e, + 0x211d04, + 0x2c2b85, + 0x2fe145, + 0x39e189, + 0x23c409, + 0x349a87, + 0x20fa86, + 0x275a83, + 0x3e02a82, + 0x315503, + 0x34e24a, + 0x20f903, + 0x2af985, + 0x284202, + 0x284209, + 0x4200ec2, + 0x212484, + 0x2b9686, + 0x2f3645, + 0x3552c4, + 0x4a05644, + 0x2030c3, + 0x232344, + 0x4e00c02, + 0x268d44, + 0x52ef6c4, + 0x25ef4a, + 0x5603dc2, + 0x2ba587, + 0x2f3b08, + 0x6208142, + 0x311687, + 0x2bf204, + 0x2bf207, + 0x36e0c5, + 0x34ffc7, + 0x349846, + 0x24f3c4, + 0x38c105, + 0x29e447, + 0x72001c2, + 0x26e503, + 0x200b82, + 0x200b83, + 0x760de02, + 0x2102c5, + 0x7a02a42, + 0x350e04, + 0x2734c5, + 0x211c47, + 0x26bcce, + 0x2b9184, + 0x245544, + 0x202f03, + 0x281d49, + 0x31ee0b, + 0x2e9a88, + 0x379948, + 0x3a9908, + 0x22ae48, + 0x330aca, + 0x34fec7, + 0x318186, + 0x7e87002, + 0x35e203, + 0x367e43, + 0x36f4c4, + 0x3a8143, + 0x3250c3, + 0x1720b82, + 0x8202502, + 0x27a8c5, + 0x296206, + 0x2d1b84, + 0x375487, + 0x2e1886, + 0x331f84, + 0x39d3c7, + 0x203bc3, + 0x86c54c2, + 0x8b0f242, + 0x8e16742, + 0x216746, + 0x9200002, + 0x3523c5, + 0x3220c3, + 0x200604, + 0x2e8f84, + 0x2e8f85, + 0x206b43, + 0x978d2c3, + 0x9a0bb42, + 0x289e05, + 0x289e0b, + 0x31e686, + 0x20cb4b, + 0x221344, + 0x20d949, + 0x20e9c4, + 0x9e0ec02, + 0x20f143, + 0x20f403, + 0x16105c2, + 0x268183, + 0x2105ca, + 0xa20b382, + 0x216445, + 0x29224a, + 0x2d7744, + 0x283783, + 0x26cfc4, + 0x212543, + 0x212544, + 0x212547, + 0x2140c5, + 0x2147c5, + 0x214f46, + 0x2157c6, + 0x216a03, + 0x21ae88, + 0x210043, + 0xa601c02, + 0x243448, + 0x213ccb, + 0x220148, + 0x220d86, + 0x221847, + 0x225348, + 0xb642b42, + 0xbabf3c2, + 0x326788, + 0x35e4c7, + 0x246085, + 0x357f48, + 0x2bd408, + 0x34dd83, + 0x22a1c4, + 0x36f502, + 0xbe2bc82, + 0xc238482, + 0xca2e802, + 0x22e803, + 0xce01ec2, + 0x2fe203, + 0x2f1e84, + 0x201ec3, + 0x26e8c4, + 0x201ecb, + 0x213c03, + 0x2de946, + 0x239f84, + 0x29034e, + 0x371145, + 0x38af48, + 0x31ffc7, + 0x31ffca, + 0x229743, + 0x22f147, + 0x31efc5, + 0x22f8c4, + 0x265b06, + 0x265b07, + 0x2c11c4, + 0x2f7a87, + 0x313d44, + 0x26c004, + 0x26c006, + 0x387184, + 0x3510c6, + 0x203f83, + 0x35e288, + 0x203f88, + 0x245503, + 0x268143, + 0x399a04, + 0x39e003, + 0xd219f02, + 0xd6d6a42, + 0x20bac3, + 0x207146, + 0x241fc3, + 0x377cc4, + 0xdaee982, + 
0x3af843, + 0x3507c3, + 0x217a02, + 0xde04142, + 0x2c1946, + 0x233ac7, + 0x2e8945, + 0x37de04, + 0x28c505, + 0x268907, + 0x267805, + 0x2b8649, + 0x2cefc6, + 0x2daa88, + 0x2e8846, + 0xe21a1c2, + 0x32ca08, + 0x2f1c46, + 0x21a1c5, + 0x2f6d87, + 0x309984, + 0x309985, + 0x276384, + 0x276388, + 0xe60cc02, + 0xea09882, + 0x3103c6, + 0x3b8988, + 0x334385, + 0x337306, + 0x342f08, + 0x344a88, + 0xee09885, + 0xf2142c4, + 0x3b0787, + 0xf60e5c2, + 0xfa1b102, + 0x10a099c2, + 0x2b9785, + 0x2a2645, + 0x2fef86, + 0x3b2547, + 0x380747, + 0x112a84c3, + 0x2a84c7, + 0x31eb08, + 0x376ec9, + 0x376d07, + 0x384d07, + 0x3a8ec8, + 0x3ad4c6, + 0x22f3c6, + 0x23000c, + 0x23120a, + 0x231687, + 0x232c8b, + 0x233907, + 0x23390e, + 0x234cc4, + 0x235ac4, + 0x237a47, + 0x3690c7, + 0x23b206, + 0x23b207, + 0x23b4c7, + 0x19604682, + 0x23c886, + 0x23c88a, + 0x23ce8b, + 0x23dbc7, + 0x23ed45, + 0x23f083, + 0x240586, + 0x240587, + 0x38eb43, + 0x19a0c442, + 0x240f4a, + 0x19f5d882, + 0x1a2a5e02, + 0x1a643142, + 0x1aa2cd82, + 0x244bc5, + 0x245304, + 0x1b205742, + 0x268dc5, + 0x23d483, + 0x20eac5, + 0x22ad44, + 0x206804, + 0x314046, + 0x25e206, + 0x28a003, + 0x238284, + 0x3a6803, + 0x1b600dc2, + 0x391c04, + 0x391c06, + 0x3b0d05, + 0x205e06, + 0x2f6e88, + 0x266e84, + 0x27ed08, + 0x2426c5, + 0x228308, + 0x29ff86, + 0x237587, + 0x22e204, + 0x22e206, + 0x33f443, + 0x383ec3, + 0x223d08, + 0x318dc4, + 0x348747, + 0x23e6c6, + 0x2d6389, + 0x250348, + 0x26cd08, + 0x26d084, + 0x351443, + 0x225e02, + 0x1c60f882, + 0x1ca10e82, + 0x3a7403, + 0x1ce04a42, + 0x38eac4, + 0x2862c6, + 0x26e605, + 0x21ba03, + 0x232884, + 0x2b14c7, + 0x33da03, + 0x231a88, + 0x208545, + 0x36e803, + 0x273445, + 0x273584, + 0x2f6a86, + 0x209ec4, + 0x211346, + 0x211b86, + 0x3916c4, + 0x213b43, + 0x1d205882, + 0x247345, + 0x221c03, + 0x1d61b0c2, + 0x22ffc3, + 0x209bc5, + 0x232403, + 0x232409, + 0x1da05f02, + 0x1e205e42, + 0x2893c5, + 0x218786, + 0x2d1746, + 0x2b0a88, + 0x2b0a8b, + 0x20718b, + 0x2e8b45, + 0x2db145, + 0x2c6309, + 0x1600302, + 0x391888, + 0x20dc44, + 0x1ea007c2, + 0x3a7883, + 0x1f2c6086, + 0x20ae88, + 0x1f601402, + 0x2344c8, + 0x1fa2bb82, + 0x3b92ca, + 0x1feccc43, + 0x3ac1c6, + 0x3af408, + 0x3ac008, + 0x31d006, + 0x36bc07, + 0x264a07, + 0x3349ca, + 0x2d77c4, + 0x3474c4, + 0x35c1c9, + 0x20794385, + 0x209686, + 0x20e1c3, + 0x24a044, + 0x20a02644, + 0x202647, + 0x212fc7, + 0x22a584, + 0x285445, + 0x2ff048, + 0x366747, + 0x370f07, + 0x20e18342, + 0x327704, + 0x292b48, + 0x245bc4, + 0x247784, + 0x248085, + 0x2481c7, + 0x223589, + 0x248fc4, + 0x249709, + 0x249948, + 0x249dc4, + 0x249dc7, + 0x2124aa83, + 0x24ad47, + 0x1609d02, + 0x16ad202, + 0x24bec6, + 0x24c507, + 0x24cd44, + 0x24e6c7, + 0x24fa47, + 0x24fdc3, + 0x248902, + 0x229642, + 0x250a03, + 0x250a04, + 0x250a0b, + 0x379a48, + 0x256804, + 0x2523c5, + 0x254007, + 0x2555c5, + 0x2bc00a, + 0x256743, + 0x2160fc82, + 0x226e84, + 0x258d89, + 0x25c343, + 0x25c407, + 0x24a849, + 0x282688, + 0x204743, + 0x278fc7, + 0x279709, + 0x268ac3, + 0x2810c4, + 0x283c89, + 0x2880c6, + 0x289683, + 0x200182, + 0x21f983, + 0x3a8a87, + 0x21f985, + 0x379746, + 0x256e84, + 0x302e85, + 0x2e4403, + 0x216c46, + 0x20db42, + 0x395144, + 0x221402, + 0x221403, + 0x21a00782, + 0x247303, + 0x215c44, + 0x215c47, + 0x200906, + 0x202602, + 0x21e025c2, + 0x2dca84, + 0x22235e82, + 0x22600b02, + 0x2d4f84, + 0x2d4f85, + 0x2b6dc5, + 0x390e06, + 0x22a05d42, + 0x205d45, + 0x20cf05, + 0x20ae03, + 0x210986, + 0x2126c5, + 0x2166c2, + 0x343605, + 0x2166c4, + 0x221ec3, + 0x227343, + 0x22e0c642, + 0x2d4987, + 0x3669c4, + 0x3669c9, + 0x249f44, + 0x291d43, + 0x2f6609, 
+ 0x367508, + 0x232a24c4, + 0x2a24c6, + 0x21c303, + 0x247bc3, + 0x2e9dc3, + 0x236eb382, + 0x368cc2, + 0x23a05e82, + 0x323cc8, + 0x32a388, + 0x398e46, + 0x2e27c5, + 0x22efc5, + 0x352ec7, + 0x21d205, + 0x228782, + 0x23e38182, + 0x1603002, + 0x2416c8, + 0x32c945, + 0x2e3404, + 0x2ebac5, + 0x23f407, + 0x3207c4, + 0x240e42, + 0x24200582, + 0x338984, + 0x212cc7, + 0x28a2c7, + 0x34ff84, + 0x292203, + 0x245444, + 0x245448, + 0x22f706, + 0x26598a, + 0x223444, + 0x292588, + 0x288504, + 0x221946, + 0x294684, + 0x2b9a86, + 0x366c89, + 0x25da47, + 0x3375c3, + 0x24667e42, + 0x267e43, + 0x20ee02, + 0x24a11ec2, + 0x3085c6, + 0x365c88, + 0x2a4087, + 0x3a3f49, + 0x291c49, + 0x2a5045, + 0x2a6049, + 0x2a6805, + 0x2a6949, + 0x2a8005, + 0x2a9108, + 0x21fb84, + 0x24e890c7, + 0x2a9303, + 0x2a9307, + 0x3850c6, + 0x2a9b87, + 0x2a1085, + 0x2935c3, + 0x2521ae02, + 0x3b40c4, + 0x2562ce82, + 0x258203, + 0x25a17f42, + 0x36d586, + 0x2f3a85, + 0x2ac207, + 0x26cc43, + 0x325044, + 0x20e903, + 0x33e783, + 0x25e02bc2, + 0x266015c2, + 0x398804, + 0x2488c3, + 0x243c85, + 0x26a029c2, + 0x27206482, + 0x2b4506, + 0x318f04, + 0x2e3004, + 0x2e300a, + 0x27a01fc2, + 0x37204a, + 0x3756c8, + 0x27fb1384, + 0x20ad83, + 0x201fc3, + 0x3a9a49, + 0x217649, + 0x285246, + 0x28244183, + 0x3292c5, + 0x30180d, + 0x375886, + 0x3bac8b, + 0x28602e82, + 0x22c1c8, + 0x29206e82, + 0x29606fc2, + 0x2ae585, + 0x29a03942, + 0x258447, + 0x21c907, + 0x21e003, + 0x2306c8, + 0x29e06502, + 0x312684, + 0x212943, + 0x351d45, + 0x34db83, + 0x2f3546, + 0x205904, + 0x268103, + 0x2ae9c3, + 0x2a205fc2, + 0x2e8ac4, + 0x35f6c5, + 0x39f1c7, + 0x275643, + 0x2ad883, + 0x2ae083, + 0x160fec2, + 0x2ae143, + 0x2ae943, + 0x2a605102, + 0x282104, + 0x25e406, + 0x342643, + 0x2aec43, + 0x2aaafd42, + 0x2afd48, + 0x2b0004, + 0x36c246, + 0x2b0387, + 0x249c46, + 0x28e2c4, + 0x38600682, + 0x384f8b, + 0x2fb08e, + 0x21930f, + 0x2985c3, + 0x38ebbbc2, + 0x1600f42, + 0x39201582, + 0x28f403, + 0x2fdec3, + 0x233706, + 0x277c46, + 0x3afd87, + 0x3328c4, + 0x396188c2, + 0x39a08882, + 0x348345, + 0x2e6047, + 0x3b5746, + 0x39e27282, + 0x227284, + 0x2b3ac3, + 0x3a20be02, + 0x3a759ec3, + 0x2b4c44, + 0x2be409, + 0x16c3ac2, + 0x3aa03a82, + 0x203a85, + 0x3aec3d42, + 0x3b203202, + 0x346947, + 0x239689, + 0x35ca0b, + 0x2647c5, + 0x2c4849, + 0x2e8246, + 0x31e6c7, + 0x3b608484, + 0x3199c9, + 0x373487, + 0x20ab47, + 0x20a383, + 0x20a386, + 0x3b68c7, + 0x206a43, + 0x2565c6, + 0x3be02a02, + 0x3c232682, + 0x385803, + 0x324c45, 0x350f47, - 0x235446, - 0x3200a42, - 0x258943, - 0x258944, - 0x343086, - 0x23c1c8, - 0x287c86, - 0x2717c4, - 0x3600ec2, - 0x329b89, - 0x3a2ec7, - 0x2f7646, - 0x357689, - 0x295f08, - 0x2af504, - 0x3a0846, - 0x216b86, - 0x3a02a82, - 0x25af4f, - 0x34280e, - 0x211dc4, - 0x2bc7c5, - 0x2e4bc5, - 0x2ec7c9, - 0x23ecc9, - 0x340e47, - 0x212fc6, - 0x212f03, - 0x3e04a42, - 0x270703, - 0x22098a, - 0x20b0c3, - 0x2607c5, - 0x287302, - 0x287309, - 0x4201e02, - 0x208184, - 0x206986, - 0x237bc5, - 0x34ea44, - 0x4a86a04, - 0x201e03, - 0x231a44, - 0x4e02902, - 0x328044, - 0x31ea44, - 0x22350a, - 0x52009c2, - 0x2d2687, - 0x238088, - 0x5a08f02, - 0x321407, - 0x2b7744, - 0x2b7747, - 0x385185, - 0x36ccc7, - 0x340c06, - 0x21cec4, - 0x357985, - 0x299d07, - 0x6a01cc2, - 0x2af103, - 0x213402, - 0x375ac3, - 0x6e136c2, - 0x283905, - 0x7204a02, - 0x329244, - 0x27eec5, - 0x211d07, - 0x3731ce, - 0x2e3e04, - 0x245c04, - 0x208143, - 0x2ce4c9, - 0x307ecb, - 0x30f508, - 0x31a288, - 0x31e088, - 0x323148, - 0x3574ca, - 0x36cbc7, - 0x2272c6, - 0x769ed02, - 0x375043, - 0x37fa83, - 0x38d3c4, - 0x260d03, - 0x2604c3, - 0x1711602, - 
0x7a070c2, - 0x24a0c5, - 0x28ecc6, - 0x2c9e84, - 0x396e07, - 0x32cc06, - 0x341644, - 0x3a9e87, - 0x2070c3, - 0x7ebefc2, - 0x8305b42, - 0x8619ac2, - 0x219ac6, - 0x8a00002, - 0x37dd45, - 0x312b43, - 0x204384, - 0x2db0c4, - 0x2db0c5, - 0x2075c3, - 0x8f27883, - 0x920a882, - 0x28a905, - 0x28a90b, - 0x22be86, - 0x20cd4b, - 0x276844, - 0x20d309, - 0x20f104, - 0x960f602, - 0x210943, - 0x2125c3, - 0x1612742, - 0x245dc3, - 0x21274a, - 0x9a12bc2, - 0x2161c5, - 0x290fca, - 0x2cd084, - 0x213a83, - 0x213f44, - 0x2159c3, - 0x2159c4, - 0x2159c7, - 0x216f85, - 0x217785, - 0x218e86, - 0x219d86, - 0x21a783, - 0x21e908, - 0x258283, - 0x9e03482, - 0x21f388, - 0x21474b, - 0x222208, - 0x222986, - 0x223907, - 0x227e88, - 0xa63a242, - 0xaa715c2, - 0x2e3688, - 0x29f847, - 0x242c85, - 0x242c88, - 0x343988, - 0x383b83, - 0x22a7c4, - 0x38d402, - 0xae2cb02, - 0xb2519c2, - 0xba2ce42, - 0x22ce43, - 0xbe01482, - 0x206e03, - 0x201484, - 0x21a903, - 0x2af4c4, - 0x25fc4b, - 0x214683, - 0x2d3946, - 0x223384, - 0x29e18e, - 0x341045, - 0x265808, - 0x2246c7, - 0x2246ca, - 0x22fd03, - 0x275647, - 0x308085, - 0x22fd04, - 0x22fd06, - 0x22fd07, - 0x2c62c4, - 0x373507, - 0x2028c4, - 0x2093c4, - 0x2093c6, - 0x2dc104, - 0x221c46, - 0x2138c3, - 0x226b48, - 0x303708, - 0x245bc3, - 0x245d83, - 0x395544, - 0x39b103, - 0xc200482, - 0xc707ac2, - 0x2004c3, - 0x208406, - 0x381043, - 0x228584, - 0xca19942, - 0x2d7f03, + 0x250086, + 0x21f905, + 0x277d44, + 0x2c9fc5, + 0x2f2684, + 0x3c6040c2, + 0x331107, + 0x2dbd44, + 0x217544, + 0x21754d, + 0x257509, + 0x3a4448, + 0x253944, + 0x3abc45, + 0x206447, + 0x2144c4, + 0x2e1947, + 0x21c485, + 0x3caa4604, + 0x2d92c5, + 0x25b004, + 0x24bb86, + 0x3b2345, + 0x3ce250c2, + 0x283844, + 0x283845, + 0x36fa46, + 0x20c3c5, + 0x30c304, + 0x2c5dc3, + 0x2053c6, + 0x358505, + 0x2bb485, + 0x3b2444, + 0x2234c3, + 0x2234cc, + 0x3d288a02, + 0x3d6010c2, + 0x3da00282, + 0x206343, + 0x206344, + 0x3de04bc2, + 0x2f9688, + 0x379805, + 0x235684, + 0x23b086, + 0x3e201f42, + 0x3e609782, + 0x3ea00e82, + 0x306b85, + 0x391586, + 0x211084, + 0x3263c6, + 0x2ba346, 0x219943, - 0x21b682, - 0xce008c2, - 0x2bb486, - 0x233907, - 0x2e9005, - 0x344c04, - 0x2a1c45, - 0x2021c7, - 0x26e685, - 0x2aff89, - 0x2c75c6, - 0x2d0108, - 0x2e8f06, - 0xd2092c2, - 0x23bd88, - 0x300a86, - 0x20dc05, - 0x3af1c7, - 0x303604, - 0x303605, - 0x287e44, - 0x287e48, - 0xd60a1c2, - 0xda036c2, - 0x32f506, - 0x3160c8, - 0x338e05, - 0x33a086, - 0x33c2c8, - 0x35ff48, - 0xdec8b85, - 0x2036c4, - 0x324407, - 0xe20d9c2, - 0xe61eb82, - 0xfa06a82, - 0x3597c5, - 0x2a22c5, - 0x3753c6, - 0x317a47, - 0x22aac7, - 0x1022bf83, - 0x2a5b07, - 0x2d4808, - 0x390509, - 0x378bc7, - 0x3a7507, - 0x22e108, - 0x22e906, - 0x22f846, - 0x23020c, - 0x230d8a, - 0x231247, - 0x2328cb, - 0x233747, - 0x23374e, - 0x234744, - 0x234a44, - 0x238e47, - 0x25a647, - 0x23d186, - 0x23d187, - 0x23dd87, - 0x13208942, - 0x23f586, - 0x23f58a, - 0x23f80b, - 0x240bc7, - 0x241585, - 0x2418c3, - 0x241dc6, - 0x241dc7, - 0x23ee83, - 0x1362ea42, - 0x24268a, - 0x13b56b42, - 0x13ea4c42, - 0x14244142, - 0x14635542, - 0x244ec5, - 0x2459c4, - 0x14e00682, - 0x3280c5, + 0x3ef0de0a, + 0x247b05, + 0x2c8e83, + 0x223186, + 0x300fc9, + 0x223187, + 0x297788, + 0x2981c9, + 0x224348, + 0x229486, + 0x20bf03, + 0x3f2a8542, + 0x385683, + 0x385689, + 0x332448, + 0x3f649a02, + 0x3fa02342, + 0x227f83, + 0x2da905, + 0x251ec4, + 0x2c0909, + 0x22cb84, + 0x266348, + 0x202343, + 0x202344, 0x278b03, - 0x315b45, - 0x2124c4, - 0x293906, - 0x202bc6, - 0x28ab03, - 0x3654c4, - 0x324ec3, - 0x15201582, - 0x208d04, - 0x324986, - 0x208d05, - 0x258006, - 
0x3af2c8, - 0x21ecc4, - 0x236248, - 0x2e01c5, - 0x32bcc8, - 0x2dce46, - 0x2b4247, - 0x22f244, - 0x22f246, - 0x323fc3, - 0x385503, - 0x2bfd08, - 0x30d944, - 0x341787, - 0x248cc6, - 0x30af09, - 0x35c488, - 0x330488, - 0x24f8c4, - 0x3a2543, - 0x206c82, - 0x1560b042, - 0x15a05382, - 0x3abf43, - 0x15e12c42, - 0x345784, - 0x2af205, - 0x29d8c3, - 0x2306c4, - 0x302947, - 0x344943, - 0x2465c8, - 0x205f85, - 0x308144, - 0x36bb43, - 0x27ee45, - 0x27ef84, - 0x2090c6, - 0x20c244, - 0x20d086, - 0x211c46, - 0x261504, - 0x2199c3, - 0x162b1a02, - 0x350e05, - 0x223cc3, - 0x16600442, - 0x2bbf85, - 0x231b03, - 0x231b09, - 0x16a04142, - 0x172110c2, - 0x329605, - 0x21cd46, - 0x34c707, - 0x2c9a46, - 0x2b9908, - 0x2b990b, - 0x20844b, - 0x2e9205, - 0x2d07c5, - 0x2c0a89, - 0x1600bc2, - 0x2616c8, - 0x20cf84, - 0x17a00202, - 0x25f883, - 0x1825a806, - 0x380ec8, - 0x18602e42, - 0x226088, - 0x18a08b02, - 0x27698a, + 0x2187c8, + 0x217487, + 0x4020b102, + 0x274082, + 0x351905, + 0x266689, + 0x209703, + 0x27b184, + 0x329284, + 0x2064c3, + 0x27c3ca, + 0x40752bc2, + 0x40a83802, + 0x2c5443, + 0x3739c3, + 0x1602302, + 0x38ac03, + 0x40e0f242, + 0x4120ec42, + 0x41610444, + 0x210446, + 0x383b06, + 0x26ad44, + 0x36c643, + 0x38bcc3, + 0x226883, + 0x23d206, + 0x2cb8c5, + 0x2c5a07, + 0x31e589, + 0x2ca645, + 0x2cb806, + 0x2cbd88, + 0x2cbf86, + 0x236a04, + 0x29944b, + 0x2ceac3, + 0x2ceac5, + 0x2cec08, + 0x228502, + 0x346c42, + 0x41a44c42, + 0x41e0e602, + 0x218903, + 0x422675c2, + 0x2675c3, + 0x2cef04, + 0x2cf5c3, + 0x42a115c2, + 0x42ed43c6, + 0x2a7306, + 0x43207902, + 0x4360f442, + 0x43a27382, + 0x43e02c82, + 0x4422dd02, + 0x44602d02, + 0x234703, + 0x390685, + 0x319606, + 0x44a11cc4, + 0x3b0b0a, + 0x32fe86, + 0x2e8d84, + 0x281d03, + 0x45604642, + 0x200c82, + 0x25fd03, + 0x45a05503, + 0x2c7b87, + 0x3b2247, + 0x47250b07, + 0x312d87, + 0x227b03, + 0x227b0a, + 0x236b84, + 0x23e5c4, + 0x23e5ca, + 0x213f05, + 0x47609642, + 0x24e683, + 0x47a008c2, + 0x21c2c3, + 0x267e03, + 0x48203342, + 0x2a8444, + 0x21de84, + 0x3b9505, + 0x305005, + 0x2e1ac6, + 0x2e1e46, + 0x48608442, + 0x48a033c2, + 0x3185c5, + 0x2a7012, + 0x2511c6, + 0x220803, + 0x30a746, + 0x220805, + 0x1610602, + 0x50e120c2, + 0x353e83, + 0x2120c3, + 0x2441c3, + 0x512023c2, + 0x376e43, + 0x5160b482, + 0x210483, + 0x282148, + 0x25e983, + 0x25e986, + 0x3a2987, + 0x306806, + 0x30680b, + 0x2e8cc7, + 0x3b3ec4, + 0x51e04ec2, + 0x379685, + 0x522054c3, + 0x2a6e03, + 0x326c05, + 0x329983, + 0x52729986, + 0x391a0a, + 0x26a9c3, + 0x204584, + 0x3b88c6, + 0x21a5c6, + 0x52a00983, + 0x324f07, + 0x285147, + 0x29b0c5, + 0x2318c6, + 0x224a83, + 0x54a10bc3, + 0x54e056c2, + 0x328144, + 0x22a2cc, + 0x236149, + 0x2414c7, + 0x249245, + 0x262a84, + 0x273cc8, + 0x278305, + 0x55284a05, + 0x28c609, + 0x351f43, + 0x2a5d84, + 0x556013c2, + 0x2013c3, + 0x55a94142, + 0x2a4386, + 0x160f982, + 0x55e06e02, + 0x306a88, + 0x2be603, + 0x2d9207, + 0x2e4d05, + 0x2dd685, + 0x32840b, + 0x2dd686, + 0x328606, + 0x2ffac6, + 0x262c84, + 0x3042c6, + 0x2e3508, + 0x23a043, + 0x250dc3, + 0x250dc4, + 0x2e4484, + 0x2e4a07, + 0x2e5ec5, + 0x562e6002, + 0x5660ba02, + 0x20ba05, + 0x2e83c4, + 0x2e83cb, + 0x2e8e88, + 0x228f44, + 0x2272c2, + 0x56e28ec2, + 0x23b903, + 0x2e9344, + 0x2e9605, + 0x2ea047, + 0x2eb604, + 0x2e8b84, + 0x57201302, + 0x360cc9, + 0x2ec405, + 0x264a85, + 0x2ecf85, + 0x57601303, + 0x2ee0c4, + 0x2ee0cb, + 0x2ee644, + 0x2ef3cb, + 0x2ef7c5, + 0x21944a, + 0x2f0048, + 0x2f024a, + 0x2f0ac3, + 0x2f0aca, + 0x57a01742, + 0x57e2d4c2, + 0x21aa03, + 0x582f1bc2, + 0x2f1bc3, + 0x5875c402, + 0x58b22842, + 0x2f2504, + 0x21afc6, + 0x326105, 
+ 0x2f4503, + 0x31a9c6, + 0x204405, + 0x25e704, + 0x58e05ec2, + 0x2c9244, + 0x2c5f8a, + 0x22d787, + 0x2f38c6, + 0x380b07, + 0x22a403, + 0x283e48, + 0x37f48b, + 0x3736c5, + 0x333ec5, + 0x333ec6, + 0x390884, + 0x3aa248, + 0x222943, + 0x222944, + 0x222947, + 0x38e446, + 0x352686, + 0x29018a, + 0x246604, + 0x24660a, + 0x59282846, + 0x282847, + 0x252447, + 0x270844, + 0x270849, + 0x25e0c5, + 0x235e0b, + 0x2e81c3, + 0x211503, + 0x22f003, + 0x22fac4, + 0x59600482, + 0x25d4c6, + 0x293345, + 0x30a985, + 0x24f6c6, + 0x3395c4, + 0x59a02782, + 0x23f0c4, + 0x59e01c42, + 0x2b9f05, + 0x21ad84, + 0x21bec3, + 0x5a612102, + 0x212103, + 0x23ba46, + 0x5aa03082, + 0x27f488, + 0x223004, + 0x223006, + 0x374246, + 0x2540c4, + 0x205345, + 0x2141c8, + 0x216547, + 0x219687, + 0x21968f, + 0x292a46, + 0x22cf03, + 0x22cf04, + 0x310504, + 0x20d003, + 0x221a84, + 0x240944, + 0x5ae42b02, + 0x289d43, + 0x242b03, + 0x5b209842, + 0x229f83, + 0x38eb83, + 0x21484a, + 0x358107, + 0x2efc0c, + 0x2efec6, + 0x30a146, + 0x248547, + 0x5b64c687, + 0x24f809, + 0x243584, + 0x24fbc4, + 0x5ba18942, + 0x5be027c2, + 0x290546, + 0x324d04, + 0x2d6bc6, + 0x2a5148, + 0x3b8dc4, + 0x258486, + 0x2d1705, + 0x265c88, + 0x207383, + 0x273705, + 0x273e83, + 0x264b83, + 0x264b84, + 0x2759c3, + 0x5c2ec082, + 0x5c602e02, + 0x2e8089, + 0x278205, + 0x278404, + 0x27a9c5, + 0x20dd44, + 0x2e0d07, + 0x343bc5, + 0x250cc4, + 0x250cc8, + 0x2d5086, + 0x2d7984, + 0x2d8e88, + 0x2dbb87, + 0x5ca03902, + 0x2e36c4, + 0x20d0c4, + 0x20ad47, + 0x5ce2b804, + 0x2ccf42, + 0x5d201102, + 0x201543, + 0x203984, + 0x2aa283, + 0x374e05, + 0x5d61e182, + 0x2eb285, + 0x202c42, + 0x34d5c5, + 0x365e45, + 0x5da00c42, + 0x350744, + 0x5de00d02, + 0x2387c6, + 0x29a146, + 0x2667c8, + 0x2bfa08, + 0x36d504, + 0x36d6c5, + 0x3610c9, + 0x2db1c4, + 0x3919c4, + 0x205183, + 0x5e222705, + 0x2c3b87, + 0x2a2744, + 0x341e8d, + 0x361782, + 0x361783, + 0x364503, + 0x5e600802, + 0x388305, + 0x25f9c7, + 0x205b44, + 0x312e47, + 0x2983c9, + 0x2c60c9, + 0x2519c7, + 0x273b03, + 0x273b08, + 0x2ed249, + 0x24e187, + 0x373605, + 0x39e086, + 0x39fb86, + 0x3a3c05, + 0x257605, + 0x5ea02d82, + 0x36ce45, + 0x2b2908, + 0x2c1706, + 0x5eeb7487, + 0x2efa04, + 0x2aa987, + 0x2f62c6, + 0x5f230982, + 0x36f746, + 0x2f83ca, + 0x2f8e85, + 0x5f6de402, + 0x5fa36542, + 0x3b6c06, + 0x2a1e88, + 0x5fe8a487, + 0x60234e42, + 0x2255c3, + 0x311d86, + 0x225044, + 0x3a2846, + 0x390b06, + 0x26ff0a, + 0x331c05, + 0x367ec6, + 0x3759c3, + 0x3759c4, + 0x207102, + 0x309943, + 0x60606382, + 0x2f0f83, + 0x3722c4, + 0x2a1fc4, + 0x2a1fca, + 0x229543, + 0x276288, + 0x22954a, + 0x27b447, + 0x2fcd86, + 0x238684, + 0x290bc2, + 0x2a2e82, + 0x60a04002, + 0x245403, + 0x252207, + 0x31ac87, + 0x2848c4, + 0x26f8c7, + 0x2ea146, + 0x216847, + 0x35e604, + 0x242a05, + 0x2b7985, + 0x60e0fe82, + 0x20fe86, + 0x218283, + 0x220502, + 0x220506, + 0x61203e02, + 0x6160b0c2, + 0x3ba785, + 0x61a21c82, + 0x61e03b42, + 0x33b5c5, + 0x393105, + 0x367f85, + 0x267303, + 0x286385, + 0x2dd747, + 0x307bc5, + 0x306185, + 0x38b044, + 0x3204c6, + 0x23e804, + 0x62201442, + 0x62f630c5, + 0x2ebe07, + 0x2d6dc8, + 0x25fe86, + 0x25fe8d, + 0x260709, + 0x260712, + 0x32f345, + 0x3339c3, + 0x6320a9c2, + 0x309444, + 0x375903, + 0x360fc5, + 0x2fa085, + 0x63612982, + 0x36e843, + 0x63a50b82, + 0x642bf542, + 0x6460fb42, + 0x353805, + 0x37ac43, + 0x37a4c8, + 0x64a07842, + 0x64e000c2, + 0x2a8406, + 0x33b80a, + 0x21bf03, + 0x20c343, + 0x2ee3c3, + 0x65a02dc2, + 0x73e35482, + 0x74601c82, + 0x201682, + 0x36f549, + 0x2c2f04, + 0x2309c8, + 0x74af4542, + 0x74e08602, + 0x2ef605, + 0x2330c8, + 0x282288, + 
0x2f858c, + 0x22d543, + 0x25a9c2, + 0x75201f82, + 0x2caac6, + 0x2fdc05, + 0x26d343, + 0x23cc46, + 0x2fdd46, + 0x201f83, + 0x2ff883, + 0x300786, + 0x3013c4, + 0x295586, + 0x2cec85, + 0x30164a, + 0x2eebc4, + 0x302304, + 0x30370a, + 0x7566b082, + 0x337745, + 0x30478a, + 0x305285, + 0x305b44, + 0x305c46, + 0x305dc4, + 0x218dc6, + 0x75a6dac2, + 0x2f3206, + 0x2f3dc5, + 0x3ab6c7, + 0x200206, + 0x248744, + 0x2d5e07, + 0x30dd46, + 0x2b8a45, + 0x381947, + 0x39eb47, + 0x39eb4e, + 0x25ed06, + 0x2e1805, + 0x27dec7, + 0x282b43, + 0x3b2f87, + 0x20f5c5, + 0x212144, + 0x212f82, + 0x3addc7, + 0x332944, + 0x377404, + 0x273f0b, + 0x21d5c3, + 0x2b6987, + 0x21d5c4, + 0x2cc0c7, 0x228bc3, - 0x3b23c6, - 0x3997c8, - 0x204188, - 0x334fc6, - 0x36a0c7, - 0x25b147, - 0x21670a, - 0x2cd104, - 0x33ed84, - 0x3554c9, - 0x38ff05, - 0x342a06, - 0x20b203, - 0x249604, - 0x2143c4, - 0x24fd07, - 0x22d547, - 0x26b1c4, - 0x216645, - 0x375488, - 0x3617c7, - 0x364607, - 0x18e09342, - 0x2e3cc4, - 0x2946c8, - 0x3859c4, - 0x246a04, - 0x246e05, - 0x246f47, - 0x210c49, - 0x247d04, - 0x248a09, - 0x248fc8, - 0x249384, - 0x249387, - 0x249b83, - 0x24a707, - 0x1649242, - 0x17a6b82, - 0x24b646, - 0x24c287, - 0x24c884, - 0x24d607, - 0x24e647, - 0x24ed88, - 0x24f503, - 0x23d6c2, - 0x202442, - 0x251003, - 0x251004, - 0x25100b, - 0x31a388, - 0x257f44, - 0x251d05, - 0x253c87, - 0x2569c5, - 0x36bf0a, - 0x257e83, - 0x1920db02, - 0x258184, - 0x25a409, - 0x25f283, - 0x25f347, - 0x36b309, - 0x376348, - 0x208a03, - 0x27dd47, - 0x27e489, - 0x2840c3, - 0x285e04, - 0x286bc9, - 0x289286, - 0x28a343, - 0x201c82, - 0x244d43, - 0x39c247, - 0x37de85, - 0x35a206, - 0x24a304, - 0x2e6285, - 0x220943, - 0x21a9c6, - 0x20d502, - 0x390ec4, - 0x225902, - 0x2daa43, - 0x196007c2, - 0x247643, - 0x21a204, - 0x21a207, - 0x204686, - 0x24cdc2, - 0x19a53a02, - 0x3af4c4, - 0x19e39ec2, - 0x1a202842, - 0x31aac4, - 0x31aac5, - 0x28de45, - 0x2c2a86, - 0x1a603ac2, - 0x308d05, - 0x3a5245, - 0x29a0c3, - 0x204e86, - 0x212b05, - 0x219a42, - 0x339cc5, - 0x219a44, - 0x21ec03, - 0x21ee43, - 0x1aa0be82, - 0x2f1f87, - 0x361a44, - 0x361a49, - 0x249504, - 0x23a103, - 0x34a009, - 0x350cc8, - 0x2a2144, - 0x2a2146, - 0x2a4543, - 0x214dc3, - 0x22a0c4, - 0x250cc3, - 0x1aee0682, - 0x301c42, - 0x1b210702, - 0x314a48, - 0x3801c8, - 0x394986, - 0x245545, - 0x229b85, - 0x210705, - 0x224242, - 0x1b6931c2, - 0x1633602, - 0x390088, - 0x23bcc5, - 0x3052c4, - 0x2e0105, - 0x32b887, - 0x257c84, - 0x23d4c2, - 0x1ba03e82, - 0x30d204, - 0x2140c7, - 0x39ee87, - 0x36cc84, - 0x290f83, - 0x245b04, - 0x245b08, - 0x22fb86, - 0x22fb8a, - 0x210b04, - 0x291308, - 0x24f004, - 0x223a06, - 0x293184, - 0x359ac6, - 0x341dc9, - 0x266587, - 0x235903, - 0x1be10442, - 0x26ecc3, - 0x20f802, - 0x1c217e82, - 0x2df3c6, - 0x363888, - 0x2a3687, - 0x3a4389, - 0x23a009, - 0x2a3f45, - 0x2a50c9, - 0x2a5f45, - 0x2a6a09, - 0x2a8105, - 0x288484, - 0x288487, - 0x2998c3, - 0x2a8e07, - 0x3a78c6, - 0x2a9607, - 0x2a0f05, - 0x2ab543, - 0x1c630842, - 0x392904, - 0x1ca29982, - 0x25a043, - 0x1ce134c2, - 0x2e6cc6, - 0x238005, - 0x2acc47, - 0x335583, - 0x260c84, - 0x203bc3, - 0x2e33c3, - 0x1d20b542, - 0x1da00042, - 0x3a6404, - 0x23d683, - 0x397285, - 0x2aafc5, - 0x1de04982, - 0x1e600942, - 0x27e086, - 0x20ad44, - 0x30da84, - 0x30da8a, - 0x1ee02002, - 0x2f920a, - 0x36f688, - 0x1f2023c4, - 0x215ac3, - 0x24bc43, - 0x31e1c9, - 0x22dec9, - 0x302a46, - 0x1f602243, - 0x2d9885, - 0x2f9e4d, - 0x207286, - 0x21134b, - 0x1fa016c2, - 0x34c188, - 0x1fe1ea02, - 0x20208282, - 0x370545, - 0x20603fc2, - 0x269947, - 0x2a6507, - 0x21db43, - 0x258c48, - 0x20a07382, - 0x282f04, 
- 0x212d83, - 0x34bb85, - 0x383983, - 0x237ac6, - 0x2eae04, - 0x245d43, - 0x26f203, - 0x20e0abc2, - 0x2e9184, - 0x353985, - 0x367f07, - 0x27bbc3, - 0x2ad443, - 0x2adc43, - 0x1622602, - 0x2add03, - 0x2adf83, - 0x21202dc2, - 0x2ce884, - 0x27f1c6, - 0x20fa03, - 0x2ae303, - 0x216afcc2, - 0x2afcc8, - 0x2b0784, - 0x2402c6, - 0x2b0bc7, - 0x218fc6, - 0x338f04, - 0x2f2001c2, - 0x3a778b, - 0x2f29ce, - 0x21d4cf, - 0x234343, - 0x2fa44d02, - 0x1605482, - 0x2fe04b42, - 0x227dc3, - 0x233343, - 0x238c46, - 0x2f0ac6, - 0x2e6587, - 0x379084, - 0x3028da82, - 0x306062c2, - 0x2f9b45, - 0x2ee007, - 0x2f15c6, - 0x30a6cf82, - 0x26cf84, + 0x33678d, + 0x388b48, + 0x21d044, + 0x250bc5, + 0x307d05, + 0x308143, + 0x75e22f02, + 0x309903, + 0x309fc3, + 0x210004, + 0x279805, + 0x218307, + 0x375a46, 0x372003, - 0x30e0ac82, - 0x352e03, - 0x3910c4, - 0x2b6949, - 0x16bd702, - 0x31235c82, - 0x2dac86, - 0x26b485, - 0x31645cc2, - 0x31a00102, - 0x33e107, - 0x2033c9, - 0x355c8b, - 0x25af05, - 0x3748c9, - 0x2be006, - 0x22bec7, - 0x2060c4, - 0x2cf089, - 0x35dbc7, - 0x2b7ec7, - 0x20ae83, - 0x20ae86, - 0x2dd5c7, - 0x2387c3, - 0x27cf86, - 0x31e049c2, - 0x32231d82, - 0x21bfc3, - 0x260885, - 0x221ac7, - 0x343c86, - 0x37de05, - 0x376b04, - 0x2de1c5, - 0x2e9b84, - 0x32600f02, - 0x321d87, - 0x2e2904, - 0x22ddc4, - 0x22ddcd, - 0x24c649, - 0x2e04c8, - 0x22bb04, - 0x323605, - 0x2633c7, - 0x2ceb44, - 0x32ccc7, - 0x35ab05, - 0x32b9a984, - 0x2ce185, - 0x25df44, - 0x374206, - 0x317845, - 0x32e34802, - 0x213b44, - 0x213b45, - 0x38d946, - 0x37df45, - 0x2548c4, - 0x2e7043, - 0x380406, - 0x20efc5, - 0x210e45, - 0x317944, - 0x210b83, - 0x210b8c, - 0x33289bc2, - 0x33605602, - 0x33a17382, - 0x39a883, - 0x39a884, - 0x33e03702, - 0x2fd288, - 0x35a2c5, - 0x37f144, - 0x29fe46, - 0x34233a82, - 0x3461ca02, - 0x34a00982, - 0x2b5dc5, - 0x2613c6, - 0x24fc44, - 0x3435c6, - 0x2d2446, - 0x207a03, - 0x34f2afca, - 0x23d9c5, - 0x2f3506, - 0x2f3509, - 0x367547, - 0x291748, - 0x295dc9, - 0x218388, - 0x322e86, - 0x23db83, - 0x35206a42, - 0x386e83, - 0x386e89, - 0x3442c8, - 0x3560ad82, - 0x35a0f842, - 0x232003, - 0x2cff85, - 0x251804, - 0x2c1a09, - 0x2aa9c4, - 0x2b09c8, - 0x20f843, - 0x2600c4, - 0x329d03, - 0x22dd07, - 0x35e3c442, - 0x25a2c2, - 0x22af85, - 0x26d1c9, - 0x220ec3, - 0x27f804, - 0x2d9844, - 0x263443, - 0x28088a, - 0x3636f542, - 0x36613b02, - 0x2bef43, - 0x3721c3, - 0x1660082, - 0x260f43, - 0x36a50702, - 0x3891c4, - 0x36e02ac2, - 0x3730db04, - 0x34a586, - 0x27e2c4, - 0x2406c3, - 0x284a83, - 0x21c443, - 0x23fb86, - 0x2c4d05, - 0x2bf7c7, - 0x22bd89, - 0x2c37c5, - 0x2c4c46, - 0x2c5248, - 0x2c5446, - 0x256604, - 0x29920b, - 0x2c70c3, - 0x2c70c5, - 0x2c7208, - 0x21e782, - 0x33e402, - 0x37629382, - 0x37a03642, - 0x2632c3, - 0x37e08b82, - 0x26e443, - 0x2c7504, - 0x2c83c3, - 0x38607682, - 0x2c9f8b, - 0x38acc586, - 0x2ef8c6, - 0x2ccbc8, - 0x38ecae42, - 0x39212602, - 0x3961ee82, - 0x39a0b602, - 0x39e02642, - 0x20264b, - 0x3a201842, - 0x2262c3, - 0x316c05, - 0x321ac6, - 0x3a611004, - 0x20b687, - 0x32478a, - 0x31ec86, - 0x2e9444, - 0x262ec3, - 0x3b20dbc2, - 0x202b42, - 0x2570c3, - 0x3b64c083, - 0x2635c7, - 0x317747, - 0x3ca51107, - 0x228b87, - 0x214983, - 0x2248ca, - 0x214984, - 0x248bc4, - 0x248bca, - 0x24f205, - 0x3ce02402, - 0x24e143, - 0x3d200dc2, - 0x20f543, - 0x26ec83, - 0x3da01742, - 0x2a5a84, - 0x220684, - 0x201745, - 0x2d8385, - 0x2368c6, - 0x236c46, - 0x3de09142, - 0x3e201042, - 0x3360c5, - 0x2ef5d2, - 0x24ca86, - 0x226a03, - 0x33b046, - 0x2ff645, - 0x160b282, - 0x4660d682, - 0x2efd43, - 0x310b83, - 0x2dbcc3, - 0x46a07902, - 0x378d03, - 0x46e12f42, - 0x2a3443, - 
0x2ce8c8, - 0x222f43, - 0x222f46, - 0x313c87, - 0x210586, - 0x21058b, - 0x2e9387, - 0x392704, - 0x47602102, - 0x3a0745, - 0x202b03, - 0x22cd43, - 0x3188c3, - 0x3188c6, - 0x2d088a, - 0x273f43, - 0x235304, - 0x316006, - 0x20e006, - 0x47a04703, - 0x260b47, - 0x37ea8d, - 0x38cd87, - 0x298f45, - 0x246406, - 0x20f003, - 0x492050c3, - 0x49609242, - 0x3283c4, - 0x22d28c, - 0x32bf09, - 0x23a787, - 0x249885, - 0x268144, - 0x272748, - 0x279205, - 0x286a85, - 0x28d409, - 0x2f7703, - 0x2f7704, - 0x2a4bc4, - 0x49a00ac2, - 0x265883, - 0x49e92c42, - 0x2a1d46, - 0x160b142, - 0x4a299882, - 0x2b5cc8, - 0x2ce0c7, - 0x299885, - 0x2de9cb, - 0x2d1dc6, - 0x2debc6, - 0x2f8346, - 0x224cc4, - 0x2fba46, - 0x2d51c8, - 0x232243, - 0x247403, - 0x247404, - 0x2d6c84, - 0x2d7007, - 0x2d8185, - 0x4a6d82c2, - 0x4aa0a742, - 0x20a745, - 0x29cd44, - 0x2d9b8b, - 0x2dafc8, - 0x2db6c4, - 0x26cfc2, - 0x4b2aff02, - 0x2aff03, - 0x2dbb04, - 0x2dd185, - 0x22a547, - 0x2dfc44, - 0x2e9244, - 0x4b608582, - 0x35d1c9, - 0x2e0b05, - 0x25b1c5, - 0x2e1685, - 0x4ba1d603, - 0x2e24c4, - 0x2e24cb, - 0x2e4144, - 0x2e45cb, - 0x2e6745, - 0x21d60a, - 0x2e7108, - 0x2e730a, - 0x2e7583, - 0x2e758a, - 0x4be297c2, - 0x4c242242, - 0x263c83, - 0x4c6e8e82, - 0x2e8e83, - 0x4caea942, - 0x4cf132c2, - 0x2e9a04, - 0x21ea46, - 0x343305, - 0x2ea303, - 0x27a5c6, - 0x22b644, - 0x4d203942, - 0x2b6e84, - 0x2c070a, - 0x387f47, - 0x237e46, - 0x2d0d47, - 0x22d3c3, - 0x24f088, - 0x25ab8b, - 0x302b45, - 0x2b7285, - 0x2b7286, - 0x217c04, - 0x323908, - 0x203943, - 0x216a84, - 0x216a87, - 0x342fc6, - 0x31f2c6, - 0x29dfca, - 0x246104, - 0x24610a, - 0x322406, - 0x322407, - 0x251d87, - 0x276184, - 0x276189, - 0x266c05, - 0x239e4b, - 0x279443, - 0x20d243, - 0x229bc3, - 0x384904, - 0x4d6034c2, - 0x25b486, - 0x2ab2c5, - 0x2b2645, - 0x223e46, - 0x248684, - 0x4da00c02, - 0x223f44, - 0x4de0ed42, - 0x2307c4, - 0x225703, - 0x4e301102, - 0x308803, - 0x258606, - 0x4e602942, - 0x2d30c8, - 0x3abc84, - 0x3abc86, - 0x31abc6, - 0x253d44, - 0x380385, - 0x2035c8, - 0x204d07, - 0x20c307, - 0x20c30f, - 0x2945c6, - 0x220bc3, - 0x220bc4, - 0x228444, - 0x233083, - 0x223b44, - 0x22fe84, - 0x4ea2b382, - 0x28a843, - 0x23a203, - 0x4ee03682, - 0x253503, - 0x345843, - 0x21780a, - 0x29fa47, - 0x23b08c, - 0x23b346, - 0x23b886, - 0x23d307, - 0x22e547, - 0x241f49, - 0x21f4c4, - 0x242e44, - 0x4f24ecc2, - 0x4f604042, - 0x260944, - 0x375f06, - 0x22e9c8, - 0x380d04, - 0x269986, - 0x2c9a05, - 0x26ae48, - 0x208643, - 0x26df45, - 0x272903, - 0x25b2c3, - 0x25b2c4, - 0x2744c3, - 0x4fa50642, - 0x4fe01f82, - 0x279309, - 0x286985, - 0x288004, - 0x34adc5, - 0x213604, - 0x24be47, - 0x340005, - 0x2512c4, - 0x2512c8, - 0x2d5b06, - 0x2d9544, - 0x2de648, - 0x2e2747, - 0x50202742, - 0x2e6e04, - 0x2e63c4, - 0x2b80c7, - 0x50679d44, - 0x236b42, - 0x50a03a02, - 0x24ddc3, - 0x2dab84, - 0x235c43, - 0x2754c5, - 0x50e4acc2, - 0x2ed405, - 0x20b5c2, - 0x373e45, - 0x363a45, - 0x512198c2, - 0x2198c4, - 0x51609642, - 0x236506, - 0x2abb46, - 0x26d308, - 0x2b90c8, - 0x2e6c44, - 0x2f5905, - 0x302f49, - 0x34c804, - 0x2d0844, - 0x261603, - 0x216845, - 0x2bd7c7, - 0x277b44, - 0x2ea5cd, - 0x2eab42, - 0x2eab43, - 0x2eac03, - 0x51a04582, - 0x38a045, - 0x22b107, - 0x228c44, - 0x228c47, - 0x295fc9, - 0x2c0849, - 0x20c987, - 0x279043, - 0x279048, - 0x21db89, - 0x2ebb87, - 0x2ebf05, - 0x2ec6c6, - 0x2ecd06, - 0x2ece85, - 0x24c745, - 0x51e00c42, - 0x226605, - 0x2bae0a, - 0x2a7a08, - 0x21c906, - 0x2e6987, - 0x26b104, - 0x3ae3c7, - 0x2f0186, - 0x52200242, - 0x38d646, - 0x2f374a, - 0x2f4745, - 0x526d3382, - 0x52a56142, - 0x2dd906, - 0x35ee88, - 0x39f047, - 
0x52e00602, - 0x213d03, - 0x200a06, - 0x30b804, - 0x313b46, - 0x34d186, - 0x37fb0a, - 0x397385, - 0x20fa86, - 0x2133c3, - 0x2133c4, - 0x2083c2, - 0x300a43, - 0x53248c82, - 0x2c5603, - 0x2f9484, - 0x2dca84, - 0x35efca, - 0x2468c3, - 0x287d48, - 0x279dca, - 0x234cc7, - 0x2f5d86, - 0x2363c4, - 0x28fcc2, - 0x208bc2, - 0x5360a6c2, - 0x245ac3, - 0x251b47, - 0x27a887, - 0x38ffcb, - 0x328284, - 0x30c5c7, - 0x22a646, - 0x219bc7, - 0x29f984, - 0x2c7d05, - 0x291bc5, - 0x53a1bf02, - 0x2225c6, - 0x33a583, - 0x2be6c2, - 0x32b206, - 0x53e0fe42, - 0x542012c2, - 0x2012c5, - 0x5461c642, - 0x54a05702, - 0x2e7dc5, - 0x38e705, - 0x20fb45, - 0x26c743, - 0x239ac5, - 0x2d1e87, - 0x2a94c5, - 0x3a0145, - 0x265904, - 0x243586, - 0x24aac4, - 0x54e05a02, - 0x27dbc5, - 0x2a2c87, - 0x2299c8, - 0x26ed46, - 0x26ed4d, - 0x26f609, - 0x26f612, - 0x2ee645, - 0x2f2703, - 0x55a0f042, - 0x2e7b84, - 0x207303, - 0x318f45, - 0x35d4c5, - 0x55e12dc2, - 0x36bb83, - 0x56244302, - 0x566cd782, - 0x56a13082, - 0x33f185, - 0x331083, - 0x264048, - 0x56e07e02, - 0x57201bc2, - 0x2a5a46, - 0x325c0a, - 0x20d803, - 0x239103, - 0x2edd83, - 0x57e03f02, - 0x66207942, - 0x66a0a302, - 0x201242, - 0x38d449, - 0x2bcb44, - 0x258f48, - 0x66eea342, - 0x67204102, - 0x2e4805, - 0x232d08, - 0x24a508, - 0x39e64c, - 0x239cc3, - 0x23bc82, - 0x6760c402, - 0x2c3c46, - 0x2f6c05, - 0x326903, - 0x32b746, - 0x2f6d46, - 0x235d83, - 0x2f8103, - 0x2f8b46, - 0x2f9904, - 0x276a86, - 0x2c7285, - 0x2f9c8a, - 0x233f84, - 0x2faac4, - 0x34da8a, - 0x67a74b82, - 0x271185, - 0x2fcf4a, - 0x2fdcc5, - 0x2fe844, - 0x2fe946, - 0x2feac4, - 0x228946, - 0x67e00282, - 0x237786, - 0x238845, - 0x201d07, - 0x300fc6, - 0x23d504, - 0x2c8047, - 0x32af06, - 0x267e05, - 0x267e07, - 0x39b987, - 0x39b98e, - 0x2232c6, - 0x32cb85, - 0x283a47, - 0x2f1c03, - 0x366e07, - 0x35ba05, - 0x212644, - 0x214382, - 0x267207, - 0x379104, - 0x238bc4, - 0x25a14b, - 0x21fdc3, - 0x286807, - 0x21fdc4, - 0x2a4c87, - 0x22ac83, - 0x32e98d, - 0x38a888, - 0x2511c4, - 0x2511c5, - 0x301705, - 0x2ff303, - 0x68214a02, - 0x300a03, - 0x301143, - 0x399f04, - 0x27e585, - 0x21eec7, - 0x213446, - 0x36f643, - 0x32b34b, - 0x32f70b, - 0x27338b, - 0x27e68a, - 0x2a55cb, - 0x2caf0b, - 0x2d33cc, - 0x2f8711, - 0x33d50a, - 0x35030b, - 0x37a40b, - 0x3aef8a, - 0x3b0f8a, - 0x301b0d, - 0x3032ce, - 0x30438b, - 0x30464a, - 0x305811, - 0x305c4a, - 0x30614b, - 0x30668e, - 0x30720c, - 0x3075cb, - 0x30788e, - 0x307c0c, - 0x3094ca, - 0x30a6cc, - 0x6870a9ca, - 0x30bbc9, - 0x30dd0a, - 0x30df8a, - 0x30e20b, - 0x31014e, - 0x3104d1, - 0x319509, - 0x31974a, - 0x31a00b, - 0x31baca, - 0x31c656, - 0x31de0b, - 0x3200ca, - 0x320dca, - 0x32528b, - 0x329a09, - 0x32f309, - 0x33154d, - 0x331dcb, - 0x332b0b, - 0x3334cb, - 0x333c89, - 0x3342ce, - 0x3346ca, - 0x335b0a, - 0x33620a, - 0x33698b, - 0x3371cb, - 0x33748d, - 0x338b0d, - 0x339950, - 0x339e0b, - 0x33b3cc, - 0x33c04b, - 0x33dc0b, - 0x33fb8b, - 0x34814b, - 0x348bcf, - 0x348f8b, - 0x349bca, - 0x34a2c9, - 0x34a709, - 0x34b0cb, - 0x34b38e, - 0x34e10b, - 0x34eecf, - 0x3512cb, - 0x35158b, - 0x35184b, - 0x351c8a, - 0x355889, - 0x35898f, - 0x36104c, - 0x36150c, - 0x36278e, - 0x362fcf, - 0x36338e, - 0x363e90, - 0x36428f, - 0x36574e, - 0x365c0c, - 0x365f12, - 0x367b11, - 0x3680ce, - 0x36850e, - 0x368a4e, - 0x368dcf, - 0x36918e, - 0x369513, - 0x3699d1, - 0x369e0e, - 0x36a28c, - 0x36ad53, - 0x36b550, - 0x36c18c, - 0x36c48c, - 0x36c94b, - 0x36dfce, - 0x36e64b, - 0x36ea8b, - 0x37090c, - 0x37954a, - 0x379c0c, - 0x379f0c, - 0x37a209, - 0x37b60b, - 0x37b8c8, - 0x37bac9, - 0x37bacf, - 0x37d40b, - 0x37e10a, - 0x38154c, - 0x383409, 
- 0x3837c8, - 0x384bcb, - 0x3852cb, - 0x38644a, - 0x3866cb, - 0x386c0c, - 0x387948, - 0x38aa8b, - 0x38d14b, - 0x39028b, - 0x391b0b, - 0x39b50b, - 0x39b7c9, - 0x39bd0d, - 0x3a0f8a, - 0x3a1ed7, - 0x3a3b58, - 0x3a8a09, - 0x3a9c0b, - 0x3aa3d4, - 0x3aa8cb, - 0x3aae4a, - 0x3ab2ca, - 0x3ab54b, - 0x3ac450, - 0x3ac851, - 0x3ad10a, - 0x3ae58d, - 0x3aec8d, - 0x3b134b, - 0x3b2746, - 0x2226c3, - 0x68a5b343, - 0x385d06, - 0x28e605, - 0x2d6487, - 0x33d3c6, - 0x1627342, - 0x2ad589, - 0x27a3c4, - 0x2d0348, - 0x245a03, - 0x2e7ac7, - 0x22eb82, - 0x2acc83, - 0x68e006c2, - 0x2c2686, - 0x2c36c4, - 0x328a44, - 0x23e083, - 0x23e085, - 0x696cd7c2, - 0x2dba04, - 0x2760c7, - 0x1662e02, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x2020c3, - 0x200882, - 0x77a48, - 0x206a82, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x20b803, - 0x3152d6, - 0x318493, - 0x30c449, - 0x324308, - 0x3a05c9, - 0x2fd0c6, - 0x30d250, - 0x303e13, - 0x208888, - 0x2407c7, - 0x27c647, - 0x29f48a, - 0x2f9509, - 0x34cf89, - 0x290ccb, - 0x340c06, - 0x32324a, - 0x222986, - 0x279fc3, - 0x2f1ec5, - 0x226b48, - 0x2365cd, - 0x35988c, - 0x238507, - 0x304e0d, - 0x2036c4, - 0x22ff8a, - 0x2308ca, - 0x230d8a, - 0x304107, - 0x23cfc7, - 0x23ff44, - 0x22f246, - 0x340fc4, - 0x2f0448, - 0x2aaa09, - 0x2b9906, - 0x2b9908, - 0x24328d, - 0x2c0a89, - 0x204188, - 0x25b147, - 0x20150a, - 0x24c286, - 0x259f07, - 0x2b1e04, - 0x248087, - 0x33a34a, - 0x25958e, - 0x210705, - 0x2ff04b, - 0x2f2509, - 0x22dec9, - 0x2a6347, - 0x3a278a, - 0x2b8007, - 0x2f2b09, - 0x359d48, - 0x3141cb, - 0x2cff85, - 0x2e038a, - 0x21ec49, - 0x32688a, - 0x2c384b, - 0x247f8b, - 0x290a55, - 0x2d59c5, - 0x25b1c5, - 0x2e24ca, - 0x2501ca, - 0x376507, - 0x220283, - 0x29e308, - 0x2cb24a, - 0x3abc86, - 0x241949, - 0x26ae48, - 0x2d9544, - 0x235c49, - 0x2b90c8, - 0x2dcd87, - 0x27dbc6, - 0x2a2c87, - 0x297b87, - 0x23f985, - 0x25388c, - 0x2511c5, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x206a82, - 0x22bf83, - 0x24c083, - 0x2020c3, - 0x204703, - 0x22bf83, - 0x24c083, - 0x222f43, - 0x204703, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x77a48, - 0x206a82, - 0x200e42, - 0x230fc2, - 0x207382, - 0x203202, - 0x2d3cc2, - 0x462bf83, - 0x231b03, - 0x20f583, - 0x250cc3, - 0x202243, - 0x220ec3, - 0x24c083, - 0x204703, - 0x232dc3, - 0x77a48, - 0x329904, - 0x25fe87, - 0x262203, - 0x3395c4, - 0x22ea83, - 0x286c03, - 0x250cc3, - 0x200882, - 0x127883, - 0x5606a82, - 0x230fc2, - 0x23c4, - 0x200fc2, - 0xe1c44, - 0x77a48, - 0x20e503, - 0x2cd683, - 0x5e2bf83, - 0x22ff84, - 0x6231b03, - 0x6650cc3, - 0x20b542, - 0x2023c4, - 0x24c083, - 0x2f1d03, - 0x2018c2, - 0x204703, - 0x21f0c2, - 0x2e9943, - 0x202942, - 0x207703, - 0x26af03, - 0x201d02, - 0x77a48, - 0x20e503, - 0x2f1d03, - 0x2018c2, - 0x2e9943, - 0x202942, - 0x207703, - 0x26af03, - 0x201d02, - 0x2e9943, - 0x202942, - 0x207703, - 0x26af03, - 0x201d02, - 0x22bf83, - 0x327883, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x202243, - 0x220ec3, - 0x211004, - 0x24c083, - 0x204703, - 0x209202, - 0x21d603, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x327883, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x24c083, - 0x204703, - 0x2ebf05, - 0x212dc2, - 0x200882, - 0x77a48, - 0x250cc3, - 0x260e41, - 0x20bd81, - 0x260e01, - 0x20bb01, - 0x275d81, - 0x275e41, - 0x262281, - 0x24b581, - 0x2f8901, - 0x301dc1, + 0x23ab4b, + 0x26ba4b, + 0x2a654b, + 0x2de44a, + 0x30254b, + 0x31be8b, + 0x356b8c, + 0x378d11, + 0x3b654a, + 0x3ba10b, + 0x30ad8b, 
+ 0x30b34a, + 0x30b88a, + 0x30cb4e, + 0x30d18b, + 0x30d44a, + 0x30ef11, + 0x30f34a, + 0x30f84b, + 0x30fd8e, + 0x31078c, + 0x310c4b, + 0x310f0e, + 0x31128c, + 0x31474a, + 0x31698c, + 0x76316c8a, + 0x317489, + 0x31af4a, + 0x31b1ca, + 0x31b44b, + 0x31f60e, + 0x31f991, + 0x328b89, + 0x328dca, + 0x3295cb, + 0x32a84a, + 0x32b316, + 0x32e14b, + 0x32f10a, + 0x32f50a, + 0x33084b, + 0x333449, + 0x337109, + 0x337d4d, + 0x33870b, + 0x33978b, + 0x33a14b, + 0x33a609, + 0x33ac4e, + 0x33b30a, + 0x33fc8a, + 0x33ffca, + 0x340b8b, + 0x3413cb, + 0x34168d, + 0x342c0d, + 0x343290, + 0x34374b, + 0x34408c, + 0x34480b, + 0x34644b, + 0x34798b, + 0x34c00b, + 0x34ca8f, + 0x34ce4b, + 0x34d94a, + 0x34e689, + 0x34f409, + 0x34f8cb, + 0x34fb8e, + 0x35434b, + 0x35574f, + 0x35864b, + 0x35890b, + 0x358bcb, + 0x3590ca, + 0x35c609, + 0x35f34f, + 0x36424c, + 0x36488c, + 0x364d0e, + 0x3653cf, + 0x36578e, + 0x365fd0, + 0x3663cf, + 0x366f4e, + 0x36770c, + 0x367a12, + 0x3689d1, + 0x36988e, + 0x36a04e, + 0x36a58e, + 0x36a90f, + 0x36acce, + 0x36b053, + 0x36b511, + 0x36b94e, + 0x36bdcc, + 0x36d913, + 0x36e210, + 0x36ea8c, + 0x36ed8c, + 0x36f24b, + 0x3703ce, + 0x370c8b, + 0x3715cb, + 0x37258c, + 0x37814a, + 0x37850c, + 0x37880c, + 0x378b09, + 0x37bb8b, + 0x37be48, + 0x37c049, + 0x37c04f, + 0x37d98b, + 0x7677eb8a, + 0x381fcc, + 0x383189, + 0x383608, + 0x38380b, + 0x383c8b, + 0x38480a, + 0x384a8b, + 0x38540c, + 0x386008, + 0x388d4b, + 0x38b44b, + 0x39484b, + 0x3958cb, + 0x39e6cb, + 0x39e989, + 0x39eecd, + 0x3a464a, + 0x3a5597, + 0x3a6bd8, + 0x3a96c9, + 0x3ab30b, + 0x3ac814, + 0x3acd0b, + 0x3ad28a, + 0x3aea0a, + 0x3aec8b, + 0x3b4250, + 0x3b4651, + 0x3b4d0a, + 0x3b5b4d, + 0x3b624d, + 0x3ba3cb, + 0x3bbd46, + 0x20ff83, + 0x76b80483, + 0x22cdc6, + 0x247645, + 0x27a007, + 0x31bd46, + 0x1656682, + 0x2ad9c9, + 0x31a7c4, + 0x2dacc8, + 0x232b43, + 0x309387, + 0x234f42, + 0x2ac243, + 0x76e07b02, + 0x2c7406, + 0x2c9884, + 0x369f44, + 0x390143, + 0x390145, + 0x776c3d82, + 0x77aa6cc4, + 0x270787, + 0x77e4a282, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x204e83, + 0x205702, + 0x16d208, + 0x2099c2, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x214843, + 0x324556, + 0x325793, + 0x26f749, + 0x3b0688, + 0x379509, + 0x304906, + 0x3389d0, + 0x254b53, + 0x38e508, + 0x28ea47, + 0x36c747, + 0x284d0a, + 0x372349, + 0x38d849, + 0x28decb, + 0x349846, + 0x379b4a, + 0x220d86, + 0x31a3c3, + 0x2d48c5, + 0x35e288, + 0x23888d, + 0x2b984c, + 0x2de0c7, + 0x30b00d, + 0x2142c4, + 0x22fd8a, + 0x230d4a, + 0x23120a, + 0x2099c7, + 0x23af07, + 0x23d844, + 0x22e206, + 0x20c144, + 0x2b4148, + 0x22cbc9, + 0x2b0a86, + 0x2b0a88, + 0x2422cd, + 0x2c6309, + 0x3ac008, + 0x264a07, + 0x2f1f0a, + 0x24c506, + 0x2580c7, + 0x2cc3c4, + 0x23f287, + 0x309c0a, + 0x3ae54e, + 0x21d205, + 0x3b4a4b, + 0x331a09, + 0x217649, + 0x21c747, + 0x2a34ca, + 0x20ac87, + 0x2fb1c9, + 0x38f0c8, + 0x3533cb, + 0x2da905, + 0x3a430a, + 0x266e09, + 0x26d2ca, + 0x2ca6cb, + 0x23f18b, + 0x28dc55, + 0x2e3b85, + 0x264a85, + 0x2ee0ca, + 0x3945ca, + 0x331787, + 0x21da83, + 0x2904c8, + 0x2d2c4a, + 0x223006, + 0x24dfc9, + 0x265c88, + 0x2d7984, + 0x2aa289, + 0x2bfa08, + 0x29fec7, + 0x3630c6, + 0x2ebe07, + 0x289a47, + 0x23d005, + 0x21d04c, + 0x250bc5, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x205503, + 0x204e83, + 0x200983, + 0x2a84c3, + 0x205503, + 0x25e983, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x16d208, + 0x2099c2, + 0x2006c2, + 0x231442, + 0x206502, + 0x200542, + 0x2decc2, + 
0x46a84c3, + 0x232403, + 0x2163c3, + 0x2e9dc3, + 0x244183, + 0x209703, + 0x2d47c6, + 0x205503, + 0x200983, + 0x233183, + 0x16d208, + 0x31ae44, + 0x202107, + 0x392403, + 0x2ae584, + 0x22e043, + 0x21c7c3, + 0x2e9dc3, + 0x16fc07, + 0x205702, + 0x18d2c3, + 0x5a099c2, + 0x88f4d, + 0x8928d, + 0x231442, + 0x1b1384, + 0x200442, + 0x5fb1288, + 0xed844, + 0x16d208, + 0x1411d82, + 0x15054c6, + 0x231783, + 0x200c03, + 0x66a84c3, + 0x22fd84, + 0x6a32403, + 0x6ee9dc3, + 0x202bc2, + 0x3b1384, + 0x205503, + 0x2f78c3, + 0x203ec2, + 0x200983, + 0x21b5c2, + 0x2f2443, + 0x203082, + 0x211643, + 0x265d43, + 0x200202, + 0x16d208, + 0x231783, + 0x2f78c3, + 0x203ec2, + 0x2f2443, + 0x203082, + 0x211643, + 0x265d43, + 0x200202, + 0x2f2443, + 0x203082, + 0x211643, + 0x265d43, + 0x200202, + 0x2a84c3, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x20f942, + 0x201303, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x373605, + 0x212982, + 0x205702, + 0x16d208, + 0x1456108, + 0x2e9dc3, + 0x2274c1, + 0x202901, + 0x202941, + 0x23ad81, + 0x23ad01, + 0x30aec1, + 0x23aec1, + 0x2275c1, + 0x2eea41, + 0x30afc1, 0x200141, 0x200001, - 0x77a48, - 0x200481, - 0x200741, + 0x129845, + 0x16d208, + 0x201ec1, + 0x200701, + 0x200301, 0x200081, - 0x201501, - 0x2007c1, - 0x200901, + 0x200181, + 0x200401, 0x200041, - 0x202381, - 0x2001c1, - 0x2000c1, + 0x201181, + 0x200101, + 0x200281, + 0x200e81, + 0x2008c1, + 0x200441, + 0x201301, + 0x206ec1, 0x200341, - 0x200cc1, - 0x200fc1, - 0x200ac1, - 0x213041, - 0x200c01, - 0x200241, - 0x200a01, + 0x200801, 0x2002c1, - 0x200281, - 0x201d01, - 0x2041c1, - 0x200781, - 0x200641, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x200fc2, - 0x204703, - 0x142b87, - 0x1c106, - 0x18a4a, - 0x89808, - 0x51688, - 0x51a47, - 0x60f46, - 0xcdfc5, - 0x62145, - 0x72606, - 0x122706, - 0x223504, - 0x3212c7, - 0x77a48, - 0x2c8144, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x250cc3, - 0x202243, - 0x220ec3, - 0x24c083, - 0x204703, - 0x212dc2, - 0x2b6f03, - 0x214583, - 0x279643, - 0x202a82, - 0x249c83, - 0x201e03, - 0x2056c3, + 0x2000c1, + 0x201501, + 0x200201, + 0x200bc1, + 0x2005c1, + 0x201cc1, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x200442, + 0x200983, + 0x16fc07, + 0x9807, + 0x1cdc6, + 0x13ef8a, + 0x88648, + 0x51d48, + 0x52107, + 0x191106, + 0xd8c05, + 0x192345, + 0x5d306, + 0x125c86, + 0x25ef44, + 0x311547, + 0x16d208, + 0x2d5f04, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x2e9dc3, + 0x244183, + 0x209703, + 0x205503, + 0x200983, + 0x212982, + 0x2c5983, + 0x2bb143, + 0x32c243, + 0x2022c2, + 0x25d183, + 0x2030c3, + 0x204903, 0x200001, - 0x2075c3, - 0x276844, - 0x3355c3, - 0x30da43, - 0x21eb83, - 0x378d83, - 0xa22bf83, - 0x234a44, - 0x21eb43, - 0x22e0c3, - 0x231b03, - 0x231843, - 0x211a83, - 0x2a2383, - 0x30d9c3, - 0x226083, + 0x2dc745, + 0x206b43, + 0x221344, + 0x26cc83, + 0x318ec3, + 0x21b103, + 0x35ff43, + 0xaaa84c3, + 0x235ac4, + 0x23dbc3, + 0x21cc43, + 0x21b0c3, + 0x22ffc3, + 0x232403, + 0x232143, + 0x2459c3, + 0x2a2703, + 0x318e43, + 0x2344c3, + 0x202643, + 0x24ce44, + 0x24e347, + 0x248902, + 0x250943, + 0x256303, + 0x273ac3, + 0x390f43, + 0x2025c3, + 0xaee9dc3, + 0x20bec3, 0x2143c3, - 
0x24ce04, - 0x23d6c2, - 0x250f43, - 0x2579c3, - 0x279003, - 0x260d83, - 0x345903, - 0x250cc3, - 0x2e87c3, - 0x2037c3, - 0x2023c3, - 0x249283, - 0x35d7c3, - 0x300b83, - 0x387883, - 0x200983, - 0x232003, - 0x220ec3, - 0x21e782, - 0x28a503, - 0x24c083, - 0x16020c3, - 0x255bc3, - 0x232943, - 0x212c03, - 0x204703, - 0x20b103, - 0x21d603, - 0x23b303, - 0x2f8183, - 0x2e9b03, - 0x303b85, - 0x2298c3, - 0x2e9b43, - 0x2eb283, - 0x2133c4, - 0x25a903, - 0x32be83, - 0x277083, - 0x232dc3, - 0x212dc2, - 0x239cc3, - 0x2fb8c4, - 0x238bc4, - 0x24cd43, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x206a82, - 0x204703, - 0xb62bf83, - 0x250cc3, - 0x220ec3, - 0x20dd02, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x6c2, - 0x201142, - 0x223342, - 0x77a48, - 0x6a82, - 0x2338c2, - 0x207082, - 0x23eac2, - 0x202402, - 0x209142, - 0x62145, - 0x202702, - 0x2018c2, - 0x207902, + 0x24a5c3, + 0x328085, + 0x209d43, + 0x2fa383, + 0xb21f903, + 0x365f03, + 0x20d543, + 0x227f83, + 0x209703, + 0x228502, + 0x27d2c3, + 0x205503, + 0x1604e83, + 0x224a43, + 0x209a43, + 0x204a03, + 0x200983, + 0x35fe83, + 0x20f943, + 0x201303, + 0x2efe83, + 0x2ff903, + 0x2f2603, + 0x204405, + 0x23e743, + 0x285346, + 0x2f2643, + 0x36cf43, + 0x3759c4, + 0x2d9083, + 0x2284c3, + 0x267ec3, + 0x233183, + 0x212982, + 0x22d543, + 0x3024c3, + 0x304144, + 0x377404, + 0x20ce83, + 0x16d208, + 0x205702, + 0x200242, + 0x2022c2, 0x201702, - 0x200ac2, - 0x386d02, - 0x203a02, - 0x227d82, - 0x117c0d, - 0xed209, - 0x4a30b, - 0xd1d48, - 0x472c9, - 0x250cc3, - 0x77a48, - 0x77a48, - 0x52946, - 0x200882, - 0x223504, - 0x206a82, - 0x22bf83, - 0x200e42, - 0x231b03, - 0x20f582, - 0x2c8144, - 0x202243, - 0x20ad82, - 0x24c083, - 0x200fc2, - 0x204703, - 0x25b1c6, - 0x30e7cf, - 0x701683, - 0x77a48, - 0x206a82, - 0x20f583, - 0x250cc3, - 0x220ec3, - 0x1479b8b, - 0x206a82, - 0x22bf83, - 0x250cc3, - 0x24c083, - 0x200882, - 0x206b82, - 0x20a882, - 0xea2bf83, - 0x23e902, - 0x231b03, - 0x249242, - 0x225902, - 0x250cc3, - 0x224242, - 0x24b342, - 0x24cd02, - 0x204242, - 0x28cf02, - 0x205302, - 0x200902, - 0x210442, - 0x20b882, - 0x217e82, - 0x2ad442, - 0x23ba82, - 0x312042, - 0x24dcc2, - 0x220ec3, - 0x202ac2, - 0x24c083, - 0x243482, - 0x273342, - 0x204703, - 0x249d02, - 0x203682, - 0x24ecc2, - 0x201f82, - 0x2198c2, - 0x2d3382, - 0x21bf02, - 0x244302, - 0x222742, - 0x30464a, - 0x349bca, - 0x37f58a, - 0x3b28c2, - 0x20b642, - 0x245d02, - 0xeeaef89, - 0xf26050a, - 0xf42e107, - 0xbac2, - 0x6050a, - 0x247204, - 0xfe2bf83, - 0x231b03, - 0x248fc4, - 0x250cc3, - 0x2023c4, - 0x202243, - 0x220ec3, - 0x24c083, - 0x2020c3, - 0x204703, - 0x2298c3, - 0x2232c3, - 0x77a48, - 0x1460ec4, - 0x60745, - 0x5f68a, - 0x10a642, - 0x17e606, - 0x106aef89, - 0x142d47, - 0x1e02, - 0x1ab7ca, - 0xda987, - 0x77a48, - 0xfff08, - 0xd8c7, - 0x1181d10b, - 0x3482, - 0x1a0947, - 0xdc0a, - 0x19f20f, - 0x124b4f, - 0x1eb82, - 0x6a82, - 0xa22c8, - 0xec94a, - 0x143f48, - 0x1582, - 0x13564b, - 0x16fcc8, - 0x7f087, - 0xdaa8a, - 0x58a4b, - 0x172cc9, - 0x16fbc7, - 0xf564c, - 0xb587, - 0xd0b0a, - 0x14bcc8, - 0xf20ce, - 0x5360e, - 0xda7cb, - 0x17664b, - 0xecf4b, - 0x1c109, - 0x1df4b, - 0x22d8d, - 0x24b0b, - 0x277cd, - 0x2b70d, - 0x12c9ca, - 0x38a0b, - 0x5910b, - 0x67505, - 0x10b510, - 0x14338f, - 0xe37cf, - 0x1e34d, - 0x76650, - 0x8b02, - 0x11f24008, - 0x142a08, - 0x122e4205, - 0x47d0b, - 0x4f508, - 0x17680a, - 0x58189, - 0x625c7, - 0x62907, - 0x62ac7, - 0x656c7, - 0x660c7, - 0x663c7, - 0x67807, - 0x68687, - 0x69007, - 
0x691c7, - 0x6a487, - 0x6a647, - 0x6a807, - 0x6a9c7, - 0x6acc7, - 0x6b347, - 0x6c307, - 0x6c8c7, - 0x6d087, - 0x6d807, - 0x6d9c7, - 0x6ddc7, - 0x6e307, - 0x6e507, - 0x6e7c7, - 0x6e987, - 0x6eb47, - 0x6f087, - 0x6fa87, - 0x70547, - 0x72d47, - 0x73007, - 0x73647, - 0x73807, - 0x73b87, - 0x749c7, - 0x74c47, - 0x75047, - 0x758c7, - 0x75a87, - 0x75ec7, - 0x76c07, - 0x76f07, - 0x77147, - 0x77307, - 0x77687, - 0x78007, - 0xd502, - 0x44c4a, - 0xf8407, - 0x124c8a0b, - 0x14c8a16, - 0x1bb11, - 0xdf0ca, - 0xa214a, - 0x52946, - 0x18e90b, - 0x10702, - 0x184551, - 0x99ac9, - 0x92dc9, - 0x10442, - 0x9ec8a, - 0xa3849, - 0xa3f4f, - 0xa48ce, - 0xa5408, - 0x134c2, - 0x799c9, - 0x1779ce, - 0xac08c, - 0xd438f, - 0x194a8e, - 0x1378c, - 0x18549, - 0x19451, - 0x1ae88, - 0x13ab12, - 0x12bb8d, - 0x2f10d, - 0x398cb, - 0x43755, - 0x44b09, - 0x4540a, - 0x57b49, - 0x5bb90, - 0x6a1cb, - 0x7be8f, - 0x7ce4b, - 0x8048c, - 0x80e50, - 0x85a0a, - 0x8a3cd, - 0x13f80e, - 0x14aa0a, - 0x8f30c, - 0x97854, - 0x99751, - 0x9cc0b, - 0x9de8f, - 0xab18d, - 0xaba0e, - 0xdcc4c, - 0x15eacc, - 0xdc94b, - 0xe8a4e, - 0xeb550, - 0x12e34b, - 0x16a70d, - 0xb48cf, - 0xb83cc, - 0xb978e, - 0xb9f91, - 0xbbd0c, - 0x119e47, - 0xc174d, - 0xc60cc, - 0xd5c50, - 0xe608d, - 0xfc987, - 0xeecd0, - 0xf3d88, - 0xf494b, - 0x16f84f, - 0x15bd88, - 0xdf2cd, - 0x173dd0, - 0xafec3, - 0xac82, - 0x2bb09, - 0x5340a, - 0xfb906, - 0x128de7c9, - 0x11e03, - 0x10ad11, - 0xccf47, - 0xd36d0, - 0xd3b8c, - 0xd4d85, - 0x1189c8, - 0x19c9ca, - 0x1976c7, - 0x1042, - 0x6184a, - 0xe3b09, - 0x34aca, - 0x19ef8f, - 0x4160b, - 0x1283cc, - 0x128692, - 0xadd85, - 0x161b4a, - 0x12ee1545, - 0x1132c3, - 0x186d02, - 0xe9e4a, - 0xcfc88, - 0x124ac7, - 0x34c2, - 0xed42, - 0x2942, - 0x1a7a10, - 0x4042, - 0x2e9cf, - 0x72606, - 0x176c8e, - 0xd7c0b, - 0x14ac08, - 0xc9d49, - 0x17cbd2, - 0x404d, - 0x496c8, - 0x4a1c9, - 0x4c40d, - 0x4e7c9, - 0x52a8b, - 0x55d88, - 0x5f4c8, - 0x67f88, - 0x68209, - 0x6840a, - 0x6898c, - 0xea08a, - 0xf81c7, - 0x1684d, - 0xed84b, - 0x7a1cc, - 0x65910, - 0x1bc2, - 0xd65cd, - 0x3f02, - 0x7942, - 0xf810a, - 0xdefca, - 0xe79cb, - 0x592cc, - 0xffc8e, - 0x199fcd, - 0xf2f88, - 0x6c2, - 0x10b6778e, - 0x10c2e107, - 0x111ab089, - 0x129c3, - 0x1171b7cc, - 0xbac2, - 0x146151, - 0x1676d1, - 0x176fd1, - 0x131111, - 0x11b70f, - 0x11fdcc, - 0x124f4d, - 0x15c8cd, - 0x16da95, - 0xbacc, - 0x50b50, - 0x1091cc, - 0x10f6cc, - 0x4f2c9, - 0xbac2, - 0x14620e, - 0x16778e, - 0x17708e, - 0x1311ce, - 0x11b7cc, - 0x11fe89, - 0xbb89, - 0x50c0d, - 0x109289, - 0x10f789, - 0x158543, - 0x1892c3, - 0xbac2, - 0xd2d05, - 0x1ab7c4, - 0x135a04, - 0x181444, - 0x17e004, - 0x17a784, - 0x142d44, - 0x1424703, - 0x1416703, - 0xf2b84, - 0x8b02, - 0x199fc3, - 0x200882, - 0x206a82, - 0x200e42, - 0x209342, - 0x20f582, - 0x200fc2, - 0x202942, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c3, - 0x24c083, - 0x204703, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x24c083, - 0x204703, - 0x39f83, - 0x250cc3, - 0x200882, - 0x327883, - 0x14a2bf83, - 0x380d87, - 0x250cc3, - 0x39a883, - 0x211004, - 0x24c083, - 0x204703, - 0x24e9ca, - 0x25b1c5, - 0x21d603, - 0x2012c2, - 0x77a48, - 0x77a48, - 0x6a82, - 0x110842, - 0x1a0a85, - 0x77a48, - 0x2bf83, - 0xf2447, - 0xcd44f, - 0xfb984, - 0x172e4a, - 0xabcc7, - 0x18908a, - 0x18ed8a, - 0xfb906, - 0x8a4d, - 0x127883, - 0x77a48, - 0x6a82, - 0x48fc4, - 0x86d83, - 0xebf05, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x201e03, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x292f83, - 0x2232c3, - 0x201e03, - 0x223504, - 0x22bf83, - 0x231b03, - 0x250cc3, - 
0x24c083, - 0x204703, - 0x22a543, - 0x22bf83, - 0x231b03, - 0x210c43, - 0x20f583, - 0x250cc3, - 0x2023c4, - 0x265603, - 0x232003, - 0x220ec3, - 0x24c083, - 0x204703, - 0x21d603, - 0x200a43, - 0x16e2bf83, - 0x231b03, - 0x245e83, - 0x250cc3, - 0x2805c3, - 0x232003, - 0x204703, - 0x208583, - 0x325ec4, - 0x77a48, - 0x1762bf83, - 0x231b03, - 0x2a54c3, - 0x250cc3, - 0x220ec3, - 0x211004, - 0x24c083, - 0x204703, - 0x220303, - 0x77a48, - 0x17e2bf83, - 0x231b03, - 0x20f583, - 0x2020c3, - 0x204703, - 0x77a48, - 0x142e107, - 0x327883, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x211004, - 0x24c083, - 0x204703, - 0x175d04, - 0x340dc5, - 0x77a48, - 0x742, - 0x33303, - 0x2cf588, - 0x23ca87, - 0x223504, - 0x366b06, - 0x36d946, - 0x77a48, - 0x23bd43, - 0x2e31c9, - 0x2b3f55, - 0xb3f5f, - 0x22bf83, - 0x334fd2, - 0x1011c6, - 0x13b685, - 0x17680a, - 0x58189, - 0x334d8f, - 0x2c8144, - 0x23c485, - 0x35d590, - 0x324507, - 0x2020c3, - 0x255bc8, - 0x2d2d8a, - 0x241204, - 0x2e0f83, - 0x25b1c6, - 0x2012c2, - 0x387d0b, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x2e8283, - 0x206a82, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x39a883, - 0x206f83, - 0x204703, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x24c083, - 0x204703, - 0x200882, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x223504, - 0x22bf83, - 0x231b03, - 0x30db04, - 0x24c083, - 0x204703, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x2037c3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2614c3, - 0x6ab03, - 0x19a883, - 0x24c083, - 0x204703, - 0x30464a, - 0x31c409, - 0x33e2cb, - 0x33e94a, - 0x349bca, - 0x356a0b, - 0x36f44a, - 0x37954a, - 0x37f58a, - 0x37f80b, - 0x39c709, - 0x39e40a, - 0x39e94b, - 0x3aab8b, - 0x3b0d4a, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x220ec3, - 0x24c083, - 0x204703, - 0x77a48, - 0x22bf83, - 0x2625c4, - 0x219242, - 0x211004, - 0x278b45, - 0x201e03, - 0x223504, - 0x22bf83, - 0x234a44, - 0x231b03, - 0x248fc4, - 0x2c8144, - 0x2023c4, - 0x232003, - 0x24c083, - 0x204703, - 0x297985, - 0x22a543, - 0x21d603, - 0x25ad03, - 0x2512c4, - 0x260e04, - 0x279645, - 0x77a48, - 0x2fa644, - 0x221c46, - 0x287e44, - 0x206a82, - 0x364707, - 0x24b847, - 0x246a04, - 0x2569c5, - 0x2e6285, - 0x2a8e05, - 0x2023c4, - 0x316208, - 0x2031c6, - 0x2e6ec8, - 0x23e385, - 0x2cff85, - 0x214984, - 0x204703, - 0x2e1c44, - 0x355bc6, - 0x25b2c3, - 0x2512c4, - 0x269bc5, - 0x233504, - 0x399e44, - 0x2012c2, - 0x24ec06, - 0x392506, - 0x2f6c05, - 0x200882, - 0x327883, - 0x1d606a82, - 0x233004, - 0x20f582, - 0x220ec3, - 0x20b602, - 0x24c083, - 0x200fc2, - 0x20b803, - 0x2232c3, - 0x77a48, - 0x77a48, - 0x250cc3, - 0x200882, - 0x1e206a82, - 0x250cc3, - 0x26a783, - 0x265603, - 0x31d184, - 0x24c083, - 0x204703, - 0x77a48, - 0x200882, - 0x1ea06a82, - 0x22bf83, - 0x24c083, - 0x204703, - 0x20f042, - 0x212dc2, - 0x39a883, - 0x2d9f83, - 0x200882, - 0x77a48, - 0x206a82, - 0x231b03, - 0x248fc4, - 0x209d03, - 0x250cc3, - 0x2037c3, - 0x220ec3, - 0x24c083, - 0x21a883, - 0x204703, - 0x220283, - 0x125513, - 0x134914, - 0x145c6, - 0x1c106, - 0x514c7, - 0x7a709, - 0x141c0a, - 0x896cd, - 0x11790c, - 0x17ef0a, - 0x62145, - 0x16d408, - 0x72606, - 0x122706, - 0x208b02, - 0x1ab987, - 0x22bf83, - 0xd0a85, - 0x1bb06, - 0x8d1ca, - 0xacf83, - 0x7a6c5, - 0xd003, - 0x18e9cc, - 0x1ade48, - 0x13f348, - 0x22bf83, - 
0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x200882, - 0x206a82, - 0x250cc3, - 0x20b542, - 0x24c083, - 0x204703, - 0x20b803, - 0x362fcf, - 0x36338e, - 0x77a48, - 0x22bf83, - 0x43f87, - 0x231b03, - 0x250cc3, - 0x202243, - 0x24c083, - 0x204703, - 0x21fc03, - 0x264fc7, - 0x201c42, - 0x291d49, - 0x200ec2, - 0x3a7d0b, - 0x28b2ca, - 0x28cc09, - 0x200d82, - 0x261046, - 0x254b15, - 0x3a7e55, - 0x257393, - 0x3a83d3, - 0x204a42, - 0x20c905, - 0x32178c, - 0x21b24b, - 0x2543c5, - 0x20b0c2, - 0x287302, - 0x3747c6, - 0x201e02, - 0x25f986, - 0x34be4d, - 0x36fe4c, - 0x30b584, - 0x2009c2, - 0x209ec2, - 0x33aec8, - 0x204a02, - 0x32f986, - 0x2d2944, - 0x254cd5, - 0x257513, - 0x212903, - 0x34b6ca, - 0x35af47, - 0x2e7c09, - 0x229087, - 0x305b42, - 0x200002, - 0x200006, - 0x206e82, - 0x77a48, - 0x212742, - 0x212bc2, - 0x3994c7, - 0x35bac7, - 0x21f085, - 0x203482, - 0x220247, - 0x220408, - 0x23a242, - 0x2715c2, - 0x22ce42, - 0x201482, - 0x300cc8, - 0x21a903, - 0x286f48, - 0x2c77cd, - 0x214683, - 0x2e3f48, - 0x23218f, - 0x23254e, - 0x22338a, - 0x299e51, - 0x29a2d0, - 0x2b2d0d, - 0x2b304c, - 0x20e607, - 0x34b847, - 0x366bc9, - 0x245bc2, - 0x2004c2, - 0x252ecc, - 0x2531cb, - 0x2008c2, - 0x2dcb06, - 0x2092c2, - 0x2036c2, - 0x21eb82, - 0x206a82, - 0x3929c4, - 0x23a547, - 0x208942, - 0x23fac7, - 0x241007, - 0x217442, - 0x20e542, - 0x243e45, - 0x200682, - 0x26794e, - 0x27d94d, - 0x231b03, - 0x377f8e, - 0x2dc3cd, - 0x229343, - 0x203982, - 0x209f44, - 0x245b82, - 0x201502, - 0x34a4c5, - 0x351ac7, - 0x36ed02, - 0x209342, - 0x248847, - 0x24d248, - 0x23d6c2, - 0x2ade06, - 0x252d4c, - 0x25308b, - 0x20db02, - 0x25c18f, - 0x25c550, - 0x25c94f, - 0x25cd15, - 0x25d254, - 0x25d74e, - 0x25dace, - 0x25de4f, - 0x25e20e, - 0x25e594, - 0x25ea93, - 0x25ef4d, - 0x2781c9, - 0x28a283, + 0x202a42, + 0x206c02, + 0x245482, 0x2007c2, - 0x31a605, - 0x209d06, - 0x20f582, - 0x270387, - 0x250cc3, - 0x210702, - 0x235e48, - 0x29a091, - 0x29a4d0, - 0x200942, - 0x21e747, - 0x203fc2, - 0x2cec87, - 0x20ac82, - 0x2cf389, - 0x374787, - 0x34aec8, - 0x2261c6, - 0x2d9e83, - 0x322945, - 0x231d82, - 0x200402, - 0x200405, - 0x22aa05, - 0x200f02, - 0x233583, - 0x233587, - 0x200f07, + 0x20d882, + 0x200e82, + 0x20b102, + 0x20e602, + 0x2675c2, + 0x2056c2, + 0x2decc2, 0x2013c2, - 0x301344, - 0x2025c3, - 0x2bfb89, - 0x2da648, - 0x217382, - 0x203702, - 0x222047, - 0x224605, - 0x2a4248, - 0x20c5c7, - 0x201dc3, - 0x2a1b86, - 0x2b2b8d, - 0x2b2f0c, - 0x27e146, - 0x207082, - 0x206a42, - 0x20f842, - 0x23200f, - 0x23240e, - 0x2e6307, - 0x200342, - 0x30a2c5, - 0x30a2c6, - 0x250702, - 0x202ac2, - 0x215346, - 0x291f83, - 0x2cebc6, - 0x2c1105, - 0x2c110d, - 0x2c1c55, - 0x2c240c, - 0x2c2c0d, - 0x2c32d2, - 0x203642, - 0x208b82, - 0x201842, - 0x2e4f06, - 0x2abf46, - 0x201042, - 0x209d86, - 0x207902, - 0x223d85, - 0x203202, - 0x267a89, - 0x27074c, - 0x270a8b, - 0x200fc2, - 0x24d688, - 0x20cb42, - 0x209242, - 0x21b006, - 0x3683c5, - 0x21c307, - 0x253b45, - 0x299cc5, - 0x244002, - 0x322882, - 0x200ac2, - 0x27c187, - 0x2d0e4d, - 0x2d11cc, - 0x275587, - 0x20b142, - 0x224742, - 0x242988, - 0x22bc88, - 0x2d57c8, - 0x2df284, - 0x2e8cc7, - 0x2db883, - 0x2aff02, - 0x20d102, - 0x2dfa09, - 0x3a4507, - 0x208582, - 0x273cc5, - 0x242242, - 0x22e1c2, - 0x27b6c3, - 0x27b6c6, - 0x2e8282, - 0x2e98c2, - 0x200d42, - 0x30c286, - 0x209e87, - 0x201442, - 0x203942, - 0x286d8f, - 0x377dcd, - 0x35914e, - 0x2dc24c, - 0x204742, - 0x205fc2, - 0x226005, - 0x3b1146, - 0x214442, - 0x201002, - 0x2034c2, - 0x20c544, - 0x2c7644, - 0x338546, - 0x202942, - 0x27c9c7, - 0x224d83, - 0x226708, - 0x228048, 
- 0x32ba07, - 0x22ed46, - 0x202742, - 0x238f03, - 0x23ce07, - 0x26e186, - 0x2e4e45, - 0x3497c8, + 0x2069c2, + 0x201302, + 0x2172c2, + 0x202482, + 0x200482, + 0x219382, + 0x202782, + 0x209842, + 0x2027c2, + 0x222702, + 0x203b42, + 0x5702, + 0x242, + 0x22c2, + 0x1702, + 0x2a42, + 0x6c02, + 0x45482, + 0x7c2, + 0xd882, + 0xe82, + 0xb102, + 0xe602, + 0x675c2, + 0x56c2, + 0xdecc2, + 0x13c2, + 0x69c2, + 0x1302, + 0x172c2, + 0x2482, + 0x482, + 0x19382, + 0x2782, + 0x9842, + 0x27c2, + 0x22702, + 0x3b42, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x200983, + 0xc6a84c3, + 0x2e9dc3, + 0x209703, + 0x21a2c2, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x7b02, + 0x201bc2, + 0x153f3c5, + 0x25ed82, + 0x16d208, + 0x99c2, + 0x20c182, + 0x208d02, + 0x2024c2, 0x209642, - 0x321e87, - 0x20b702, - 0x2eab42, - 0x204002, - 0x2df7c9, - 0x200242, - 0x200a02, - 0x275803, - 0x3a0007, - 0x201f02, - 0x2708cc, - 0x270bcb, - 0x27e1c6, - 0x20cf45, - 0x21c642, + 0x208442, + 0x192345, + 0x2038c2, + 0x203ec2, + 0x2023c2, + 0x204dc2, + 0x2013c2, + 0x385502, + 0x201102, + 0x236582, + 0x16fc07, + 0x1b270d, + 0xd8c89, + 0x56e8b, + 0xdd608, + 0x53dc9, + 0xfacc6, + 0x2e9dc3, + 0x16d208, + 0x16d208, + 0x52e06, + 0x1a78c7, 0x205702, - 0x2b2886, - 0x26ba43, - 0x357a07, - 0x249842, - 0x205a02, - 0x254995, - 0x3a8015, - 0x257253, - 0x3a8553, - 0x269cc7, - 0x277c08, - 0x277c10, - 0x278c4f, - 0x28b093, - 0x28c9d2, - 0x291910, - 0x2a248f, - 0x2a8992, - 0x2fcb11, - 0x2f4bd3, - 0x353a92, - 0x320a0f, - 0x2bb04e, - 0x2c0c92, - 0x2c8f51, - 0x2cb48f, - 0x2cc20e, - 0x2cd9d1, - 0x2fbb90, - 0x2db252, - 0x2df5d1, - 0x2e5346, - 0x2e6b07, - 0x2f9347, - 0x202c42, - 0x281b85, - 0x3471c7, - 0x212dc2, - 0x206d02, - 0x229585, - 0x2212c3, - 0x2798c6, - 0x2d100d, - 0x2d134c, - 0x201242, - 0x32160b, - 0x21b10a, - 0x2eae8a, - 0x2b1649, - 0x2dde0b, - 0x20c70d, - 0x362b4c, - 0x224f0a, - 0x22a18c, - 0x24470b, - 0x33bc8c, - 0x25424b, - 0x2706c3, - 0x277806, - 0x2ce482, - 0x2ea342, - 0x221f83, - 0x204102, - 0x204c03, - 0x2562c6, - 0x25cec7, - 0x26d686, - 0x2ecb08, - 0x22b988, - 0x2f3906, - 0x20c402, - 0x2f65cd, - 0x2f690c, - 0x2c8207, - 0x2fa507, - 0x214602, - 0x2342c2, - 0x23cd82, - 0x266dc2, - 0x206a82, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x211004, - 0x24c083, - 0x204703, - 0x20b803, - 0x200882, - 0x200702, - 0x21a8f085, - 0x21e07f05, - 0x2230bf46, - 0x77a48, - 0x226ae4c5, - 0x206a82, + 0x25ef44, + 0x2099c2, + 0x2a84c3, + 0x2006c2, + 0x232403, + 0x20d882, + 0x2d5f04, + 0x244183, + 0x249a02, + 0x205503, + 0x200442, + 0x200983, + 0x264a86, + 0x31ba0f, + 0x70a403, + 0x16d208, + 0x2099c2, + 0x2163c3, + 0x2e9dc3, + 0x209703, + 0x1526f4b, + 0xd9888, + 0x142b68a, + 0x14fa807, + 0xda405, + 0x16fc07, + 0x2099c2, + 0x2a84c3, + 0x2e9dc3, + 0x205503, + 0x205702, + 0x20c202, + 0x20bb42, + 0xfea84c3, + 0x23c042, + 0x232403, + 0x209d02, + 0x221402, + 0x2e9dc3, + 0x228782, + 0x251442, + 0x2a6c82, + 0x200f82, + 0x28d742, + 0x203442, + 0x202e42, + 0x267e42, + 0x24ecc2, + 0x211ec2, + 0x2ad882, + 0x2eab02, + 0x2182c2, + 0x2ad342, + 0x209703, + 0x20ec42, + 0x205503, 0x200e42, - 0x22b41485, - 0x22e80385, - 0x23281247, - 0x23615009, - 0x23a58804, - 0x20f582, - 0x210702, - 0x23f6ed45, - 0x24291589, - 0x24772b48, - 0x24aac5c5, - 0x24ebf307, - 0x2521f7c8, - 0x256d8045, - 0x25a01c06, - 0x25e71249, - 0x263ac248, - 0x266b9dc8, - 0x26a981ca, - 0x26e4a044, - 0x272c96c5, - 0x276b5708, - 0x27a6b485, - 0x21a982, - 0x27e00343, - 0x282a14c6, - 
0x28645248, - 0x28a067c6, - 0x28ece688, - 0x29321ac6, - 0x29731b84, - 0x202b42, - 0x29a3bf07, - 0x29ea6f84, - 0x2a27bc47, - 0x2a713c87, - 0x200fc2, - 0x2aa98f45, - 0x2af24244, - 0x2b2fc647, - 0x2b623747, - 0x2ba85546, - 0x2be5b885, - 0x2c293d47, - 0x2c6d5388, - 0x2cb1a987, - 0x2cf6ab09, - 0x2d38e705, - 0x2d75e547, - 0x2da8e7c6, - 0x2de61248, - 0x33a60d, - 0x244189, - 0x2e430b, - 0x24b38b, - 0x275c0b, - 0x2a3a4b, - 0x30220b, - 0x3024cb, - 0x302d49, - 0x3048cb, - 0x304b8b, - 0x3053cb, - 0x305eca, - 0x30640a, - 0x306a0c, - 0x309b0b, - 0x30a44a, - 0x3199ca, - 0x32aa8e, - 0x32dace, - 0x32de4a, - 0x33188a, - 0x3323cb, - 0x33268b, - 0x33320b, - 0x34e64b, - 0x34ec4a, - 0x34f90b, - 0x34fbca, - 0x34fe4a, - 0x3500ca, - 0x37110b, - 0x37a88b, - 0x37c1ce, - 0x37c54b, - 0x38618b, - 0x3874cb, - 0x38ad4a, - 0x38afc9, - 0x38b20a, - 0x38c88a, - 0x39d0cb, - 0x39ec0b, - 0x39fa8a, - 0x3a120b, - 0x3a728b, - 0x3b078b, - 0x2e283308, - 0x2e688e49, - 0x2eb61e09, - 0x2eed0348, - 0x3382c5, - 0x207503, - 0x202d44, - 0x3996c5, - 0x258546, - 0x266545, - 0x288644, - 0x270288, - 0x218d45, - 0x290444, - 0x205c87, - 0x29c18a, - 0x3411ca, - 0x387307, - 0x2078c7, - 0x2f47c7, - 0x327dc7, - 0x2b6085, - 0x323d06, - 0x33f687, - 0x26dcc4, - 0x320686, - 0x3a6bc6, - 0x2017c5, - 0x24fa04, - 0x2c0406, - 0x29b587, - 0x32c646, - 0x305107, - 0x27f203, - 0x24e386, - 0x230705, - 0x281347, - 0x2bda0a, - 0x235f44, - 0x21b888, - 0x2eb949, - 0x2d1b07, - 0x330c06, - 0x327b88, - 0x314009, - 0x2396c4, - 0x3619c4, - 0x2fb3c5, - 0x210f08, - 0x2be207, - 0x2a9209, - 0x231548, - 0x2feb46, - 0x243586, - 0x2965c8, - 0x3739c6, - 0x207f05, - 0x285606, - 0x27c348, - 0x231f06, - 0x251f0b, - 0x2343c6, - 0x297d4d, - 0x3a26c5, - 0x2a6e46, - 0x2065c5, - 0x29ae89, - 0x32a347, - 0x3825c8, - 0x2d5046, - 0x296cc9, - 0x3a0486, - 0x2bd985, - 0x237446, - 0x2a8406, - 0x2c47c9, - 0x2394c6, - 0x248547, - 0x2d9045, - 0x203203, - 0x252085, - 0x298007, - 0x327706, - 0x3a25c9, - 0x30bf46, - 0x285846, - 0x205149, - 0x285009, - 0x29f347, - 0x322b08, - 0x28dc89, - 0x281808, - 0x31ce86, - 0x2cc985, - 0x30b24a, - 0x2858c6, - 0x380c06, - 0x2a1685, - 0x3843c8, - 0x2109c7, - 0x22f98a, - 0x249406, - 0x2f9a85, - 0x330e86, - 0x263ec7, - 0x330ac7, - 0x2ef245, - 0x2bdb45, - 0x29f6c6, - 0x2ad006, - 0x383a06, - 0x333b84, - 0x2840c9, - 0x289d06, - 0x35104a, - 0x21a588, - 0x35e248, - 0x3411ca, - 0x3a3505, - 0x29b4c5, - 0x385b88, - 0x2c9448, - 0x36d747, - 0x211206, - 0x312e08, - 0x2e4947, - 0x2837c8, - 0x36a5c6, - 0x286148, - 0x2b3406, - 0x23e507, - 0x297706, - 0x2c0406, - 0x233bca, - 0x392a46, - 0x2cc989, - 0x2ae7c6, - 0x2d224a, - 0x331b89, - 0x2f3a06, - 0x37ac04, - 0x31a6cd, - 0x2890c7, - 0x3157c6, - 0x2b9c85, - 0x3a0505, - 0x31abc6, - 0x274209, - 0x2b1c47, - 0x27d406, - 0x2cd2c6, - 0x2886c9, - 0x2bf4c4, - 0x22c784, - 0x2073c8, - 0x256686, - 0x273d88, - 0x2373c8, - 0x282fc7, - 0x200849, - 0x383c07, - 0x2ae38a, - 0x236d8f, - 0x2463ca, - 0x225e05, - 0x27c585, - 0x21ac05, - 0x2d2887, - 0x20e243, - 0x322d08, - 0x2f7206, - 0x2f7309, - 0x2b0106, - 0x2c3107, - 0x296a89, - 0x3824c8, - 0x2a1747, - 0x3015c3, - 0x338345, - 0x20e1c5, - 0x3339cb, - 0x26b544, + 0x281702, + 0x200983, + 0x25d202, + 0x209842, + 0x218942, + 0x202e02, + 0x200c42, + 0x2de402, + 0x20fe82, + 0x250b82, + 0x220642, + 0x30d44a, + 0x34d94a, + 0x37fc4a, + 0x3bbec2, + 0x202cc2, + 0x2058c2, + 0x1026e389, + 0x1072510a, + 0x1594ac7, + 0x1410843, + 0x24d50, + 0x50642, + 0x2030c4, + 0x10ea84c3, + 0x232403, + 0x249944, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x205503, + 0xdc105, + 0x204e83, + 0x200983, + 0x23e743, + 0x25ed03, + 
0x16d208, + 0x1591084, + 0x18ff45, + 0x1a768a, + 0x116902, + 0x18ae46, + 0xaf551, + 0x1166e389, + 0x18ffc8, + 0x13f9c8, + 0xff387, + 0xec2, + 0x12984b, + 0x1a5b0a, + 0x21347, + 0x16d208, + 0x108f08, + 0xe4c7, + 0x17818f4b, + 0x1b887, + 0x1c02, + 0x6c707, + 0x1a1ca, + 0x13f6cf, + 0x988f, + 0x1b102, + 0x99c2, + 0xa2648, + 0x19e30a, + 0x1320c8, + 0xdc2, + 0x13f44f, + 0x9e18b, + 0x68bc8, + 0x38f47, + 0x388a, + 0x304cb, + 0x4efc9, + 0x11dd07, + 0xfc34c, + 0x2c07, + 0x19b40a, + 0xd4ac8, + 0x1a3cce, + 0x1cdce, + 0x2118b, + 0x26ccb, + 0x27d4b, + 0x2c009, + 0x2da0b, + 0x5e7cd, + 0x85acb, + 0xdfc8d, + 0xe000d, + 0xe164a, + 0x17724b, + 0x1ae0cb, + 0x31c45, + 0x1424d50, + 0x12618f, + 0x1268cf, + 0xe2c0d, + 0x1b8f90, + 0x2bb82, + 0x17fb0388, + 0x9688, + 0x182ee705, + 0x48fcb, + 0x117090, + 0x4fdc8, + 0x26e8a, + 0x56b49, + 0x5cb47, + 0x5ce87, + 0x5d047, + 0x5f507, + 0x60587, + 0x60b87, + 0x61387, + 0x617c7, + 0x61cc7, + 0x61fc7, + 0x62fc7, + 0x63187, + 0x63347, + 0x63507, + 0x63807, + 0x64007, + 0x64c87, + 0x65407, + 0x66547, + 0x66b07, + 0x66cc7, + 0x67047, + 0x67487, + 0x67687, + 0x67947, + 0x67b07, + 0x67cc7, + 0x67f87, + 0x68247, + 0x68f07, + 0x69607, + 0x698c7, + 0x6a047, + 0x6a207, + 0x6a607, + 0x6aec7, + 0x6b147, + 0x6b547, + 0x6b707, + 0x6b8c7, + 0x70587, + 0x71387, + 0x718c7, + 0x71e47, + 0x72007, + 0x72387, + 0x728c7, + 0xdb42, + 0xbbb0a, + 0xffb87, + 0x184cfa0b, + 0x14cfa16, + 0x17e91, + 0x1082ca, + 0xa24ca, + 0x52e06, + 0xd0f8b, + 0x5e82, + 0x2f711, + 0x157789, + 0x942c9, + 0x67e42, + 0x9f54a, + 0xa4909, + 0xa504f, + 0xa5a8e, + 0xa6388, + 0x17f42, + 0x18ef09, + 0x17f08e, + 0xf80cc, + 0xdf20f, + 0x198f4e, + 0xc84c, + 0x11809, + 0x13491, + 0x222c8, + 0x24512, + 0x281cd, + 0x2e0cd, + 0x8618b, + 0xbadd5, + 0xbb9c9, + 0xe268a, + 0x120689, + 0x160310, + 0x39a0b, + 0x4480f, + 0x5648b, + 0x58a8c, + 0x70f90, + 0x7beca, + 0x7d18d, + 0x80d4e, + 0x86cca, + 0x8720c, + 0x89714, + 0x157411, + 0x1a200b, + 0x9004f, + 0x9320d, + 0x9a00e, + 0x9fd8c, + 0xa1acc, + 0xaae8b, + 0xab18e, + 0xab990, + 0x154c0b, + 0x1160cd, + 0x10e80f, + 0x17e50c, + 0xb090e, + 0xb2391, + 0xb3ecc, + 0xc00c7, + 0xc064d, + 0xc0fcc, + 0xc1dd0, + 0x102c8d, + 0x12bc87, + 0xc7750, + 0xd3748, + 0xd51cb, + 0x12aa8f, + 0x17e248, + 0x1084cd, + 0x14d550, + 0x18ba60c6, + 0xaff43, + 0xbe02, + 0x11e309, + 0x5394a, + 0x104186, + 0x18cd9009, + 0x11d43, + 0xd6191, + 0xd65c9, + 0xd7607, + 0xaf6cb, + 0xde6d0, + 0xdeb8c, + 0xdf6c5, + 0x18f248, + 0x19f94a, + 0x111947, + 0x33c2, + 0x124a4a, + 0x127549, + 0x35b4a, + 0x8a3cf, + 0x3edcb, + 0x12814c, + 0x169b92, + 0xaea45, + 0x166aca, + 0x192ece45, + 0x18020c, + 0x122843, + 0x185502, + 0xf2bca, + 0x14f3fcc, + 0x1b1a48, + 0xdfe48, + 0x16fb87, + 0x1c42, + 0x3082, + 0x3f590, + 0x27c2, + 0x1ad58f, + 0x5d306, + 0x77ece, + 0xe598b, + 0x86ec8, + 0xd1a49, + 0x17d152, + 0x1abecd, + 0x55b08, + 0x56d49, + 0x572cd, + 0x57b89, + 0x5c58b, + 0x5d848, + 0x61ac8, + 0x628c8, + 0x62b49, + 0x62d4a, + 0x6398c, + 0xe3cca, + 0xff947, + 0x2270d, + 0xf4b4b, + 0x11a5cc, + 0x18b050, + 0xc2, + 0x7a14d, + 0x2dc2, + 0x35482, + 0xff88a, + 0x1081ca, + 0x10928b, + 0x1ae28c, + 0x108c8e, + 0x100cd, + 0x1b3908, + 0x7b02, + 0x11b5ec4e, + 0x1227020e, + 0x12a83a0a, + 0x1336864e, + 0x13b143ce, + 0x1432ee0c, + 0x1594ac7, + 0x1594ac9, + 0x1410843, + 0x14b3054c, + 0x15333209, + 0x15b49dc9, + 0x50642, + 0x18fb51, + 0x70151, + 0x8394d, + 0x17acd1, + 0x114311, + 0x12ed4f, + 0x13048f, + 0x13314c, + 0x149d0c, + 0x1a688d, + 0x1bb815, + 0x5064c, + 0x11f0cc, + 0xe9c50, + 0x11d44c, + 0x12a54c, + 0x15e999, + 0x168399, + 0x16fd99, + 0x175d54, + 0x181ad4, + 
0x19b7d4, + 0x19d714, + 0x1ac314, + 0x16250709, + 0x1699ba89, + 0x1731f189, + 0x11e224c9, + 0x50642, + 0x126224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x12e224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x136224c9, + 0x50642, + 0x13e224c9, + 0x50642, + 0x146224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x14e224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x156224c9, + 0x50642, + 0x15e224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x166224c9, + 0x50642, + 0x16e224c9, + 0x50642, + 0x176224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0xaf545, + 0x1a5b04, + 0x2bb84, + 0x1aa404, + 0x1a75c4, + 0xc484, + 0x13fc4, + 0x58f44, + 0xff384, + 0x14ab3c3, + 0x143e603, + 0xfb244, + 0x1547c03, + 0x2bb82, + 0x100c3, + 0x205702, + 0x2099c2, + 0x2006c2, + 0x218342, + 0x20d882, + 0x200442, + 0x203082, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x24a5c3, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x205503, + 0x200983, + 0x3fc3, + 0x2e9dc3, + 0x205702, + 0x38d2c3, + 0x1aea84c3, + 0x3b8e47, + 0x2e9dc3, + 0x206343, + 0x211cc4, + 0x205503, + 0x200983, + 0x255cca, + 0x264a85, + 0x201303, + 0x20b0c2, + 0x16d208, + 0x16d208, + 0x99c2, + 0x11fd02, + 0x6c845, + 0x129845, + 0x16d208, + 0x1b887, + 0xa84c3, + 0x1ba38e47, + 0x13ee06, + 0x1bd49c05, + 0x11de07, + 0x66ca, + 0x3748, + 0x65c7, + 0x56948, + 0x28d87, + 0x2c6cf, + 0x30b87, + 0x3b806, + 0x117090, + 0x12330f, + 0x104204, + 0x1c11dece, + 0xa8b4c, + 0x4f14a, + 0x9a2c7, + 0x112b8a, + 0x18f409, + 0xbf34a, + 0x5414a, + 0x104186, + 0x9a38a, + 0x8350a, + 0xe47c9, + 0xd5a48, + 0xd5d46, + 0xd9a8d, + 0xb3c45, + 0x1a78c7, + 0x5d6c7, + 0xd9394, + 0xf938b, + 0x68a0a, + 0xa2d0d, + 0x1cdc3, + 0x1cdc3, + 0x1cdc6, + 0x1cdc3, + 0x18d2c3, + 0x16d208, + 0x99c2, + 0x49944, + 0x887c3, + 0x173605, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2030c3, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x294483, + 0x25ed03, + 0x2030c3, + 0x25ef44, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2082c3, + 0x2a84c3, + 0x232403, + 0x218343, + 0x2163c3, + 0x2e9dc3, + 0x3b1384, + 0x353903, + 0x227f83, + 0x209703, + 0x205503, + 0x200983, + 0x201303, + 0x311dc3, + 0x1dea84c3, + 0x232403, + 0x246383, + 0x2e9dc3, + 0x20a203, + 0x227f83, + 0x200983, + 0x2072c3, + 0x33bac4, + 0x16d208, + 0x1e6a84c3, + 0x232403, + 0x2a6443, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x21db03, + 0x16d208, + 0x1eea84c3, + 0x232403, + 0x2163c3, + 0x204e83, + 0x200983, + 0x16d208, + 0x1594ac7, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x211cc4, + 0x205503, + 0x200983, + 0x129845, + 0x16fc07, + 0xd95cb, + 0xd69c4, + 0xb3c45, + 0x1456108, + 0xa6a8d, + 0x20284a05, + 0x18004, + 0x169c3, + 0x186345, + 0x349a05, + 0x16d208, + 0x1cdc2, + 0x336c3, + 0xf1446, + 0x319ec8, + 0x313bc7, + 0x25ef44, + 0x3b2c86, + 0x3bb6c6, + 0x16d208, + 0x30ce43, + 0x33e589, + 0x237295, + 0x3729f, + 0x2a84c3, + 0x31d012, + 0xefac6, + 0x10a045, + 0x26e8a, + 0x56b49, + 0x31cdcf, 0x2d5f04, - 0x27af06, - 0x301947, - 0x397d8a, - 0x246c47, - 0x239747, - 0x280385, - 0x2043c5, - 0x2181c9, - 0x2c0406, - 0x246acd, - 0x359c85, - 0x302803, - 0x2102c3, - 0x30c385, - 0x352545, - 0x327b88, - 0x27de47, - 0x22c506, - 0x29cf86, - 0x229cc5, - 0x231dc7, - 0x207b47, - 0x203087, - 0x2c974a, - 0x24e448, - 0x333b84, - 0x383fc7, - 0x27f347, - 0x332046, - 0x269307, - 0x2b2288, - 0x361d08, - 0x26fd46, - 0x3450c8, - 0x239544, - 0x33f686, - 0x39aa46, - 0x375986, - 0x30cd86, - 0x22ef84, - 0x327e86, - 0x2b8c06, - 0x295b86, - 0x233bc6, - 0x210186, - 0x2aeec6, - 0x22c408, - 0x320508, - 
0x2ca248, - 0x266748, - 0x385b06, - 0x213585, - 0x27cb06, - 0x2ac645, - 0x38a187, - 0x231605, - 0x215a43, - 0x207645, - 0x22cd44, - 0x2102c5, - 0x21d8c3, - 0x2fd4c7, - 0x319c88, - 0x3051c6, - 0x2d600d, - 0x27c546, - 0x295045, - 0x2bc283, - 0x2b50c9, - 0x2bf646, - 0x295646, - 0x29ec04, - 0x246347, - 0x233246, - 0x384205, - 0x233b83, - 0x203f04, - 0x27f506, - 0x2b0944, - 0x30eb08, - 0x396f89, - 0x32a849, - 0x29ea0a, - 0x310d8d, - 0x32fe07, - 0x380a86, - 0x2124c4, - 0x215009, - 0x2877c8, - 0x288cc6, - 0x267d06, - 0x269307, - 0x2c2806, - 0x225546, - 0x3a3606, - 0x313d0a, - 0x21f7c8, - 0x33aa05, - 0x282d09, - 0x283cca, - 0x2d6388, - 0x29abc8, - 0x2955c8, - 0x207f4c, - 0x2e8945, - 0x29d208, - 0x30a1c6, - 0x2d5646, - 0x379787, - 0x246b45, - 0x285785, - 0x32a709, - 0x214c87, - 0x2b2745, - 0x229f87, - 0x2102c3, - 0x2beb45, - 0x3aa1c8, - 0x2d4047, - 0x29aa89, - 0x2d9545, - 0x3074c4, - 0x2a0288, - 0x20e747, - 0x2a1908, - 0x34cdc8, - 0x35e9c5, - 0x23c806, - 0x252586, - 0x2e5e89, - 0x313687, - 0x2aca46, - 0x20b247, - 0x215403, - 0x258804, - 0x29c8c5, - 0x2589c4, - 0x360104, - 0x283587, - 0x209507, - 0x22f744, - 0x29a8d0, - 0x326fc7, - 0x2043c5, - 0x2e95cc, - 0x2b6084, - 0x2c6588, - 0x23e409, - 0x302086, - 0x33f488, - 0x240544, - 0x240548, - 0x384946, - 0x32d148, - 0x29c5c6, - 0x2c84cb, - 0x204a85, - 0x2c4308, - 0x21a084, - 0x284c8a, - 0x29aa89, - 0x2e0286, - 0x2d8b48, - 0x257a45, - 0x2fdac4, - 0x2c6486, - 0x202f48, - 0x283308, - 0x349546, - 0x37ff84, - 0x30b1c6, - 0x383c87, - 0x27bb47, - 0x26930f, - 0x208607, - 0x2f3ac7, - 0x2d5505, - 0x2efcc5, - 0x29f009, - 0x272346, - 0x281f05, - 0x285307, - 0x2d8e08, - 0x295c85, - 0x297706, - 0x21a3c8, - 0x2067ca, - 0x215448, - 0x3acd07, - 0x2371c6, - 0x282cc6, - 0x20e003, - 0x20fa43, - 0x283e89, - 0x28db09, - 0x2c6386, - 0x2d9545, - 0x2a7008, - 0x2d8b48, - 0x2ba5c8, - 0x3a368b, - 0x2d6247, - 0x2ff489, - 0x269588, - 0x33c444, - 0x2c48c8, - 0x28c489, - 0x2acd45, - 0x2d2787, - 0x2f7805, - 0x283208, - 0x28ef0b, - 0x293a90, - 0x2a6c45, - 0x219fcc, - 0x22c6c5, - 0x207203, - 0x2a7d46, - 0x2b7204, - 0x3397c6, - 0x29b587, - 0x215444, - 0x2422c8, - 0x322bcd, - 0x2d8a05, - 0x298f84, - 0x218404, - 0x282789, - 0x2a4e08, - 0x30bdc7, - 0x3849c8, - 0x284188, - 0x27d705, - 0x342607, - 0x27d687, - 0x2e2f87, - 0x2bdb49, - 0x2330c9, - 0x23fc46, - 0x2b3246, - 0x269546, - 0x26c105, - 0x3b0044, - 0x201b06, - 0x203c86, - 0x27d748, - 0x263b8b, - 0x26b987, - 0x2124c4, - 0x315c46, - 0x207047, - 0x2aeac5, - 0x316dc5, - 0x20f484, - 0x233046, - 0x201b88, - 0x215009, - 0x248446, - 0x287148, - 0x3842c6, - 0x332908, - 0x3ae14c, - 0x27d5c6, - 0x294d0d, - 0x29518b, - 0x248605, - 0x207c87, - 0x2395c6, - 0x330988, - 0x23fcc9, - 0x2e5ac8, - 0x2043c5, - 0x2f0807, - 0x281908, - 0x366909, - 0x23c0c6, - 0x24834a, - 0x330708, - 0x2e590b, - 0x2c6dcc, - 0x240648, - 0x27ec46, - 0x342008, - 0x208747, - 0x233349, - 0x29148d, - 0x29ba46, - 0x3a6cc8, - 0x3203c9, - 0x2b5ac8, - 0x286248, - 0x2b94cc, - 0x2ba7c7, - 0x2bb3c7, - 0x2bd985, - 0x2ee287, - 0x2d8cc8, - 0x2c6506, - 0x256acc, - 0x2e6808, - 0x2c5808, - 0x266a06, - 0x20df47, - 0x23fe44, - 0x266748, - 0x2dc68c, - 0x21c68c, - 0x225e85, - 0x393e07, - 0x37ff06, - 0x20dec6, - 0x29b048, - 0x3a4984, - 0x32c64b, - 0x2263cb, - 0x2371c6, - 0x322a47, - 0x328b05, - 0x273145, - 0x32c786, - 0x257a05, - 0x26b505, - 0x379a47, - 0x27b509, - 0x2344c4, - 0x3621c5, - 0x2d71c5, - 0x25b748, - 0x376005, - 0x2a7849, - 0x370587, - 0x37058b, - 0x2d1546, - 0x22c149, - 0x24f948, - 0x280d45, - 0x2e3088, - 0x233108, - 0x211807, - 0x282b87, - 0x283609, - 0x320447, - 0x38cf09, - 0x2aa34c, - 
0x36aa08, - 0x2b5ec9, - 0x2b8247, - 0x284249, - 0x209647, - 0x2c6ec8, - 0x25ba85, - 0x33f606, - 0x2b9cc8, - 0x2d6a88, - 0x283b89, - 0x26b547, - 0x273205, - 0x216c49, - 0x28e046, - 0x28e7c4, - 0x2e5786, - 0x2450c8, - 0x248e07, - 0x263d88, - 0x345189, - 0x364f87, - 0x29c346, - 0x207d44, - 0x2076c9, - 0x342488, - 0x2668c7, - 0x323e06, - 0x20e286, - 0x380b84, - 0x326186, - 0x210243, - 0x2cf889, - 0x204a46, - 0x2a4485, - 0x29cf86, - 0x2a1a45, - 0x281d88, - 0x240387, - 0x35b546, - 0x3414c6, - 0x35e248, - 0x29f187, - 0x29ba85, - 0x29d488, - 0x38cc48, - 0x330708, - 0x22c585, - 0x33f686, - 0x32a609, - 0x252404, - 0x373acb, - 0x22524b, - 0x33a909, - 0x2102c3, - 0x2546c5, - 0x20ff46, - 0x267608, - 0x236d04, - 0x3051c6, - 0x2c9889, - 0x2c6845, - 0x379986, - 0x20e746, - 0x211184, - 0x2a064a, - 0x2a43c8, - 0x2d6a86, - 0x329185, - 0x20d707, - 0x378e07, - 0x23c804, - 0x225487, - 0x2315c4, - 0x2315c6, - 0x21a503, - 0x2bdb45, - 0x36fb05, - 0x20eac8, - 0x258905, - 0x27d309, - 0x266587, - 0x26658b, - 0x2a12cc, - 0x2a1eca, - 0x2bf307, - 0x202003, - 0x2e6408, - 0x22c745, - 0x295d05, - 0x338404, - 0x2c6dc6, - 0x23e406, - 0x3261c7, - 0x39998b, - 0x22ef84, - 0x382744, - 0x26fec4, - 0x2c4046, - 0x215444, - 0x211008, - 0x338205, - 0x29d545, - 0x2ba507, - 0x207d89, - 0x352545, - 0x31abca, - 0x2d8f49, - 0x2a104a, - 0x313e49, - 0x39cc04, - 0x2cd385, - 0x2c2908, - 0x2fc70b, - 0x2fb3c5, - 0x237546, - 0x214904, - 0x27d846, - 0x364e09, - 0x315d07, - 0x30c108, - 0x311106, - 0x383c07, - 0x283308, - 0x38fbc6, - 0x244a44, - 0x3636c7, - 0x34a905, - 0x350a47, - 0x201c04, - 0x239546, - 0x21fa48, - 0x295348, - 0x2ee007, - 0x378288, - 0x2b34c5, - 0x210104, - 0x3410c8, - 0x3321c4, - 0x211805, - 0x2efe04, - 0x2e4a47, - 0x289dc7, - 0x284388, - 0x2a1a86, - 0x258885, - 0x27d108, - 0x215648, - 0x29e949, - 0x225546, - 0x22fa08, - 0x284b0a, - 0x2aeb48, - 0x2d8045, - 0x27cd06, - 0x2740c8, - 0x2f08ca, - 0x24fb47, - 0x287bc5, - 0x294288, - 0x2ad9c4, - 0x384446, - 0x2bbb48, - 0x210186, - 0x264308, - 0x252247, - 0x205b86, - 0x37ac04, - 0x37e907, - 0x2fd904, - 0x364dc7, - 0x33924d, - 0x288b05, - 0x2d3e4b, - 0x29c6c6, - 0x24d788, - 0x242284, - 0x278906, - 0x27f506, - 0x342347, - 0x2949cd, - 0x2ab007, - 0x302748, - 0x24c9c5, - 0x288248, - 0x2be186, - 0x2b3548, - 0x217086, - 0x3448c7, - 0x345349, - 0x33ff07, - 0x288f88, - 0x276005, - 0x21f108, - 0x20de05, - 0x242b45, - 0x35a545, - 0x226103, - 0x285684, - 0x282d05, - 0x271249, - 0x2ffa06, - 0x2b2388, - 0x24bc05, - 0x32e087, - 0x24ba0a, - 0x3798c9, - 0x2a830a, - 0x2ca2c8, - 0x229dcc, - 0x28538d, - 0x334603, - 0x264208, - 0x203ec5, - 0x206586, - 0x382346, - 0x2d7b45, - 0x20b349, - 0x264745, - 0x27d108, - 0x255ac6, - 0x33cac6, - 0x2a0149, - 0x38f3c7, - 0x28f1c6, - 0x24b988, - 0x375888, - 0x2d0547, - 0x32d2ce, - 0x2be3c5, - 0x366805, - 0x210088, - 0x3978c7, - 0x20ca82, - 0x2b9044, - 0x3396ca, - 0x266988, - 0x207146, - 0x296bc8, - 0x252586, - 0x323708, - 0x2aca48, - 0x242b04, - 0x333805, - 0x687e44, - 0x687e44, - 0x687e44, - 0x204b03, - 0x20e106, - 0x27d5c6, - 0x29bd0c, - 0x205243, - 0x283cc6, - 0x21a4c4, - 0x2bf5c8, - 0x2c96c5, - 0x3397c6, - 0x2b5808, - 0x2cb1c6, - 0x35b4c6, + 0x20b145, + 0x2fa150, + 0x3b0887, + 0x204e83, + 0x28b148, + 0x125bc6, + 0x2ae1ca, + 0x256044, + 0x2ec883, + 0x264a86, + 0x20b0c2, + 0x22d54b, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x2f1743, + 0x2099c2, + 0x2cd83, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x206343, + 0x221f03, + 0x200983, + 0x2099c2, + 0x2a84c3, + 
0x232403, + 0x205503, + 0x200983, + 0x205702, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x9885, + 0x25ef44, + 0x2a84c3, + 0x232403, + 0x210444, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x2143c3, + 0x209703, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x391683, + 0x63643, + 0x6343, + 0x205503, + 0x200983, + 0x30d44a, + 0x32b0c9, + 0x346b0b, + 0x34708a, + 0x34d94a, + 0x35d74b, + 0x371e0a, + 0x37814a, + 0x37fc4a, + 0x37fecb, + 0x39f689, + 0x3a140a, + 0x3a178b, + 0x3acfcb, + 0x3b9eca, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x209703, + 0x205503, + 0x200983, + 0x4589, + 0x16d208, + 0x2a84c3, + 0x25cb44, + 0x207ac2, + 0x211cc4, + 0x26fc45, + 0x2030c3, + 0x25ef44, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x249944, + 0x2d5f04, + 0x3b1384, + 0x227f83, + 0x205503, + 0x200983, + 0x27a305, + 0x2082c3, + 0x201303, + 0x22ed03, + 0x250cc4, + 0x390fc4, + 0x34ae45, + 0x16d208, + 0x302044, + 0x3510c6, + 0x276384, + 0x2099c2, + 0x371007, + 0x24c0c7, + 0x247784, + 0x2555c5, + 0x302e85, + 0x2a9305, + 0x3b1384, + 0x3b8ac8, + 0x239486, + 0x30c188, + 0x24ed05, + 0x2da905, + 0x236b84, + 0x200983, + 0x2ed844, + 0x35c946, + 0x264b83, + 0x250cc4, + 0x256005, + 0x32d104, + 0x334944, + 0x20b0c2, + 0x2425c6, + 0x3962c6, + 0x2fdc05, + 0x205702, + 0x38d2c3, + 0x262099c2, + 0x2333c4, + 0x20d882, + 0x209703, + 0x202c82, + 0x205503, + 0x200442, + 0x214843, + 0x25ed03, + 0x16d208, + 0x16d208, + 0x2e9dc3, + 0x205702, + 0x26e099c2, + 0x2e9dc3, + 0x245b43, + 0x353903, + 0x327344, + 0x205503, + 0x200983, + 0x16d208, + 0x205702, + 0x276099c2, + 0x2a84c3, + 0x205503, + 0x200983, + 0x482, + 0x20a9c2, + 0x212982, + 0x206343, + 0x2e87c3, + 0x205702, + 0x129845, + 0x16d208, + 0x16fc07, + 0x2099c2, + 0x232403, + 0x249944, + 0x2032c3, + 0x2e9dc3, + 0x2143c3, + 0x209703, + 0x205503, + 0x216b03, + 0x200983, + 0x21da83, + 0x118fd3, + 0x11c954, + 0x16fc07, + 0x13b46, + 0x53b4b, + 0x1cdc6, + 0x51b87, + 0x11ab09, + 0xe6d4a, + 0x8850d, + 0x1b240c, + 0x1ada8a, + 0x192345, + 0x6708, + 0x5d306, + 0x125c86, + 0x22bb82, + 0xff14c, + 0x1a5cc7, + 0x22e51, + 0x2a84c3, + 0x568c5, + 0x77848, + 0x9e04, + 0x288347c6, + 0x17e86, + 0x8cb46, + 0x8da0a, + 0xac543, + 0x28e54b04, + 0x11aac5, + 0xde283, + 0xdc105, + 0xd104c, + 0xf04c8, + 0xb5708, + 0x9e009, + 0x134b08, + 0x141e046, + 0xda40a, + 0x82b48, + 0xf4648, + 0xff384, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x205702, + 0x2099c2, + 0x2e9dc3, + 0x202bc2, + 0x205503, + 0x200983, + 0x214843, + 0x3653cf, + 0x36578e, + 0x16d208, + 0x2a84c3, + 0x42f87, + 0x232403, + 0x2e9dc3, + 0x244183, + 0x205503, + 0x200983, + 0x201bc3, + 0x201bc7, + 0x200142, + 0x32c249, + 0x200242, + 0x23f88b, + 0x297b8a, + 0x2a2a49, + 0x200882, + 0x391206, + 0x34ed15, + 0x23f9d5, + 0x246993, + 0x23ff53, + 0x202a82, + 0x205ac5, + 0x3b364c, + 0x27160b, + 0x2726c5, + 0x201702, + 0x284202, + 0x386fc6, + 0x200ec2, + 0x3695c6, + 0x2d4c4d, + 0x27ef4c, + 0x224dc4, + 0x203dc2, + 0x205942, + 0x2248c8, + 0x202a42, + 0x312fc6, + 0x2ba844, + 0x34eed5, + 0x246b13, + 0x210783, + 0x32fa0a, + 0x3bb147, + 0x3094c9, + 0x37b887, + 0x30f242, + 0x200002, + 0x3aef06, + 0x20cb42, + 0x16d208, + 0x2105c2, + 0x20b382, + 0x274e87, + 0x20f687, + 0x21b585, + 0x201c02, + 0x21da47, + 0x21dc08, + 0x242b42, + 0x2bf3c2, + 0x22e802, + 0x201ec2, + 0x237b88, + 0x201ec3, + 0x2b5308, + 0x2cf1cd, + 0x213c03, 0x327988, - 0x29c947, - 0x32ce49, 
- 0x306f8a, - 0x264944, - 0x231605, - 0x2a91c5, - 0x214e06, - 0x32fe46, - 0x2a7406, - 0x2eeb86, - 0x32cf84, - 0x32cf8b, - 0x2313c4, - 0x20d785, - 0x2ab905, - 0x283086, - 0x3b0588, - 0x285247, - 0x30bec4, - 0x259b83, - 0x2ad4c5, - 0x2e5647, - 0x2a2849, - 0x28514b, - 0x3261c7, - 0x20e9c7, - 0x2b5708, - 0x32e1c7, - 0x2a2a86, - 0x244448, - 0x2a584b, - 0x399606, - 0x217549, - 0x2a59c5, - 0x3015c3, - 0x379986, - 0x252148, - 0x214ec3, - 0x21cf03, - 0x283306, - 0x252586, - 0x38c60a, - 0x27ec85, - 0x27f34b, - 0x29cecb, - 0x2417c3, - 0x21fe03, - 0x2ae304, - 0x344d07, - 0x240644, - 0x207f44, - 0x30a044, - 0x2aee48, - 0x3290c8, - 0x35ad89, - 0x38e788, - 0x271407, - 0x233bc6, - 0x2b1fcf, - 0x2be506, - 0x2c9644, - 0x328f0a, - 0x2e5547, - 0x201846, - 0x28e809, - 0x35ad05, - 0x20ec05, - 0x35ae46, - 0x21f243, - 0x2ada09, - 0x21f946, - 0x344f49, - 0x397d86, - 0x2bdb45, - 0x226285, - 0x208603, - 0x344e48, - 0x3a5ac7, - 0x2f7204, - 0x2bf448, - 0x2c0184, - 0x2c5686, - 0x2a7d46, - 0x23ec06, - 0x2c41c9, - 0x295c85, - 0x2c0406, - 0x2697c9, - 0x3ad4c6, - 0x2aeec6, - 0x3895c6, - 0x215105, - 0x2efe06, - 0x3448c4, - 0x25ba85, - 0x2b9cc4, - 0x309f46, - 0x359c44, - 0x202c43, - 0x287885, - 0x232e08, - 0x22b547, - 0x2b3dc9, - 0x287ac8, - 0x296391, - 0x20e7ca, - 0x237107, - 0x2bc046, - 0x21a4c4, - 0x2b9dc8, - 0x22f488, - 0x29654a, - 0x2a760d, - 0x237446, - 0x327a86, - 0x37e9c6, - 0x2ef0c7, - 0x302805, - 0x261107, - 0x2bf505, - 0x3706c4, - 0x2a5286, - 0x326007, - 0x2ad70d, - 0x274007, - 0x270188, - 0x27d409, - 0x27cc06, - 0x23c045, - 0x21d904, - 0x2451c6, - 0x23c706, - 0x266b06, - 0x299108, - 0x215d03, - 0x210603, - 0x323ac5, - 0x376b86, - 0x2aca05, - 0x311308, - 0x29b74a, - 0x2ce804, - 0x2bf5c8, - 0x2955c8, - 0x282ec7, - 0x24bcc9, - 0x2b5408, - 0x215087, - 0x269ec6, - 0x21018a, - 0x245248, - 0x2c6c09, - 0x2a4ec8, - 0x221649, - 0x2e5bc7, - 0x349945, - 0x361f86, - 0x2c6388, - 0x24d908, - 0x28eb48, - 0x21acc8, - 0x20d785, - 0x200884, - 0x3a57c8, - 0x201944, - 0x313c44, - 0x2bdb45, - 0x290487, - 0x207b49, - 0x342147, - 0x2051c5, - 0x27b106, - 0x33f0c6, - 0x206944, - 0x2a0486, - 0x383f44, - 0x288146, - 0x3a4a46, - 0x219206, - 0x2043c5, - 0x3111c7, - 0x202003, - 0x3670c9, - 0x35e048, - 0x214f04, - 0x214f0d, - 0x295448, - 0x2f3f48, - 0x2c6b86, - 0x345449, - 0x3798c9, - 0x364b05, - 0x29b84a, - 0x28224a, - 0x289f8c, - 0x28a106, - 0x27b9c6, - 0x2bea86, - 0x26db09, - 0x2067c6, - 0x261146, - 0x264806, - 0x266748, - 0x215446, - 0x2c450b, - 0x290605, - 0x29d545, - 0x27bc45, - 0x202106, - 0x210143, - 0x23eb86, - 0x273f87, - 0x2b9c85, - 0x243645, - 0x3a0505, - 0x335f46, - 0x31abc4, - 0x372a46, - 0x299489, - 0x201f8c, - 0x370408, - 0x202ec4, - 0x2efbc6, - 0x29c7c6, - 0x252148, - 0x2d8b48, - 0x201e89, - 0x20d707, - 0x2563c9, - 0x24cf86, - 0x22cf44, - 0x20f184, - 0x288bc4, - 0x283308, - 0x20798a, - 0x3524c6, - 0x356387, - 0x236087, - 0x22c245, - 0x2a9184, + 0x239f8f, + 0x23a34e, + 0x25edca, + 0x229751, + 0x229bd0, + 0x2bcdcd, + 0x2bd10c, + 0x311c47, + 0x32fb87, + 0x3b2d49, + 0x224ec2, + 0x206c02, + 0x25340c, + 0x25370b, + 0x204142, + 0x2ab046, + 0x21a1c2, + 0x209882, + 0x21b102, + 0x2099c2, + 0x383a84, + 0x238bc7, + 0x204682, + 0x23d147, + 0x23e487, + 0x20e142, + 0x2301c2, + 0x242e45, + 0x205742, + 0x362e0e, + 0x2ebb8d, + 0x232403, + 0x2be90e, + 0x2e064d, + 0x37eac3, + 0x200e02, + 0x21fec4, + 0x2454c2, + 0x2175c2, + 0x358e45, + 0x364b47, + 0x383382, + 0x218342, + 0x249547, + 0x24d288, + 0x248902, + 0x2aeac6, + 0x25328c, + 0x2535cb, + 0x20fc82, + 0x25924f, + 0x259610, + 0x259a0f, + 0x259dd5, + 0x25a314, + 0x25a80e, + 0x25ab8e, + 0x25af0f, + 
0x25b2ce, + 0x25b654, + 0x25bb53, + 0x25c00d, + 0x272a89, + 0x2895c3, + 0x200782, + 0x22b0c5, + 0x207f86, + 0x20d882, + 0x21f507, + 0x2e9dc3, + 0x205e82, + 0x362a08, + 0x229991, + 0x229dd0, + 0x206482, + 0x288d87, + 0x203942, + 0x214607, + 0x20be02, + 0x319cc9, + 0x386f87, + 0x27aac8, + 0x234606, + 0x2e86c3, + 0x32a105, + 0x232682, + 0x202082, + 0x3af305, + 0x380685, + 0x2040c2, + 0x24c543, + 0x32d187, + 0x223787, + 0x200502, + 0x254684, + 0x223b83, + 0x223b89, + 0x22c548, + 0x200282, + 0x204bc2, + 0x3105c7, + 0x31ff05, + 0x2a5348, + 0x219947, + 0x200e83, 0x28c446, - 0x302846, - 0x233303, - 0x35de87, - 0x34ccc8, - 0x364c4a, - 0x2cbb88, - 0x2ce688, - 0x359c85, - 0x248705, - 0x26ba85, - 0x22c606, - 0x37ed86, - 0x209445, - 0x2cfac9, - 0x2a8f8c, - 0x26bb47, - 0x2965c8, - 0x257d45, - 0x687e44, - 0x247884, - 0x2d4184, - 0x2c1606, - 0x29da4e, - 0x20ec87, - 0x2edfc5, - 0x25238c, - 0x2c0047, - 0x325f87, - 0x35bf49, - 0x21b949, - 0x287bc5, - 0x35e048, - 0x32a609, - 0x2f2ec5, - 0x2b9bc8, - 0x2c4ac6, - 0x341346, - 0x331b84, - 0x2a46c8, - 0x249e83, - 0x342c84, - 0x2ad545, - 0x335407, - 0x20f4c5, - 0x2849c9, - 0x28d60d, - 0x2a6246, - 0x32c004, - 0x211188, - 0x27b34a, - 0x20cb87, - 0x239d85, - 0x206d03, - 0x29d08e, - 0x25258c, - 0x2fb707, - 0x29dc07, - 0x201c43, - 0x206805, - 0x2d4185, - 0x296f88, - 0x2940c9, - 0x202dc6, - 0x240644, - 0x237046, - 0x37564b, - 0x3a0b8c, - 0x341ec7, - 0x2c9385, - 0x38cb48, - 0x2d0305, - 0x328f07, - 0x23bf07, - 0x249e85, - 0x210143, - 0x2af184, - 0x20f945, - 0x2ad0c5, - 0x2ad0c6, - 0x2927c8, - 0x326007, - 0x382646, - 0x206486, - 0x35a486, - 0x263a09, - 0x342707, - 0x202c46, - 0x3a0d06, - 0x249f46, - 0x2a6f45, - 0x20aa06, - 0x399385, - 0x376088, - 0x2936cb, - 0x28c246, - 0x2360c4, - 0x2f0689, - 0x266584, - 0x2c4a48, - 0x2961c7, - 0x286144, - 0x2b4708, - 0x2bad84, - 0x2a6f84, - 0x2889c5, - 0x2d8a46, - 0x2aed87, - 0x2643c3, - 0x29c405, - 0x2f7784, - 0x366846, - 0x364b88, - 0x327885, - 0x28ff09, - 0x216e45, - 0x2e2b48, - 0x263747, - 0x38a2c8, - 0x2b3c07, - 0x2f3b89, - 0x327d06, - 0x36bd06, - 0x264804, - 0x269e05, - 0x2f5e4c, - 0x27bc47, - 0x27c447, - 0x235f48, - 0x2a6246, - 0x273ec4, - 0x2ead84, - 0x283489, - 0x2beb86, - 0x218247, - 0x30cd04, - 0x2ffb06, - 0x325b85, - 0x2a15c7, - 0x2c4486, - 0x248209, - 0x282087, - 0x269307, - 0x29ffc6, - 0x310c85, - 0x280a08, - 0x21f7c8, - 0x23d906, - 0x3278c5, - 0x261c86, - 0x205d03, - 0x296e09, - 0x2a718e, - 0x2b2a08, - 0x2c0288, - 0x23d70b, - 0x290146, - 0x321ac4, - 0x284f84, - 0x2a728a, - 0x219ec7, - 0x202d05, - 0x217549, - 0x2b8cc5, - 0x313c87, - 0x301e84, - 0x397107, - 0x2372c8, - 0x2d1bc6, - 0x3a6e49, - 0x2b550a, - 0x219e46, - 0x294f86, + 0x2bcc4d, + 0x2bcfcc, + 0x2b45c6, + 0x208d02, + 0x2a8542, + 0x202342, + 0x239e0f, + 0x23a20e, + 0x302f07, + 0x203d02, + 0x2bf745, + 0x2bf746, + 0x20f242, + 0x20ec42, + 0x221f06, + 0x214543, + 0x214546, + 0x2c6985, + 0x2c698d, + 0x2c6f55, + 0x2c814c, + 0x2c95cd, + 0x2c9992, + 0x20e602, + 0x2675c2, + 0x202d02, + 0x240806, + 0x2f7f86, + 0x2033c2, + 0x208006, + 0x2023c2, + 0x38b785, + 0x200542, + 0x2ebc89, + 0x31554c, + 0x31588b, + 0x200442, + 0x24e748, + 0x203b02, + 0x2056c2, + 0x26a346, + 0x222445, + 0x226747, + 0x257d85, + 0x29e405, + 0x243002, + 0x2067c2, + 0x2013c2, + 0x2df507, + 0x380c0d, + 0x380f8c, + 0x22f087, + 0x20f982, + 0x2069c2, + 0x241248, + 0x31e488, + 0x2e3988, + 0x308484, + 0x2ab407, + 0x2e90c3, + 0x228ec2, + 0x2082c2, + 0x2eb3c9, + 0x3a40c7, + 0x201302, + 0x26a745, + 0x22d4c2, + 0x21aa02, + 0x2f9f03, + 0x2f9f06, + 0x2f1742, + 0x2f23c2, + 0x201a42, + 0x202f86, + 0x21fe07, + 0x213bc2, + 
0x205ec2, + 0x2b514f, + 0x2be74d, + 0x3872ce, + 0x2e04cc, + 0x2009c2, + 0x207302, + 0x234445, + 0x30ba46, + 0x2018c2, + 0x202482, + 0x200482, + 0x2198c4, + 0x2cf044, + 0x2d0e86, + 0x203082, + 0x36cac7, + 0x203083, + 0x285d48, + 0x34e488, + 0x239887, + 0x240706, + 0x203902, + 0x234b03, + 0x234b07, + 0x273946, + 0x2dee45, + 0x308808, + 0x200d02, + 0x331207, + 0x222702, + 0x361782, + 0x20cfc2, + 0x2c6749, + 0x230982, + 0x200842, + 0x22f303, + 0x331c87, + 0x2002c2, + 0x3156cc, + 0x3159cb, + 0x2b4646, + 0x2de1c5, + 0x221c82, + 0x203b42, + 0x2b7bc6, + 0x260dc3, + 0x38c187, + 0x236102, + 0x201442, + 0x34eb95, + 0x23fb95, + 0x246853, + 0x2400d3, + 0x2585c7, + 0x271a48, + 0x271a50, + 0x28d2cf, + 0x297953, + 0x2a2812, + 0x32be10, + 0x2d544f, + 0x35f7d2, + 0x30c3d1, + 0x2b7613, + 0x2c6512, + 0x2cff4f, + 0x2d2e8e, + 0x2d3f52, + 0x2d71d1, + 0x2d7c8f, + 0x30440e, + 0x2f0691, + 0x2f17d0, + 0x2f2752, + 0x2fc711, + 0x364586, + 0x36d3c7, + 0x372187, + 0x203142, + 0x27d8c5, + 0x3933c7, + 0x212982, + 0x209942, + 0x228a85, + 0x21e743, + 0x34b0c6, + 0x380dcd, + 0x38110c, + 0x201682, + 0x3b34cb, + 0x2714ca, + 0x20598a, + 0x2b6449, + 0x2ea64b, + 0x219a8d, + 0x2fa5cc, + 0x25180a, + 0x22090c, + 0x26908b, + 0x27250c, + 0x29474b, + 0x3154c3, + 0x36cfc6, + 0x3a98c2, + 0x2f4542, + 0x20a743, + 0x208602, + 0x21fe83, + 0x2366c6, + 0x259f87, + 0x2c7fc6, + 0x39e4c8, + 0x31e188, + 0x2ce146, + 0x201f82, + 0x2fd5cd, + 0x2fd90c, + 0x2d5fc7, + 0x301f07, + 0x213b82, + 0x201502, + 0x234a82, + 0x24d642, + 0x2099c2, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x214843, + 0x205702, + 0x2021c2, + 0x2ae8fdc5, + 0x2b247e45, + 0x2b717806, + 0x16d208, + 0x2baaee05, + 0x2099c2, + 0x2006c2, + 0x2bfb3ac5, + 0x2c27bdc5, + 0x2c67c9c7, + 0x2ca86a09, + 0x2ce3bc44, + 0x20d882, + 0x205e82, + 0x2d24b5c5, + 0x2d68f849, + 0x2db1db88, + 0x2deab805, + 0x2e300187, + 0x2e61ed48, + 0x2eae5d85, + 0x2ee00106, + 0x2f337809, + 0x2f6b5a48, + 0x2fac0488, + 0x2fe9704a, + 0x302732c4, + 0x306d13c5, + 0x30abc9c8, + 0x30e03a85, + 0x20cec2, + 0x31248a43, + 0x316a1686, + 0x31b60148, + 0x31eb94c6, + 0x32281f08, + 0x32719606, + 0x32adef04, + 0x200c82, + 0x32f2cb87, + 0x332a75c4, + 0x336756c7, + 0x33ba2987, + 0x200442, + 0x33e9b0c5, + 0x34334f84, + 0x346cd907, + 0x34a5f187, + 0x34e80886, + 0x3527c585, + 0x356959c7, + 0x35ad0b48, + 0x35e2b447, + 0x363164c9, + 0x36793105, + 0x36b31dc7, + 0x36e8f546, + 0x37391408, + 0x2273cd, + 0x279909, + 0x28174b, + 0x2a4b0b, + 0x34058b, + 0x2ffe8b, + 0x30bc4b, + 0x30bf0b, + 0x30c809, + 0x30d6cb, + 0x30d98b, + 0x30e48b, + 0x30f5ca, + 0x30fb0a, + 0x31010c, + 0x314d8b, + 0x31670a, + 0x32904a, + 0x33404e, + 0x33568e, + 0x335a0a, + 0x33808a, + 0x338dcb, + 0x33908b, + 0x339e8b, + 0x354ecb, + 0x3554ca, + 0x35618b, + 0x35644a, + 0x3566ca, + 0x35694a, + 0x372b0b, + 0x37914b, + 0x37c74e, + 0x37cacb, + 0x38454b, + 0x385acb, + 0x38900a, + 0x389289, + 0x3894ca, + 0x38a94a, + 0x3a00cb, + 0x3a1a4b, + 0x3a22ca, + 0x3a48cb, + 0x3a8c4b, + 0x3b990b, + 0x3767e648, + 0x37a87c89, + 0x37e9de89, + 0x382dacc8, + 0x342505, + 0x217083, + 0x21c6c4, + 0x220005, + 0x23b986, + 0x25da05, + 0x2864c4, + 0x21f408, + 0x308005, + 0x291784, + 0x203447, + 0x29cf8a, + 0x3712ca, + 0x338547, + 0x3af9c7, + 0x2f8f07, + 0x264e87, + 0x2f60c5, + 0x33bb86, + 0x2bb847, + 0x2b4904, + 0x2e4646, + 0x2e4546, + 0x3b9585, + 0x26d1c4, + 0x3519c6, + 0x29bf47, + 0x285746, + 0x2e3247, + 0x25e443, + 0x2b1c06, + 0x2328c5, + 0x27cac7, + 0x2641ca, + 0x260e44, + 0x217c08, + 0x2abd89, + 0x2cd247, + 0x336286, + 0x24e9c8, + 0x2b9c09, + 
0x309684, + 0x366944, + 0x244245, + 0x2bb548, + 0x2c4b07, + 0x2a9709, + 0x364688, + 0x345e86, + 0x3204c6, + 0x298048, + 0x359646, + 0x247e45, + 0x280946, + 0x275ec8, + 0x24da46, + 0x2525cb, + 0x298646, + 0x29994d, + 0x3a6005, + 0x2a7486, + 0x208b45, + 0x2f9bc9, + 0x2f9a87, + 0x37a208, + 0x266986, + 0x298bc9, + 0x3793c6, + 0x264145, + 0x268686, + 0x2cae46, + 0x2cb3c9, + 0x3530c6, + 0x339487, + 0x26ad85, + 0x202ac3, + 0x252745, + 0x299c07, + 0x33c6c6, + 0x3a5f09, + 0x317806, + 0x280b86, + 0x210c49, + 0x280349, + 0x29fc07, + 0x282f88, + 0x28c989, + 0x27d548, + 0x378386, + 0x2d5805, + 0x2418ca, + 0x280c06, + 0x3b7986, + 0x2c8985, + 0x265808, + 0x223307, + 0x22f50a, + 0x249e46, + 0x279d45, + 0x37aa46, + 0x21ac47, + 0x336147, + 0x21bbc5, + 0x264305, + 0x357dc6, + 0x2ac5c6, + 0x34dc06, + 0x2b3204, + 0x27f689, + 0x288b46, + 0x2dd38a, + 0x21b388, + 0x3078c8, + 0x3712ca, + 0x20b445, + 0x29be85, + 0x350b88, + 0x2b2c88, + 0x27b5c7, + 0x258946, + 0x322388, + 0x2fdec7, + 0x27dc48, + 0x2b3846, + 0x281408, + 0x294f06, + 0x24ee87, + 0x299ec6, + 0x3519c6, + 0x3778ca, + 0x2bd8c6, + 0x2d5809, + 0x26dbc6, + 0x2af14a, + 0x2def09, + 0x2fb486, + 0x2b4b04, + 0x22b18d, + 0x287f07, + 0x326cc6, + 0x2c0345, + 0x379445, + 0x374246, + 0x2cd749, + 0x2b1647, + 0x277306, + 0x2cc246, + 0x286549, + 0x247d84, + 0x3482c4, + 0x352cc8, + 0x236a86, + 0x26a808, + 0x2e41c8, + 0x312747, + 0x3b7549, + 0x34de07, + 0x2aecca, + 0x2e1f8f, + 0x23188a, + 0x234245, + 0x276105, + 0x216e85, + 0x2ba787, + 0x21a803, + 0x283188, + 0x396786, + 0x396889, + 0x2b87c6, + 0x3b5207, + 0x298989, + 0x37a108, + 0x2c8a47, + 0x30a343, + 0x342585, + 0x21a785, + 0x2b304b, + 0x203b44, + 0x2c2084, + 0x274646, + 0x30abc7, + 0x382bca, + 0x248ac7, + 0x311e87, + 0x27bdc5, + 0x200645, + 0x2eef89, + 0x3519c6, + 0x24894d, + 0x353305, + 0x2b1383, + 0x205043, + 0x26f685, + 0x345c45, + 0x24e9c8, + 0x2790c7, + 0x348046, + 0x29db06, + 0x229105, + 0x2326c7, + 0x312247, + 0x239347, + 0x2d144a, + 0x2b1cc8, + 0x2b3204, + 0x24d7c7, + 0x27acc7, + 0x339306, + 0x262107, + 0x2dc4c8, + 0x2e6f08, + 0x268506, + 0x303008, + 0x2c87c4, + 0x2bb846, + 0x2353c6, + 0x33bfc6, + 0x2ba986, + 0x286004, + 0x264f46, + 0x2bf5c6, + 0x297546, + 0x247846, + 0x204f06, + 0x26e2c6, + 0x347f48, + 0x2b0748, + 0x2d1c88, + 0x25dc08, + 0x350b06, + 0x20dcc5, + 0x315ec6, 0x2ab885, - 0x37cb05, - 0x357f47, - 0x2456c8, - 0x325ac8, - 0x242b06, - 0x226305, - 0x32fbce, - 0x333b84, - 0x23d885, - 0x27aa89, - 0x272148, - 0x3acc46, - 0x298d8c, - 0x29b350, - 0x29d68f, - 0x29ef08, - 0x2bf307, - 0x2043c5, - 0x282d05, - 0x2aec09, - 0x294489, - 0x30b2c6, - 0x2fb447, - 0x393d85, - 0x36d749, - 0x3320c6, - 0x20660d, - 0x288a89, - 0x207f44, - 0x2b2788, - 0x3a5889, - 0x352686, - 0x27b205, - 0x36bd06, - 0x30bfc9, - 0x2381c8, - 0x213585, - 0x284c04, - 0x298f4b, - 0x352545, - 0x267686, - 0x2856c6, - 0x26b006, - 0x3a388b, - 0x290009, - 0x209785, - 0x38a087, - 0x20e746, - 0x339506, - 0x284888, - 0x269fc9, - 0x26ff4c, - 0x2e5448, - 0x352786, - 0x349543, - 0x2d2986, - 0x2829c5, - 0x27f688, - 0x225d06, - 0x2a1808, - 0x246cc5, - 0x215185, - 0x2a0848, - 0x378f47, - 0x382287, - 0x3261c7, - 0x33f488, - 0x28e9c8, - 0x24de06, - 0x309d87, - 0x2586c7, - 0x28288a, - 0x24ce83, - 0x202106, - 0x203005, - 0x324244, - 0x27d409, - 0x2f3b04, - 0x22b5c4, - 0x29c644, - 0x29dc0b, - 0x3a5a07, - 0x32fe05, - 0x293548, - 0x27b106, - 0x27b108, - 0x27ebc6, - 0x28adc5, - 0x28b545, - 0x28d046, - 0x28e488, - 0x28e748, - 0x27d5c6, - 0x29338f, - 0x2968d0, - 0x3a26c5, - 0x202003, - 0x24c905, - 0x2ff3c8, - 0x294389, - 0x330708, - 0x263888, - 0x380648, - 0x3a5ac7, - 
0x27adc9, - 0x2a1a08, - 0x2b0684, - 0x29c4c8, - 0x25b809, - 0x30b8c7, - 0x298144, - 0x342208, - 0x310f8a, - 0x2c3ec6, - 0x237446, - 0x225409, - 0x29b587, - 0x2c4e48, - 0x209fc8, - 0x30cb88, - 0x355ec5, - 0x37da85, - 0x29d545, - 0x2d4145, - 0x2f1807, - 0x210145, - 0x2b9c85, - 0x212ec6, - 0x330647, - 0x2fc647, - 0x311286, - 0x2ca805, - 0x267686, - 0x240405, - 0x2bfec8, - 0x2ff984, - 0x3ad546, - 0x2e7dc4, - 0x2fdac8, - 0x3ad64a, - 0x27de4c, - 0x399b85, - 0x2ef186, - 0x270106, - 0x34c5c6, - 0x2ff5c4, - 0x325e45, - 0x27ea07, - 0x29b609, - 0x2a2947, - 0x687e44, - 0x687e44, - 0x30bd45, - 0x229144, - 0x29874a, - 0x27af86, - 0x2e5884, - 0x2017c5, - 0x2eb445, - 0x302744, - 0x285307, - 0x216dc7, - 0x2c4048, - 0x317048, - 0x213589, - 0x3321c8, - 0x29890b, - 0x214e04, - 0x361905, - 0x281f85, - 0x326149, - 0x269fc9, - 0x2f0588, - 0x2313c8, - 0x283084, - 0x29c805, - 0x207503, - 0x214dc5, - 0x2c0486, - 0x293f0c, - 0x21f846, - 0x240446, - 0x2940c5, - 0x335fc8, - 0x3a0e06, - 0x2bc1c6, - 0x237446, - 0x22d6cc, - 0x266cc4, - 0x35a5ca, - 0x3ace08, - 0x293d47, - 0x244946, - 0x202e87, - 0x2e0845, - 0x323e06, - 0x354906, - 0x382147, - 0x22b604, - 0x2e4b45, - 0x27aa84, - 0x370747, - 0x27acc8, - 0x27b84a, - 0x281787, - 0x23da87, - 0x2bf287, - 0x2d0449, - 0x293f0a, - 0x22cf03, - 0x22b505, - 0x219243, - 0x30a089, - 0x2f1108, - 0x2d5507, - 0x330809, - 0x21f8c6, - 0x2b0208, - 0x2fd445, - 0x21574a, - 0x326dc9, - 0x26fc09, - 0x379787, - 0x22f589, - 0x219108, - 0x2eff86, - 0x2ef348, - 0x215e47, - 0x320447, - 0x2d8f47, - 0x2d5388, - 0x2efa46, - 0x310d45, - 0x27ea07, - 0x294a88, - 0x35a404, - 0x350f04, - 0x28f0c7, - 0x2acdc7, - 0x32a48a, - 0x2eff06, - 0x2fb58a, - 0x2b8f87, - 0x333947, - 0x242c04, - 0x38cfc4, - 0x227a06, - 0x264d04, - 0x264d0c, - 0x3b1f45, - 0x21ab09, - 0x2e2cc4, - 0x302805, - 0x27b2c8, - 0x28e805, - 0x31abc6, - 0x2115c4, - 0x2a0a8a, - 0x2b0846, - 0x29574a, - 0x31a987, - 0x263ec5, - 0x21f245, - 0x22c28a, - 0x2a0585, - 0x29ea06, - 0x201944, - 0x2ae486, - 0x358005, - 0x225dc6, - 0x2ee00c, - 0x2c4fca, - 0x269ec4, - 0x233bc6, - 0x29b587, - 0x2c8984, - 0x266748, - 0x38e606, - 0x32fa49, - 0x2c5ec9, - 0x36ab09, - 0x373c86, - 0x215f46, - 0x2ef487, - 0x2cfa08, - 0x215d49, - 0x3a5a07, - 0x2b3346, - 0x383c87, - 0x37e885, - 0x333b84, - 0x2ef047, - 0x2f7805, - 0x288905, - 0x300687, - 0x249d48, - 0x38cac6, - 0x2959cd, - 0x29718f, - 0x29cecd, - 0x205204, - 0x232f06, - 0x2cbec8, - 0x2647c5, - 0x282a48, - 0x2116ca, - 0x207f44, - 0x3a7006, - 0x39f8c7, - 0x22ef87, - 0x29ca09, - 0x2ef305, - 0x302744, - 0x33374a, - 0x2b4fc9, - 0x22f687, - 0x26cbc6, - 0x352686, - 0x29c746, - 0x363786, - 0x2cb84f, - 0x2cbd89, - 0x215446, - 0x22f386, - 0x27a409, - 0x309e87, - 0x21d943, - 0x22d846, - 0x20fa43, - 0x2d7a08, - 0x383ac7, - 0x29f109, - 0x2a7bc8, - 0x3823c8, - 0x26b686, - 0x23c549, - 0x2c7a85, - 0x244944, - 0x349a07, - 0x26db85, - 0x205204, - 0x32fec8, - 0x21a184, - 0x305647, - 0x319c06, - 0x29f785, - 0x2a4ec8, - 0x35254b, - 0x35e547, - 0x22c506, - 0x2be584, - 0x321a46, - 0x2bdb45, - 0x2f7805, - 0x280789, - 0x284f09, - 0x2a2a04, - 0x3204c5, - 0x233c05, - 0x2155c6, - 0x35e148, - 0x2b7586, - 0x34cb0b, - 0x301f0a, - 0x2fda05, - 0x28b5c6, - 0x2f6f05, - 0x209845, - 0x29ad47, - 0x2073c8, - 0x2563c4, - 0x364a06, + 0x388447, + 0x215305, + 0x2125c3, + 0x211585, + 0x344cc4, + 0x205045, + 0x203b03, + 0x33a447, + 0x354648, + 0x2e3306, + 0x2c218d, + 0x2760c6, + 0x296ac5, + 0x2b7843, + 0x2bc389, + 0x247f06, 0x28e7c6, - 0x2192c7, - 0x301584, - 0x27f506, - 0x300e05, - 0x300e09, - 0x216144, - 0x2a9309, - 0x27d5c6, - 0x2ba888, - 0x233c05, - 0x236185, - 
0x225dc6, - 0x26fe49, - 0x21b949, - 0x2404c6, - 0x272248, - 0x252488, - 0x2f6ec4, - 0x363c84, - 0x363c88, - 0x3158c8, - 0x2564c9, - 0x2c0406, - 0x237446, - 0x312ccd, - 0x3051c6, - 0x3ae009, - 0x2022c5, - 0x35ae46, - 0x261248, - 0x30fc45, - 0x258704, - 0x2bdb45, - 0x284588, - 0x298509, - 0x27ab44, - 0x239546, - 0x2e5d0a, - 0x2d6388, - 0x32a609, - 0x35b64a, - 0x330786, - 0x297348, - 0x328cc5, - 0x326c48, - 0x2b3d05, - 0x21f789, - 0x368809, - 0x202e02, - 0x2a59c5, - 0x272e86, - 0x27d507, - 0x324245, - 0x2f9986, - 0x30fd08, - 0x2a6246, - 0x2c27c9, - 0x27c546, - 0x284708, - 0x2a8645, - 0x24ae86, - 0x3449c8, - 0x283308, - 0x3a4ac8, - 0x2febc8, - 0x20aa04, - 0x22a783, - 0x2c2a04, - 0x236fc6, - 0x37e8c4, - 0x2c01c7, - 0x2bc0c9, - 0x2bdd85, - 0x209fc6, - 0x22d846, - 0x29260b, - 0x2fd946, - 0x316406, - 0x2c2f88, - 0x243586, - 0x263cc3, - 0x20a383, - 0x333b84, - 0x22f905, - 0x384107, - 0x27acc8, - 0x27accf, - 0x27e90b, - 0x35df48, - 0x2395c6, - 0x35e24e, - 0x225dc3, - 0x2b1d84, - 0x2fd8c5, - 0x33d746, - 0x28c54b, - 0x290546, - 0x21a449, - 0x29f785, - 0x38b708, - 0x212088, - 0x21b80c, - 0x29dc46, - 0x214e06, - 0x2d9545, - 0x288d48, - 0x27de45, - 0x33c448, - 0x29d30a, - 0x361e09, - 0x687e44, - 0x2f606a82, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x327883, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x24c083, - 0x204703, - 0x223503, - 0x223504, - 0x22bf83, - 0x234a44, - 0x231b03, - 0x2c8144, - 0x250cc3, - 0x324507, - 0x220ec3, - 0x2020c3, - 0x255bc8, - 0x204703, - 0x2d2d8b, - 0x2e0f83, - 0x25b1c6, - 0x2012c2, - 0x387d0b, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x204703, - 0x29dfc3, - 0x205a03, - 0x200882, - 0x77a48, - 0x343145, - 0x2d4e88, - 0x2da008, - 0x206a82, - 0x330f85, - 0x357bc7, - 0x200202, - 0x2424c7, - 0x20f582, - 0x23d4c7, - 0x265249, - 0x3167c8, - 0x30ca09, - 0x3345c2, - 0x26ab07, - 0x240244, - 0x357c87, - 0x301e07, - 0x244d02, - 0x220ec3, - 0x203642, - 0x202b42, - 0x200fc2, - 0x200ac2, - 0x203942, - 0x203682, - 0x2a81c5, - 0x2477c5, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x481, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x202243, - 0x24c083, - 0x204703, - 0x20b743, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0xa9c2, - 0x77a48, - 0x45684, - 0xd0705, - 0x200882, - 0x2bb844, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x37f383, - 0x2a8e05, - 0x202243, - 0x39a883, - 0x24c083, - 0x20f543, - 0x204703, - 0x20b803, - 0x223583, - 0x2232c3, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x206a82, - 0x204703, - 0x77a48, - 0x250cc3, - 0x77a48, - 0x2cd683, - 0x22bf83, - 0x22ff84, - 0x231b03, - 0x250cc3, - 0x20b542, - 0x220ec3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x20b542, - 0x232003, - 0x24c083, - 0x204703, - 0x2d9f83, - 0x20b803, - 0x200882, - 0x206a82, - 0x250cc3, - 0x24c083, - 0x204703, - 0x25b1c5, - 0xad186, - 0x223504, - 0x2012c2, - 0x77a48, - 0x200882, - 0x20048, - 0x206a82, - 0xf206, - 0x143f44, - 0x10844b, - 0x18986, - 0x142b87, - 0x231b03, - 0x250cc3, - 0x159b85, - 0x14d4c4, - 0x24dd43, - 0x4ce47, - 0xcd204, - 0x24c083, - 0x14c104, - 0x204703, - 0x2e1c44, - 0x10c888, - 0x122706, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x2020c3, - 0x204703, - 0x2e0f83, - 0x2012c2, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c3, - 0x211004, - 0x24c083, - 0x204703, 
- 0x22bf83, - 0x231b03, - 0x2c8144, - 0x250cc3, - 0x24c083, - 0x204703, - 0x25b1c6, - 0x231b03, - 0x250cc3, - 0x178d03, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x142b87, - 0x77a48, - 0x250cc3, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x3822bf83, - 0x231b03, - 0x24c083, - 0x204703, - 0x77a48, - 0x200882, - 0x206a82, - 0x22bf83, - 0x250cc3, - 0x24c083, - 0x200fc2, - 0x204703, - 0x30abc7, - 0x2e330b, - 0x208883, - 0x23a8c8, - 0x2cf787, - 0x2b7b46, - 0x2bc885, - 0x2f9509, - 0x25af48, - 0x31b349, - 0x31b350, - 0x35d24b, - 0x2ec7c9, - 0x206103, - 0x2130c9, - 0x230a86, - 0x230a8c, - 0x31b548, - 0x3ad308, - 0x279709, - 0x29e40e, - 0x37b34b, - 0x237bcc, - 0x201e03, - 0x268d0c, - 0x208349, - 0x375287, - 0x231a4c, - 0x39c18a, - 0x247204, - 0x3a4d8d, - 0x268bc8, - 0x3a52cd, - 0x26e086, - 0x290ccb, - 0x375b49, - 0x3162c7, - 0x31eb06, - 0x321c09, - 0x34b9ca, - 0x304f48, - 0x2e0b84, - 0x362087, - 0x278a07, - 0x30cf04, - 0x228dc4, - 0x262f09, - 0x2ce4c9, - 0x323148, - 0x212905, - 0x392845, - 0x20d546, - 0x3a4c49, - 0x21194d, - 0x237648, - 0x20d447, - 0x2bc908, - 0x32cc06, - 0x3a2444, - 0x37dd45, - 0x204946, - 0x205884, - 0x208247, - 0x20a60a, - 0x214bc4, - 0x219d86, - 0x21a789, - 0x21a78f, - 0x21b50d, - 0x21ca86, - 0x21fc50, - 0x220046, - 0x220787, - 0x221047, - 0x22104f, - 0x221889, - 0x2256c6, - 0x227e87, - 0x227e88, - 0x228249, - 0x290248, - 0x2d7547, - 0x20ce83, - 0x386a86, - 0x2e4c88, - 0x29e6ca, - 0x214689, - 0x212383, - 0x357ac6, - 0x36484a, - 0x2f7d07, - 0x3750ca, - 0x2028ce, - 0x2219c6, - 0x2a5bc7, - 0x217306, - 0x208406, - 0x37d88b, - 0x3038ca, - 0x22704d, - 0x216007, - 0x264988, - 0x264989, - 0x26498f, - 0x20e34c, - 0x27f909, - 0x3af74e, - 0x32460a, - 0x329546, - 0x37a686, - 0x306ccc, - 0x3108cc, - 0x32adc8, - 0x33fe07, - 0x2b0585, - 0x206a84, - 0x345b4e, - 0x34bc44, - 0x22adc7, - 0x265e4a, - 0x382ad4, - 0x384e4f, - 0x221208, - 0x386948, - 0x36d10d, - 0x36d10e, - 0x390509, - 0x22e108, - 0x22e10f, - 0x23174c, - 0x23174f, - 0x232c47, - 0x2352ca, - 0x21f50b, - 0x239c08, - 0x23aac7, - 0x25a64d, - 0x35c546, - 0x3a4f46, - 0x23ea09, - 0x250248, - 0x243048, - 0x24304e, - 0x2e3407, - 0x2aabc5, - 0x244ec5, - 0x200e84, - 0x2b7e06, - 0x323048, - 0x25ff83, - 0x2de30e, - 0x25aa08, - 0x308dcb, - 0x367487, - 0x3a4485, - 0x22f246, - 0x2a9b47, - 0x2e8448, - 0x330489, - 0x35c7c5, - 0x2878c8, - 0x214006, - 0x37e50a, - 0x345a49, - 0x231b09, - 0x231b0b, - 0x31f608, - 0x30cdc9, - 0x2129c6, - 0x35a8ca, - 0x2b898a, - 0x2354cc, - 0x35c307, - 0x291e8a, - 0x2af68b, - 0x2af699, - 0x2dd788, - 0x25b245, - 0x25a806, - 0x2dad89, - 0x316cc6, - 0x211e4a, - 0x342a06, - 0x2143c4, - 0x2c098d, - 0x24fd07, - 0x2143c9, - 0x245e85, - 0x245fc8, - 0x2467c9, - 0x246a04, - 0x247107, - 0x247108, - 0x2479c7, - 0x2687c8, - 0x24d447, - 0x23c285, - 0x25520c, - 0x2558c9, - 0x36bf0a, - 0x38f249, - 0x2131c9, - 0x27450c, - 0x259a4b, - 0x259d08, - 0x25bf88, - 0x25f344, - 0x285e08, - 0x286bc9, - 0x39c247, - 0x21a9c6, - 0x2413c7, - 0x3af509, - 0x2af2cb, - 0x325947, - 0x204787, - 0x2e2d47, - 0x3a5244, - 0x3a5245, - 0x2a6d05, - 0x337a0b, - 0x398604, - 0x317e88, - 0x2aa7ca, - 0x2140c7, - 0x34a107, - 0x28bdd2, - 0x288046, - 0x22fb86, - 0x32728e, - 0x34ae06, - 0x291308, - 0x29210f, - 0x3a5688, - 0x377c48, - 0x2b4b8a, - 0x2b4b91, - 0x2a0d0e, - 0x23adca, - 0x23adcc, - 0x22e307, - 0x22e310, - 0x203d08, - 0x2a0f05, - 0x2aa10a, - 0x2058cc, - 0x2b368d, - 0x2abe06, - 0x2abe07, - 0x2abe0c, - 0x2f338c, - 0x2d988c, - 0x28f4cb, - 0x287284, - 0x225584, - 0x371389, - 0x2d8447, - 0x32c449, - 0x2b87c9, - 0x367f07, - 
0x39c006, - 0x39c009, - 0x3a6b43, - 0x2a634a, - 0x206cc7, - 0x3624cb, - 0x226eca, - 0x23d604, - 0x3564c6, - 0x281a09, - 0x20b504, - 0x3b200a, - 0x2f9b45, - 0x2b6285, - 0x2b628d, - 0x2b65ce, - 0x3146c5, - 0x3942c6, - 0x25adc7, - 0x2dc04a, - 0x2e8646, - 0x2fc484, - 0x2f5987, - 0x220d8b, + 0x29f4c4, + 0x231807, + 0x233606, + 0x2b1905, + 0x203cc3, + 0x3abd84, + 0x27ae86, + 0x2354c4, + 0x2da048, + 0x38ba89, + 0x215589, + 0x29f2ca, + 0x2a070d, + 0x313447, + 0x2b9186, + 0x206804, + 0x286a09, + 0x284688, + 0x287b06, + 0x33f286, + 0x262107, + 0x2b6b46, + 0x226346, + 0x26d606, + 0x3a2a0a, + 0x21ed48, + 0x2bacc5, + 0x262549, + 0x27e14a, + 0x2f5d08, + 0x29b908, + 0x295f08, + 0x2a7acc, + 0x30e705, + 0x29dd88, + 0x2e6586, + 0x37a386, + 0x3b50c7, + 0x2489c5, + 0x280ac5, + 0x215449, + 0x20e247, + 0x396845, + 0x227887, + 0x205043, + 0x2c5045, + 0x20ef48, + 0x252ac7, + 0x29b7c9, + 0x2d7985, + 0x2fa984, + 0x2a03c8, 0x32ccc7, - 0x3a34c4, - 0x374206, - 0x37420d, - 0x23918c, - 0x380406, - 0x23784a, - 0x217b06, - 0x21da08, - 0x228507, - 0x37f14a, - 0x2310c6, - 0x215f03, - 0x2613c6, - 0x201308, - 0x298b0a, - 0x26c447, - 0x26c448, - 0x2732c4, - 0x2863c7, - 0x28e0c8, - 0x2151c8, - 0x285c08, - 0x32630a, - 0x2cff85, - 0x2c76c7, - 0x23ac13, - 0x22c006, - 0x2b09c8, - 0x223ac9, - 0x242388, - 0x26b70b, - 0x2b8d88, - 0x220ec4, - 0x2a0946, - 0x3b11c6, - 0x2d8889, - 0x387747, - 0x255308, - 0x3acf86, - 0x21c444, - 0x2c4d05, - 0x2bf0c8, - 0x2bf98a, - 0x2c0608, - 0x2c5446, - 0x29920a, - 0x234548, - 0x2c8788, - 0x2c9bc8, - 0x2ca4c6, - 0x2cc0c6, - 0x31f00c, - 0x2cc590, - 0x28a505, - 0x2f9f88, - 0x2f9f90, - 0x3a5490, - 0x31b1ce, - 0x31ec8e, - 0x31ec94, - 0x31f7cf, - 0x31fb86, - 0x250751, - 0x3086d3, - 0x308b48, - 0x321585, - 0x359ec8, - 0x20f3c5, - 0x22964c, - 0x256789, - 0x22ac09, - 0x241147, - 0x214989, - 0x24ff47, - 0x2b6106, - 0x37db47, - 0x2605c5, - 0x310b83, - 0x260149, - 0x227409, - 0x378d03, - 0x3abb84, - 0x38004d, - 0x3810cf, - 0x3005c5, - 0x3188c6, - 0x20d147, - 0x303d07, - 0x289946, - 0x28994b, - 0x2a2085, - 0x257ec6, - 0x209247, - 0x273949, - 0x3358c6, - 0x210805, - 0x22400b, - 0x37f406, - 0x249885, - 0x39f588, - 0x2b5cc8, - 0x2b6b4c, - 0x2b6b50, - 0x2ca9c9, - 0x2f8587, - 0x2de9cb, - 0x2d59c6, - 0x2d740a, - 0x2d860b, - 0x2d918a, - 0x2d9406, - 0x2d9e45, - 0x2cf686, - 0x27c708, - 0x24120a, - 0x36cd9c, - 0x2e104c, + 0x2c8c08, + 0x38d688, + 0x354b05, + 0x3a3946, + 0x278cc6, + 0x244609, + 0x2b01c7, + 0x2ac006, + 0x313787, + 0x210103, + 0x23bc44, + 0x2a1785, + 0x232804, + 0x3833c4, + 0x27fdc7, + 0x26c147, + 0x22e704, + 0x29b610, + 0x3b3c47, + 0x200645, + 0x24c20c, + 0x20a8c4, + 0x2c1488, + 0x24ed89, + 0x35acc6, + 0x334c48, + 0x215244, + 0x36c4c8, + 0x22fb06, + 0x2accc8, + 0x29c506, + 0x2bec0b, + 0x202ac5, + 0x2c8748, + 0x215ac4, + 0x38beca, + 0x29b7c9, + 0x245f06, + 0x216f48, + 0x256385, + 0x2b0f44, + 0x2c1386, + 0x239208, + 0x27e648, + 0x322c06, + 0x3a9ec4, + 0x241846, + 0x34de87, + 0x2755c7, + 0x26210f, + 0x207347, + 0x2fb547, + 0x3709c5, + 0x353e05, + 0x29f8c9, + 0x2dd046, + 0x27cc05, + 0x280647, + 0x2e0bc8, + 0x297645, + 0x299ec6, + 0x21b1c8, + 0x2b94ca, + 0x2db4c8, + 0x28ac87, + 0x2e23c6, + 0x262506, + 0x21a5c3, + 0x216a43, + 0x27e309, + 0x28c809, + 0x2c1286, + 0x2d7985, + 0x33bd48, + 0x216f48, + 0x3597c8, + 0x26d68b, + 0x2c23c7, + 0x30a589, + 0x262388, + 0x343084, + 0x3514c8, + 0x28cd89, + 0x2ac305, + 0x2ba687, + 0x23bcc5, + 0x27e548, + 0x28fc4b, + 0x295710, + 0x2a6dc5, + 0x215a0c, + 0x348205, + 0x27be43, + 0x2a8f86, + 0x2be6c4, + 0x335086, + 0x29bf47, + 0x21b244, + 0x240b88, + 0x28304d, + 0x302945, + 0x29b104, + 
0x2243c4, + 0x276949, + 0x2a11c8, + 0x317687, + 0x22fb88, + 0x27f748, + 0x277605, + 0x209287, + 0x277587, + 0x33e347, + 0x264309, + 0x233489, + 0x214c46, + 0x2bd306, + 0x262346, + 0x37f785, + 0x3a7184, + 0x200006, + 0x200386, + 0x277648, + 0x21a90b, + 0x260d07, + 0x206804, + 0x353646, + 0x2fe447, + 0x26dec5, + 0x391d05, + 0x219644, + 0x233406, + 0x200088, + 0x286a09, + 0x2510c6, + 0x284048, + 0x2b19c6, + 0x345248, + 0x306dcc, + 0x2774c6, + 0x29678d, + 0x296c0b, + 0x339545, + 0x312387, + 0x3531c6, + 0x336008, + 0x214cc9, + 0x2d0588, + 0x200645, + 0x277987, + 0x27d648, + 0x349649, + 0x28e946, + 0x250fca, + 0x335d88, + 0x2d03cb, + 0x39818c, + 0x36c5c8, + 0x27a7c6, + 0x208c88, + 0x3b77c7, + 0x32cf49, + 0x28f74d, + 0x299dc6, + 0x27b808, + 0x2b0609, + 0x2bda48, + 0x281508, + 0x2bfe0c, + 0x2c0b47, + 0x2c1887, + 0x264145, + 0x2ad587, + 0x2e0a88, + 0x2c1406, + 0x2556cc, + 0x2ef888, + 0x2ccb88, + 0x25dec6, + 0x21a507, + 0x214e44, + 0x25dc08, + 0x22200c, + 0x2ce24c, + 0x2342c5, + 0x2d0d47, + 0x3a9e46, + 0x21a486, + 0x2f9d88, + 0x3af904, + 0x28574b, + 0x36cc0b, + 0x2e23c6, + 0x282ec7, + 0x37a805, + 0x269a05, + 0x285886, + 0x256345, + 0x203b05, + 0x2cc9c7, + 0x274c49, + 0x2ac784, + 0x2fbb45, + 0x2e4bc5, + 0x2d9dc8, + 0x329d05, + 0x2b72c9, + 0x2ae5c7, + 0x2ae5cb, + 0x381306, + 0x347c89, + 0x26d108, + 0x276545, + 0x33e448, + 0x2334c8, + 0x245747, + 0x3776c7, + 0x27fe49, + 0x2acc07, + 0x28a989, + 0x2aa70c, + 0x3163c8, + 0x2b2ac9, + 0x2b3d47, + 0x27f809, + 0x26c287, + 0x398288, + 0x3b7705, + 0x2bb7c6, + 0x2c0388, + 0x308a88, + 0x27e009, + 0x203b47, + 0x269ac5, + 0x222b09, + 0x2bd6c6, + 0x28f544, + 0x30e1c6, + 0x35ffc8, + 0x232ac7, + 0x21ab08, + 0x3030c9, + 0x3a3707, + 0x29d146, + 0x312444, + 0x211609, + 0x209108, + 0x25dd87, + 0x27eb46, + 0x21a846, + 0x3b7904, + 0x2241c6, + 0x204fc3, + 0x3b1649, + 0x202a86, + 0x303345, + 0x29db06, + 0x26cac5, + 0x27dac8, + 0x36c307, + 0x381646, + 0x3b3b06, + 0x3078c8, + 0x29fa47, + 0x299e05, + 0x29b408, + 0x3a1e48, + 0x335d88, + 0x3480c5, + 0x2bb846, + 0x215349, + 0x244484, + 0x26c94b, + 0x22604b, + 0x2babc9, + 0x205043, + 0x254485, + 0x2214c6, + 0x385208, + 0x2e1f04, + 0x2e3306, + 0x2d1589, + 0x2ca445, + 0x2cc906, + 0x32ccc6, + 0x216f44, + 0x2a764a, + 0x303288, + 0x308a86, + 0x3b8645, + 0x37a687, + 0x2e0fc7, + 0x3a3944, + 0x226287, + 0x2aecc4, + 0x33bf46, + 0x2096c3, + 0x264305, + 0x32ad45, + 0x207588, + 0x24d985, + 0x277209, + 0x25da47, + 0x25da4b, + 0x2a148c, + 0x2a224a, + 0x300187, + 0x203503, + 0x3afc08, + 0x348285, + 0x2976c5, + 0x205104, + 0x398186, + 0x24ed86, + 0x224207, + 0x33448b, + 0x286004, + 0x2e6684, + 0x21f044, + 0x2cafc6, + 0x21b244, + 0x2bb648, + 0x342445, + 0x21ba45, + 0x359707, + 0x312489, + 0x345c45, + 0x37424a, + 0x26ac89, + 0x2996ca, + 0x3a2b49, + 0x33fec4, + 0x2cc305, + 0x2b6c48, + 0x2cd9cb, + 0x244245, + 0x2f2fc6, + 0x213e84, + 0x277746, + 0x3a3589, + 0x353707, + 0x3179c8, + 0x2a0a86, + 0x34de07, + 0x27e648, + 0x3747c6, + 0x375604, + 0x365ac7, + 0x357305, + 0x367287, + 0x200104, + 0x353146, + 0x2f4308, + 0x296dc8, + 0x2e6047, + 0x274fc8, + 0x294fc5, + 0x204e84, + 0x3711c8, + 0x2750c4, + 0x216e05, + 0x2f5fc4, + 0x2fdfc7, + 0x288c07, + 0x27f948, + 0x2c8d86, + 0x24d905, + 0x277008, + 0x2db6c8, + 0x29f209, + 0x226346, + 0x22f588, + 0x38bd4a, + 0x26df48, + 0x2e5d85, + 0x20b306, + 0x26ab48, + 0x277a4a, + 0x210f87, + 0x284c45, + 0x292708, + 0x2ade04, + 0x265886, + 0x2c1c08, + 0x204f06, + 0x38e7c8, + 0x28f187, + 0x203346, + 0x2b4b04, + 0x284fc7, + 0x2b0d84, + 0x3a3547, + 0x28e60d, + 0x27b645, + 0x2cd54b, + 0x29c606, + 0x24e848, + 0x240b44, + 0x350d06, + 
0x27ae86, + 0x208fc7, + 0x29644d, + 0x243cc7, + 0x2b12c8, + 0x269b85, + 0x278648, + 0x2c4a86, + 0x295048, + 0x228086, + 0x33d987, + 0x300449, + 0x343ac7, + 0x287dc8, + 0x2706c5, + 0x21b608, + 0x21a3c5, + 0x3a4245, + 0x3a2dc5, + 0x234543, + 0x2809c4, + 0x262545, + 0x337809, + 0x27ea46, + 0x2dc5c8, + 0x377485, + 0x2b2e87, + 0x2a78ca, + 0x2cc849, + 0x2cad4a, + 0x2d1d08, + 0x2276cc, + 0x2806cd, + 0x2fc003, + 0x38e6c8, + 0x3abd45, + 0x2b9286, + 0x379f86, + 0x2e58c5, + 0x313889, + 0x33cc45, + 0x277008, + 0x2552c6, + 0x347806, + 0x2a0289, + 0x393947, + 0x28ff06, + 0x2a7848, + 0x33bec8, + 0x2daec7, + 0x2ace4e, + 0x2c4cc5, + 0x349545, + 0x204e08, + 0x21fcc7, + 0x21a882, + 0x2bf984, + 0x334f8a, + 0x25de48, + 0x2fe546, + 0x298ac8, + 0x278cc6, + 0x332608, + 0x2ac008, + 0x3a4204, + 0x2b33c5, + 0x676384, + 0x676384, + 0x676384, + 0x202b43, + 0x21a6c6, + 0x2774c6, + 0x29cb0c, + 0x203383, + 0x27e146, + 0x2151c4, + 0x247e88, + 0x2d13c5, + 0x335086, + 0x2bcac8, + 0x2d2bc6, + 0x3815c6, + 0x245d08, + 0x2a1807, + 0x2ac9c9, + 0x2f214a, + 0x22b484, + 0x215305, + 0x2a96c5, + 0x247c06, + 0x313486, + 0x29d546, + 0x2f5546, + 0x2acb04, + 0x2acb0b, + 0x231804, + 0x29ccc5, + 0x2aad85, + 0x312806, + 0x3a6308, + 0x280587, + 0x317784, + 0x236203, + 0x2ad905, + 0x306047, + 0x28048b, + 0x207487, + 0x2bc9c8, + 0x2e62c7, + 0x370b06, + 0x279bc8, + 0x2a820b, + 0x21ff46, + 0x212309, + 0x2a8385, + 0x30a343, + 0x2cc906, + 0x28f088, + 0x213403, + 0x24f403, + 0x27e646, + 0x278cc6, + 0x35d10a, + 0x27a805, + 0x27accb, + 0x29da4b, + 0x23ef83, + 0x202843, + 0x2aec44, + 0x278a87, + 0x28f104, + 0x244504, + 0x2e6404, + 0x26e248, + 0x3b8588, + 0x3baf89, + 0x393188, + 0x2b9dc7, + 0x247846, + 0x2dc20f, + 0x2c4e06, + 0x2d1344, + 0x3b83ca, + 0x305f47, + 0x3b9606, + 0x28f589, + 0x3baf05, + 0x2076c5, + 0x3bb046, + 0x21b743, + 0x2ade49, + 0x21eec6, + 0x3afa89, + 0x382bc6, + 0x264305, + 0x2346c5, + 0x207343, + 0x278bc8, + 0x20d787, + 0x396784, + 0x247d08, + 0x2e1244, + 0x2f1006, + 0x2a8f86, + 0x23c346, + 0x2c8609, + 0x297645, + 0x3519c6, + 0x2582c9, + 0x2c41c6, + 0x26e2c6, + 0x387886, + 0x2160c5, + 0x2f5fc6, + 0x33d984, + 0x3b7705, + 0x2c0384, + 0x2b2246, + 0x3532c4, + 0x203c43, + 0x284745, + 0x2331c8, + 0x25e607, + 0x2b8209, + 0x284b48, + 0x297e11, + 0x32cd4a, + 0x2e2307, + 0x2e7246, + 0x2151c4, + 0x2c0488, + 0x22e448, + 0x297fca, + 0x2b708d, + 0x268686, + 0x245e06, + 0x285086, + 0x21ba47, + 0x2b1385, + 0x3912c7, + 0x247dc5, + 0x2ae704, + 0x2a6206, + 0x224047, + 0x2adb4d, + 0x26aa87, + 0x21f308, + 0x277309, + 0x20b206, + 0x28e8c5, + 0x22cb04, + 0x3600c6, + 0x3a3846, + 0x25dfc6, + 0x299348, + 0x215f83, + 0x208fc3, + 0x352105, + 0x277dc6, + 0x2abfc5, + 0x2a0c88, + 0x29c10a, + 0x282084, + 0x247e88, + 0x295f08, + 0x312647, + 0x377549, + 0x2bc6c8, + 0x286a87, + 0x2587c6, + 0x204f0a, + 0x360148, + 0x2f98c9, + 0x2a1288, + 0x221609, + 0x2e7107, + 0x2f2f05, + 0x26d886, + 0x2c1288, + 0x27e7c8, + 0x296088, + 0x2e24c8, + 0x29ccc5, + 0x208a84, + 0x20d488, + 0x23e2c4, + 0x3a2944, + 0x264305, + 0x2917c7, + 0x312249, + 0x208dc7, + 0x210cc5, + 0x274846, + 0x34f606, + 0x212444, + 0x2a05c6, + 0x24d744, + 0x278546, + 0x312006, + 0x213246, + 0x200645, + 0x2a0b47, + 0x203503, + 0x2079c9, + 0x3076c8, + 0x247d04, + 0x28690d, + 0x296ec8, + 0x2e3788, + 0x2f9846, + 0x300549, + 0x2cc849, + 0x3a3285, + 0x29c20a, + 0x27cf4a, + 0x29d74c, + 0x29d8c6, + 0x275446, + 0x2c4f86, + 0x2b4749, + 0x2b94c6, + 0x29fa86, + 0x33cd06, + 0x25dc08, + 0x274fc6, + 0x2ce80b, + 0x291945, + 0x21ba45, + 0x2756c5, + 0x352a46, + 0x204ec3, + 0x23c2c6, + 0x26aa07, + 0x2c0345, + 0x320585, + 0x379445, + 
0x318446, + 0x31da84, + 0x31da86, + 0x292f49, + 0x3528cc, + 0x2ae448, + 0x239184, + 0x2f5c06, + 0x29c706, + 0x28f088, + 0x216f48, + 0x3527c9, + 0x37a687, + 0x2367c9, + 0x24cfc6, + 0x22e904, + 0x20ea44, + 0x280144, + 0x27e648, + 0x31208a, + 0x345bc6, + 0x353cc7, + 0x362c47, + 0x347d85, + 0x2a9684, + 0x28cd46, + 0x2b13c6, + 0x2336c3, + 0x307507, + 0x38d588, + 0x3a33ca, + 0x2cbb88, + 0x281f08, + 0x353305, + 0x339645, + 0x260e05, + 0x348146, + 0x3ad906, + 0x26c085, + 0x3b1889, + 0x2a948c, + 0x260ec7, + 0x298048, + 0x2e5c05, + 0x676384, + 0x320944, + 0x252c04, + 0x22df86, + 0x29eb0e, + 0x207747, + 0x21bc45, + 0x24440c, + 0x2e1107, + 0x223fc7, + 0x225109, + 0x217cc9, + 0x284c45, + 0x3076c8, + 0x215349, + 0x335c45, + 0x2c0288, + 0x2c2586, + 0x371446, + 0x2def04, + 0x2553c8, + 0x20b3c3, + 0x2af8c4, + 0x2ad985, + 0x3bab07, + 0x21c245, + 0x38bc09, + 0x28b30d, + 0x2a33c6, + 0x225fc4, + 0x2588c8, + 0x274a8a, + 0x2611c7, + 0x235d45, + 0x23b403, + 0x29dc0e, + 0x278ccc, + 0x2f5e07, + 0x29ecc7, + 0x200143, + 0x2b9505, + 0x252c05, + 0x298e88, + 0x295d49, + 0x239086, + 0x28f104, + 0x2e2246, + 0x27b5cb, + 0x2cc5cc, + 0x366d87, + 0x2d0305, + 0x3a1d48, + 0x2dac85, + 0x3b83c7, + 0x32cb87, + 0x247585, + 0x204ec3, + 0x26e584, + 0x21c685, + 0x2ac685, + 0x2ac686, + 0x292008, + 0x224047, + 0x37a286, + 0x26c486, + 0x3a2d06, + 0x268789, + 0x209387, + 0x25e286, + 0x2cc746, + 0x2731c6, + 0x2a7585, + 0x3b2b46, + 0x380145, + 0x329d88, + 0x29114b, + 0x28c346, + 0x362c84, + 0x2b4389, + 0x25da44, + 0x2c2508, + 0x30e2c7, + 0x281404, + 0x2bbd88, + 0x2c1684, + 0x2a75c4, + 0x286845, + 0x302986, + 0x26e187, + 0x203043, + 0x29d205, + 0x323284, + 0x349586, + 0x3a3308, + 0x38d2c5, + 0x290e09, + 0x222d05, + 0x2dbf88, + 0x215087, + 0x388588, + 0x2b8047, + 0x2fb609, + 0x264dc6, + 0x32bb46, + 0x28cac4, + 0x258705, + 0x2fce4c, + 0x2756c7, + 0x275fc7, + 0x362b08, + 0x2a33c6, + 0x26a944, + 0x328004, + 0x27fcc9, + 0x2c5086, + 0x298a07, + 0x208c04, + 0x23da46, + 0x33b785, + 0x2c88c7, + 0x2ce786, + 0x250e89, + 0x27cd87, + 0x262107, + 0x2a0106, + 0x23d985, + 0x27c548, + 0x21ed48, + 0x247a46, + 0x38d305, + 0x390586, + 0x2034c3, + 0x298d09, + 0x29d2ce, + 0x2b7d48, 0x2e1348, - 0x25b1c5, - 0x2e51c7, - 0x29e046, - 0x399445, - 0x21ea46, - 0x289b08, - 0x2b5247, - 0x29e308, - 0x2a5cca, - 0x321f8c, - 0x322209, - 0x20a147, - 0x20c544, - 0x245846, - 0x3777ca, - 0x2b88c5, - 0x3a2c8c, - 0x3a5088, - 0x350b48, - 0x20da4c, - 0x213c4c, - 0x2162c9, - 0x216507, - 0x2c7e0c, - 0x32f644, - 0x39080a, - 0x20b80c, - 0x274d8b, - 0x23a28b, - 0x23b346, - 0x23df07, - 0x22e547, - 0x22e54f, - 0x2f4311, - 0x3b1ad2, - 0x23eecd, - 0x23eece, - 0x23f20e, - 0x31f988, - 0x31f992, - 0x242e48, - 0x2fc287, - 0x24a88a, - 0x20fd88, - 0x34adc5, - 0x2f164a, - 0x220587, - 0x2e6e04, - 0x24ddc3, - 0x376a45, - 0x2b4e07, - 0x2fa347, - 0x2b388e, - 0x38708d, - 0x39b189, - 0x216845, - 0x2ea903, - 0x25f8c6, - 0x36ba85, - 0x309008, - 0x2eb049, - 0x25a845, - 0x25a84f, - 0x2d9c87, - 0x2f9445, - 0x271d8a, - 0x3a2986, - 0x21db89, - 0x2ec3cc, - 0x2ee449, - 0x203f46, - 0x2aa5cc, - 0x2eea06, - 0x2f19c8, - 0x2f1bc6, - 0x2dd906, - 0x24fa84, - 0x25a083, - 0x35efca, - 0x31e391, - 0x27faca, - 0x327f85, - 0x38ec47, - 0x251b47, - 0x28e1c4, - 0x28e1cb, - 0x316648, - 0x2b2886, - 0x235fc5, - 0x265904, - 0x269ac9, - 0x27a984, - 0x3041c7, - 0x2ee645, - 0x2ee647, - 0x3274c5, - 0x2a8283, - 0x2fc148, - 0x325c0a, - 0x2643c3, - 0x34318a, - 0x274386, - 0x25a5cf, - 0x358f89, - 0x2de290, - 0x2e1848, - 0x2c5909, - 0x298347, - 0x37418f, - 0x330bc4, - 0x2c81c4, - 0x21b386, - 0x275746, - 0x2ff74a, - 0x32b746, - 0x33e787, - 
0x2f8b48, - 0x2f8d47, - 0x2f9747, - 0x34da8a, - 0x2fbf8b, - 0x238845, - 0x3b1708, - 0x22aec3, - 0x36524c, - 0x38d68f, - 0x2b038d, - 0x2ef707, - 0x39b2c9, - 0x22cb47, - 0x240008, - 0x382ccc, - 0x272988, - 0x2511c8, - 0x30d64e, - 0x31d014, - 0x31d524, - 0x33ee8a, - 0x35d98b, - 0x250004, - 0x250009, - 0x3a7088, - 0x245a05, - 0x25fa8a, - 0x265107, - 0x2cf584, - 0x327883, - 0x22bf83, - 0x234a44, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x202243, - 0x220ec3, - 0x2cc586, - 0x211004, - 0x24c083, - 0x204703, - 0x21d603, - 0x200882, - 0x327883, - 0x206a82, - 0x22bf83, - 0x234a44, - 0x231b03, - 0x250cc3, - 0x202243, - 0x2cc586, - 0x24c083, - 0x204703, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x24c083, - 0x204703, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x211004, - 0x24c083, - 0x204703, - 0x722f48, - 0x201482, - 0x200482, - 0x206a82, - 0x22bf83, - 0x200d02, - 0x202002, - 0x2023c4, - 0x30db04, - 0x21ee82, - 0x211004, - 0x200fc2, - 0x204703, - 0x21d603, - 0x23b346, - 0x212dc2, - 0x203f02, - 0x214a02, - 0x3aa23a03, - 0x3ae095c3, - 0x52886, - 0x52886, - 0x223504, - 0xe3d4c, - 0x19a1cc, - 0x8390d, - 0xda987, - 0x1cc08, - 0x22408, - 0x1a9f8a, - 0x3bb1cb45, - 0x11cb49, - 0x142c08, - 0x16b1ca, - 0x170bce, - 0x144218b, - 0x143f44, - 0x16fcc8, - 0x7f087, - 0x12c47, - 0x172cc9, - 0xb587, - 0x14bcc8, - 0x1a4249, - 0xda4c5, - 0x6098e, - 0xa868d, - 0x142a08, - 0x3be6b1c6, - 0x62c87, - 0x65d07, - 0x6bf07, - 0x72b87, - 0xd502, - 0x14d247, - 0x103a8c, - 0xed107, - 0x90906, - 0xa3849, - 0xa5408, - 0x134c2, - 0x2002, - 0x1808cb, - 0x18549, - 0x44b09, - 0x15bd88, - 0xafcc2, - 0x3c909, - 0x120809, - 0xcd808, - 0xcde07, - 0xcff09, - 0xd32c5, - 0xd36d0, - 0x1a2ac6, - 0x62145, - 0x22f4d, - 0xb146, - 0xdb947, - 0xe1c58, - 0xcfc88, - 0x19110a, - 0x185e4d, - 0x4042, - 0x72606, - 0x8c808, - 0x14ac08, - 0x77909, - 0x496c8, - 0x56f0e, - 0xe9f85, - 0x4ef08, - 0x1bc2, - 0x122706, - 0x6c2, - 0xc01, - 0x3c2e24c4, - 0x3c692f43, + 0x24784b, + 0x291046, + 0x313104, + 0x2802c4, + 0x29d3ca, + 0x215907, + 0x25e345, + 0x212309, + 0x2bf685, + 0x3a2987, + 0x245c84, + 0x287087, + 0x2e40c8, + 0x2cd306, + 0x27b989, + 0x2bc7ca, + 0x215886, + 0x296a06, + 0x2aad05, + 0x37d085, + 0x282d07, + 0x244e48, + 0x33b6c8, + 0x3a4206, + 0x234745, + 0x31320e, + 0x2b3204, + 0x2479c5, + 0x2741c9, + 0x2dce48, + 0x28abc6, + 0x29af0c, + 0x29bd10, + 0x29e74f, + 0x29f7c8, + 0x300187, + 0x200645, + 0x262545, + 0x26e009, + 0x292909, + 0x241946, + 0x2442c7, + 0x2d0cc5, + 0x337b09, + 0x339386, + 0x2b930d, + 0x280009, + 0x244504, + 0x2b7ac8, + 0x20d549, + 0x345d86, + 0x274945, + 0x32bb46, + 0x317889, + 0x2f3c48, + 0x20dcc5, + 0x2553c4, + 0x29b0cb, + 0x345c45, + 0x29b206, + 0x280a06, + 0x265e46, + 0x276d4b, + 0x290f09, + 0x26c3c5, + 0x388347, + 0x32ccc6, + 0x334dc6, + 0x252988, + 0x302a89, + 0x21f0cc, + 0x305e48, + 0x309e46, + 0x322c03, + 0x2ba886, + 0x276b85, + 0x27b008, + 0x234146, + 0x2c8b08, + 0x248b45, + 0x279305, + 0x32eb08, + 0x332787, + 0x379ec7, + 0x224207, + 0x334c48, + 0x3002c8, + 0x2ad486, + 0x2b2087, + 0x23bb07, + 0x276a4a, + 0x201e03, + 0x352a46, + 0x2392c5, + 0x334f84, + 0x277309, + 0x2fb584, + 0x25e684, + 0x29c584, + 0x29eccb, + 0x20d6c7, + 0x313445, + 0x294cc8, + 0x274846, + 0x274848, + 0x27a746, + 0x28b085, + 0x28b645, + 0x28d886, + 0x28ee48, + 0x28f4c8, + 0x2774c6, + 0x294b0f, + 0x2987d0, + 0x3a6005, + 0x203503, + 0x22e9c5, + 0x30a4c8, + 0x292809, + 0x335d88, + 0x268608, + 0x2b8d48, + 0x20d787, + 0x274509, + 0x2c8d08, + 0x265304, + 0x29c408, + 0x2d9e89, + 0x2b27c7, + 0x299d44, + 0x208e88, + 0x2a090a, + 0x2e77c6, + 0x268686, + 
0x226209, + 0x29bf47, + 0x2cba08, + 0x204848, + 0x2ddd88, + 0x35cc45, + 0x37e005, + 0x21ba45, + 0x252bc5, + 0x3b5987, + 0x204ec5, + 0x2c0345, + 0x313686, + 0x335cc7, + 0x2cd907, + 0x2a0c06, + 0x2d2245, + 0x29b206, + 0x27ba85, + 0x2b58c8, + 0x2f4284, + 0x2c4246, + 0x33b5c4, + 0x2b0f48, + 0x2c434a, + 0x2790cc, + 0x334685, + 0x21bb06, + 0x21f286, + 0x351fc6, + 0x309ec4, + 0x33ba45, + 0x27a587, + 0x29bfc9, + 0x2cb4c7, + 0x676384, + 0x676384, + 0x317605, + 0x37b944, + 0x29a8ca, + 0x2746c6, + 0x279e04, + 0x3b9585, + 0x37e405, + 0x2b12c4, + 0x280647, + 0x222c87, + 0x2cafc8, + 0x33de88, + 0x20dcc9, + 0x29cd88, + 0x29aa8b, + 0x2318c4, + 0x366885, + 0x27cc85, + 0x224189, + 0x302a89, + 0x2b4288, + 0x30e048, + 0x2d6604, + 0x29c745, + 0x217083, + 0x247bc5, + 0x351a46, + 0x295b8c, + 0x208b06, + 0x36c3c6, + 0x28ae45, + 0x3184c8, + 0x2b7ec6, + 0x2e73c6, + 0x268686, + 0x22920c, + 0x25e184, + 0x3a2e4a, + 0x28ad88, + 0x2959c7, + 0x323186, + 0x239147, + 0x2ec145, + 0x27eb46, + 0x34d406, + 0x35b847, + 0x25e6c4, + 0x2fe0c5, + 0x2741c4, + 0x2ae787, + 0x274408, + 0x2752ca, + 0x27d4c7, + 0x303407, + 0x300107, + 0x2dadc9, + 0x295b8a, + 0x21f083, + 0x25e5c5, + 0x213283, + 0x2e6449, + 0x33dc08, + 0x3709c7, + 0x335e89, + 0x21ee46, + 0x2b88c8, + 0x33a3c5, + 0x2db7ca, + 0x2d3549, + 0x2683c9, + 0x3b50c7, + 0x22e549, + 0x213148, + 0x35ba06, + 0x21bcc8, + 0x2160c7, + 0x2acc07, + 0x26ac87, + 0x2d0b48, + 0x2f5a86, + 0x2a06c5, + 0x27a587, + 0x296508, + 0x33b544, + 0x2dd244, + 0x28fe07, + 0x2ac387, + 0x2151ca, + 0x35b986, + 0x38c74a, + 0x2bf8c7, + 0x2b2fc7, + 0x246004, + 0x28aa44, + 0x2ce686, + 0x202d04, + 0x202d0c, + 0x3aff05, + 0x216d89, + 0x2d4f04, + 0x2b1385, + 0x274a08, + 0x279fc5, + 0x374246, + 0x223ec4, + 0x293c4a, + 0x2b00c6, + 0x29ba8a, + 0x22b447, + 0x21ac45, + 0x21b745, + 0x347dca, + 0x28efc5, + 0x26dfc6, + 0x23e2c4, + 0x2aedc6, + 0x282dc5, + 0x234206, + 0x2e604c, + 0x2cb14a, + 0x2587c4, + 0x247846, + 0x29bf47, + 0x2cf984, + 0x25dc08, + 0x393006, + 0x313089, + 0x2c7549, + 0x3164c9, + 0x26cb06, + 0x2161c6, + 0x21be07, + 0x3b17c8, + 0x215fc9, + 0x20d6c7, + 0x294e46, + 0x34de87, + 0x284f45, + 0x2b3204, + 0x21b9c7, + 0x23bcc5, + 0x286785, + 0x226987, + 0x247448, + 0x3a1cc6, + 0x29738d, + 0x29908f, + 0x29da4d, + 0x210d04, + 0x2332c6, + 0x2d3c08, + 0x33ccc5, + 0x276c08, + 0x24560a, + 0x244504, + 0x27bb46, + 0x26f3c7, + 0x286007, + 0x2a18c9, + 0x21bc85, + 0x2b12c4, + 0x2b330a, + 0x2bc289, + 0x22e647, + 0x265706, + 0x345d86, + 0x29c686, + 0x365b86, + 0x2d320f, + 0x2d3ac9, + 0x274fc6, + 0x22e346, + 0x31a809, + 0x2b2187, + 0x217443, + 0x229386, + 0x216a43, + 0x2e5788, + 0x34dcc7, + 0x29f9c9, + 0x2a8e08, + 0x37a008, + 0x203c86, + 0x208a49, + 0x242785, + 0x2b2244, + 0x2a99c7, + 0x2b47c5, + 0x210d04, + 0x313508, + 0x215bc4, + 0x2b1ec7, + 0x3545c6, + 0x357e85, + 0x2a1288, + 0x345c4b, + 0x331dc7, + 0x348046, + 0x2c4e84, + 0x319586, + 0x264305, + 0x23bcc5, + 0x27c2c9, + 0x280249, + 0x2acc44, + 0x2acc85, + 0x247885, + 0x2db646, + 0x3077c8, + 0x2bf046, + 0x38d3cb, + 0x35ab4a, + 0x2b0e85, + 0x28b6c6, + 0x396485, + 0x2cf485, + 0x2a54c7, + 0x352cc8, + 0x2367c4, + 0x25f806, + 0x28f546, + 0x213307, + 0x30a304, + 0x27ae86, + 0x237cc5, + 0x237cc9, + 0x2163c4, + 0x2a9809, + 0x2774c6, + 0x2c0c08, + 0x247885, + 0x362d45, + 0x234206, + 0x21efc9, + 0x217cc9, + 0x36c446, + 0x2dcf48, + 0x244508, + 0x396444, + 0x2b3644, + 0x2b3648, + 0x326dc8, + 0x2368c9, + 0x3519c6, + 0x268686, + 0x32224d, + 0x2e3306, + 0x306c89, + 0x315fc5, + 0x3bb046, + 0x391408, + 0x31d9c5, + 0x23bb44, + 0x264305, + 0x27fb48, + 0x29a689, + 0x274284, + 0x353146, + 0x279e8a, + 
0x2f5d08, + 0x215349, + 0x38174a, + 0x335e06, + 0x299248, + 0x3b8185, + 0x2e0908, + 0x2b8145, + 0x21ed09, + 0x36a349, + 0x20d8c2, + 0x2a8385, + 0x269746, + 0x277407, + 0x3b05c5, + 0x308986, + 0x301448, + 0x2a33c6, + 0x2b6b09, + 0x2760c6, + 0x252808, + 0x2a89c5, + 0x23ebc6, + 0x33da88, + 0x27e648, + 0x2e7008, + 0x345f08, + 0x3b2b44, + 0x22a183, + 0x2b6d44, + 0x27d6c6, + 0x284f84, + 0x2e1287, + 0x2e72c9, + 0x2c45c5, + 0x204846, + 0x229386, + 0x291e4b, + 0x2b0dc6, + 0x3b8cc6, + 0x2c8488, + 0x3204c6, + 0x21aa43, + 0x3af743, + 0x2b3204, + 0x22f485, + 0x2b1807, + 0x274408, + 0x27440f, + 0x27a48b, + 0x3075c8, + 0x3531c6, + 0x3078ce, + 0x2319c3, + 0x2b1784, + 0x2b0d45, + 0x2b1146, + 0x28ce4b, + 0x291886, + 0x21b249, + 0x357e85, + 0x3899c8, + 0x20c688, + 0x217b8c, + 0x29ed06, + 0x247c06, + 0x2d7985, + 0x287b88, + 0x2790c5, + 0x343088, + 0x29b28a, + 0x29de89, + 0x676384, + 0x38a099c2, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x20cf83, + 0x25ef44, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2d5f04, + 0x2e9dc3, + 0x3b0887, + 0x209703, + 0x204e83, + 0x28b148, + 0x200983, + 0x2ae1cb, + 0x2ec883, + 0x264a86, + 0x20b0c2, + 0x22d54b, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x200983, + 0x26be43, + 0x204783, + 0x205702, + 0x16d208, + 0x325f45, + 0x23bd48, + 0x2df7c8, + 0x2099c2, + 0x37ab45, + 0x38c347, + 0x2007c2, + 0x240d87, + 0x20d882, + 0x248707, + 0x32c589, + 0x3b7d48, + 0x2ddc09, + 0x23e202, + 0x263647, + 0x36c1c4, + 0x38c407, + 0x35aa47, + 0x2bbbc2, + 0x209703, + 0x20e602, + 0x200c82, + 0x200442, + 0x2013c2, + 0x205ec2, + 0x209842, + 0x2a80c5, + 0x320885, + 0x99c2, + 0x32403, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x12083, + 0x1ec1, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x205503, + 0x200983, + 0x219503, + 0x3b819d06, + 0x13f443, + 0x7df85, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x4a82, + 0x16d208, + 0x44e04, + 0xdb085, + 0x205702, + 0x26f544, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x2358c3, + 0x2a9305, + 0x244183, + 0x206343, + 0x205503, + 0x21c2c3, + 0x200983, + 0x214843, + 0x2387c3, + 0x25ed03, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x200983, + 0x16d208, + 0x2e9dc3, + 0x16d208, + 0x200c03, + 0x2a84c3, + 0x22fd84, + 0x232403, + 0x2e9dc3, + 0x202bc2, + 0x209703, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x202bc2, + 0x227f83, + 0x205503, + 0x200983, + 0x2e87c3, + 0x214843, + 0x205702, + 0x2099c2, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x264a85, + 0xe4886, + 0x25ef44, + 0x20b0c2, + 0x16d208, + 0x205702, + 0x1d848, + 0x1b4183, + 0x2099c2, + 0x3fc91386, + 0x1320c4, + 0xd95cb, + 0x13eec6, + 0x9807, + 0x232403, + 0x47208, + 0x2e9dc3, + 0xb9b45, + 0x13fb84, + 0x260f83, + 0x4ce87, + 0xd78c4, + 0x205503, + 0x7f1c4, + 0x200983, + 0x2ed844, + 0xd9388, + 0x125c86, + 0x82b48, + 0x6cf05, + 0x1fa49, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x204e83, + 0x200983, + 0x2ec883, + 0x20b0c2, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x24a5c3, + 0x211cc4, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2d5f04, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x264a86, + 0x232403, + 0x2e9dc3, + 0x176e43, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, 
+ 0x9807, + 0x16d208, + 0x2e9dc3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x426a84c3, + 0x232403, + 0x205503, + 0x200983, + 0x16d208, + 0x205702, + 0x2099c2, + 0x2a84c3, + 0x2e9dc3, + 0x205503, + 0x200442, + 0x200983, + 0x316e87, + 0x33e6cb, + 0x22d703, + 0x241608, + 0x3b1547, + 0x20a7c6, + 0x2c2c45, + 0x372349, + 0x209488, + 0x360d49, + 0x38f790, + 0x360d4b, + 0x39e189, + 0x201b03, + 0x20fb89, + 0x230f06, + 0x230f0c, + 0x326008, + 0x3b4f08, + 0x34af09, + 0x2905ce, + 0x2dd9cb, + 0x2f364c, + 0x2030c3, + 0x263d0c, + 0x207089, + 0x2fee47, + 0x23234c, + 0x3a89ca, + 0x2030c4, + 0x2d084d, + 0x263bc8, + 0x20cf8d, + 0x273846, + 0x28decb, + 0x283349, + 0x3b8b87, + 0x32fd06, + 0x330f89, + 0x351b8a, + 0x30b148, + 0x2ec484, + 0x2fba07, + 0x34f707, + 0x2bab04, + 0x37b5c4, + 0x22a749, + 0x281d49, + 0x22ae48, + 0x210785, + 0x3b4005, + 0x20db86, + 0x2d0709, + 0x24588d, + 0x2f30c8, + 0x20da87, + 0x2c2cc8, + 0x2e1886, + 0x38b6c4, + 0x3523c5, + 0x202986, + 0x204b04, + 0x206f87, + 0x20b8ca, + 0x212244, + 0x2157c6, + 0x216a09, + 0x216a0f, + 0x21788d, + 0x2184c6, + 0x21d450, + 0x21d846, + 0x21df87, + 0x21e4c7, + 0x21e4cf, + 0x21f6c9, + 0x224c46, + 0x225347, + 0x225348, + 0x225809, + 0x246088, + 0x2e52c7, + 0x20cc83, + 0x372986, + 0x3ba948, + 0x29088a, + 0x213c09, + 0x2095c3, + 0x38c246, + 0x25f64a, + 0x29e587, + 0x2fec8a, + 0x313d4e, + 0x21f806, + 0x2a8587, + 0x20e006, + 0x207146, + 0x37de0b, + 0x20414a, + 0x317f0d, + 0x216287, + 0x33ce88, + 0x33ce89, + 0x33ce8f, + 0x2b838c, + 0x27b289, + 0x2e6a0e, + 0x3b098a, + 0x2ba246, + 0x2f4586, + 0x30b58c, + 0x30ce8c, + 0x30dc08, + 0x3439c7, + 0x2b8c45, + 0x351e04, + 0x33c90e, + 0x228d04, + 0x351747, + 0x26030a, + 0x362554, + 0x36dd8f, + 0x21e688, + 0x372848, + 0x35040d, + 0x35040e, + 0x376ec9, + 0x3a8ec8, + 0x3a8ecf, + 0x23204c, + 0x23204f, + 0x233007, + 0x236dca, + 0x2435cb, + 0x238508, + 0x239cc7, + 0x3690cd, + 0x250406, + 0x2d0a06, + 0x23c149, + 0x394648, + 0x242088, + 0x24208e, + 0x2b5007, + 0x243885, + 0x244bc5, + 0x2063c4, + 0x20aa86, + 0x22ad48, + 0x202203, + 0x2ca10e, + 0x369488, + 0x2a2fcb, + 0x200dc7, + 0x3a4045, + 0x22e206, + 0x2aa0c7, + 0x333d08, + 0x26cd09, + 0x292e45, + 0x284788, + 0x212c06, + 0x38ad4a, + 0x33c809, + 0x232409, + 0x23240b, + 0x38dc48, + 0x2ba9c9, + 0x210846, + 0x22eb8a, + 0x2dc80a, + 0x236fcc, + 0x3a6687, + 0x32c38a, + 0x26ea8b, + 0x26ea99, + 0x3b6a88, + 0x264b05, + 0x2c6086, + 0x211e49, + 0x390746, + 0x28550a, + 0x209686, + 0x202644, + 0x2c620d, + 0x202647, + 0x211149, + 0x246385, + 0x2464c8, + 0x246fc9, + 0x247784, + 0x248387, + 0x248388, + 0x248c87, + 0x261908, + 0x24d487, + 0x26c645, + 0x25488c, + 0x2550c9, + 0x2bc00a, + 0x3937c9, + 0x20fc89, + 0x275a0c, + 0x25774b, + 0x257ec8, + 0x259048, + 0x25c404, + 0x2810c8, + 0x283c89, + 0x3a8a87, + 0x216c46, + 0x2835c7, + 0x2dcac9, + 0x26e6cb, + 0x319407, + 0x200a07, + 0x22b587, + 0x20cf04, + 0x20cf05, + 0x29a545, + 0x341c0b, + 0x39c644, + 0x3b2988, + 0x26614a, + 0x212cc7, + 0x2f6707, + 0x28bed2, + 0x278446, + 0x22f706, + 0x33c24e, + 0x27aa06, + 0x292588, + 0x29374f, + 0x20d348, + 0x37f308, + 0x30eaca, + 0x30ead1, + 0x2a0e8e, + 0x24dd0a, + 0x24dd0c, + 0x21e307, + 0x3a90d0, + 0x200408, + 0x2a1085, + 0x2aa4ca, + 0x204b4c, + 0x29518d, + 0x2f7e46, + 0x2f7e47, + 0x2f7e4c, + 0x300e4c, + 0x3292cc, + 0x2873cb, + 0x284184, + 0x226384, + 0x346d89, + 0x3050c7, + 0x225e49, + 0x37e909, + 0x39f1c7, + 0x3a8846, + 0x3a8849, + 0x2ad1c3, + 0x21c74a, + 0x31a287, + 0x33eb8b, + 0x317d8a, + 0x248844, + 0x22ba46, + 0x27d749, + 0x202b84, + 0x3affca, + 0x348345, + 0x2bdd45, + 0x2bdd4d, + 0x2be08e, + 
0x28cc05, + 0x323906, + 0x264687, + 0x3870ca, + 0x39b686, + 0x3616c4, + 0x36d747, + 0x2c3f0b, + 0x2e1947, + 0x33fa84, + 0x24bb86, + 0x24bb8d, + 0x21e1cc, + 0x2053c6, + 0x2f32ca, + 0x2e03c6, + 0x2ed0c8, + 0x377c47, + 0x23568a, + 0x23d6c6, + 0x216183, + 0x391586, + 0x3ba7c8, + 0x29ac8a, + 0x275807, + 0x275808, + 0x281684, + 0x24b687, + 0x279348, + 0x2bd748, + 0x27c0c8, + 0x38c94a, + 0x2da905, + 0x2cf0c7, + 0x24db53, + 0x31e806, + 0x266348, + 0x221a09, + 0x240c48, + 0x203d0b, + 0x2cb608, + 0x2a5f44, + 0x32ec06, + 0x30bac6, + 0x3027c9, + 0x2c3dc7, + 0x254988, + 0x28af06, + 0x226884, + 0x2cb8c5, + 0x2c55c8, + 0x2c5bca, + 0x2c5e88, + 0x2cbf86, + 0x29944a, + 0x2ac808, + 0x2cf788, + 0x2d18c8, + 0x2d1f06, + 0x2d3e06, + 0x38e18c, + 0x2d43d0, + 0x27d2c5, + 0x20d148, + 0x301950, + 0x20d150, + 0x38f60e, + 0x38de0e, + 0x38de14, + 0x32fe8f, + 0x330246, + 0x332d51, + 0x33d213, + 0x33d688, + 0x3b3445, + 0x241b48, + 0x386245, + 0x329a8c, + 0x291549, + 0x228b49, + 0x3201c7, + 0x236b89, + 0x380887, + 0x2f6146, + 0x3521c7, + 0x269c45, + 0x2120c3, + 0x2023c9, + 0x221cc9, + 0x376e43, + 0x27f384, + 0x32a20d, + 0x206bcf, + 0x2268c5, + 0x329986, + 0x211407, + 0x325d87, + 0x288786, + 0x28878b, + 0x2a2405, + 0x256786, + 0x2f6c07, + 0x24e489, + 0x3a7486, + 0x21d305, + 0x22854b, + 0x235946, + 0x249245, + 0x357988, + 0x306a88, + 0x2c8f0c, + 0x2c8f10, + 0x2d2409, + 0x2ffd07, + 0x32840b, + 0x2e3b86, + 0x2e518a, + 0x2e754b, + 0x2e794a, + 0x2e7bc6, + 0x2e8685, + 0x319fc6, + 0x36c808, + 0x32028a, + 0x35009c, + 0x2ec94c, + 0x2ecc48, + 0x264a85, + 0x34ea07, + 0x26bec6, + 0x274e05, + 0x21afc6, + 0x288948, + 0x2bc507, + 0x2904c8, + 0x2a868a, + 0x33130c, + 0x331589, + 0x38b847, + 0x2198c4, + 0x244c86, + 0x37ee8a, + 0x37ea05, + 0x209f8c, + 0x20e648, + 0x367388, + 0x21a00c, + 0x22550c, + 0x225a09, + 0x225c47, + 0x231d4c, + 0x23aa84, + 0x23c60a, + 0x35e6cc, + 0x26b28b, + 0x242b8b, + 0x2efec6, + 0x24a107, + 0x24c687, + 0x3a930f, + 0x2f8a51, + 0x2d8592, + 0x24c68d, + 0x24c68e, + 0x24c9ce, + 0x330048, + 0x330052, + 0x24fbc8, + 0x3b1187, + 0x24aeca, + 0x3681c8, + 0x27a9c5, + 0x3b57ca, + 0x21dd87, + 0x2e36c4, + 0x201543, + 0x2a57c5, + 0x30ed47, + 0x2f5007, + 0x29538e, + 0x3382cd, + 0x33af89, + 0x222705, + 0x35c3c3, + 0x3a78c6, + 0x36e745, + 0x2a3208, + 0x205b49, + 0x2983c5, + 0x3692cf, + 0x2d96c7, + 0x372285, + 0x20178a, + 0x2a36c6, + 0x2ed249, + 0x396ccc, + 0x2f51c9, + 0x3abdc6, + 0x265f4c, + 0x322d06, + 0x2f7588, + 0x2f7786, + 0x3b6c06, + 0x3b96c4, + 0x258243, + 0x2a1fca, + 0x327191, + 0x3a9c0a, + 0x27ee85, + 0x265047, + 0x252207, + 0x279444, + 0x27944b, + 0x3b7bc8, + 0x2b7bc6, + 0x362b85, + 0x38b044, + 0x255f09, + 0x31ad84, + 0x254f07, + 0x32f345, + 0x32f347, + 0x33c485, + 0x2a8183, + 0x3b1048, + 0x33b80a, + 0x203043, + 0x325f8a, + 0x203046, + 0x36904f, + 0x2b4f89, + 0x2ca090, + 0x2f1548, + 0x2ccc89, + 0x2971c7, + 0x24bb0f, + 0x336244, + 0x2d5f84, + 0x21d6c6, + 0x22f246, + 0x25708a, + 0x23cc46, + 0x2f58c7, + 0x300788, + 0x300987, + 0x301207, + 0x30370a, + 0x30534b, + 0x2f3dc5, + 0x2d81c8, + 0x21bb03, + 0x23800c, + 0x36f78f, + 0x2b8a4d, + 0x2a7147, + 0x33b0c9, + 0x22bcc7, + 0x24a2c8, + 0x36274c, + 0x2a5e48, + 0x250bc8, + 0x318ace, + 0x32d354, + 0x32d864, + 0x3475ca, + 0x36148b, + 0x380944, + 0x380949, + 0x27bbc8, + 0x245345, + 0x201d0a, + 0x3696c7, + 0x26f744, + 0x38d2c3, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x2d43c6, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x244183, + 0x2d43c6, + 0x205503, + 
0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x205702, + 0x2bb143, + 0x2099c2, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x201ec2, + 0x219f02, + 0x2099c2, + 0x2a84c3, + 0x202242, + 0x201fc2, + 0x3b1384, + 0x210444, + 0x227382, + 0x211cc4, + 0x200442, + 0x200983, + 0x201303, + 0x2efec6, + 0x212982, + 0x202dc2, + 0x222f02, + 0x44e0d343, + 0x4521e303, + 0x52d46, + 0x52d46, + 0x25ef44, + 0x204e83, + 0x142abca, + 0x12778c, + 0x102cc, + 0x7dd8d, + 0x129845, + 0x21347, + 0x18648, + 0x1b887, + 0x20348, + 0x19d4ca, + 0x45ed6a45, + 0x12b809, + 0xaf848, + 0x4a70a, + 0x8a64e, + 0x1440a4b, + 0x1320c4, + 0x77848, + 0x68bc8, + 0x38f47, + 0x12807, + 0x4efc9, + 0x2c07, + 0xd4ac8, + 0x1318c9, + 0x3adc5, + 0x124d4e, + 0xa8a0d, + 0x9688, + 0x4622a586, + 0x46c2a588, + 0x70cc8, + 0x117090, + 0x5f347, + 0x601c7, + 0x64547, + 0x69447, + 0xdb42, + 0x190bc7, + 0x430c, + 0x35fc7, + 0xa4246, + 0xa4909, + 0xa6388, + 0x17f42, + 0x1fc2, + 0xb8fcb, + 0x7f247, + 0x11809, + 0xbb9c9, + 0x17e248, + 0xafd42, + 0x113a49, + 0xcdf8a, + 0xc9e09, + 0xd6fc9, + 0xd7ac8, + 0xd8a47, + 0xda889, + 0xde345, + 0xde6d0, + 0x175b86, + 0x192345, + 0x5e98d, + 0xf986, + 0xe9187, + 0xed858, + 0x1b1a48, + 0xb4c8a, + 0x1c42, + 0x52f4d, + 0x27c2, + 0x5d306, + 0x8d108, + 0x86ec8, + 0x16d0c9, + 0x55b08, + 0x5fb4e, + 0x1a78c7, + 0x19d0d, + 0xf2d05, + 0x190948, + 0x194448, + 0xfacc6, + 0xc2, + 0x125c86, + 0x7b02, + 0x341, + 0x57a07, + 0xc8e83, + 0x466ee0c4, + 0x46a94443, 0x141, - 0x4e86, + 0x10986, 0x141, 0x1, - 0x4e86, - 0x1570305, - 0x247204, - 0x22bf83, - 0x248fc4, - 0x2023c4, - 0x24c083, - 0x223985, - 0x20b743, - 0x2298c3, - 0x2ebf05, - 0x2232c3, - 0x3d62bf83, - 0x231b03, - 0x250cc3, + 0x10986, + 0xc8e83, + 0x1596bc5, + 0x2030c4, + 0x2a84c3, + 0x249944, + 0x3b1384, + 0x205503, + 0x2218c5, + 0x219503, + 0x23e743, + 0x373605, + 0x25ed03, + 0x47ea84c3, + 0x232403, + 0x2e9dc3, 0x200041, - 0x220ec3, - 0x30db04, - 0x211004, - 0x24c083, - 0x204703, - 0x20b803, - 0x77a48, - 0x200882, - 0x327883, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x202002, - 0x2023c4, - 0x202243, - 0x220ec3, - 0x24c083, - 0x2020c3, - 0x204703, - 0x2232c3, - 0x77a48, - 0x38d402, - 0x6a82, - 0xf8f0e, - 0x3e600142, - 0x27a048, - 0x225f46, - 0x2bc3c6, - 0x2258c7, - 0x3ea06b82, - 0x3ef58e08, - 0x20628a, + 0x209703, + 0x210444, + 0x211cc4, + 0x205503, + 0x200983, + 0x214843, + 0x16d208, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x201fc2, + 0x3b1384, + 0x244183, + 0x209703, + 0x205503, + 0x204e83, + 0x200983, + 0x25ed03, + 0x16d208, + 0x36f502, + 0x99c2, + 0x1456108, + 0x100b4e, + 0x48e016c2, + 0x31a448, + 0x234386, + 0x209cc6, + 0x233d07, + 0x4920c202, + 0x49768ec8, + 0x20884a, + 0x25cc88, + 0x200242, + 0x31a0c9, + 0x2f3e07, + 0x216bc6, + 0x3b0d89, + 0x2cf204, + 0x20a6c6, + 0x2dbcc4, + 0x26ffc4, + 0x2544c9, + 0x326686, + 0x320945, + 0x22c445, + 0x384e07, + 0x2bfb47, + 0x28fa44, + 0x233f46, + 0x2fb005, + 0x2fde45, + 0x3963c5, + 0x3b3dc7, + 0x200c05, + 0x314b49, + 0x312945, + 0x333e44, + 0x39b5c7, + 0x31974e, + 0x32e5c9, + 0x33c109, + 0x3a64c6, + 0x23d408, + 0x26d98b, + 0x2aeecc, + 0x37f806, + 0x2dd887, + 0x20a305, + 0x37b5ca, + 0x22af49, + 0x20bf49, + 0x24ff86, + 0x2f69c5, + 0x27ce45, + 0x3490c9, + 0x39654b, + 0x273346, + 0x33a786, + 0x202504, + 0x28bb86, + 0x243908, + 0x3ba646, + 0x214386, + 0x207c08, + 0x20bb47, + 0x20bd09, + 0x20c585, + 0x16d208, + 0x212784, + 0x3ada04, + 0x283785, + 0x399a49, 
+ 0x220f07, + 0x220f0b, + 0x22394a, + 0x227a45, + 0x49a08d42, + 0x33ea47, + 0x49e28908, + 0x2afb87, + 0x350e85, + 0x20c1ca, + 0x99c2, + 0x34dfcb, + 0x24d5ca, + 0x221bc6, + 0x282bc3, + 0x28e34d, + 0x3492cc, + 0x35084d, + 0x245c45, + 0x32ae05, + 0x202247, + 0x3aba49, + 0x208746, + 0x23cac5, + 0x2d29c8, + 0x28ba83, + 0x2dfac8, + 0x28ba88, + 0x2c3747, + 0x309708, + 0x3a7209, + 0x2cc447, + 0x33e247, + 0x396a48, + 0x251f44, + 0x251f47, + 0x273748, + 0x3a3ac6, + 0x205f4f, + 0x211a07, + 0x2e5446, + 0x225d85, + 0x223083, + 0x371847, + 0x36c043, + 0x248e46, + 0x24aa86, + 0x24b286, + 0x290c05, + 0x261903, + 0x388208, + 0x36f009, + 0x38224b, + 0x24b408, + 0x24d145, + 0x24f605, + 0x4a248902, + 0x352289, + 0x3b1407, + 0x256805, + 0x2543c7, + 0x2559c6, + 0x365a45, + 0x36e58b, + 0x257ec4, + 0x25c845, + 0x25c987, + 0x272cc6, + 0x273105, + 0x2812c7, + 0x281a07, + 0x2cd884, + 0x289c0a, + 0x28a0c8, + 0x3b8209, + 0x241e85, + 0x207886, + 0x243aca, + 0x22c346, + 0x261e07, + 0x3b7ecd, + 0x29c809, + 0x38d185, + 0x314187, + 0x332288, + 0x33d848, + 0x3b3107, + 0x379d86, + 0x215dc7, + 0x249f43, + 0x341c04, + 0x363485, + 0x392707, + 0x395dc9, + 0x22be48, + 0x344c45, + 0x23cd84, + 0x246245, + 0x24b80d, + 0x200f82, + 0x373746, + 0x25d246, + 0x2c578a, + 0x376546, + 0x37edc5, + 0x33df85, + 0x33df87, + 0x38ab8c, + 0x270b4a, + 0x28b846, + 0x2b9645, + 0x28b9c6, + 0x28bd07, + 0x28e186, + 0x290b0c, + 0x3b0ec9, + 0x4a610e07, + 0x293b05, + 0x293b06, + 0x293ec8, + 0x23b705, + 0x2a2c85, + 0x2a3848, + 0x2a3a4a, + 0x4aa4ecc2, + 0x4ae0ee02, + 0x2e6705, + 0x284f83, + 0x3adf08, + 0x204043, + 0x2a3cc4, + 0x2ed38b, + 0x26dd48, + 0x2e4d48, + 0x4b349909, + 0x2a7dc9, + 0x2a8906, + 0x2a9d48, + 0x2a9f49, + 0x2aab46, + 0x2aacc5, + 0x3843c6, + 0x2ab5c9, + 0x331f47, + 0x23ea86, + 0x233747, + 0x2085c7, + 0x32c8c4, + 0x4b7b1d49, + 0x2cab88, + 0x368dc8, + 0x383447, + 0x2c5246, + 0x226ac9, + 0x209c87, + 0x32e90a, + 0x38c588, + 0x3af5c7, + 0x3b9786, + 0x24f38a, 0x262708, - 0x200ec2, - 0x206b09, - 0x238887, - 0x21a946, - 0x208d89, - 0x25ba04, - 0x2b7a46, - 0x2e2884, - 0x27b484, - 0x254709, - 0x343886, - 0x247885, - 0x20eec5, - 0x3a7607, - 0x2b9207, - 0x36ee44, - 0x225b06, - 0x2f2945, - 0x2e48c5, - 0x2f6e45, - 0x392607, - 0x3672c5, - 0x3098c9, - 0x2644c5, - 0x2d0cc4, - 0x2e8587, - 0x2cee0e, - 0x31ae09, - 0x327149, - 0x35c146, - 0x31bf08, - 0x2ae58b, - 0x2d1fcc, - 0x26c186, - 0x37b207, - 0x20ae05, - 0x228dca, - 0x31a489, - 0x24f6c9, - 0x388f86, - 0x2f0f05, - 0x282145, - 0x366349, - 0x2f6fcb, - 0x27ed46, - 0x333e06, - 0x20d444, - 0x28ba86, - 0x2aac48, - 0x201186, - 0x203786, - 0x209988, - 0x20a887, - 0x20ab89, - 0x20bdc5, - 0x77a48, - 0x212bc4, - 0x37ee84, - 0x213a85, - 0x395589, - 0x222b07, - 0x222b0b, - 0x2243ca, - 0x228ac5, - 0x3f20ce82, - 0x226d87, - 0x3f629408, - 0x287607, - 0x343bc5, - 0x238f8a, - 0x6a82, - 0x266d4b, - 0x383dca, - 0x223c86, - 0x3a4483, - 0x338f8d, - 0x35b24c, - 0x36654d, - 0x385a45, - 0x23e645, - 0x25ffc7, - 0x200d09, - 0x206186, - 0x32b5c5, - 0x2a9f08, - 0x28b983, - 0x2da308, - 0x28b988, - 0x2bd387, - 0x3b00c8, - 0x200b09, - 0x233a47, - 0x2e2e87, - 0x2f74c8, - 0x24c0c4, - 0x24c0c7, - 0x26df88, - 0x204bc6, - 0x39a48f, - 0x218747, - 0x2d76c6, - 0x240185, - 0x36e8c3, + 0x2dccc5, + 0x226645, + 0x2ee487, + 0x2f7349, + 0x36510b, + 0x315008, + 0x3129c9, + 0x24bfc7, + 0x2b550c, + 0x2b5c4c, + 0x2b5f4a, + 0x2b61cc, + 0x2c2708, + 0x2c2908, + 0x2c2b04, + 0x2c2ec9, + 0x2c3109, + 0x2c334a, + 0x2c35c9, + 0x2c3907, + 0x3af00c, + 0x241146, + 0x34acc8, + 0x22c406, + 0x32e7c6, + 0x38d087, + 0x3b3288, + 0x39034b, + 0x2afa47, + 0x352489, + 0x3445c9, + 
0x249ac7, + 0x278a04, + 0x265187, + 0x2db346, + 0x214a06, + 0x2f3485, + 0x2a5888, + 0x291444, + 0x291446, + 0x270a0b, + 0x21ca49, + 0x214b46, + 0x21c489, + 0x3b3f46, + 0x254688, + 0x223b83, + 0x2f6b45, + 0x22edc9, + 0x261145, + 0x2f9684, + 0x272206, + 0x231545, + 0x228f86, + 0x3056c7, + 0x26e986, + 0x3a304b, + 0x22ea87, + 0x3379c6, + 0x346f06, + 0x384ec6, + 0x28fa09, + 0x2ef14a, + 0x2b3505, + 0x2170cd, + 0x2a3b46, + 0x235546, + 0x2b4e86, + 0x2ed045, + 0x2de9c7, + 0x2e14c7, + 0x3581ce, + 0x209703, + 0x2c5209, + 0x391dc9, + 0x37b9c7, + 0x358f07, + 0x29d645, + 0x27ec45, + 0x4ba2a88f, + 0x2ccec7, + 0x2cd088, + 0x2cd484, + 0x2cde46, + 0x4be44c42, + 0x2d2186, + 0x2d43c6, + 0x391f8e, + 0x2df90a, + 0x357b06, + 0x285eca, + 0x203549, + 0x324105, + 0x398008, + 0x3b5606, + 0x38cec8, + 0x26f088, + 0x28eb8b, + 0x233e05, + 0x200c88, + 0x207d4c, + 0x2bd507, + 0x24ae06, + 0x2e28c8, + 0x20a948, + 0x4c208442, + 0x20a48b, + 0x282549, + 0x329f09, + 0x3bb287, + 0x20f7c8, + 0x4c61bf48, + 0x3511cb, + 0x37e0c9, + 0x234fcd, + 0x2750c8, + 0x224a48, + 0x4ca03ec2, + 0x20e3c4, + 0x4ce1a2c2, + 0x2f4ec6, + 0x4d2004c2, + 0x3813ca, + 0x21c346, + 0x285908, + 0x284488, + 0x2af446, + 0x22d8c6, + 0x2f12c6, + 0x2a3185, + 0x238c04, + 0x4d61e144, + 0x205146, + 0x272707, + 0x4dae8bc7, + 0x35490b, + 0x319b09, + 0x32ae4a, + 0x391804, + 0x33e0c8, + 0x23e84d, + 0x2eb709, + 0x2eb948, + 0x2ebfc9, + 0x2ed844, + 0x243484, + 0x27c885, + 0x317b4b, + 0x26dcc6, + 0x3424c5, + 0x250149, + 0x234008, + 0x2047c4, + 0x37b749, + 0x208105, + 0x2bfb88, + 0x33e907, + 0x33c508, + 0x27d946, + 0x35e387, + 0x292349, + 0x2286c9, + 0x2492c5, + 0x334ec5, + 0x4de2d902, + 0x333c04, + 0x2049c5, + 0x32c146, + 0x318385, + 0x2b1ac7, + 0x205245, + 0x272d04, + 0x3a6586, + 0x23cb47, + 0x232986, + 0x2dca05, + 0x203188, + 0x234585, + 0x2062c7, + 0x20f1c9, + 0x21cb8a, + 0x2e1b87, + 0x2e1b8c, + 0x320906, + 0x343cc9, + 0x23b385, + 0x23b648, + 0x210803, + 0x210805, + 0x2e8a05, + 0x261607, + 0x4e20c002, + 0x22d0c7, + 0x2e4f06, + 0x342786, + 0x2e7d06, + 0x20a886, + 0x208388, + 0x241c85, + 0x2e5507, + 0x2e550d, + 0x201543, + 0x21ec05, + 0x201547, + 0x22d408, + 0x201105, + 0x218c88, + 0x36c0c6, + 0x32b9c7, + 0x2c4785, + 0x233e86, + 0x26f5c5, + 0x21390a, + 0x2f2e06, + 0x377ac7, + 0x2ca505, + 0x3612c7, + 0x36d6c4, + 0x2f9606, + 0x2fb3c5, + 0x32648b, + 0x2db1c9, + 0x2bb24a, + 0x249348, + 0x301d08, + 0x304a4c, + 0x306287, + 0x3073c8, + 0x310a48, + 0x31e945, + 0x34020a, + 0x35c3c9, + 0x4e600802, + 0x200806, + 0x219d04, + 0x2ea849, + 0x220b49, + 0x269287, + 0x294947, + 0x37e789, + 0x38cb48, + 0x38cb4f, + 0x315d06, + 0x2d670b, + 0x36e8c5, 0x36e8c7, - 0x36a503, - 0x247b86, - 0x249b86, - 0x24b006, - 0x28fd05, - 0x2687c3, - 0x389f48, - 0x36c709, - 0x3817cb, - 0x24b188, - 0x24d105, - 0x24e185, - 0x3fa3d6c2, - 0x37dc09, - 0x202447, - 0x257f45, - 0x254607, - 0x256dc6, - 0x363645, - 0x36b8cb, - 0x259d04, - 0x2622c5, - 0x262407, - 0x278406, - 0x278845, - 0x286007, - 0x286587, - 0x274344, - 0x28a70a, - 0x28abc8, - 0x328d49, - 0x3a0805, - 0x34c306, - 0x2aae0a, - 0x20edc6, - 0x266f87, - 0x31694d, - 0x227b09, - 0x329805, - 0x345fc7, - 0x344108, - 0x344788, - 0x323487, - 0x366f86, - 0x215b47, - 0x249503, - 0x337a04, - 0x360585, - 0x38dd07, - 0x392009, - 0x227608, - 0x22ccc5, - 0x3afa84, - 0x382f85, - 0x2474cd, - 0x204242, - 0x302bc6, - 0x272546, - 0x2a3cca, - 0x365a86, - 0x377705, - 0x317145, - 0x317147, - 0x37e34c, - 0x27648a, - 0x28b746, - 0x206945, - 0x28b8c6, - 0x28bc07, - 0x28d946, - 0x28fc0c, - 0x208ec9, - 0x3fe05307, - 0x2924c5, - 0x2924c6, - 0x2929c8, - 0x2b1285, - 0x2a2c05, - 0x2a2e48, - 
0x2a304a, - 0x4020b882, - 0x4060f802, - 0x3827c5, - 0x2d7643, - 0x267348, - 0x21dcc3, - 0x2a32c4, - 0x21dccb, - 0x2ae948, - 0x2a6088, - 0x40b40cc9, - 0x2a7ec9, - 0x2a8586, - 0x2a97c8, - 0x2a99c9, - 0x2ab6c6, - 0x2ab845, - 0x383646, - 0x2ac389, - 0x341607, - 0x24ad46, - 0x238c87, - 0x206007, - 0x23bc44, - 0x40efa889, - 0x2c3d08, - 0x358d08, - 0x360187, - 0x2bed46, - 0x3007c9, - 0x2f7a07, - 0x32a14a, - 0x2be708, - 0x34c447, - 0x357e06, - 0x21ce8a, - 0x280b88, - 0x271fc5, - 0x22d185, - 0x2be8c7, - 0x2d1789, - 0x2d6d4b, - 0x2ede48, - 0x264549, - 0x24b747, - 0x3adc4c, - 0x2b0e4c, - 0x2b114a, - 0x2b13cc, - 0x2bc348, - 0x2bc548, - 0x2bc744, - 0x2bcb09, - 0x2bcd49, - 0x2bcf8a, - 0x2bd209, - 0x2bd547, - 0x20010c, - 0x242886, - 0x2794c8, - 0x20ee86, - 0x388a46, - 0x329707, - 0x31b008, - 0x261a4b, - 0x2874c7, - 0x2f0bc9, - 0x249149, - 0x253dc7, - 0x2e2ac4, - 0x363b07, - 0x34c986, - 0x2179c6, - 0x237a05, - 0x2ccd48, - 0x20f2c4, - 0x20f2c6, - 0x27634b, - 0x2a6649, - 0x31e946, - 0x35ab09, - 0x392786, - 0x301348, - 0x2025c3, - 0x209185, - 0x2038c9, - 0x20cb05, - 0x2fd284, - 0x277506, - 0x26bdc5, - 0x2db706, - 0x2fe047, - 0x2af586, - 0x2974cb, - 0x35a7c7, - 0x2d1646, - 0x371506, - 0x3a76c6, - 0x36ee09, - 0x24df0a, - 0x2b5985, - 0x22d94d, - 0x2a3146, - 0x391306, - 0x2e1746, - 0x21d985, - 0x2d39c7, - 0x29bb47, - 0x29fb0e, - 0x220ec3, - 0x2bed09, - 0x316e89, - 0x2291c7, - 0x27e2c7, - 0x2a7505, - 0x323f05, - 0x4126304f, - 0x2c5b47, - 0x2c5d08, - 0x2c6784, - 0x2c6a46, - 0x41629382, - 0x2ca746, - 0x2cc586, - 0x261d8e, - 0x2da14a, - 0x226886, - 0x22ee4a, - 0x205d89, - 0x314e85, - 0x393c08, - 0x3adb06, - 0x31e788, - 0x326ac8, - 0x24090b, - 0x2259c5, - 0x367348, - 0x209acc, - 0x343a87, - 0x24a7c6, - 0x27fd08, - 0x2b7cc8, - 0x41a09142, - 0x3700cb, - 0x376209, - 0x2d2b49, - 0x3a3347, - 0x20af88, - 0x41f5b088, - 0x20bfcb, - 0x35bc09, - 0x221d4d, - 0x378388, - 0x29bf88, - 0x422018c2, - 0x201084, - 0x4260dd02, - 0x2edbc6, - 0x42a02482, - 0x21c48a, - 0x322806, - 0x32c808, - 0x31c208, - 0x2b7946, - 0x388086, - 0x2e8046, - 0x308f85, - 0x23a584, - 0x42f012c4, - 0x338446, - 0x33be87, - 0x432e9287, - 0x35e7cb, - 0x2cf1c9, - 0x23e68a, - 0x261644, - 0x317288, - 0x24ab0d, - 0x2dfd49, - 0x2dff88, - 0x2e06c9, - 0x2e1c44, - 0x208c84, - 0x281645, - 0x36228b, - 0x2ae8c6, - 0x338285, - 0x343d49, - 0x225bc8, - 0x29f3c4, - 0x228f49, - 0x330045, - 0x2b9248, - 0x2e3547, - 0x327548, - 0x281c06, - 0x226c47, - 0x2910c9, - 0x224189, - 0x249905, - 0x339605, - 0x436284c2, - 0x2e8344, - 0x33b285, - 0x291c46, - 0x335e85, - 0x24e247, - 0x26ccc5, - 0x26cd44, - 0x35c206, - 0x32b647, - 0x244f86, - 0x3af445, - 0x37fd48, - 0x226145, - 0x39a807, - 0x3b2509, - 0x2a678a, - 0x236987, - 0x23698c, - 0x247846, - 0x22b289, - 0x340105, - 0x370f08, - 0x212983, - 0x212985, - 0x2e90c5, - 0x255707, - 0x43a17482, - 0x23e287, - 0x2e5046, - 0x2fa7c6, - 0x303146, - 0x2b7c06, - 0x3302c8, - 0x35a005, - 0x2d7787, - 0x2d778d, - 0x24ddc3, - 0x3a4885, - 0x271b47, - 0x387bc8, - 0x271705, - 0x228808, - 0x32c346, - 0x31cd07, - 0x2bdf45, - 0x225a46, - 0x2d3005, - 0x2bb8ca, - 0x2fc546, - 0x233dc7, - 0x2c6905, - 0x2f5207, - 0x2f5904, - 0x2fd206, - 0x331ac5, - 0x34368b, - 0x34c809, - 0x243bca, - 0x249988, - 0x336448, - 0x337c8c, - 0x3556c7, - 0x35dd48, - 0x361308, - 0x36bc05, - 0x3a024a, - 0x2ea909, - 0x43e04582, - 0x204586, - 0x20c984, - 0x2de009, - 0x362d89, - 0x2250c7, - 0x254447, - 0x2b8649, - 0x326508, - 0x32650f, - 0x270f06, - 0x241b0b, - 0x2ebd45, - 0x2ebd47, - 0x2ec189, - 0x21de06, - 0x228ec7, - 0x3b1e45, - 0x2305c4, - 0x26bc86, - 0x200c44, - 0x30ba07, - 0x2ee808, - 
0x442f0e08, - 0x2f1305, - 0x2f1447, - 0x256149, - 0x201904, - 0x201908, - 0x4476d588, - 0x28e1c4, - 0x230f08, - 0x31ebc4, - 0x21bf09, - 0x22dc45, - 0x44a012c2, - 0x270f45, - 0x220885, - 0x24cb48, - 0x232a87, - 0x44e05a02, - 0x2c8405, - 0x2518c6, - 0x266246, - 0x2e8308, - 0x2e9c48, - 0x335e46, - 0x2eac86, - 0x21e589, - 0x2fa706, - 0x3081cb, - 0x28edc5, - 0x20fcc6, - 0x3abd88, - 0x3906c6, - 0x35c646, - 0x21d30a, - 0x258dca, - 0x24dac5, - 0x35a0c7, - 0x349746, - 0x45201242, - 0x271c87, - 0x236445, - 0x2aad84, - 0x2aad85, - 0x261546, - 0x276d47, - 0x20c705, - 0x258f44, - 0x26d548, - 0x35c705, - 0x28af07, - 0x293245, - 0x219805, - 0x24a284, - 0x28f709, - 0x2f2788, - 0x2cea06, - 0x212e06, - 0x28dec6, - 0x457a8c08, - 0x2f5087, - 0x2f53cd, - 0x2f5b4c, - 0x2f6149, - 0x2f6389, - 0x45b54382, - 0x3a6903, - 0x20c403, - 0x34ca45, - 0x38de0a, - 0x318e06, - 0x2fab45, - 0x2fe584, - 0x2fe58b, - 0x30e4cc, - 0x30ed0c, - 0x30f015, - 0x30f9cd, - 0x31150f, - 0x3118d2, - 0x311d4f, - 0x312112, - 0x312593, - 0x312a4d, - 0x31300d, - 0x31338e, - 0x31384e, - 0x31448c, - 0x31480c, - 0x314c4b, - 0x314fce, - 0x318092, - 0x318bcc, - 0x319110, - 0x32d652, - 0x32e60c, - 0x32eccd, - 0x32f00c, - 0x332dd1, - 0x333f8d, - 0x33664d, - 0x336c4a, - 0x336ecc, - 0x3377cc, - 0x337f8c, - 0x33880c, - 0x33c653, - 0x33cc50, - 0x33d050, - 0x33d8cd, - 0x33decc, - 0x33ebc9, - 0x34024d, - 0x340593, - 0x346891, - 0x346cd3, - 0x34738f, - 0x34774c, - 0x347a4f, - 0x347e0d, - 0x34840f, - 0x3487d0, - 0x34924e, - 0x34d5ce, - 0x34dd10, - 0x34e90d, - 0x34f28e, - 0x34f60c, - 0x3505d3, - 0x3521ce, - 0x352910, - 0x352d11, - 0x35314f, - 0x353513, - 0x353f0d, - 0x35424f, - 0x35460e, - 0x354f10, - 0x355309, - 0x356010, - 0x35664f, - 0x356ccf, - 0x357092, - 0x35860e, - 0x3594cd, - 0x35cc0d, - 0x35cf4d, - 0x35f24d, - 0x35f58d, - 0x35f8d0, - 0x35fccb, - 0x36034c, - 0x3606cc, - 0x3609cc, - 0x360cce, - 0x36f050, - 0x371692, - 0x371b0b, - 0x3720ce, - 0x37244e, - 0x3736ce, - 0x37454b, - 0x45f74b16, - 0x37740d, - 0x378594, - 0x37920d, - 0x37ad55, - 0x37be8d, - 0x37c80f, - 0x37d04f, - 0x381a8f, - 0x381e4e, - 0x3830cd, - 0x3855d1, - 0x38820c, - 0x38850c, - 0x38880b, - 0x38938c, - 0x38974f, - 0x389b12, - 0x38a4cd, - 0x38b48c, - 0x38b90c, - 0x38bc0d, - 0x38bf4f, - 0x38c30e, - 0x38dacc, - 0x38e08d, - 0x38e3cb, - 0x38f00c, - 0x38f58d, - 0x38f8ce, - 0x38fd49, - 0x390a93, - 0x39148d, - 0x3917cd, - 0x391dcc, - 0x39224e, - 0x392bcf, - 0x392f8c, - 0x39328d, - 0x3935cf, - 0x39398c, - 0x39408c, - 0x39444c, - 0x39474c, - 0x394e0d, - 0x395152, - 0x3957cc, - 0x395acc, - 0x395dd1, - 0x39620f, - 0x3965cf, - 0x396993, - 0x397a8e, - 0x39800f, - 0x3983cc, - 0x4639870e, - 0x398a8f, - 0x398e56, - 0x39ad12, + 0x385889, + 0x212ac6, + 0x37b6c7, + 0x2d8905, + 0x2303c4, + 0x261006, + 0x211ac4, + 0x2ce4c7, + 0x307048, + 0x4eaf68c8, + 0x2f7085, + 0x2f71c7, + 0x236549, + 0x23e284, + 0x23e288, + 0x4ee2b888, + 0x279444, + 0x231388, + 0x32fdc4, + 0x3ab849, + 0x2173c5, + 0x4f20b0c2, + 0x315d45, + 0x2e4345, + 0x251288, + 0x232e47, + 0x4f601442, + 0x204785, + 0x2cf606, + 0x24b106, + 0x333bc8, + 0x302108, + 0x318346, + 0x327f06, + 0x2e2e49, + 0x3426c6, + 0x21298b, + 0x296305, + 0x368106, + 0x377088, + 0x250506, + 0x292cc6, + 0x21914a, + 0x23084a, + 0x245005, + 0x241d47, + 0x308786, + 0x4fa01682, + 0x201687, + 0x238705, + 0x243a44, + 0x243a45, + 0x391706, + 0x26a447, + 0x219a85, + 0x220c04, + 0x2c7e88, + 0x292d85, + 0x333a47, + 0x3a1645, + 0x213845, + 0x256e04, + 0x287609, + 0x2fae48, + 0x2e0286, + 0x2d9d06, + 0x2b6e46, + 0x4fefbc88, + 0x2fbe87, + 0x2fc0cd, + 0x2fcb4c, + 0x2fd149, + 0x2fd389, + 
0x5035b2c2, + 0x3a8603, + 0x207943, + 0x2db405, + 0x39280a, + 0x327dc6, + 0x302385, + 0x305884, + 0x30588b, + 0x31b70c, + 0x31c14c, + 0x31c455, + 0x31d74d, + 0x320a8f, + 0x320e52, + 0x3212cf, + 0x321692, + 0x321b13, + 0x321fcd, + 0x32258d, + 0x32290e, + 0x322e8e, + 0x3236cc, + 0x323a8c, + 0x323ecb, + 0x32424e, + 0x325392, + 0x327b8c, + 0x328790, + 0x335212, + 0x33640c, + 0x336acd, + 0x336e0c, + 0x339a51, + 0x33a90d, + 0x34084d, + 0x340e4a, + 0x3410cc, + 0x3419cc, + 0x3421cc, + 0x34290c, + 0x344dd3, + 0x345450, + 0x345850, + 0x34610d, + 0x34670c, + 0x347309, + 0x34890d, + 0x348c53, + 0x34a311, + 0x34a753, + 0x34b24f, + 0x34b60c, + 0x34b90f, + 0x34bccd, + 0x34c2cf, + 0x34c690, + 0x34d10e, + 0x3539ce, + 0x353f50, + 0x35518d, + 0x355b0e, + 0x355e8c, + 0x356e93, + 0x35934e, + 0x3599d0, + 0x359dd1, + 0x35a20f, + 0x35a5d3, + 0x35ae4d, + 0x35b18f, + 0x35b54e, + 0x35bc10, + 0x35c009, + 0x35cd90, + 0x35d38f, + 0x35da0f, + 0x35ddd2, + 0x35efce, + 0x35fc4d, + 0x36070d, + 0x360a4d, + 0x36184d, + 0x361b8d, + 0x361ed0, + 0x3622cb, + 0x36324c, + 0x3635cc, + 0x363bcc, + 0x363ece, + 0x371a10, + 0x372dd2, + 0x37324b, + 0x3738ce, + 0x373c4e, + 0x3744ce, + 0x37494b, + 0x50774f56, + 0x37624d, + 0x3766d4, + 0x377e0d, + 0x37b115, + 0x37c40d, + 0x37cd8f, + 0x37d5cf, + 0x38250f, + 0x3828ce, + 0x382e4d, + 0x383f91, + 0x38674c, + 0x386a4c, + 0x386d4b, + 0x38764c, + 0x387a0f, + 0x387dd2, + 0x38878d, + 0x38974c, + 0x389bcc, + 0x389ecd, + 0x38a20f, + 0x38a5ce, + 0x3924cc, + 0x392a8d, + 0x392dcb, + 0x39358c, + 0x393b0d, + 0x393e4e, + 0x3941c9, + 0x394d13, + 0x39524d, + 0x39558d, + 0x395b8c, + 0x39600e, + 0x396fcf, + 0x39738c, + 0x39768d, + 0x3979cf, + 0x397d8c, + 0x39848c, + 0x39890c, + 0x398c0c, + 0x3992cd, + 0x399612, + 0x399c8c, + 0x399f8c, + 0x39a291, + 0x39a6cf, + 0x39aa8f, + 0x39ae53, + 0x39bcce, + 0x39c04f, 0x39c40c, - 0x39cd0f, - 0x39d38d, - 0x39d6cf, - 0x39da8c, - 0x39dd8d, - 0x39e0cd, - 0x39fd0e, - 0x3a14cc, - 0x3a17cc, - 0x3a1ad0, - 0x3a5c91, - 0x3a60cb, - 0x3a650c, - 0x3a680e, - 0x3a9111, - 0x3a954e, - 0x3a98cd, - 0x3ad8cb, - 0x3ae8cf, - 0x3afb94, - 0x224242, - 0x224242, - 0x204c83, - 0x224242, - 0x204c83, - 0x224242, - 0x20c942, - 0x383685, - 0x3a8e0c, - 0x224242, - 0x224242, - 0x20c942, - 0x224242, - 0x293045, - 0x2a6785, - 0x224242, - 0x224242, - 0x212bc2, - 0x293045, - 0x30ff09, - 0x34658c, - 0x224242, - 0x224242, - 0x224242, - 0x224242, - 0x383685, - 0x224242, - 0x224242, - 0x224242, - 0x224242, - 0x212bc2, - 0x30ff09, - 0x224242, - 0x224242, - 0x224242, - 0x2a6785, - 0x224242, - 0x2a6785, - 0x34658c, - 0x3a8e0c, - 0x327883, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x24c083, - 0x204703, - 0x4fdc8, - 0x57044, - 0x4bf48, - 0x200882, - 0x47206a82, - 0x240bc3, - 0x244c44, - 0x209d03, - 0x2db184, - 0x22fb86, - 0x203b43, - 0x379084, - 0x27bd85, - 0x220ec3, - 0x24c083, - 0x204703, - 0x24e9ca, - 0x23b346, - 0x3727cc, - 0x77a48, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x232003, - 0x2cc586, - 0x24c083, - 0x204703, - 0x21d603, - 0x8ac2, - 0xda987, - 0xb7008, - 0xf60e, - 0x89352, - 0x6e0b, - 0x47f1cb45, - 0x48375d8c, - 0x3c347, - 0x117a4a, - 0x3b4d0, - 0x16fcc8, - 0x7f087, - 0x58a4b, - 0x172cc9, - 0x16fbc7, - 0xb587, - 0x7ef87, - 0x188c6, - 0x14bcc8, - 0x4881c106, - 0xa868d, - 0x117410, - 0x48c08b02, - 0x142a08, - 0x6ca87, - 0x88349, - 0x52946, - 0x92bc8, - 0x10442, - 0x9ec8a, - 0xf2347, - 0xed107, - 0xa3849, - 0xa5408, - 0x159b85, - 0xe0c0e, - 0x1224e, - 0x171cf, - 0x18549, - 0x44b09, - 0x6f38b, - 0x8248f, - 0x8f90c, - 0xac74b, - 0x16be48, - 0x15e6c7, - 0xf02c8, - 0x11bd0b, - 0x13e54c, - 
0x149e0c, - 0x151ecc, - 0x154a4d, - 0x15bd88, - 0x3c909, - 0x14e38b, - 0xbef46, - 0xcdfc5, - 0xd36d0, - 0x197586, - 0x62145, - 0xd6908, - 0xdb947, - 0xdcfc7, - 0x142e47, - 0xeda0a, - 0xb6e8a, - 0x72606, - 0x906cd, - 0x14ac08, - 0x496c8, - 0x4a1c9, - 0xed54c, - 0x154c4b, - 0x120304, - 0x16e409, - 0xd7e86, - 0x3f02, - 0x122706, - 0x6c2, - 0xc2ec5, - 0x481, - 0xb7c3, - 0x4878cd86, - 0x92f43, - 0xf582, - 0x3a004, - 0xec2, - 0x23504, - 0x9c2, - 0x8f02, - 0x70c2, - 0x105b42, - 0x1482, - 0x107ac2, - 0x8c2, - 0x1eb82, - 0x35542, - 0x682, - 0x1582, - 0xb042, - 0x31b03, + 0x50b9c74e, + 0x39cacf, + 0x39ce96, + 0x39dc12, + 0x39f38c, + 0x39fd0f, + 0x3a038d, + 0x3a06cf, + 0x3a0a8c, + 0x3a0d8d, + 0x3a10cd, + 0x3a254e, + 0x3a4b8c, + 0x3a4e8c, + 0x3a5190, + 0x3a7a91, + 0x3a7ecb, + 0x3a820c, + 0x3a850e, + 0x3aa811, + 0x3aac4e, + 0x3aafcd, + 0x3b53cb, + 0x3b5e8f, + 0x3b6d94, + 0x228782, + 0x228782, + 0x200c83, + 0x228782, + 0x200c83, + 0x228782, + 0x205142, + 0x384405, + 0x3aa50c, + 0x228782, + 0x228782, + 0x205142, + 0x228782, + 0x294545, + 0x21cb85, + 0x228782, + 0x228782, + 0x20b382, + 0x294545, + 0x31f3c9, + 0x34a00c, + 0x228782, + 0x228782, + 0x228782, + 0x228782, + 0x384405, + 0x228782, + 0x228782, + 0x228782, + 0x228782, + 0x20b382, + 0x31f3c9, + 0x228782, + 0x228782, + 0x228782, + 0x21cb85, + 0x228782, + 0x21cb85, + 0x34a00c, + 0x3aa50c, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x2708, + 0x5fc84, + 0xe0e08, + 0x205702, + 0x51a099c2, + 0x23dbc3, + 0x24f2c4, + 0x2032c3, + 0x393304, + 0x22f706, + 0x20e883, + 0x3328c4, + 0x286bc5, + 0x209703, + 0x205503, + 0x200983, + 0x255cca, + 0x2efec6, + 0x373fcc, + 0x16d208, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x227f83, + 0x2d43c6, + 0x205503, + 0x200983, + 0x201303, + 0xa4508, + 0x129845, + 0x14902, + 0x52f86185, + 0x21347, + 0xc93c8, + 0xec0e, + 0x88192, + 0xfe20b, + 0x532d6a45, + 0x536d6a4c, + 0xb007, + 0x16fc07, + 0x1b254a, + 0x3a6d0, + 0x149c05, + 0xd95cb, + 0x68bc8, + 0x38f47, + 0x304cb, + 0x4efc9, + 0x11dd07, + 0x2c07, + 0x73587, + 0x1c106, + 0xd4ac8, + 0x53c1cdc6, + 0xa8a0d, + 0x1b1f10, + 0x5402bb82, + 0x9688, + 0x4a450, + 0x14434c, + 0x5474e88d, + 0x655c7, + 0x78749, + 0x52e06, + 0x940c8, + 0x67e42, + 0x9f54a, + 0x27f07, + 0x35fc7, + 0xa4909, + 0xa6388, + 0xb9b45, + 0xec50e, + 0xb54e, + 0xdecf, + 0x11809, + 0xbb9c9, + 0x43e4b, + 0x7664f, + 0x8780c, + 0x9ef4b, + 0xbbf48, + 0x154807, + 0xcdc48, + 0xfb80b, + 0xf568c, + 0xf640c, + 0xf908c, + 0xfe68d, + 0x17e248, + 0xeab02, + 0x113a49, + 0x185d4b, + 0xc5446, + 0x116fcb, + 0xd804a, + 0xd8c05, + 0xde6d0, + 0x111806, + 0x192345, + 0xe3f48, + 0xe9187, + 0xe9447, + 0xff487, + 0xf4d0a, + 0xc924a, + 0x5d306, + 0x91a0d, + 0x86ec8, + 0x55b08, + 0x56d49, + 0xb3c45, + 0xf484c, + 0xfe88b, + 0x165044, + 0xfaa89, + 0xfacc6, + 0x1af7c6, + 0x2dc2, + 0x125c86, + 0x107247, + 0x7b02, + 0xc83c5, + 0x29544, + 0x1ec1, + 0x4c983, + 0x53a85146, + 0x94443, + 0xd882, + 0x27f04, + 0x242, + 0x5ef44, + 0x3dc2, + 0x8142, + 0x2502, + 0x10f242, + 0x1ec2, + 0xd6a42, 0x4142, - 0x202, - 0x9342, - 0xdb02, - 0x10702, - 0x30842, - 0x134c2, - 0x42, - 0x4982, - 0x2002, - 0x2243, - 0x3fc2, - 0x7382, - 0xafcc2, - 0xac82, - 0x17382, - 0x3702, - 0x33a82, - 0x6a42, - 0xf842, - 0x16f542, - 0x8b82, - 0xb602, - 0x4c083, + 0x1b102, + 0x2cd82, + 0x5742, 0xdc2, - 0x9142, - 0x1042, - 0x12f42, - 0x49885, - 0xa742, - 0x42242, - 0x3e943, - 0x34c2, - 0xed42, - 0x4042, - 0x2742, - 0x3a02, - 0x5a02, - 0x1bc2, - 0x3f02, - 0x74747, - 0x213f03, - 0x200882, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x215ac3, - 
0x232003, - 0x24c083, - 0x2020c3, - 0x204703, - 0x292f83, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x220ec3, - 0x24c083, - 0x2020c3, - 0x204703, - 0x22bf83, - 0x231b03, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, + 0xf882, + 0x32403, + 0x5f02, + 0x7c2, + 0x18342, + 0xfc82, + 0x5e82, + 0x1ae02, + 0x17f42, + 0x15c2, + 0x29c2, + 0x1fc2, + 0x44183, + 0x3942, + 0x6502, + 0xafd42, + 0xbe02, + 0x282, + 0x4bc2, + 0x1f42, + 0xa8542, + 0x2342, + 0x152bc2, + 0x675c2, + 0x2c82, + 0x5503, + 0x8c2, + 0x8442, + 0x33c2, + 0xb482, + 0x49245, + 0xba02, + 0x2d4c2, + 0x3c083, + 0x482, + 0x1c42, + 0x27c2, + 0x3902, + 0x1102, + 0x1442, + 0xc2, + 0x2dc2, + 0x9885, + 0x75c47, + 0x212503, + 0x205702, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x20ad83, + 0x227f83, + 0x205503, + 0x204e83, + 0x200983, + 0x294483, + 0x169c3, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x209703, + 0x205503, + 0x204e83, + 0x200983, + 0x2a84c3, + 0x232403, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, 0x200041, - 0x220ec3, - 0x24c083, - 0x20f543, - 0x204703, - 0x327883, - 0x22bf83, - 0x231b03, - 0x2aa943, - 0x20f583, - 0x376b83, - 0x285843, - 0x2a4543, - 0x240383, - 0x250cc3, - 0x2023c4, - 0x24c083, - 0x204703, - 0x2232c3, - 0x341bc4, - 0x22b683, - 0x1e03, - 0x201283, - 0x330d48, - 0x21cec4, - 0x315e4a, - 0x3807c6, - 0xdd704, - 0x3a4107, - 0x22134a, - 0x270dc9, - 0x3b15c7, - 0x20054a, - 0x327883, - 0x38284b, - 0x3292c9, - 0x28dfc5, - 0x2ca587, - 0x6a82, - 0x22bf83, - 0x3583c7, - 0x21e2c5, - 0x2e2989, - 0x231b03, - 0x2257c6, - 0x2b1e83, - 0xe50c3, - 0xfd786, - 0x60346, - 0x3ac7, - 0x214246, - 0x21a385, - 0x20be87, - 0x338647, - 0x4ae50cc3, - 0x32e847, - 0x363a03, - 0x238785, - 0x2023c4, - 0x222788, - 0x2ae00c, - 0x2ad305, - 0x3a30c6, - 0x358287, - 0x20a207, - 0x3b0407, - 0x205688, - 0x27ffcf, - 0x2d2a85, - 0x240cc7, - 0x39f787, - 0x2a340a, - 0x2a9d49, - 0x2d8305, - 0x2dbe4a, - 0x1225c6, - 0x2bb605, - 0x371d44, - 0x2b7886, - 0x300b87, - 0x23b987, - 0x341908, - 0x2025c5, - 0x21e1c6, - 0x203705, - 0x267105, - 0x21e104, - 0x31c107, - 0x33010a, - 0x399d08, - 0x2f0006, - 0x32003, - 0x2cff85, - 0x22b046, - 0x200346, - 0x262046, - 0x220ec3, - 0x38a747, - 0x39f705, - 0x24c083, - 0x3b184d, - 0x2020c3, - 0x341a08, - 0x3abc04, - 0x278705, - 0x2a3306, - 0x2347c6, - 0x20fbc7, - 0x2a4587, - 0x26c785, - 0x204703, - 0x3977c7, - 0x33b749, - 0x258349, - 0x26cd8a, - 0x244002, - 0x238744, - 0x2d7304, - 0x220c47, - 0x23e148, - 0x2dda89, - 0x3a4749, - 0x2ded47, - 0x2d2586, - 0xe0986, - 0x2e1c44, - 0x2e224a, - 0x2e7808, - 0x2e7f09, - 0x29b1c6, - 0x3028c5, - 0x399bc8, - 0x2c070a, - 0x25ad03, - 0x239406, - 0x2dee47, - 0x2115c5, - 0x3abac5, - 0x25b2c3, - 0x2512c4, - 0x22d145, - 0x286687, - 0x2f28c5, - 0x2edd06, - 0x135d45, - 0x226943, - 0x226949, - 0x2784cc, - 0x2ab3cc, - 0x2c7308, - 0x2995c7, - 0x2f1d48, - 0x2f318a, - 0x2f414b, - 0x329408, - 0x3a31c8, - 0x2032c6, - 0x34d3c5, - 0x31f40a, - 0x217cc5, - 0x2012c2, - 0x2bde07, - 0x26ed46, - 0x355b45, - 0x329f89, - 0x237f45, - 0x2be645, - 0x238349, - 0x22aec6, - 0x3650c8, - 0x35b843, - 0x35b8c6, - 0x277446, - 0x300285, - 0x300289, - 0x2b1989, - 0x244347, - 0x100104, - 0x300107, - 0x3a4649, - 0x221545, - 0x3a688, - 0x366d05, - 0x357885, - 0x22a889, + 0x209703, + 0x205503, + 0x21c2c3, + 0x200983, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x209683, + 0x2163c3, + 0x277dc3, + 0x280b83, + 0x21c303, + 0x252c03, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x25ed03, + 0x352e84, + 0x231a03, + 0x30c3, + 0x228483, + 0x37a908, + 0x24f3c4, + 0x3b870a, + 0x2b8ec6, + 0x1b6a04, + 0x39b2c7, + 0x21e7ca, + 0x315bc9, + 
0x3ab587, + 0x3b724a, + 0x38d2c3, + 0x2e678b, + 0x2b9fc9, + 0x2bd645, + 0x2d1fc7, + 0x99c2, + 0x2a84c3, + 0x205747, + 0x2e2b85, + 0x2dbdc9, + 0x232403, + 0x233c06, + 0x2c1a43, + 0xdb283, + 0x104e46, + 0x18ec46, + 0xe807, + 0x212e46, + 0x21b185, + 0x282407, + 0x2d5b87, + 0x56ae9dc3, + 0x336647, + 0x365e03, + 0x206a05, + 0x3b1384, + 0x220688, + 0x38644c, + 0x2ad745, + 0x29c986, + 0x205607, + 0x38b907, + 0x238347, + 0x245108, + 0x303b8f, + 0x315e05, + 0x23dcc7, + 0x26f287, + 0x2a3e0a, + 0x2d2809, + 0x304f85, + 0x30664a, + 0x82a06, + 0x2c1ac5, + 0x374b84, + 0x2843c6, + 0x2f1d47, + 0x2eaa07, + 0x3bb408, + 0x22dc85, + 0x2e2a86, + 0x214305, + 0x3adcc5, + 0x21c984, + 0x2af347, + 0x2081ca, + 0x334808, + 0x35ba86, + 0x27f83, + 0x2da905, + 0x25f906, + 0x3af246, + 0x392246, + 0x209703, + 0x388a07, + 0x26f205, + 0x205503, + 0x2d830d, + 0x204e83, + 0x3bb508, + 0x27f404, + 0x272fc5, + 0x2a3d06, + 0x234d46, + 0x368007, + 0x2a6ec7, + 0x267345, + 0x200983, + 0x21fbc7, + 0x2788c9, + 0x311a49, + 0x22708a, + 0x243002, + 0x2069c4, + 0x2e5084, + 0x390207, + 0x22cf88, + 0x2ea2c9, + 0x21eac9, + 0x2eaf47, + 0x2ba486, + 0xec286, + 0x2ed844, + 0x2ede4a, + 0x2f0d48, + 0x2f1189, + 0x2bdbc6, + 0x2b1445, + 0x3346c8, + 0x2c5f8a, + 0x22ed03, + 0x353006, + 0x2eb047, + 0x223ec5, + 0x3a5e05, + 0x264b83, + 0x250cc4, + 0x226605, + 0x281b07, + 0x2faf85, + 0x2ee346, + 0xfc605, + 0x247d83, + 0x357bc9, + 0x272d8c, + 0x29344c, + 0x2ced08, + 0x293087, + 0x2f7908, + 0x2f7c4a, + 0x2f888b, + 0x2ba108, + 0x234e48, + 0x239586, + 0x390d45, + 0x38da4a, + 0x3a6205, 0x20b0c2, - 0x22b484, - 0x202882, - 0x203fc2, - 0x33f285, - 0x2dd488, - 0x373085, - 0x2bd703, - 0x2bd705, - 0x2ca943, - 0x212602, - 0x269704, - 0x2344c3, - 0x209242, - 0x35a344, - 0x2d8003, - 0x20d102, - 0x2bd783, - 0x28c784, - 0x2b5c43, - 0x23d444, - 0x202942, - 0x275183, - 0x203a03, - 0x209642, - 0x2eab42, - 0x2b17c9, - 0x207e02, - 0x289ec4, - 0x208b42, - 0x399a44, - 0x2d2544, - 0x2e69c4, - 0x203f02, - 0x202f02, - 0x216483, - 0x2e3c43, - 0x310c04, - 0x262e44, - 0x2b1b84, - 0x2c5584, - 0x2ff383, - 0x2af143, - 0x322544, - 0x301544, - 0x301846, - 0x25b3c2, - 0x206a82, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x200882, - 0x327883, - 0x22bf83, - 0x231b03, - 0x204543, - 0x250cc3, - 0x2023c4, - 0x2b1a84, - 0x211004, - 0x24c083, - 0x204703, - 0x21d603, - 0x2e4144, - 0x27a003, - 0x2b3003, - 0x347104, - 0x366b06, - 0x208143, - 0x21afc3, - 0x211a83, - 0x2b0d83, - 0x2387c3, - 0x232003, - 0x2298c5, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x2da603, - 0x22e9c3, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x202243, - 0x24c083, - 0x232704, - 0x204703, - 0x29e044, - 0x2b7685, - 0x206a82, - 0x200e42, - 0x20f582, - 0x202b42, - 0x200fc2, - 0x22bf83, - 0x234a44, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x211004, - 0x24c083, - 0x204703, - 0x20b803, - 0x223504, - 0x77a48, - 0x22bf83, - 0x2020c3, - 0x247204, - 0x77a48, - 0x22bf83, - 0x248fc4, - 0x2023c4, - 0x2020c3, - 0x2018c2, - 0x204703, - 0x2298c3, - 0x2ebf05, - 0x2012c2, - 0x301683, - 0x200882, - 0x77a48, - 0x206a82, - 0x231b03, - 0x250cc3, - 0x202002, - 0x204703, - 0x200882, - 0x200707, - 0x25ba05, - 0x2ba684, - 0x387ac6, - 0x20540b, - 0x267b89, - 0x3a3006, - 0x340a09, - 0x2b2508, - 0x208243, - 0x77a48, - 0x2286c7, - 0x328148, - 0x345883, - 0x3038c4, - 0x32c10b, - 0x266545, - 0x2f7888, - 0x2ea389, - 0x25a0c3, - 0x22bf83, - 0x204488, - 0x2f0fc7, - 0x345e86, - 0x231b03, - 0x345987, - 0x250cc3, - 0x2598c6, - 0x202243, - 0x22d007, - 
0x235947, - 0x390f47, - 0x31c085, - 0x20a8c3, - 0x21038b, - 0x265448, - 0x227c88, - 0x33b906, - 0x344b49, - 0x323887, - 0x2fae85, - 0x3af704, - 0x271008, - 0x234e4a, - 0x235089, - 0x26d4c3, - 0x281485, - 0x28da83, - 0x22c806, - 0x2ba4c4, - 0x300488, - 0x388b8b, - 0x33f145, - 0x2b7406, - 0x2ba3c5, - 0x2baa88, - 0x2bb747, - 0x3b0287, - 0x315a47, - 0x2151c4, - 0x30b3c7, - 0x296746, - 0x220ec3, - 0x2c3b08, - 0x24e2c3, - 0x2cac08, - 0x2d42c5, - 0x3ac0c8, - 0x231d07, - 0x24c083, - 0x245c83, - 0x28ae44, - 0x323b87, - 0x209d83, - 0x235a0b, - 0x2039c3, - 0x24e284, - 0x2ebf88, - 0x204703, - 0x2f2f05, - 0x376a05, - 0x3abfc6, - 0x215c45, - 0x2d4684, - 0x209202, - 0x2e81c3, - 0x371dca, - 0x3a24c3, - 0x271549, - 0x30b0c6, - 0x217f88, - 0x28c2c6, - 0x224d87, - 0x2e8788, - 0x2f2d08, - 0x319043, - 0x373143, - 0x22a3c9, - 0x2f61c3, - 0x349646, - 0x25b686, - 0x2445c6, - 0x397389, - 0x2ffd44, - 0x216c43, - 0x2dbd45, - 0x30c709, - 0x22d103, - 0x2fc404, - 0x362ac4, - 0x212dc4, - 0x294846, - 0x20a403, - 0x20a408, - 0x255448, - 0x2eb306, - 0x2fac8b, - 0x2fafc8, - 0x2fb1cb, - 0x2fdd89, - 0x2fd687, - 0x2fe208, - 0x2fedc3, - 0x2e94c6, - 0x39ab47, - 0x297445, - 0x34d8c9, - 0x34448d, - 0x217dd1, - 0x234905, - 0x200882, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x2c8144, - 0x250cc3, - 0x202243, - 0x220ec3, - 0x24c083, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x232003, - 0x24c083, - 0x204703, - 0x263c83, - 0x20b803, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x232003, - 0x24c083, - 0x204703, - 0x212dc2, + 0x2c4647, + 0x25fe86, + 0x35c8c5, + 0x370809, + 0x2f39c5, + 0x27e985, + 0x2ddf09, + 0x351846, + 0x237e88, + 0x33f383, + 0x20f486, + 0x272146, + 0x306445, + 0x306449, + 0x2b6789, + 0x279ac7, + 0x109104, + 0x309107, + 0x21e9c9, + 0x238d05, + 0x413c8, + 0x3b2e85, + 0x330e85, + 0x380509, + 0x201702, + 0x25e544, + 0x201e82, + 0x203942, + 0x31ecc5, + 0x3b6788, + 0x2b3b85, + 0x2c3ac3, + 0x2c3ac5, + 0x2d2383, + 0x20f442, + 0x377804, + 0x2ac783, + 0x2056c2, + 0x379884, + 0x2e5d43, + 0x2082c2, + 0x2b3c03, + 0x28d084, + 0x2e4c83, + 0x248684, + 0x203082, + 0x218943, + 0x22ef03, + 0x200d02, + 0x361782, + 0x2b65c9, + 0x207842, + 0x288d04, + 0x203cc2, + 0x334544, + 0x2ba444, + 0x2b74c4, + 0x202dc2, + 0x2391c2, + 0x225bc3, + 0x2f8403, + 0x23d904, + 0x281c84, + 0x2eb1c4, + 0x2f0f04, + 0x30a483, + 0x26e543, + 0x282984, + 0x30a2c4, + 0x30aac6, + 0x22a282, + 0x2099c2, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x205702, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2007c3, + 0x2e9dc3, + 0x3b1384, + 0x2b6884, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x2ee644, + 0x31a403, + 0x2bd0c3, + 0x34ab84, + 0x3b2c86, + 0x202f03, + 0x16fc07, + 0x222403, + 0x2459c3, + 0x2b0543, + 0x206a43, + 0x227f83, + 0x2d6cc5, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x282c43, + 0x2a5143, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x244183, + 0x205503, + 0x23a504, + 0x200983, + 0x26bec4, + 0x2bf145, + 0x16fc07, + 0x2099c2, + 0x2006c2, + 0x20d882, + 0x200c82, + 0x200442, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x214843, + 0x25ef44, + 0x16d208, + 0x2a84c3, + 0x204e83, + 0x169c3, + 0x2030c4, + 0x16d208, + 0x2a84c3, + 0x249944, + 0x3b1384, + 0x204e83, + 0x203ec2, + 0x200983, + 0x23e743, + 0x50cc4, + 0x373605, + 0x20b0c2, + 0x30a403, + 0x205702, + 0x16d208, + 0x2099c2, + 
0x232403, + 0x2e9dc3, + 0x201fc2, + 0x200983, + 0x205702, + 0x1b7407, + 0x12e3c9, + 0x6f83, + 0x16d208, + 0x18ebc3, + 0x5a31fd87, + 0xa84c3, + 0x708, + 0x232403, + 0x2e9dc3, + 0x1ae886, + 0x244183, + 0x8f2c8, + 0xc0e08, + 0x41a46, + 0x209703, + 0xca988, + 0xb1b43, + 0xdf145, + 0x32607, + 0x8003, + 0x174c0a, + 0x11ed83, + 0x308d44, + 0x10398b, + 0x103f48, + 0x8d742, + 0x205702, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2d5f04, + 0x2e9dc3, + 0x244183, + 0x209703, + 0x205503, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x227f83, + 0x205503, + 0x200983, + 0x21aa03, + 0x214843, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x169c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x227f83, + 0x205503, + 0x200983, + 0x212982, 0x200141, - 0x200882, + 0x205702, 0x200001, - 0x311602, - 0x77a48, - 0x21fc45, - 0x200481, - 0x2bf83, - 0x200741, + 0x320b82, + 0x16d208, + 0x21d445, + 0x201ec1, + 0xa84c3, + 0x200701, + 0x200301, 0x200081, - 0x201501, - 0x234382, - 0x36a504, - 0x383603, - 0x2007c1, - 0x200901, + 0x298602, + 0x36c044, + 0x384383, + 0x200181, + 0x200401, 0x200041, - 0x2001c1, - 0x388e07, - 0x2d49cf, - 0x2cadc6, - 0x2000c1, - 0x26c046, + 0x200101, + 0x2e9907, + 0x2eab8f, + 0x340446, + 0x200281, + 0x37f6c6, + 0x200e81, + 0x2008c1, + 0x332a0e, + 0x200441, + 0x200983, + 0x201301, + 0x270e85, + 0x20f942, + 0x264a85, 0x200341, - 0x200cc1, - 0x25040e, - 0x200fc1, - 0x204703, - 0x200ac1, - 0x26f285, - 0x209202, - 0x25b1c5, - 0x200c01, - 0x200241, - 0x200a01, - 0x2012c2, + 0x200801, 0x2002c1, - 0x201d01, - 0x2041c1, - 0x200781, - 0x200641, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x20b743, - 0x22bf83, - 0x250cc3, - 0x8ce48, - 0x220ec3, - 0x24c083, - 0x204703, - 0x14d9688, - 0x77a48, - 0x45684, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x24c083, - 0x204703, - 0x201e03, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x2c8144, - 0x204703, - 0x297985, - 0x325c04, - 0x22bf83, - 0x24c083, - 0x204703, - 0x206a82, - 0x22bf83, - 0x230509, - 0x231b03, - 0x23db49, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x2e1a48, - 0x21d847, - 0x2ebf05, - 0x200707, - 0x20540b, - 0x204f48, - 0x340a09, - 0x2286c7, - 0x204488, - 0x2598c6, - 0x235947, - 0x227c88, - 0x33b906, - 0x323887, - 0x235089, - 0x37ab09, - 0x2b7406, - 0x2b9385, - 0x2c3b08, - 0x24e2c3, - 0x2cac08, - 0x231d07, - 0x209d83, - 0x358107, - 0x215c45, - 0x2dd2c8, - 0x264905, - 0x373143, - 0x2c7b49, - 0x2a9bc7, - 0x2fc404, - 0x362ac4, - 0x2fac8b, - 0x2fafc8, - 0x2fd687, - 0x22bf83, - 0x231b03, - 0x20f583, - 0x204703, - 0x22d3c3, - 0x250cc3, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x200882, - 0x206a82, - 0x204703, - 0x77a48, - 0x200882, - 0x206a82, - 0x20f582, - 0x202002, - 0x200342, - 0x24c083, - 0x200fc2, - 0x200882, - 0x327883, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x20f582, - 0x250cc3, - 0x202243, - 0x220ec3, - 0x211004, - 0x24c083, - 0x21a883, - 0x204703, - 0x2ffd44, - 0x2232c3, - 0x250cc3, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x2020c3, - 0x204703, - 0x39c8c7, - 0x22bf83, - 0x2555c7, - 0x263506, - 0x203983, - 0x210c43, - 0x250cc3, - 0x2037c3, - 0x2023c4, - 0x377844, - 0x2d4746, - 0x250403, - 0x24c083, - 0x204703, - 0x297985, - 0x20d644, - 0x317f43, - 0x2273c3, - 0x2bde07, - 0x2e34c5, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x209ec2, - 0x34ad43, - 0x237643, - 0x327883, - 0x5522bf83, - 0x200d02, - 0x231b03, - 0x209d03, - 0x250cc3, - 0x2023c4, - 0x265603, - 
0x2d2a83, - 0x220ec3, - 0x211004, - 0x5560dbc2, - 0x24c083, - 0x204703, - 0x22a543, - 0x2468c3, - 0x212dc2, - 0x2232c3, - 0x77a48, - 0x250cc3, - 0x2cf584, - 0x327883, - 0x206a82, - 0x22bf83, - 0x234a44, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x202243, - 0x2ce884, - 0x30db04, - 0x2cc586, - 0x211004, - 0x24c083, - 0x204703, - 0x21d603, - 0x26ed46, - 0x18b4b, - 0x1c106, - 0x2310a, - 0xfee8a, - 0x77a48, - 0x2036c4, - 0x22bf83, - 0x327844, - 0x231b03, - 0x24a304, - 0x250cc3, - 0x2614c3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x32104b, - 0x39e40a, - 0x3b0a4c, - 0x200882, - 0x206a82, - 0x20f582, - 0x2a8e05, - 0x2023c4, - 0x20f842, - 0x220ec3, - 0x30db04, - 0x202b42, - 0x200fc2, - 0x203682, - 0x212dc2, - 0x127883, - 0x35ecc9, - 0x25b508, - 0x33a1c9, - 0x235789, - 0x23cc0a, - 0x255f4a, - 0x20a1c2, - 0x21eb82, - 0x6a82, - 0x22bf83, - 0x208942, - 0x240e86, - 0x356b42, - 0x218982, - 0x27184e, - 0x2751ce, - 0x27f787, - 0x3804c7, - 0x274802, - 0x231b03, - 0x250cc3, - 0x201b42, - 0x202002, - 0x233fcf, - 0x205482, - 0x23bb07, - 0x33bb07, - 0x365587, - 0x24dbcc, - 0x253f4c, - 0x204b44, - 0x28148a, - 0x28da82, - 0x20ac82, - 0x2b1f04, - 0x2266c2, - 0x2bc342, - 0x254184, - 0x21a982, - 0x217382, - 0x33b987, - 0x27fec5, - 0x233a82, - 0x233f44, - 0x36f542, - 0x2ce2c8, - 0x24c083, - 0x3b2248, - 0x201082, - 0x232fc5, - 0x324146, - 0x204703, - 0x20a742, - 0x2ddcc7, - 0x9202, - 0x274b05, - 0x393f45, - 0x2040c2, - 0x22b382, - 0x31650a, - 0x26c60a, - 0x20b5c2, - 0x320784, - 0x201f02, - 0x238608, - 0x20a302, - 0x33b148, - 0x2f7bc7, - 0x2f7ec9, - 0x274b82, - 0x2fdfc5, - 0x25b9c5, - 0x2c13cb, - 0x2c210c, - 0x22ce88, - 0x2fe388, - 0x25b3c2, - 0x20fc82, - 0x200882, - 0x77a48, - 0x206a82, - 0x22bf83, - 0x20f582, - 0x202b42, - 0x200fc2, - 0x204703, - 0x203682, - 0x200882, - 0x57606a82, - 0x57a50cc3, - 0x39a883, - 0x20f842, - 0x24c083, - 0x3a2c03, - 0x204703, - 0x2d9f83, - 0x274846, - 0x160b803, - 0x77a48, - 0x62145, - 0x6ae47, - 0x58200182, - 0x58600ec2, - 0x58a01e02, - 0x58e02902, - 0x592136c2, - 0x59601482, - 0x59a06a82, - 0x59e0e542, - 0x5a221982, - 0x5a601582, - 0x2751c3, - 0x201503, - 0x5aa17982, - 0x5ae01c82, - 0x49507, - 0x5b233602, - 0x5b600902, - 0x5ba048c2, - 0x5be0b542, - 0x5c204982, - 0x5c602002, - 0xbac45, - 0x226103, - 0x20b504, - 0x5ca266c2, - 0x5ce35c82, - 0x5d200102, - 0x7c80b, - 0x5d600982, - 0x5de0ad82, - 0x5e20f842, - 0x5e600342, - 0x5ea50702, - 0x5ee02ac2, - 0x5f203642, - 0x5f608b82, - 0x5fa0dbc2, - 0x5fe00cc2, - 0x60202b42, - 0x606353c2, - 0x60a02d82, - 0x60e43482, - 0x14c104, - 0x2d2fc3, - 0x61211382, - 0x61619d02, - 0x61a052c2, - 0x61e03502, - 0x62200fc2, - 0x62609242, - 0xdbc07, - 0x62a08582, - 0x62e05fc2, - 0x63203682, - 0x6364ecc2, - 0xed54c, - 0x63a1c642, - 0x63e75bc2, - 0x642076c2, - 0x64601242, - 0x64a0c402, - 0x64e3cd82, - 0x65201d02, - 0x65603b82, - 0x65a777c2, - 0x65e4ffc2, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 
0x2805c3, - 0x200482, - 0x5da65603, - 0x2805c3, - 0x229944, - 0x25b406, - 0x2e8283, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x265603, - 0x2805c3, - 0x200482, - 0x200482, - 0x265603, - 0x2805c3, - 0x6662bf83, - 0x231b03, - 0x331043, - 0x220ec3, - 0x24c083, - 0x204703, - 0x77a48, - 0x206a82, - 0x22bf83, - 0x24c083, - 0x204703, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x220ec3, - 0x24c083, - 0x204703, - 0x247204, - 0x206a82, - 0x22bf83, - 0x2cfe43, - 0x231b03, - 0x248fc4, - 0x20f583, - 0x250cc3, - 0x2023c4, - 0x202243, - 0x220ec3, - 0x24c083, - 0x204703, - 0x2298c3, - 0x2ebf05, - 0x237d03, - 0x2232c3, - 0x206a82, - 0x22bf83, - 0x265603, - 0x24c083, - 0x204703, - 0x200882, - 0x327883, - 0x77a48, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x22fb86, - 0x2023c4, - 0x202243, - 0x211004, - 0x24c083, - 0x204703, - 0x21d603, - 0x22bf83, - 0x231b03, - 0x24c083, - 0x204703, - 0x22bf83, - 0x1c106, - 0x231b03, - 0x250cc3, - 0xd19c6, - 0x24c083, - 0x204703, - 0x309748, - 0x30d009, - 0x319509, - 0x329dc8, - 0x37d6c8, - 0x37d6c9, - 0x34385, - 0x200882, - 0x2e3305, - 0x22fc03, - 0x69206a82, - 0x231b03, - 0x250cc3, - 0x22c947, - 0x2387c3, - 0x220ec3, - 0x24c083, - 0x20f543, - 0x213583, - 0x2020c3, - 0x204703, - 0x23b346, - 0x2012c2, - 0x2232c3, - 0x77a48, - 0x200882, - 0x327883, - 0x206a82, - 0x22bf83, - 0x231b03, - 0x250cc3, - 0x2023c4, - 0x220ec3, - 0x24c083, - 0x204703, - 0x20b803, + 0x20b0c2, + 0x2000c1, + 0x200201, + 0x200bc1, + 0x2005c1, + 0x201cc1, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x219503, + 0x2a84c3, + 0x2e9dc3, + 0x8d688, + 0x209703, + 0x205503, + 0x20803, + 0x200983, + 0x14e7e88, + 0x16d208, + 0x44e04, + 0x14e7e8a, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2030c3, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2d5f04, + 0x200983, + 0x27a305, + 0x33b804, + 0x2a84c3, + 0x205503, + 0x200983, + 0x225ca, + 0xd5284, + 0x10c9c6, + 0x2099c2, + 0x2a84c3, + 0x230309, + 0x232403, + 0x3034c9, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x2ed648, + 0x22ca47, + 0x373605, + 0x18ed88, + 0x1b7407, + 0x2d20a, + 0xecb, + 0x4ab87, + 0x3d2c8, + 0x1b1b8a, + 0x10a48, + 0x12e3c9, + 0x264c7, + 0x3be87, + 0x152b08, + 0x708, + 0x3df8f, + 0x11d85, + 0xa07, + 0x1ae886, + 0x137607, + 0x3d586, + 0x8f2c8, + 0xa5606, + 0x151647, + 0x19c9, + 0x1aa1c7, + 0xa46c9, + 0xb4a09, + 0xbeec6, + 0xc0e08, + 0xbfcc5, + 0x4eb4a, + 0xca988, + 0xb1b43, + 0xd2648, + 0x32607, + 0x6d505, + 0x69c50, + 0x8003, + 0x1aa047, + 0x15ec5, + 0xe9748, + 0x13ce05, + 0x11ed83, + 
0x6fd48, + 0xcd46, + 0x42849, + 0xaa147, + 0x6fa0b, + 0x14ac44, + 0xfa544, + 0x10398b, + 0x103f48, + 0x104d47, + 0x129845, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x200983, + 0x22a403, + 0x2e9dc3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x43f8b, + 0x205702, + 0x2099c2, + 0x200983, + 0x16d208, + 0x205702, + 0x2099c2, + 0x20d882, + 0x201fc2, + 0x203d02, + 0x205503, + 0x200442, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x20d882, + 0x2e9dc3, + 0x244183, + 0x209703, + 0x211cc4, + 0x205503, + 0x216b03, + 0x200983, + 0x308d44, + 0x25ed03, + 0x2e9dc3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x204e83, + 0x200983, + 0x39f847, + 0x2a84c3, + 0x2614c7, + 0x2c7ac6, + 0x219203, + 0x218343, + 0x2e9dc3, + 0x2143c3, + 0x3b1384, + 0x37ef04, + 0x31ea46, + 0x20d143, + 0x205503, + 0x200983, + 0x27a305, + 0x318284, + 0x3b2a43, + 0x38b743, + 0x2c4647, + 0x33e885, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x28e87, + 0x205942, + 0x287003, + 0x2bf143, + 0x38d2c3, + 0x626a84c3, + 0x202242, + 0x232403, + 0x2032c3, + 0x2e9dc3, + 0x3b1384, + 0x353903, + 0x315e03, + 0x209703, + 0x211cc4, + 0x62a04642, + 0x205503, + 0x200983, + 0x2082c3, + 0x229543, + 0x212982, + 0x25ed03, + 0x16d208, + 0x2e9dc3, + 0x169c3, + 0x26f744, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x282104, + 0x210444, + 0x2d43c6, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x25fe86, + 0x13f08b, + 0x1cdc6, + 0x5eb4a, + 0x107e4a, + 0x16d208, + 0x2142c4, + 0x63ea84c3, + 0x38d284, + 0x232403, + 0x256e84, + 0x2e9dc3, + 0x391683, + 0x209703, + 0x205503, + 0x200983, + 0x56243, + 0x32f78b, + 0x3a140a, + 0x3b9bcc, + 0xda688, + 0x205702, + 0x2099c2, + 0x20d882, + 0x2a9305, + 0x3b1384, + 0x202342, + 0x209703, + 0x210444, + 0x200c82, + 0x200442, + 0x209842, + 0x212982, + 0x18d2c3, + 0x19f02, + 0x2a1cc9, + 0x25d548, + 0x309a89, + 0x337449, + 0x23490a, + 0x23634a, + 0x20cc02, + 0x21b102, + 0x99c2, + 0x2a84c3, + 0x204682, + 0x23de86, + 0x35d882, + 0x201242, + 0x20124e, + 0x21898e, + 0x27b107, + 0x205487, + 0x275d02, + 0x232403, + 0x2e9dc3, + 0x200042, + 0x201fc2, + 0x4a5c3, + 0x2eec0f, + 0x200f42, + 0x32c787, + 0x2c7d07, + 0x2d3907, + 0x2ad24c, + 0x3151cc, + 0x3a3a44, + 0x27c6ca, + 0x2188c2, + 0x20be02, + 0x2b6fc4, + 0x2226c2, + 0x2c2702, + 0x315404, + 0x20cec2, + 0x200282, + 0x6343, + 0x2a5687, + 0x2352c5, + 0x201f42, + 0x2eeb84, + 0x352bc2, + 0x2da248, + 0x205503, + 0x3b0208, + 0x200d42, + 0x233385, + 0x3b04c6, + 0x200983, + 0x20ba02, + 0x2ea507, + 0xf942, + 0x26b005, + 0x3a9f45, + 0x201642, + 0x242b02, + 0x3b7a8a, + 0x2671ca, + 0x202c42, + 0x2e4744, + 0x2002c2, + 0x206888, + 0x201c82, + 0x30a848, + 0x2feb47, + 0x2ff649, + 0x26b082, + 0x305645, + 0x33bc85, + 0x22dd4b, + 0x2c6c4c, + 0x22e848, + 0x3188c8, + 0x22a282, + 0x35f782, + 0x205702, + 0x16d208, + 0x2099c2, + 0x2a84c3, + 0x20d882, + 0x200c82, + 0x200442, + 0x200983, + 0x209842, + 0x205702, + 0x652099c2, + 0x656e9dc3, + 0x206343, + 0x202342, + 0x205503, + 0x375cc3, + 0x200983, + 0x2e87c3, + 0x275d46, + 0x1614843, + 0x16d208, + 0x192345, + 0xa6a8d, + 0xa4dca, + 0x65c87, + 0x65e011c2, + 0x66200242, + 0x66600ec2, + 0x66a00c02, + 0x66e0de02, + 0x67201ec2, + 0x16fc07, + 0x676099c2, + 0x67a301c2, + 0x67e09982, + 0x68200dc2, + 0x218983, + 0x9e04, + 0x225d83, + 0x686149c2, + 0x68a00182, + 0x49f47, + 0x68e03002, + 0x69202e42, + 0x69600b42, + 0x69a02bc2, + 0x69e029c2, + 0x6a201fc2, + 0xb3985, + 0x234543, + 0x202b84, + 0x6a6226c2, + 0x6aa03a82, + 
0x6ae03202, + 0x16c90b, + 0x6b200e82, + 0x6ba49a02, + 0x6be02342, + 0x6c203d02, + 0x6c60f242, + 0x6ca0ec42, + 0x6ce0e602, + 0x6d2675c2, + 0x6d604642, + 0x6da01b42, + 0x6de00c82, + 0x6e2042c2, + 0x6e61c702, + 0x6ea00e42, + 0x7f1c4, + 0x350703, + 0x6ee33082, + 0x6f216982, + 0x6f603402, + 0x6fa089c2, + 0x6fe00442, + 0x702056c2, + 0x44107, + 0x70601302, + 0x70a07302, + 0x70e09842, + 0x71218942, + 0xf484c, + 0x71621c82, + 0x71a3ab02, + 0x71e11602, + 0x72201682, + 0x72601f82, + 0x72a34a82, + 0x72e00202, + 0x7320e8c2, + 0x736724c2, + 0x73a56642, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0xa203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x6b753903, + 0x20a203, + 0x2d6d44, + 0x25d446, + 0x2f1743, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x219f02, + 0x353903, + 0x20a203, + 0x742a84c3, + 0x232403, + 0x37ac03, + 0x209703, + 0x205503, + 0x200983, + 0x16d208, + 0x2099c2, + 0x2a84c3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x2030c4, + 0x2099c2, + 0x2a84c3, + 0x2028c3, + 0x232403, + 0x249944, + 0x2163c3, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x205503, + 0x200983, + 0x23e743, + 0x373605, + 0x2a1fc3, + 0x25ed03, + 0x2099c2, + 0x2a84c3, + 0x353903, + 0x205503, + 0x200983, + 0x205702, + 0x38d2c3, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x22f706, + 0x3b1384, + 0x244183, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x2a84c3, + 0x232403, + 0x205503, + 0x200983, + 0x14bb147, + 0x2a84c3, + 0x1cdc6, + 0x232403, + 0x2e9dc3, + 0xdba46, + 0x205503, + 0x200983, + 0x3149c8, + 0x318709, + 0x328b89, + 0x333808, + 0x37dc48, + 0x37dc49, + 0x24318d, + 0x2ee80f, + 0x251490, + 0x34848d, + 0x3638cc, + 0x37f98b, + 0x98605, + 0x205702, + 0x33e6c5, + 0x200243, + 0x772099c2, + 0x232403, + 0x2e9dc3, + 0x343ec7, + 0x206a43, + 0x209703, + 0x205503, + 
0x21c2c3, + 0x20dcc3, + 0x204e83, + 0x200983, + 0x2efec6, + 0x20b0c2, + 0x25ed03, + 0x16d208, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x209703, + 0x205503, + 0x200983, + 0x214843, + 0x14f53c6, + 0x205702, + 0x2099c2, + 0x2e9dc3, + 0x209703, + 0x200983, } // children is the list of nodes' children, the parent's wildcard bit and the @@ -8548,426 +8770,484 @@ var children = [...]uint32{ 0x40000000, 0x50000000, 0x60000000, - 0x1858610, - 0x185c616, - 0x187c617, - 0x19d861f, - 0x19ec676, - 0x1a0067b, - 0x1a10680, - 0x1a2c684, - 0x1a3068b, - 0x1a4868c, - 0x1a6c692, - 0x1a7069b, - 0x1a8869c, - 0x1a8c6a2, - 0x1aa86a3, - 0x1aac6aa, - 0x1af46ab, - 0x1af86bd, - 0x1b186be, - 0x1b2c6c6, - 0x1b306cb, - 0x1b606cc, - 0x1b7c6d8, - 0x1ba46df, - 0x1bac6e9, - 0x1bb06eb, - 0x1c446ec, - 0x1c58711, - 0x1c6c716, - 0x1c9871b, - 0x1ca8726, - 0x1cbc72a, - 0x1ce072f, - 0x1df8738, - 0x1dfc77e, - 0x1e1077f, - 0x1e24784, - 0x1e2c789, - 0x1e3c78b, - 0x1e4078f, - 0x1e58790, - 0x1ea0796, - 0x1eb47a8, - 0x1eb87ad, - 0x1ebc7ae, - 0x1ec47af, - 0x1f007b1, - 0x61f047c0, - 0x1f187c1, - 0x1f1c7c6, - 0x1f2c7c7, - 0x1fdc7cb, - 0x1fe07f7, - 0x21fe87f8, - 0x21fec7fa, - 0x1ff07fb, - 0x20247fc, - 0x2028809, - 0x244880a, - 0x22498912, - 0x2249c926, - 0x24c4927, - 0x24cc931, - 0x224d0933, - 0x24d8934, - 0x224e8936, - 0x224ec93a, - 0x24f893b, - 0x24fc93e, - 0x2250093f, - 0x251c940, - 0x2534947, - 0x253894d, - 0x254894e, - 0x2550952, - 0x22584954, - 0x2588961, - 0x2598962, - 0x25c4966, - 0x25dc971, - 0x25f0977, - 0x261897c, - 0x2638986, - 0x266898e, - 0x269099a, - 0x26949a4, - 0x26b89a5, - 0x26bc9ae, - 0x26d09af, + 0x184c60d, + 0x1850613, + 0x1870614, + 0x19cc61c, + 0x19e0673, + 0x19f4678, + 0x1a0467d, + 0x1a20681, + 0x1a24688, + 0x1a3c689, + 0x1a6468f, + 0x1a68699, + 0x1a8069a, + 0x1a846a0, + 0x1a886a1, + 0x1ab06a2, + 0x1ab46ac, + 0x21abc6ad, + 0x1b046af, + 0x1b086c1, + 0x1b286c2, + 0x1b3c6ca, + 0x1b406cf, + 0x1b706d0, + 0x1b8c6dc, + 0x1bb46e3, + 0x1bc06ed, + 0x1bc46f0, + 0x1c5c6f1, + 0x1c70717, + 0x1c8471c, + 0x1cb4721, + 0x1cc472d, + 0x1cd8731, + 0x1cfc736, + 0x1e3473f, + 0x1e3878d, + 0x1ea478e, + 0x1f107a9, + 0x1f247c4, + 0x1f387c9, + 0x1f407ce, + 0x1f507d0, + 0x1f547d4, + 0x1f6c7d5, + 0x1fb87db, + 0x1fd47ee, + 0x1fd87f5, + 0x1fdc7f6, + 0x1fe87f7, + 0x20247fa, + 0x62028809, + 0x203c80a, + 0x205080f, + 0x2054814, + 0x2064815, + 0x2114819, + 0x2118845, + 0x22124846, + 0x2212c849, + 0x216484b, + 0x2168859, + 0x25b885a, + 0x2265896e, + 0x2265c996, + 0x22660997, + 0x2266c998, + 0x2267099b, + 0x2267c99c, + 0x2268099f, + 0x226849a0, + 0x226889a1, + 0x2268c9a2, + 0x226909a3, + 0x2269c9a4, + 0x226a09a7, + 0x226ac9a8, + 0x226b09ab, + 0x226b49ac, + 0x226b89ad, + 0x226c49ae, + 0x226c89b1, + 0x226cc9b2, + 0x226d09b3, 0x26d49b4, - 0x26d89b5, - 0x26f89b6, - 0x26fc9be, - 0x270c9bf, - 0x27809c3, - 0x279c9e0, - 0x27a89e7, - 0x27bc9ea, - 0x27d49ef, - 0x27e89f5, - 0x28009fa, - 0x2818a00, - 0x2830a06, - 0x284ca0c, - 0x2864a13, - 0x28c4a19, - 0x28dca31, - 0x28f0a37, - 0x2934a3c, - 0x29b4a4d, - 0x29e0a6d, + 0x226d89b5, + 0x226e49b6, + 0x226e89b9, + 0x26f09ba, + 0x227089bc, + 0x2270c9c2, + 0x27189c3, + 0x2271c9c6, + 0x27209c7, + 0x227249c8, + 0x27409c9, + 0x27589d0, + 0x275c9d6, + 0x276c9d7, + 0x27749db, + 0x27a89dd, + 0x27ac9ea, + 0x27bc9eb, + 0x28609ef, + 0x22864a18, + 0x286ca19, + 0x2870a1b, + 0x2888a1c, + 0x289ca22, + 0x28c4a27, + 0x28e4a31, + 0x2914a39, + 0x293ca45, + 0x2940a4f, + 0x2964a50, + 0x2968a59, + 0x297ca5a, + 0x2980a5f, + 0x2984a60, + 0x29a4a61, + 0x29c0a69, + 0x29c4a70, + 0x229c8a71, + 0x29cca72, + 
0x29d0a73, + 0x29e0a74, 0x29e4a78, - 0x29eca79, - 0x2a0ca7b, - 0x2a10a83, - 0x2a2ca84, - 0x2a34a8b, - 0x2a68a8d, - 0x2aa0a9a, - 0x2aa4aa8, - 0x2ad0aa9, - 0x2ae8ab4, - 0x2b0caba, - 0x2b2cac3, - 0x30f0acb, - 0x30fcc3c, - 0x311cc3f, - 0x32d8c47, - 0x33a8cb6, - 0x3418cea, - 0x3470d06, - 0x3558d1c, - 0x35b0d56, - 0x35ecd6c, - 0x36e8d7b, - 0x37b4dba, - 0x384cded, - 0x38dce13, - 0x3940e37, - 0x3b78e50, - 0x3c30ede, - 0x3cfcf0c, - 0x3d48f3f, - 0x3dd0f52, - 0x3e0cf74, - 0x3e5cf83, - 0x3ed4f97, - 0x63ed8fb5, - 0x63edcfb6, - 0x63ee0fb7, - 0x3f5cfb8, - 0x3fc0fd7, - 0x403cff0, - 0x40b500f, - 0x413502d, - 0x41a104d, - 0x42cd068, - 0x43250b3, - 0x643290c9, - 0x43c10ca, - 0x44490f0, - 0x4495112, - 0x44fd125, - 0x45a513f, - 0x466d169, - 0x46d519b, - 0x47e91b5, - 0x647ed1fa, - 0x647f11fb, - 0x484d1fc, - 0x48a9213, - 0x493922a, - 0x49b524e, - 0x49f926d, - 0x4add27e, - 0x4b112b7, - 0x4b712c4, - 0x4be52dc, - 0x4c6d2f9, - 0x4cad31b, - 0x4d1d32b, - 0x64d21347, - 0x64d25348, - 0x24d29349, - 0x4d4134a, - 0x4d5d350, - 0x4da1357, - 0x4db1368, - 0x4dc936c, - 0x4e41372, - 0x4e55390, - 0x4e6d395, - 0x4e9139b, - 0x4ea53a4, - 0x4ec13a9, - 0x4ec53b0, - 0x4ecd3b1, - 0x4f093b3, - 0x4f1d3c2, - 0x4f253c7, - 0x4f2d3c9, - 0x4f313cb, - 0x4f553cc, - 0x4f793d5, - 0x4f913de, - 0x4f953e4, - 0x4f9d3e5, - 0x4fa13e7, - 0x4ff53e8, - 0x50193fd, - 0x5039406, - 0x505540e, - 0x5065415, - 0x5079419, - 0x507d41e, - 0x508541f, - 0x5099421, - 0x50a9426, - 0x50ad42a, - 0x50c942b, - 0x5959432, - 0x5991656, - 0x59bd664, - 0x59d566f, - 0x59f5675, - 0x659f967d, - 0x5a3d67e, - 0x5a4568f, - 0x25a49691, - 0x25a4d692, - 0x5a51693, - 0x5b71694, - 0x25b756dc, - 0x25b7d6dd, - 0x25b856df, - 0x25b916e1, - 0x5b956e4, - 0x5bbd6e5, - 0x5be56ef, - 0x5be96f9, - 0x25c216fa, - 0x5c31708, - 0x678970c, - 0x678d9e2, - 0x67919e3, - 0x267959e4, - 0x67999e5, - 0x2679d9e6, - 0x67a19e7, - 0x267ad9e8, - 0x67b19eb, - 0x67b59ec, - 0x267b99ed, - 0x67bd9ee, - 0x267c59ef, - 0x67c99f1, - 0x67cd9f2, - 0x267dd9f3, - 0x67e19f7, - 0x67e59f8, - 0x67e99f9, - 0x67ed9fa, - 0x267f19fb, - 0x67f59fc, - 0x67f99fd, - 0x67fd9fe, - 0x68019ff, - 0x26809a00, - 0x680da02, - 0x6811a03, - 0x6815a04, - 0x26819a05, - 0x681da06, - 0x26825a07, - 0x26829a09, - 0x6845a0a, - 0x6851a11, - 0x6891a14, - 0x6895a24, - 0x68b9a25, - 0x69fda2e, - 0x26a05a7f, - 0x26a09a81, - 0x26a0da82, - 0x6a15a83, - 0x6af1a85, - 0x6af5abc, - 0x6b21abd, - 0x6b41ac8, - 0x6b4dad0, - 0x6b6dad3, - 0x6ba5adb, - 0x6e3dae9, - 0x6ef9b8f, - 0x6f0dbbe, - 0x6f41bc3, - 0x6f6dbd0, - 0x6f89bdb, - 0x6fadbe2, - 0x6fc5beb, - 0x6fe1bf1, - 0x7005bf8, - 0x7015c01, - 0x7045c05, - 0x7061c11, - 0x726dc18, - 0x7291c9b, - 0x72b1ca4, - 0x72c5cac, - 0x72d9cb1, - 0x72f9cb6, - 0x739dcbe, - 0x73b9ce7, - 0x73d5cee, - 0x73d9cf5, - 0x73ddcf6, - 0x73e1cf7, - 0x73f5cf8, - 0x7415cfd, - 0x7421d05, - 0x7451d08, - 0x74d1d14, - 0x74e5d34, - 0x74e9d39, - 0x7501d3a, - 0x750dd40, - 0x7511d43, - 0x752dd44, - 0x7569d4b, - 0x756dd5a, - 0x758dd5b, - 0x75ddd63, - 0x75f5d77, - 0x7649d7d, - 0x764dd92, - 0x7651d93, - 0x7695d94, - 0x76a5da5, - 0x76ddda9, - 0x770ddb7, - 0x7849dc3, - 0x786de12, - 0x7899e1b, - 0x78a1e26, - 0x78a5e28, - 0x79ade29, - 0x79b9e6b, - 0x79c5e6e, - 0x79d1e71, - 0x79dde74, - 0x79e9e77, - 0x79f5e7a, - 0x7a01e7d, - 0x7a0de80, - 0x7a19e83, - 0x7a25e86, - 0x7a31e89, - 0x7a3de8c, - 0x7a49e8f, - 0x7a51e92, - 0x7a5de94, - 0x7a69e97, - 0x7a75e9a, - 0x7a81e9d, - 0x7a8dea0, - 0x7a99ea3, - 0x7aa5ea6, - 0x7ab1ea9, - 0x7abdeac, - 0x7ac9eaf, - 0x7ad5eb2, - 0x7ae1eb5, - 0x7aedeb8, - 0x7af9ebb, - 0x7b05ebe, - 0x7b11ec1, - 0x7b1dec4, - 0x7b25ec7, - 0x7b31ec9, - 0x7b3decc, - 
0x7b49ecf, - 0x7b55ed2, - 0x7b61ed5, - 0x7b6ded8, - 0x7b79edb, - 0x7b85ede, - 0x7b91ee1, - 0x7b9dee4, - 0x7ba9ee7, + 0x2a5ca79, + 0x2a78a97, + 0x2a88a9e, + 0x2a9caa2, + 0x2ab4aa7, + 0x2ac8aad, + 0x2ae0ab2, + 0x2ae4ab8, + 0x2afcab9, + 0x2b14abf, + 0x2b30ac5, + 0x2b48acc, + 0x2ba8ad2, + 0x2bc0aea, + 0x2bc4af0, + 0x2bd8af1, + 0x2c1caf6, + 0x2c9cb07, + 0x2cc8b27, + 0x2cccb32, + 0x2cd4b33, + 0x2cf4b35, + 0x2cf8b3d, + 0x2d18b3e, + 0x2d20b46, + 0x2d5cb48, + 0x2d9cb57, + 0x2da0b67, + 0x2e00b68, + 0x2e04b80, + 0x22e08b81, + 0x2e20b82, + 0x2e44b88, + 0x2e64b91, + 0x3428b99, + 0x3434d0a, + 0x3454d0d, + 0x3610d15, + 0x36e0d84, + 0x3750db8, + 0x37a8dd4, + 0x3890dea, + 0x38e8e24, + 0x3924e3a, + 0x3a20e49, + 0x3aece88, + 0x3b84ebb, + 0x3c14ee1, + 0x3c78f05, + 0x3eb0f1e, + 0x3f68fac, + 0x4034fda, + 0x408100d, + 0x4109020, + 0x4145042, + 0x4195051, + 0x420d065, + 0x64211083, + 0x64215084, + 0x64219085, + 0x4295086, + 0x42f10a5, + 0x436d0bc, + 0x43e50db, + 0x44650f9, + 0x44d1119, + 0x45fd134, + 0x465517f, + 0x64659195, + 0x46f1196, + 0x47791bc, + 0x47c51de, + 0x482d1f1, + 0x48d520b, + 0x499d235, + 0x4a05267, + 0x4b19281, + 0x64b1d2c6, + 0x64b212c7, + 0x4b7d2c8, + 0x4bd92df, + 0x4c692f6, + 0x4ce531a, + 0x4d29339, + 0x4e0d34a, + 0x4e41383, + 0x4ea1390, + 0x4f153a8, + 0x4f9d3c5, + 0x4fdd3e7, + 0x504d3f7, + 0x65051413, + 0x65055414, + 0x25059415, + 0x5071416, + 0x508d41c, + 0x50d1423, + 0x50e1434, + 0x50f9438, + 0x517143e, + 0x517945c, + 0x518d45e, + 0x51a5463, + 0x51cd469, + 0x51d1473, + 0x51d9474, + 0x51ed476, + 0x520947b, + 0x520d482, + 0x5215483, + 0x5251485, + 0x5265494, + 0x526d499, + 0x527549b, + 0x527949d, + 0x529d49e, + 0x52c14a7, + 0x52d94b0, + 0x52dd4b6, + 0x52e54b7, + 0x52e94b9, + 0x534d4ba, + 0x53514d3, + 0x53754d4, + 0x53954dd, + 0x53b14e5, + 0x53c14ec, + 0x53d54f0, + 0x53d94f5, + 0x53e14f6, + 0x53f54f8, + 0x54054fd, + 0x5409501, + 0x5425502, + 0x5cb5509, + 0x5ced72d, + 0x5d1973b, + 0x5d31746, + 0x5d5174c, + 0x5d71754, + 0x5db575c, + 0x5dbd76d, + 0x25dc176f, + 0x25dc5770, + 0x5dcd771, + 0x5f29773, + 0x25f2d7ca, + 0x25f3d7cb, + 0x25f457cf, + 0x25f517d1, + 0x5f557d4, + 0x5f597d5, + 0x5f817d6, + 0x5fa97e0, + 0x5fad7ea, + 0x5fe57eb, + 0x5ff97f9, + 0x6b517fe, + 0x6b55ad4, + 0x6b59ad5, + 0x26b5dad6, + 0x6b61ad7, + 0x26b65ad8, + 0x6b69ad9, + 0x26b75ada, + 0x6b79add, + 0x6b7dade, + 0x26b81adf, + 0x6b85ae0, + 0x26b8dae1, + 0x6b91ae3, + 0x6b95ae4, + 0x26ba5ae5, + 0x6ba9ae9, + 0x6badaea, + 0x6bb1aeb, + 0x6bb5aec, + 0x26bb9aed, + 0x6bbdaee, + 0x6bc1aef, + 0x6bc5af0, + 0x6bc9af1, + 0x26bd1af2, + 0x6bd5af4, + 0x6bd9af5, + 0x6bddaf6, + 0x26be1af7, + 0x6be5af8, + 0x26bedaf9, + 0x26bf1afb, + 0x6c0dafc, + 0x6c19b03, + 0x6c59b06, + 0x6c5db16, + 0x6c81b17, + 0x6c85b20, + 0x6c89b21, + 0x6e01b22, + 0x26e05b80, + 0x26e0db81, + 0x26e11b83, + 0x26e15b84, + 0x6e1db85, + 0x6ef9b87, + 0x26efdbbe, + 0x6f01bbf, + 0x6f2dbc0, + 0x6f31bcb, + 0x6f51bcc, + 0x6f5dbd4, + 0x6f7dbd7, + 0x6fb5bdf, + 0x724dbed, + 0x7309c93, + 0x731dcc2, + 0x7351cc7, + 0x7381cd4, + 0x739dce0, + 0x73c1ce7, + 0x73ddcf0, + 0x73f9cf7, + 0x741dcfe, + 0x742dd07, + 0x7431d0b, + 0x7465d0c, + 0x7481d19, + 0x74edd20, + 0x274f1d3b, + 0x7515d3c, + 0x7535d45, + 0x7549d4d, + 0x755dd52, + 0x7561d57, + 0x7581d58, + 0x7625d60, + 0x7641d89, + 0x7661d90, + 0x7665d98, + 0x766dd99, + 0x7671d9b, + 0x7685d9c, + 0x76a5da1, + 0x76b1da9, + 0x76bddac, + 0x76eddaf, + 0x77bddbb, + 0x77c1def, + 0x77d5df0, + 0x77d9df5, + 0x77f1df6, + 0x77f5dfc, + 0x7801dfd, + 0x7805e00, + 0x7821e01, + 0x785de08, + 0x7861e17, + 0x7881e18, + 0x78d1e20, + 0x78ede34, + 0x7941e3b, + 0x7945e50, + 0x7949e51, 
+ 0x794de52, + 0x7991e53, + 0x79a1e64, + 0x79dde68, + 0x79e1e77, + 0x7a11e78, + 0x7b59e84, + 0x7b7ded6, + 0x7ba9edf, 0x7bb5eea, - 0x7bc1eed, - 0x7bc9ef0, - 0x7bd5ef2, - 0x7be1ef5, - 0x7bedef8, - 0x7bf9efb, - 0x7c05efe, - 0x7c11f01, - 0x7c1df04, - 0x7c29f07, - 0x7c2df0a, - 0x7c39f0b, - 0x7c51f0e, - 0x7c55f14, - 0x7c65f15, - 0x7c7df19, - 0x7cc1f1f, - 0x7cd5f30, - 0x7d09f35, - 0x7d19f42, - 0x7d35f46, - 0x7d4df4d, - 0x7d51f53, - 0x27d95f54, - 0x7d99f65, - 0x7dc5f66, + 0x7bbdeed, + 0x7ccdeef, + 0x7cd9f33, + 0x7ce5f36, + 0x7cf1f39, + 0x7cfdf3c, + 0x7d09f3f, + 0x7d15f42, + 0x7d21f45, + 0x7d2df48, + 0x7d39f4b, + 0x7d45f4e, + 0x7d51f51, + 0x7d5df54, + 0x7d69f57, + 0x7d71f5a, + 0x7d7df5c, + 0x7d89f5f, + 0x7d95f62, + 0x7da1f65, + 0x7dadf68, + 0x7db9f6b, + 0x7dc5f6e, + 0x7dd1f71, + 0x7dddf74, + 0x7de9f77, + 0x7df5f7a, + 0x7e01f7d, + 0x7e0df80, + 0x7e19f83, + 0x7e25f86, + 0x7e31f89, + 0x7e3df8c, + 0x7e45f8f, + 0x7e51f91, + 0x7e5df94, + 0x7e69f97, + 0x7e75f9a, + 0x7e81f9d, + 0x7e8dfa0, + 0x7e99fa3, + 0x7ea5fa6, + 0x7eb1fa9, + 0x7ebdfac, + 0x7ec9faf, + 0x7ed5fb2, + 0x7ee1fb5, + 0x7ee9fb8, + 0x7ef5fba, + 0x7f01fbd, + 0x7f0dfc0, + 0x7f19fc3, + 0x7f25fc6, + 0x7f31fc9, + 0x7f3dfcc, + 0x7f49fcf, + 0x7f4dfd2, + 0x7f59fd3, + 0x7f71fd6, + 0x7f75fdc, + 0x7f85fdd, + 0x7f9dfe1, + 0x7fe1fe7, + 0x7ff5ff8, + 0x8029ffd, + 0x803a00a, + 0x805a00e, + 0x8072016, + 0x808a01c, + 0x808e022, + 0x280d2023, + 0x80d6034, + 0x8102035, + 0x8106040, + 0x811a041, } -// max children 421 (capacity 511) -// max text offset 27811 (capacity 32767) +// max children 479 (capacity 511) +// max text offset 28411 (capacity 32767) // max text length 36 (capacity 63) -// max hi 8049 (capacity 16383) -// max lo 8038 (capacity 16383) +// max hi 8262 (capacity 16383) +// max lo 8257 (capacity 16383) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/affinity_linux.go new file mode 100644 index 00000000..72afe333 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -0,0 +1,124 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// CPU affinity functions + +package unix + +import ( + "unsafe" +) + +const cpuSetSize = _CPU_SETSIZE / _NCPUBITS + +// CPUSet represents a CPU affinity mask. +type CPUSet [cpuSetSize]cpuMask + +func schedAffinity(trap uintptr, pid int, set *CPUSet) error { + _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set))) + if e != 0 { + return errnoErr(e) + } + return nil +} + +// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. +func SchedGetaffinity(pid int, set *CPUSet) error { + return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set) +} + +// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. +func SchedSetaffinity(pid int, set *CPUSet) error { + return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set) +} + +// Zero clears the set s, so that it contains no CPUs. +func (s *CPUSet) Zero() { + for i := range s { + s[i] = 0 + } +} + +func cpuBitsIndex(cpu int) int { + return cpu / _NCPUBITS +} + +func cpuBitsMask(cpu int) cpuMask { + return cpuMask(1 << (uint(cpu) % _NCPUBITS)) +} + +// Set adds cpu to the set s. 
+func (s *CPUSet) Set(cpu int) { + i := cpuBitsIndex(cpu) + if i < len(s) { + s[i] |= cpuBitsMask(cpu) + } +} + +// Clear removes cpu from the set s. +func (s *CPUSet) Clear(cpu int) { + i := cpuBitsIndex(cpu) + if i < len(s) { + s[i] &^= cpuBitsMask(cpu) + } +} + +// IsSet reports whether cpu is in the set s. +func (s *CPUSet) IsSet(cpu int) bool { + i := cpuBitsIndex(cpu) + if i < len(s) { + return s[i]&cpuBitsMask(cpu) != 0 + } + return false +} + +// Count returns the number of CPUs in the set s. +func (s *CPUSet) Count() int { + c := 0 + for _, b := range s { + c += onesCount64(uint64(b)) + } + return c +} + +// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. +// Once this package can require Go 1.9, we can delete this +// and update the caller to use bits.OnesCount64. +func onesCount64(x uint64) int { + const m0 = 0x5555555555555555 // 01010101 ... + const m1 = 0x3333333333333333 // 00110011 ... + const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... + const m3 = 0x00ff00ff00ff00ff // etc. + const m4 = 0x0000ffff0000ffff + + // Implementation: Parallel summing of adjacent bits. + // See "Hacker's Delight", Chap. 5: Counting Bits. + // The following pattern shows the general approach: + // + // x = x>>1&(m0&m) + x&(m0&m) + // x = x>>2&(m1&m) + x&(m1&m) + // x = x>>4&(m2&m) + x&(m2&m) + // x = x>>8&(m3&m) + x&(m3&m) + // x = x>>16&(m4&m) + x&(m4&m) + // x = x>>32&(m5&m) + x&(m5&m) + // return int(x) + // + // Masking (& operations) can be left away when there's no + // danger that a field's sum will carry over into the next + // field: Since the result cannot be > 64, 8 bits is enough + // and we can ignore the masks for the shifts by 8 and up. + // Per "Hacker's Delight", the first line can be simplified + // more, but it saves at best one instruction, so we leave + // it alone for clarity. + const m = 1<<64 - 1 + x = x>>1&(m0&m) + x&(m0&m) + x = x>>2&(m1&m) + x&(m1&m) + x = (x>>4 + x) & (m2 & m) + x += x >> 8 + x += x >> 16 + x += x >> 32 + return int(x) & (1<<7 - 1) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_386.s index 4db29093..448bebbb 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -10,21 +10,51 @@ // System calls for 386, Linux // +// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80 +// instead of the glibc-specific "CALL 0x10(GS)". +#define INVOKE_SYSCALL INT $0x80 + // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
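The new affinity_linux.go above gives the unix package a Linux-only CPU-affinity API: a CPUSet bit mask plus SchedGetaffinity/SchedSetaffinity wrappers over the raw syscalls. A minimal usage sketch, assuming a Linux build of the vendored golang.org/x/sys/unix package (error handling abbreviated; affinity is a per-thread property, hence the LockOSThread call):

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	// Affinity belongs to the OS thread, so keep this goroutine on one
	// thread while we inspect and change the mask.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var set unix.CPUSet
	// pid 0 means the calling thread.
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		panic(err)
	}
	fmt.Println("runnable on", set.Count(), "CPUs")

	// Restrict the thread to CPU 0 only.
	set.Zero()
	set.Set(0)
	if err := unix.SchedSetaffinity(0, &set); err != nil {
		panic(err)
	}
	fmt.Println("pinned to CPU 0:", set.IsSet(0))
}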
-TEXT ·Syscall(SB),NOSPLIT,$0-28 +TEXT ·Syscall(SB),NOSPLIT,$0-28 JMP syscall·Syscall(SB) -TEXT ·Syscall6(SB),NOSPLIT,$0-40 +TEXT ·Syscall6(SB),NOSPLIT,$0-40 JMP syscall·Syscall6(SB) +TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 + CALL runtime·entersyscall(SB) + MOVL trap+0(FP), AX // syscall entry + MOVL a1+4(FP), BX + MOVL a2+8(FP), CX + MOVL a3+12(FP), DX + MOVL $0, SI + MOVL $0, DI + INVOKE_SYSCALL + MOVL AX, r1+16(FP) + MOVL DX, r2+20(FP) + CALL runtime·exitsyscall(SB) + RET + TEXT ·RawSyscall(SB),NOSPLIT,$0-28 JMP syscall·RawSyscall(SB) -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 JMP syscall·RawSyscall6(SB) +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 + MOVL trap+0(FP), AX // syscall entry + MOVL a1+4(FP), BX + MOVL a2+8(FP), CX + MOVL a3+12(FP), DX + MOVL $0, SI + MOVL $0, DI + INVOKE_SYSCALL + MOVL AX, r1+16(FP) + MOVL DX, r2+20(FP) + RET + TEXT ·socketcall(SB),NOSPLIT,$0-36 JMP syscall·socketcall(SB) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 44e25c62..c6468a95 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -13,17 +13,45 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. -TEXT ·Syscall(SB),NOSPLIT,$0-56 +TEXT ·Syscall(SB),NOSPLIT,$0-56 JMP syscall·Syscall(SB) TEXT ·Syscall6(SB),NOSPLIT,$0-80 JMP syscall·Syscall6(SB) +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + CALL runtime·entersyscall(SB) + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ $0, R10 + MOVQ $0, R8 + MOVQ $0, R9 + MOVQ trap+0(FP), AX // syscall entry + SYSCALL + MOVQ AX, r1+32(FP) + MOVQ DX, r2+40(FP) + CALL runtime·exitsyscall(SB) + RET + TEXT ·RawSyscall(SB),NOSPLIT,$0-56 JMP syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 JMP syscall·RawSyscall6(SB) +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ $0, R10 + MOVQ $0, R8 + MOVQ $0, R9 + MOVQ trap+0(FP), AX // syscall entry + SYSCALL + MOVQ AX, r1+32(FP) + MOVQ DX, r2+40(FP) + RET + TEXT ·gettimeofday(SB),NOSPLIT,$0-16 JMP syscall·gettimeofday(SB) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm.s index cf0b5746..cf0f3575 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -13,17 +13,44 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
-TEXT ·Syscall(SB),NOSPLIT,$0-28 +TEXT ·Syscall(SB),NOSPLIT,$0-28 B syscall·Syscall(SB) -TEXT ·Syscall6(SB),NOSPLIT,$0-40 +TEXT ·Syscall6(SB),NOSPLIT,$0-40 B syscall·Syscall6(SB) +TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 + BL runtime·entersyscall(SB) + MOVW trap+0(FP), R7 + MOVW a1+4(FP), R0 + MOVW a2+8(FP), R1 + MOVW a3+12(FP), R2 + MOVW $0, R3 + MOVW $0, R4 + MOVW $0, R5 + SWI $0 + MOVW R0, r1+16(FP) + MOVW $0, R0 + MOVW R0, r2+20(FP) + BL runtime·exitsyscall(SB) + RET + TEXT ·RawSyscall(SB),NOSPLIT,$0-28 B syscall·RawSyscall(SB) -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 B syscall·RawSyscall6(SB) -TEXT ·seek(SB),NOSPLIT,$0-32 +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 + MOVW trap+0(FP), R7 // syscall entry + MOVW a1+4(FP), R0 + MOVW a2+8(FP), R1 + MOVW a3+12(FP), R2 + SWI $0 + MOVW R0, r1+16(FP) + MOVW $0, R0 + MOVW R0, r2+20(FP) + RET + +TEXT ·seek(SB),NOSPLIT,$0-28 B syscall·seek(SB) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 4be9bfed..afe6fdf6 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -11,14 +11,42 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. -TEXT ·Syscall(SB),NOSPLIT,$0-56 +TEXT ·Syscall(SB),NOSPLIT,$0-56 B syscall·Syscall(SB) TEXT ·Syscall6(SB),NOSPLIT,$0-80 B syscall·Syscall6(SB) +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD $0, R3 + MOVD $0, R4 + MOVD $0, R5 + MOVD trap+0(FP), R8 // syscall entry + SVC + MOVD R0, r1+32(FP) // r1 + MOVD R1, r2+40(FP) // r2 + BL runtime·exitsyscall(SB) + RET + TEXT ·RawSyscall(SB),NOSPLIT,$0-56 B syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 B syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD $0, R3 + MOVD $0, R4 + MOVD $0, R5 + MOVD trap+0(FP), R8 // syscall entry + SVC + MOVD R0, r1+32(FP) + MOVD R1, r2+40(FP) + RET diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 724e580c..ab9d6383 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -15,14 +15,42 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
-TEXT ·Syscall(SB),NOSPLIT,$0-56 +TEXT ·Syscall(SB),NOSPLIT,$0-56 JMP syscall·Syscall(SB) -TEXT ·Syscall6(SB),NOSPLIT,$0-80 +TEXT ·Syscall6(SB),NOSPLIT,$0-80 JMP syscall·Syscall6(SB) -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + JAL runtime·entersyscall(SB) + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R2 // syscall entry + SYSCALL + MOVV R2, r1+32(FP) + MOVV R3, r2+40(FP) + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 JMP syscall·RawSyscall(SB) -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R2 // syscall entry + SYSCALL + MOVV R2, r1+32(FP) + MOVV R3, r2+40(FP) + RET diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 2ea42575..99e53990 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -15,17 +15,40 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. -TEXT ·Syscall(SB),NOSPLIT,$0-28 +TEXT ·Syscall(SB),NOSPLIT,$0-28 JMP syscall·Syscall(SB) -TEXT ·Syscall6(SB),NOSPLIT,$0-40 +TEXT ·Syscall6(SB),NOSPLIT,$0-40 JMP syscall·Syscall6(SB) -TEXT ·Syscall9(SB),NOSPLIT,$0-52 +TEXT ·Syscall9(SB),NOSPLIT,$0-52 JMP syscall·Syscall9(SB) -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 +TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 + JAL runtime·entersyscall(SB) + MOVW a1+4(FP), R4 + MOVW a2+8(FP), R5 + MOVW a3+12(FP), R6 + MOVW R0, R7 + MOVW trap+0(FP), R2 // syscall entry + SYSCALL + MOVW R2, r1+16(FP) // r1 + MOVW R3, r2+20(FP) // r2 + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 JMP syscall·RawSyscall(SB) -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 +TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 + MOVW a1+4(FP), R4 + MOVW a2+8(FP), R5 + MOVW a3+12(FP), R6 + MOVW trap+0(FP), R2 // syscall entry + SYSCALL + MOVW R2, r1+16(FP) + MOVW R3, r2+20(FP) + RET diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 8d231feb..649e5871 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -15,14 +15,42 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
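The SyscallNoError and RawSyscallNoError stubs added to the per-architecture assembly files above back the Go declarations in syscall_linux_gc.go further down in this diff. They exist for Linux syscalls that cannot fail, so generated wrappers can skip the errno check (see the mksyscall.pl change below). A hedged sketch of calling one directly, assuming a Linux target, where getpid(2) never fails:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// getpid(2) has no error return, so the NoError variant skips the
	// errno handling entirely; the second result is unused here.
	pid, _ := unix.RawSyscallNoError(unix.SYS_GETPID, 0, 0, 0)
	fmt.Println("pid:", pid)
}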
-TEXT ·Syscall(SB),NOSPLIT,$0-56 +TEXT ·Syscall(SB),NOSPLIT,$0-56 BR syscall·Syscall(SB) TEXT ·Syscall6(SB),NOSPLIT,$0-80 BR syscall·Syscall6(SB) +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R3 + MOVD a2+16(FP), R4 + MOVD a3+24(FP), R5 + MOVD R0, R6 + MOVD R0, R7 + MOVD R0, R8 + MOVD trap+0(FP), R9 // syscall entry + SYSCALL R9 + MOVD R3, r1+32(FP) + MOVD R4, r2+40(FP) + BL runtime·exitsyscall(SB) + RET + TEXT ·RawSyscall(SB),NOSPLIT,$0-56 BR syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 BR syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVD a1+8(FP), R3 + MOVD a2+16(FP), R4 + MOVD a3+24(FP), R5 + MOVD R0, R6 + MOVD R0, R7 + MOVD R0, R8 + MOVD trap+0(FP), R9 // syscall entry + SYSCALL R9 + MOVD R3, r1+32(FP) + MOVD R4, r2+40(FP) + RET diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index 11889859..a5a863c6 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -21,8 +21,36 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56 TEXT ·Syscall6(SB),NOSPLIT,$0-80 BR syscall·Syscall6(SB) +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R2 + MOVD a2+16(FP), R3 + MOVD a3+24(FP), R4 + MOVD $0, R5 + MOVD $0, R6 + MOVD $0, R7 + MOVD trap+0(FP), R1 // syscall entry + SYSCALL + MOVD R2, r1+32(FP) + MOVD R3, r2+40(FP) + BL runtime·exitsyscall(SB) + RET + TEXT ·RawSyscall(SB),NOSPLIT,$0-56 BR syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 BR syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVD a1+8(FP), R2 + MOVD a2+16(FP), R3 + MOVD a3+24(FP), R4 + MOVD $0, R5 + MOVD $0, R6 + MOVD $0, R7 + MOVD trap+0(FP), R1 // syscall entry + SYSCALL + MOVD R2, r1+32(FP) + MOVD R3, r2+40(FP) + RET diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/dirent.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/dirent.go index bd475812..95fd3531 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/dirent.go @@ -6,97 +6,12 @@ package unix -import "unsafe" - -// readInt returns the size-bytes unsigned integer in native byte order at offset off. 
-func readInt(b []byte, off, size uintptr) (u uint64, ok bool) { - if len(b) < int(off+size) { - return 0, false - } - if isBigEndian { - return readIntBE(b[off:], size), true - } - return readIntLE(b[off:], size), true -} - -func readIntBE(b []byte, size uintptr) uint64 { - switch size { - case 1: - return uint64(b[0]) - case 2: - _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[1]) | uint64(b[0])<<8 - case 4: - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24 - case 8: - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 - default: - panic("syscall: readInt with unsupported size") - } -} - -func readIntLE(b []byte, size uintptr) uint64 { - switch size { - case 1: - return uint64(b[0]) - case 2: - _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 - case 4: - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 - case 8: - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - default: - panic("syscall: readInt with unsupported size") - } -} +import "syscall" // ParseDirent parses up to max directory entries in buf, // appending the names to names. It returns the number of // bytes consumed from buf, the number of entries added // to names, and the new names slice. func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { - origlen := len(buf) - count = 0 - for max != 0 && len(buf) > 0 { - reclen, ok := direntReclen(buf) - if !ok || reclen > uint64(len(buf)) { - return origlen, count, names - } - rec := buf[:reclen] - buf = buf[reclen:] - ino, ok := direntIno(rec) - if !ok { - break - } - if ino == 0 { // File absent in directory. - continue - } - const namoff = uint64(unsafe.Offsetof(Dirent{}.Name)) - namlen, ok := direntNamlen(rec) - if !ok || namoff+namlen > uint64(len(rec)) { - break - } - name := rec[namoff : namoff+namlen] - for i, c := range name { - if c == 0 { - name = name[:i] - break - } - } - // Check for useless names before allocating a string. - if string(name) == "." || string(name) == ".." 
{ - continue - } - max-- - count++ - names = append(names, string(name)) - } - return origlen - len(buf), count, names + return syscall.ParseDirent(buf, max, names) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/env_unix.go index 2e06b33f..706b3cd1 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/env_unix.go @@ -25,3 +25,7 @@ func Clearenv() { func Environ() []string { return syscall.Environ() } + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/env_unset.go deleted file mode 100644 index c44fdc4a..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/env_unset.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.4 - -package unix - -import "syscall" - -func Unsetenv(key string) error { - // This was added in Go 1.4. - return syscall.Unsetenv(key) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo.go index 40bed3fa..50062e3c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo.go @@ -11,9 +11,19 @@ import "syscall" // We can't use the gc-syntax .s files for gccgo. On the plus side // much of the functionality can be written directly in Go. 
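With the change above, dirent.go's ParseDirent simply delegates to the standard library's syscall.ParseDirent, so the exported behaviour (skipping ".", "..", and absent entries) now comes from the runtime rather than a local copy. A minimal sketch of the raw directory-reading path, assuming Linux and abbreviated error handling:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open(".", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	n, err := unix.ReadDirent(fd, buf)
	if err != nil {
		panic(err)
	}
	// max = -1 parses every entry in buf; "." and ".." are skipped.
	_, count, names := unix.ParseDirent(buf[:n], -1, nil)
	fmt.Println(count, "entries:", names)
}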
+//extern gccgoRealSyscallNoError +func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr) + //extern gccgoRealSyscall func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr) +func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { + syscall.Entersyscall() + r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + syscall.Exitsyscall() + return r, 0 +} + func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { syscall.Entersyscall() r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) @@ -35,6 +45,11 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, return r, 0, syscall.Errno(errno) } +func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { + r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + return r, 0 +} + func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) return r, 0, syscall.Errno(errno) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo_c.c index 99a774f2..24e96b11 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -31,6 +31,12 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp return r; } +uintptr_t +gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) +{ + return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9); +} + // Define the use function in C so that it is not inlined. extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline)); diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mkerrors.sh index 2a44da57..4dd40c17 100755 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -187,6 +187,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -426,7 +427,9 @@ ccflags="$@" $2 ~ /^(VM|VMADDR)_/ || $2 ~ /^IOCTL_VM_SOCKETS_/ || $2 ~ /^(TASKSTATS|TS)_/ || + $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || + $2 ~ /^STATX_/ || $2 ~ /^UTIME_/ || $2 ~ /^XATTR_(CREATE|REPLACE)/ || $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ || diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mksyscall.pl index fb929b4c..1f6b926f 100755 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mksyscall.pl +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/mksyscall.pl @@ -210,7 +210,15 @@ ($) # Determine which form to use; pad args with zeros. 
my $asm = "Syscall"; if ($nonblock) { - $asm = "RawSyscall"; + if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { + $asm = "RawSyscallNoError"; + } else { + $asm = "RawSyscall"; + } + } else { + if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { + $asm = "SyscallNoError"; + } } if(@args <= 3) { while(@args < 3) { @@ -284,7 +292,12 @@ ($) if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { $text .= "\t$call\n"; } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; + if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { + # raw syscall without error on Linux, see golang.org/issue/22924 + $text .= "\t$ret[0], $ret[1] := $call\n"; + } else { + $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; + } } $text .= $body; diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_bsd.go index 47b05984..d3903ede 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -352,6 +352,18 @@ func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { return &value, err } +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. +func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + return "", err + } + return string(buf[:vallen-1]), nil +} + //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_darwin.go index d6c472a7..b9598694 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -36,6 +36,7 @@ func Getwd() (string, error) { return "", ENOTSUP } +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. 
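GetsockoptString, added for the BSDs above and for Linux (with an ERANGE retry) and Solaris later in this diff, reads a NUL-terminated string option and trims the terminator. A sketch of one common use, querying the TCP congestion-control algorithm; using TCP_CONGESTION as the option assumes a Linux target:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// TCP_CONGESTION yields a NUL-terminated algorithm name, e.g. "cubic".
	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
	if err != nil {
		panic(err)
	}
	fmt.Println("congestion control:", algo)
}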
type SockaddrDatalink struct { Len uint8 Family uint8 @@ -76,18 +77,6 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) -} - //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 49c65ea6..777860bf 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -14,6 +14,7 @@ package unix import "unsafe" +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 Family uint8 @@ -56,22 +57,6 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) -} - -func direntReclen(buf []byte) (uint64, bool) { - namlen, ok := direntNamlen(buf) - if !ok { - return 0, false - } - return (16 + namlen + 1 + 7) &^ 7, true -} - -func direntNamlen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) -} - //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -110,6 +95,23 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { return } +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr @@ -169,6 +171,69 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { return &value, err } +func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { + err := sysctl(mib, old, oldlen, nil, 0) + if err != nil { + // Utsname members on Dragonfly are only 32 bytes and + // the syscall returns ENOMEM in case the actual value + // is longer. 
+ if err == ENOMEM { + err = nil + } + } + return err +} + +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctlUname(mib, &uname.Sysname[0], &n); err != nil { + return err + } + uname.Sysname[unsafe.Sizeof(uname.Sysname)-1] = 0 + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctlUname(mib, &uname.Nodename[0], &n); err != nil { + return err + } + uname.Nodename[unsafe.Sizeof(uname.Nodename)-1] = 0 + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctlUname(mib, &uname.Release[0], &n); err != nil { + return err + } + uname.Release[unsafe.Sizeof(uname.Release)-1] = 0 + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctlUname(mib, &uname.Version[0], &n); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. + for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctlUname(mib, &uname.Machine[0], &n); err != nil { + return err + } + uname.Machine[unsafe.Sizeof(uname.Machine)-1] = 0 + + return nil +} + /* * Exposed directly */ diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_freebsd.go index bcf9812a..89f2c3fc 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -14,6 +14,7 @@ package unix import "unsafe" +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. 
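The Uname implementation above builds the Dragonfly result from individual kern.* and hw.machine sysctls, converting tabs and newlines in kern.version to spaces; NetBSD and OpenBSD get near-identical code later in this diff. A sketch of consuming it, assuming a platform whose generated Utsname fields are NUL-padded byte arrays (true for these BSDs; on Linux the fields are int8 arrays and would need a conversion):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/sys/unix"
)

// cstr turns a NUL-padded byte slice (a Utsname field) into a Go string.
func cstr(b []byte) string {
	if i := bytes.IndexByte(b, 0); i >= 0 {
		return string(b[:i])
	}
	return string(b)
}

func main() {
	var u unix.Utsname
	if err := unix.Uname(&u); err != nil {
		panic(err)
	}
	fmt.Println(cstr(u.Sysname[:]), cstr(u.Release[:]), cstr(u.Machine[:]))
}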
type SockaddrDatalink struct { Len uint8 Family uint8 @@ -54,18 +55,6 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) -} - //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -105,6 +94,23 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { return } +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr @@ -276,7 +282,6 @@ func Listxattr(file string, dest []byte) (sz int, err error) { // FreeBSD won't allow you to list xattrs from multiple namespaces s := 0 - var e error for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) @@ -288,7 +293,6 @@ func Listxattr(file string, dest []byte) (sz int, err error) { * we don't have read permissions on, so don't ignore those errors */ if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - e = nil continue } else if e != nil { return s, e @@ -302,7 +306,7 @@ func Listxattr(file string, dest []byte) (sz int, err error) { d = initxattrdest(dest, s) } - return s, e + return s, nil } func Flistxattr(fd int, dest []byte) (sz int, err error) { @@ -310,11 +314,9 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { destsiz := len(dest) s := 0 - var e error for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - e = nil continue } else if e != nil { return s, e @@ -328,7 +330,7 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { d = initxattrdest(dest, s) } - return s, e + return s, nil } func Llistxattr(link string, dest []byte) (sz int, err error) { @@ -336,11 +338,9 @@ func Llistxattr(link string, dest []byte) (sz int, err error) { destsiz := len(dest) s := 0 - var e error for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - e = nil continue } else if e != nil { return s, e @@ -354,7 +354,7 @@ func Llistxattr(link string, dest []byte) (sz int, err error) { d = initxattrdest(dest, s) } - return s, e + return s, nil } //sys ioctl(fd int, req uint, arg uintptr) (err error) @@ -485,6 +485,7 @@ func Uname(uname *Utsname) error { //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) +//sys Getdents(fd int, buf []byte) (n int, err error) //sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) diff --git 
a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_linux.go index 797cda7e..76cf81f5 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -413,6 +413,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), sl, nil } +// SockaddrLinklayer implements the Sockaddr interface for AF_PACKET type sockets. type SockaddrLinklayer struct { Protocol uint16 Ifindex int @@ -439,6 +440,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil } +// SockaddrNetlink implements the Sockaddr interface for AF_NETLINK type sockets. type SockaddrNetlink struct { Family uint16 Pad uint16 @@ -455,6 +457,8 @@ func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil } +// SockaddrHCI implements the Sockaddr interface for AF_BLUETOOTH type sockets +// using the HCI protocol. type SockaddrHCI struct { Dev uint16 Channel uint16 @@ -468,6 +472,31 @@ func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil } +// SockaddrL2 implements the Sockaddr interface for AF_BLUETOOTH type sockets +// using the L2CAP protocol. +type SockaddrL2 struct { + PSM uint16 + CID uint16 + Addr [6]uint8 + AddrType uint8 + raw RawSockaddrL2 +} + +func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_BLUETOOTH + psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) + psm[0] = byte(sa.PSM) + psm[1] = byte(sa.PSM >> 8) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] + } + cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) + cid[0] = byte(sa.CID) + cid[1] = byte(sa.CID >> 8) + sa.raw.Bdaddr_type = sa.AddrType + return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil +} + // SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets. // The RxID and TxID fields are used for transport protocol addressing in // (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with @@ -808,6 +837,24 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. 
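SockaddrL2, introduced above alongside SockaddrHCI, covers AF_BLUETOOTH sockets speaking L2CAP; its sockaddr() method stores the PSM and CID little-endian and reverses the device address into the kernel's expected byte order. A hedged sketch of an outgoing connection, assuming the BTPROTO_L2CAP constant from the generated Linux errors file; the PSM and remote address are placeholders:

package main

import "golang.org/x/sys/unix"

func main() {
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_SEQPACKET, unix.BTPROTO_L2CAP)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrL2{
		PSM:  0x1001,                                       // placeholder dynamic PSM
		Addr: [6]uint8{0x11, 0x22, 0x33, 0x44, 0x55, 0x66}, // placeholder remote BD_ADDR
	}
	if err := unix.Connect(fd, sa); err != nil {
		panic(err)
	}
}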
+func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + if err == ERANGE { + buf = make([]byte, vallen) + err = getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + } + if err != nil { + return "", err + } + } + return string(buf[:vallen-1]), nil +} + func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) } @@ -1172,22 +1219,6 @@ func ReadDirent(fd int, buf []byte) (n int, err error) { return Getdents(fd, buf) } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false - } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true -} - //sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { @@ -1293,6 +1324,7 @@ func Setgid(uid int) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error) +//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) //sys Sync() //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) @@ -1430,11 +1462,9 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { // RtSigtimedwait // SchedGetPriorityMax // SchedGetPriorityMin -// SchedGetaffinity // SchedGetparam // SchedGetscheduler // SchedRrGetInterval -// SchedSetaffinity // SchedSetparam // SchedYield // Security diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_linux_gc.go new file mode 100644 index 00000000..c26e6ec2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!gccgo + +package unix + +// SyscallNoError may be used instead of Syscall for syscalls that don't fail. +func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) + +// RawSyscallNoError may be used instead of RawSyscall for syscalls that don't +// fail. +func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 91468095..71b70783 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -17,6 +17,7 @@ import ( "unsafe" ) +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. 
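The //sys Statx line above wires up a wrapper for statx(2), and the mkerrors.sh change earlier in this diff starts generating the matching STATX_* mask constants. A sketch of calling it, assuming a 4.11+ kernel that implements statx and that STATX_BASIC_STATS is among the generated constants:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// AT_FDCWD resolves the path relative to the current directory;
	// flags 0 follows symlinks, the mask asks for the basic stat fields.
	if err := unix.Statx(unix.AT_FDCWD, "/etc/hosts", 0, unix.STATX_BASIC_STATS, &stx); err != nil {
		panic(err)
	}
	fmt.Printf("size=%d mode=%o\n", stx.Size, stx.Mode)
}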
type SockaddrDatalink struct { Len uint8 Family uint8 @@ -92,18 +93,6 @@ func nametomib(name string) (mib []_C_int, err error) { return mib, nil } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) -} - //sysnb pipe() (fd1 int, fd2 int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -118,6 +107,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return getdents(fd, buf) } +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + // TODO func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { return -1, ENOSYS @@ -167,6 +173,52 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { return &value, err } +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. + for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + return err + } + + return nil +} + /* * Exposed directly */ diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_no_getwd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_no_getwd.go deleted file mode 100644 index 530792ea..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_no_getwd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build dragonfly freebsd netbsd openbsd - -package unix - -const ImplementsGetwd = false - -func Getwd() (string, error) { return "", ENOTSUP } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b98cd07d..37556e77 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -18,6 +18,7 @@ import ( "unsafe" ) +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 Family uint8 @@ -42,18 +43,6 @@ func nametomib(name string) (mib []_C_int, err error) { return nil, EINVAL } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) -} - //sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -71,6 +60,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return getdents(fd, buf) } +const ImplementsGetwd = true + +//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + // TODO func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { return -1, ENOSYS @@ -135,6 +141,52 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { return &value, err } +func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + return err + } + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + return err + } + + // The version might have newlines or tabs in it, convert them to + // spaces. 
+ for i, b := range uname.Version { + if b == '\n' || b == '\t' { + if i == len(uname.Version)-1 { + uname.Version[i] = 0 + } else { + uname.Version[i] = ' ' + } + } + } + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + return err + } + + return nil +} + /* * Exposed directly */ diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_solaris.go index 3ab9e07c..eca8d1d0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -23,6 +23,7 @@ type syscallFunc uintptr func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) +// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Family uint16 Index uint16 @@ -34,31 +35,6 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} - -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false - } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true -} - //sysnb pipe(p *[2]_C_int) (n int, err error) func Pipe(p []int) (err error) { @@ -139,6 +115,18 @@ func Getsockname(fd int) (sa Sockaddr, err error) { return anyToSockaddr(&rsa) } +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. +func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + return "", err + } + return string(buf[:vallen-1]), nil +} + const ImplementsGetwd = true //sys Getcwd(buf []byte) (n int, err error) @@ -655,6 +643,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek +//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_unix.go index 3ed8a91f..cd8f3a9c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -50,6 +50,16 @@ func errnoErr(e syscall.Errno) error { return e } +// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. 
+func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + // Mmap manager, for use by operating system-specific implementations. type mmapper struct { @@ -138,16 +148,19 @@ func Write(fd int, p []byte) (n int, err error) { // creation of IPv6 sockets to return EAFNOSUPPORT. var SocketDisableIPv6 bool +// Sockaddr represents a socket address. type Sockaddr interface { sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs } +// SockaddrInet4 implements the Sockaddr interface for AF_INET type sockets. type SockaddrInet4 struct { Port int Addr [4]byte raw RawSockaddrInet4 } +// SockaddrInet6 implements the Sockaddr interface for AF_INET6 type sockets. type SockaddrInet6 struct { Port int ZoneId uint32 @@ -155,6 +168,7 @@ type SockaddrInet6 struct { raw RawSockaddrInet6 } +// SockaddrUnix implements the Sockaddr interface for AF_UNIX type sockets. type SockaddrUnix struct { Name string raw RawSockaddrUnix diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/timestruct.go index 139fbbeb..47b9011e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/timestruct.go @@ -6,6 +6,8 @@ package unix +import "time" + // TimespecToNsec converts a Timespec value into a number of // nanoseconds since the Unix epoch. func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } @@ -22,6 +24,24 @@ func NsecToTimespec(nsec int64) Timespec { return setTimespec(sec, nsec) } +// TimeToTimespec converts t into a Timespec. +// On some 32-bit systems the range of valid Timespec values are smaller +// than that of time.Time values. So if t is out of the valid range of +// Timespec, it returns a zero Timespec and ERANGE. +func TimeToTimespec(t time.Time) (Timespec, error) { + sec := t.Unix() + nsec := int64(t.Nanosecond()) + ts := setTimespec(sec, nsec) + + // Currently all targets have either int32 or int64 for Timespec.Sec. + // If there were a new target with floating point type for it, we have + // to consider the rounding error. + if int64(ts.Sec) != sec { + return Timespec{}, ERANGE + } + return ts, nil +} + // TimevalToNsec converts a Timeval value into a number of nanoseconds // since the Unix epoch. 
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 8f40598b..d9601550 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -168,6 +168,8 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 DLT_A429 = 0xb8 @@ -353,6 +355,7 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x3 HUPCL = 0x4000 + HW_MACHINE = 0x1 ICANON = 0x100 ICMP6_FILTER = 0x12 ICRNL = 0x100 @@ -835,6 +838,10 @@ const ( IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8947248f..4fba476e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1638,6 +1638,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 4083cb2a..7e2a108d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1639,6 +1639,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 27d38352..250841bd 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1643,6 +1643,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 
0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 69ad3147..f5d78561 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1629,6 +1629,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index d131a4cc..f45492db 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 62dd2035..f5a64fba 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 
0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index dc8e56e3..db6d556b 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 90676625..4a62a550 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f6ca82c7..5e1e81e0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1696,6 +1696,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ddd25625..6a803243 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ 
b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1696,6 +1696,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index fc304a68..af5a8950 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1700,6 +1700,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index b4338d5f..1612b660 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -1,5 +1,5 @@ // mkerrors.sh -m32 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,netbsd @@ -169,6 +169,8 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 CTL_QUERY = -0x2 @@ -581,6 +583,7 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x3 HUPCL = 0x4000 + HW_MACHINE = 0x1 ICANON = 0x100 ICMP6_FILTER = 0x12 ICRNL = 0x100 @@ -970,6 +973,10 @@ const ( IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 4994437b..c994ab61 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -1,5 +1,5 @@ // mkerrors.sh -m64 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build amd64,netbsd @@ -169,6 +169,8 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 CTL_QUERY = -0x2 @@ -571,6 +573,7 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x3 HUPCL = 0x4000 + HW_MACHINE = 0x1 ICANON = 0x100 ICMP6_FILTER = 0x12 ICRNL = 0x100 @@ -960,6 +963,10 @@ const ( IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 206c75f0..a8f9efed 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -1,5 +1,5 @@ // mkerrors.sh -marm -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,netbsd @@ -161,6 +161,8 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 CTL_QUERY = -0x2 @@ -563,6 +565,7 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x3 HUPCL = 0x4000 + HW_MACHINE = 0x1 ICANON = 0x100 ICMP6_FILTER = 0x12 ICRNL = 0x100 @@ -952,6 +955,10 @@ const ( IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 3322e998..04e4f331 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1,5 +1,5 @@ // mkerrors.sh -m32 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,openbsd @@ -157,6 +157,8 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 DIOCOSFPFLUSH = 0x2000444e @@ -442,6 +444,7 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x3 HUPCL = 0x4000 + HW_MACHINE = 0x1 ICANON = 0x100 ICMP6_FILTER = 0x12 ICRNL = 0x100 @@ -860,6 +863,10 @@ const ( IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 LCNT_OVERLOAD_FLUSH = 0x6 LOCK_EX = 0x2 LOCK_NB = 0x4 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 1758ecca..c80ff981 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -1,5 +1,5 @@ // mkerrors.sh -m64 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build amd64,openbsd @@ -157,6 +157,8 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 DIOCOSFPFLUSH = 0x2000444e @@ -442,6 +444,7 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x3 HUPCL = 0x4000 + HW_MACHINE = 0x1 ICANON = 0x100 ICMP6_FILTER = 0x12 ICRNL = 0x100 @@ -860,6 +863,10 @@ const ( IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 LCNT_OVERLOAD_FLUSH = 0x6 LOCK_EX = 0x2 LOCK_NB = 0x4 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 3ed0b260..4c320495 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1,5 +1,5 @@ // mkerrors.sh -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; see README.md. DO NOT EDIT. // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- _const.go @@ -157,6 +157,8 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 DIOCOSFPFLUSH = 0x2000444e @@ -441,6 +443,7 @@ const ( F_UNLCK = 0x2 F_WRLCK = 0x3 HUPCL = 0x4000 + HW_MACHINE = 0x1 ICANON = 0x100 ICMP6_FILTER = 0x12 ICRNL = 0x100 @@ -859,6 +862,10 @@ const ( IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 LCNT_OVERLOAD_FLUSH = 0x6 LOCK_EX = 0x2 LOCK_NB = 0x4 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 2ed340fd..a0241de1 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -423,6 +423,23 @@ func extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 8bcecfb9..fd9ca5a4 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -389,6 +389,23 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 
{ + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -937,6 +954,23 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 61c0cf99..a9f18b22 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -389,6 +389,23 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -937,6 +954,23 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index ffd01073..9823e18a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -389,6 +389,23 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) 
(err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -937,6 +954,23 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index e441cc3e..ef9602c1 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1595,7 +1610,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID32, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID32, 0, 0, 0) egid = int(r0) return } @@ -1603,7 +1618,7 @@ 
func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID32, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID32, 0, 0, 0) euid = int(r0) return } @@ -1611,7 +1626,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID32, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID32, 0, 0, 0) gid = int(r0) return } @@ -1619,7 +1634,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID32, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID32, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 083a08d4..63054b35 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1602,7 +1617,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) 
return } @@ -1610,7 +1625,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1618,7 +1633,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1636,7 +1651,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 24829376..8b10ee14 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1744,7 +1759,7 @@ func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID32, 0, 0, 0) + 
r0, _ := RawSyscallNoError(SYS_GETEGID32, 0, 0, 0) egid = int(r0) return } @@ -1752,7 +1767,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID32, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID32, 0, 0, 0) euid = int(r0) return } @@ -1760,7 +1775,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID32, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID32, 0, 0, 0) gid = int(r0) return } @@ -1768,7 +1783,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID32, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID32, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 40760110..8f276d65 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1582,7 +1597,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := 
RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1590,7 +1605,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1598,7 +1613,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1616,7 +1631,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 10322c94..61169b33 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1540,7 +1555,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1548,7 +1563,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1556,7 +1571,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1564,7 +1579,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 723e90ef..4cb59b4a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1582,7 +1597,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1590,7 +1605,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1598,7 +1613,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1616,7 +1631,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d4c78d4b..0b547ae3 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1582,7 +1597,7 @@ func Ftruncate(fd int, length int64) (err 
error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1590,7 +1605,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1598,7 +1613,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1616,7 +1631,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index bd2f491c..cd94d3a8 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1540,7 
+1555,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1548,7 +1563,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1556,7 +1571,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1564,7 +1579,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 5c46e647..cdad555a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return 
} @@ -1592,7 +1607,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1600,7 +1615,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1608,7 +1623,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1626,7 +1641,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9ef023d4..38f4e44b 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := 
RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1592,7 +1607,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1600,7 +1615,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1608,7 +1623,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1626,7 +1641,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 21e7d019..c443baf6 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -538,7 +538,7 @@ func Eventfd(initval uint, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -674,7 +674,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -682,7 +682,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -739,7 +739,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -1238,8 +1238,23 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) return } @@ -1298,7 +1313,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := 
RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1602,7 +1617,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1610,7 +1625,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1618,7 +1633,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1636,7 +1651,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 04a1ace9..62eadff1 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -406,6 +406,23 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 079824a7..307f4e99 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -406,6 +406,23 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 
05f8b496..61109313 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -406,6 +406,23 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 3b55544d..003f820e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -404,6 +404,23 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index cdaf4ef4..ba0e8f32 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -404,6 +404,23 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 6c4dc8a9..2ce02c7c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -404,6 +404,23 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf 
[]byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 1d452764..f5d01b3a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -95,6 +95,7 @@ import ( //go:cgo_import_dynamic libc_renameat renameat "libc.so" //go:cgo_import_dynamic libc_rmdir rmdir "libc.so" //go:cgo_import_dynamic libc_lseek lseek "libc.so" +//go:cgo_import_dynamic libc_select select "libc.so" //go:cgo_import_dynamic libc_setegid setegid "libc.so" //go:cgo_import_dynamic libc_seteuid seteuid "libc.so" //go:cgo_import_dynamic libc_setgid setgid "libc.so" @@ -220,6 +221,7 @@ import ( //go:linkname procRenameat libc_renameat //go:linkname procRmdir libc_rmdir //go:linkname proclseek libc_lseek +//go:linkname procSelect libc_select //go:linkname procSetegid libc_setegid //go:linkname procSeteuid libc_seteuid //go:linkname procSetgid libc_setgid @@ -346,6 +348,7 @@ var ( procRenameat, procRmdir, proclseek, + procSelect, procSetegid, procSeteuid, procSetgid, @@ -1264,6 +1267,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = e1 + } + return +} + func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go deleted file mode 100644 index c7086598..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,solaris - -package unix - -// TODO(aram): remove these before Go 1.3. 
-const ( - SYS_EXECVE = 59 - SYS_FCNTL = 62 -) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 1ca0e3ee..e3b8ebb0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -143,6 +143,10 @@ type Fsid struct { Val [2]int32 } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -472,3 +476,11 @@ const ( POLLWRBAND = 0x100 POLLWRNORM = 0x4 ) + +type Utsname struct { + Sysname [32]byte + Nodename [32]byte + Release [32]byte + Version [32]byte + Machine [32]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index f11c787d..878a21ad 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -140,6 +140,10 @@ type Fsid struct { Val [2]int32 } +const ( + PathMax = 0x400 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 65809472..8408af12 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -140,6 +140,10 @@ type Fsid struct { Val [2]int32 } +const ( + PathMax = 0x400 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 5a73ab30..4b2d9a48 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -142,6 +142,10 @@ type Fsid struct { Val [2]int32 } +const ( + PathMax = 0x400 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 295a9bd3..7aa206e3 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -52,7 +52,7 @@ type Timex struct { Errcnt int32 Stbcnt int32 Tai int32 - Pad_cgo_0 [44]byte + _ [44]byte } type Time_t int32 @@ -98,7 +98,7 @@ type _Gid_t uint32 type Stat_t struct { Dev uint64 X__pad1 uint16 - Pad_cgo_0 [2]byte + _ [2]byte X__st_ino uint32 Mode uint32 Nlink uint32 @@ -106,7 +106,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 X__pad2 uint16 - Pad_cgo_1 [2]byte + _ [2]byte Size int64 Blksize int32 Blocks int64 @@ -131,13 +131,43 @@ type Statfs_t struct { Spare [4]int32 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp 
+ Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]int8 - Pad_cgo_0 [1]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [1]byte } type Fsid struct { @@ -224,11 +254,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -341,7 +380,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -376,6 +415,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -565,9 +605,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [2]byte - Filter *SockFilter + Len uint16 + _ [2]byte + Filter *SockFilter } type InotifyEvent struct { @@ -643,9 +683,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -694,11 +740,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -710,13 +756,13 @@ type Taskstats struct { Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -760,6 +806,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -792,3 +856,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index cdd4a1dc..abb3d89a 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -33,13 +33,13 @@ type Timeval struct { type Timex struct { Modes uint32 - Pad_cgo_0 [4]byte + _ [4]byte Offset int64 Freq int64 Maxerror int64 Esterror int64 Status int32 - Pad_cgo_1 [4]byte 
+ _ [4]byte Constant int64 Precision int64 Tolerance int64 @@ -48,14 +48,14 @@ type Timex struct { Ppsfreq int64 Jitter int64 Shift int32 - Pad_cgo_2 [4]byte + _ [4]byte Stabil int64 Jitcnt int64 Calcnt int64 Errcnt int64 Stbcnt int64 Tai int32 - Pad_cgo_3 [44]byte + _ [44]byte } type Time_t int64 @@ -131,13 +131,43 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte } type Fsid struct { @@ -145,13 +175,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -226,11 +256,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -297,13 +336,13 @@ type PacketMreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 - Pad_cgo_1 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -345,7 +384,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -380,6 +419,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -569,9 +609,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [6]byte - Filter *SockFilter + Len uint16 + _ [6]byte + Filter *SockFilter } type InotifyEvent struct { @@ -628,12 +668,12 @@ type Sysinfo_t struct { Freeswap uint64 Procs uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Totalhigh uint64 Freehigh uint64 Unit uint32 X_f [0]int8 - Pad_cgo_1 [4]byte + _ [4]byte } type Utsname struct { @@ -646,12 +686,12 @@ type Utsname struct { } type Ustat_t struct { - Tfree int32 - Pad_cgo_0 [4]byte - Tinode uint64 - Fname [6]int8 - Fpack [6]int8 - Pad_cgo_1 [4]byte + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte } type EpollEvent struct { @@ -661,9 +701,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -712,11 +758,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode 
uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -728,13 +774,13 @@ type Taskstats struct { Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -778,6 +824,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -810,3 +874,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 086620ac..11654174 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -52,7 +52,7 @@ type Timex struct { Errcnt int32 Stbcnt int32 Tai int32 - Pad_cgo_0 [44]byte + _ [44]byte } type Time_t int32 @@ -98,7 +98,7 @@ type _Gid_t uint32 type Stat_t struct { Dev uint64 X__pad1 uint16 - Pad_cgo_0 [2]byte + _ [2]byte X__st_ino uint32 Mode uint32 Nlink uint32 @@ -106,10 +106,10 @@ type Stat_t struct { Gid uint32 Rdev uint64 X__pad2 uint16 - Pad_cgo_1 [6]byte + _ [6]byte Size int64 Blksize int32 - Pad_cgo_2 [4]byte + _ [4]byte Blocks int64 Atim Timespec Mtim Timespec @@ -118,28 +118,58 @@ type Stat_t struct { } type Statfs_t struct { - Type int32 - Bsize int32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen int32 - Frsize int32 - Flags int32 - Spare [4]int32 - Pad_cgo_0 [4]byte + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 + _ [4]byte +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 } type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]uint8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte } type Fsid struct { @@ -147,13 +177,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -228,11 +258,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type 
RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -345,7 +384,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -380,6 +419,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -569,9 +609,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [2]byte - Filter *SockFilter + Len uint16 + _ [2]byte + Filter *SockFilter } type InotifyEvent struct { @@ -632,9 +672,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -683,11 +729,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -699,13 +745,13 @@ type Taskstats struct { Ac_comm [32]uint8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -749,6 +795,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -781,3 +845,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index c53db98d..0d0de46f 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -33,13 +33,13 @@ type Timeval struct { type Timex struct { Modes uint32 - Pad_cgo_0 [4]byte + _ [4]byte Offset int64 Freq int64 Maxerror int64 Esterror int64 Status int32 - Pad_cgo_1 [4]byte + _ [4]byte Constant int64 Precision int64 Tolerance int64 @@ -48,14 +48,14 @@ type Timex struct { Ppsfreq int64 Jitter int64 Shift int32 - Pad_cgo_2 [4]byte + _ [4]byte Stabil int64 Jitcnt int64 Calcnt int64 Errcnt int64 Stbcnt int64 Tai int32 - Pad_cgo_3 [44]byte + _ [44]byte } type Time_t int64 @@ -132,13 +132,43 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t 
struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte } type Fsid struct { @@ -146,13 +176,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -227,11 +257,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -298,13 +337,13 @@ type PacketMreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 - Pad_cgo_1 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -346,7 +385,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -381,6 +420,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -570,9 +610,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [6]byte - Filter *SockFilter + Len uint16 + _ [6]byte + Filter *SockFilter } type InotifyEvent struct { @@ -606,12 +646,12 @@ type Sysinfo_t struct { Freeswap uint64 Procs uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Totalhigh uint64 Freehigh uint64 Unit uint32 X_f [0]int8 - Pad_cgo_1 [4]byte + _ [4]byte } type Utsname struct { @@ -624,12 +664,12 @@ type Utsname struct { } type Ustat_t struct { - Tfree int32 - Pad_cgo_0 [4]byte - Tinode uint64 - Fname [6]int8 - Fpack [6]int8 - Pad_cgo_1 [4]byte + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte } type EpollEvent struct { @@ -640,9 +680,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -691,11 +737,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -707,13 +753,13 @@ type Taskstats struct { Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -757,6 +803,24 @@ const ( 
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -789,3 +853,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 69e529b2..a9087c52 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -52,7 +52,7 @@ type Timex struct { Errcnt int32 Stbcnt int32 Tai int32 - Pad_cgo_0 [44]byte + _ [44]byte } type Time_t int32 @@ -116,29 +116,59 @@ type Stat_t struct { } type Statfs_t struct { - Type int32 - Bsize int32 - Frsize int32 - Pad_cgo_0 [4]byte - Blocks uint64 - Bfree uint64 - Files uint64 - Ffree uint64 - Bavail uint64 - Fsid Fsid - Namelen int32 - Flags int32 - Spare [5]int32 - Pad_cgo_1 [4]byte + Type int32 + Bsize int32 + Frsize int32 + _ [4]byte + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int32 + Flags int32 + Spare [5]int32 + _ [4]byte +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 } type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte } type Fsid struct { @@ -146,13 +176,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -227,11 +257,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -344,7 +383,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -379,6 +418,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -568,9 +608,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [2]byte 
- Filter *SockFilter + Len uint16 + _ [2]byte + Filter *SockFilter } type InotifyEvent struct { @@ -637,9 +677,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -688,11 +734,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -704,13 +750,13 @@ type Taskstats struct { Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -754,6 +800,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -786,3 +850,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 4460279a..01e8f65c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -33,13 +33,13 @@ type Timeval struct { type Timex struct { Modes uint32 - Pad_cgo_0 [4]byte + _ [4]byte Offset int64 Freq int64 Maxerror int64 Esterror int64 Status int32 - Pad_cgo_1 [4]byte + _ [4]byte Constant int64 Precision int64 Tolerance int64 @@ -48,14 +48,14 @@ type Timex struct { Ppsfreq int64 Jitter int64 Shift int32 - Pad_cgo_2 [4]byte + _ [4]byte Stabil int64 Jitcnt int64 Calcnt int64 Errcnt int64 Stbcnt int64 Tai int32 - Pad_cgo_3 [44]byte + _ [44]byte } type Time_t int64 @@ -132,13 +132,43 @@ type Statfs_t struct { Spare [5]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte } type Fsid struct { @@ -146,13 +176,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 
[4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -227,11 +257,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -298,13 +337,13 @@ type PacketMreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 - Pad_cgo_1 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -346,7 +385,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -381,6 +420,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -570,9 +610,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [6]byte - Filter *SockFilter + Len uint16 + _ [6]byte + Filter *SockFilter } type InotifyEvent struct { @@ -609,12 +649,12 @@ type Sysinfo_t struct { Freeswap uint64 Procs uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Totalhigh uint64 Freehigh uint64 Unit uint32 X_f [0]int8 - Pad_cgo_1 [4]byte + _ [4]byte } type Utsname struct { @@ -627,12 +667,12 @@ type Utsname struct { } type Ustat_t struct { - Tfree int32 - Pad_cgo_0 [4]byte - Tinode uint64 - Fname [6]int8 - Fpack [6]int8 - Pad_cgo_1 [4]byte + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte } type EpollEvent struct { @@ -642,9 +682,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -693,11 +739,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -709,13 +755,13 @@ type Taskstats struct { Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -759,6 +805,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -791,3 +855,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git 
a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 98923811..6f9452d8 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -33,13 +33,13 @@ type Timeval struct { type Timex struct { Modes uint32 - Pad_cgo_0 [4]byte + _ [4]byte Offset int64 Freq int64 Maxerror int64 Esterror int64 Status int32 - Pad_cgo_1 [4]byte + _ [4]byte Constant int64 Precision int64 Tolerance int64 @@ -48,14 +48,14 @@ type Timex struct { Ppsfreq int64 Jitter int64 Shift int32 - Pad_cgo_2 [4]byte + _ [4]byte Stabil int64 Jitcnt int64 Calcnt int64 Errcnt int64 Stbcnt int64 Tai int32 - Pad_cgo_3 [44]byte + _ [44]byte } type Time_t int64 @@ -132,13 +132,43 @@ type Statfs_t struct { Spare [5]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte } type Fsid struct { @@ -146,13 +176,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -227,11 +257,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -298,13 +337,13 @@ type PacketMreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 - Pad_cgo_1 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -346,7 +385,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -381,6 +420,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -570,9 +610,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [6]byte - Filter *SockFilter + Len uint16 + _ [6]byte + Filter *SockFilter } type InotifyEvent struct { @@ -609,12 +649,12 @@ type Sysinfo_t struct { Freeswap uint64 Procs uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Totalhigh uint64 Freehigh uint64 Unit uint32 X_f [0]int8 - Pad_cgo_1 [4]byte + _ [4]byte } type Utsname struct { @@ -627,12 +667,12 @@ type Utsname struct { } type Ustat_t struct { - Tfree int32 - Pad_cgo_0 [4]byte - Tinode uint64 - Fname [6]int8 - Fpack [6]int8 - Pad_cgo_1 [4]byte + Tfree int32 + _ 
[4]byte + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte } type EpollEvent struct { @@ -642,9 +682,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -693,11 +739,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -709,13 +755,13 @@ type Taskstats struct { Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -759,6 +805,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -791,3 +855,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index c2f4c036..6de721f7 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -52,7 +52,7 @@ type Timex struct { Errcnt int32 Stbcnt int32 Tai int32 - Pad_cgo_0 [44]byte + _ [44]byte } type Time_t int32 @@ -116,29 +116,59 @@ type Stat_t struct { } type Statfs_t struct { - Type int32 - Bsize int32 - Frsize int32 - Pad_cgo_0 [4]byte - Blocks uint64 - Bfree uint64 - Files uint64 - Ffree uint64 - Bavail uint64 - Fsid Fsid - Namelen int32 - Flags int32 - Spare [5]int32 - Pad_cgo_1 [4]byte + Type int32 + Bsize int32 + Frsize int32 + _ [4]byte + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int32 + Flags int32 + Spare [5]int32 + _ [4]byte +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 } type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte } type Fsid struct { @@ -146,13 +176,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 
[4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -227,11 +257,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -344,7 +383,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -379,6 +418,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -568,9 +608,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [2]byte - Filter *SockFilter + Len uint16 + _ [2]byte + Filter *SockFilter } type InotifyEvent struct { @@ -637,9 +677,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -688,11 +734,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -704,13 +750,13 @@ type Taskstats struct { Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -754,6 +800,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -786,3 +850,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index ffe78833..cb2701fd 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -33,13 +33,13 @@ type Timeval struct { type Timex struct { Modes uint32 - Pad_cgo_0 [4]byte + _ [4]byte Offset int64 Freq int64 Maxerror int64 Esterror int64 Status int32 - Pad_cgo_1 [4]byte + _ [4]byte Constant int64 Precision int64 Tolerance int64 @@ -48,14 +48,14 @@ type Timex struct { Ppsfreq int64 Jitter int64 Shift int32 - Pad_cgo_2 [4]byte + _ [4]byte Stabil int64 Jitcnt int64 
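As an illustrative aside (not part of the vendored patch): the hunks in these ztypes files add RawSockaddrL2, SizeofSockaddrL2 and the BDADDR_* address-type constants that back Bluetooth L2CAP sockets. A minimal sketch of how they surface through the higher-level wrapper, assuming unix.SockaddrL2 and the AF_BLUETOOTH/BTPROTO_L2CAP constants are also present in this vendored copy of x/sys/unix (the PSM value is a made-up example):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Linux-only: open a Bluetooth L2CAP socket.
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_SEQPACKET, unix.BTPROTO_L2CAP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// SockaddrL2 is the Go-side counterpart of RawSockaddrL2; AddrType
	// takes one of the BDADDR_* constants added above.
	sa := &unix.SockaddrL2{
		PSM:      0x1001,     // hypothetical PSM
		Addr:     [6]uint8{}, // BDADDR_ANY
		AddrType: unix.BDADDR_BREDR,
	}
	if err := unix.Bind(fd, sa); err != nil {
		log.Fatal(err)
	}
}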
Calcnt int64 Errcnt int64 Stbcnt int64 Tai int32 - Pad_cgo_3 [44]byte + _ [44]byte } type Time_t int64 @@ -133,13 +133,43 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]uint8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte } type Fsid struct { @@ -147,13 +177,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -228,11 +258,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -299,13 +338,13 @@ type PacketMreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 - Pad_cgo_1 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -347,7 +386,7 @@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -382,6 +421,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -571,9 +611,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [6]byte - Filter *SockFilter + Len uint16 + _ [6]byte + Filter *SockFilter } type InotifyEvent struct { @@ -616,12 +656,12 @@ type Sysinfo_t struct { Freeswap uint64 Procs uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Totalhigh uint64 Freehigh uint64 Unit uint32 X_f [0]uint8 - Pad_cgo_1 [4]byte + _ [4]byte } type Utsname struct { @@ -634,12 +674,12 @@ type Utsname struct { } type Ustat_t struct { - Tfree int32 - Pad_cgo_0 [4]byte - Tinode uint64 - Fname [6]uint8 - Fpack [6]uint8 - Pad_cgo_1 [4]byte + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + _ [4]byte } type EpollEvent struct { @@ -650,9 +690,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -701,11 +747,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -717,13 +763,13 @@ type Taskstats struct { Ac_comm [32]uint8 
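These per-arch files also gain StatxTimestamp, Statx_t and the AT_EMPTY_PATH / AT_STATX_* flags. As a hedged sketch (not part of the patch), this is how they are typically exercised, assuming the matching unix.Statx wrapper exists in this vendored revision; 0x7ff is the STATX_BASIC_STATS mask:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// Ask for the classic stat() fields of /etc/hostname, without
	// following a trailing symlink and with default sync behaviour.
	err := unix.Statx(unix.AT_FDCWD, "/etc/hostname",
		unix.AT_STATX_SYNC_AS_STAT|unix.AT_SYMLINK_NOFOLLOW, 0x7ff, &stx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("size=%d mode=%o mtime=%d.%09d\n",
		stx.Size, stx.Mode, stx.Mtime.Sec, stx.Mtime.Nsec)
}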
Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -767,6 +813,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -799,3 +863,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 7e3d946a..fa5b15be 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -33,13 +33,13 @@ type Timeval struct { type Timex struct { Modes uint32 - Pad_cgo_0 [4]byte + _ [4]byte Offset int64 Freq int64 Maxerror int64 Esterror int64 Status int32 - Pad_cgo_1 [4]byte + _ [4]byte Constant int64 Precision int64 Tolerance int64 @@ -48,14 +48,14 @@ type Timex struct { Ppsfreq int64 Jitter int64 Shift int32 - Pad_cgo_2 [4]byte + _ [4]byte Stabil int64 Jitcnt int64 Calcnt int64 Errcnt int64 Stbcnt int64 Tai int32 - Pad_cgo_3 [44]byte + _ [44]byte } type Time_t int64 @@ -133,13 +133,43 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Type uint8 - Name [256]uint8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte } type Fsid struct { @@ -147,13 +177,13 @@ type Fsid struct { } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Pid int32 - Pad_cgo_1 [4]byte + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte } type FscryptPolicy struct { @@ -228,11 +258,20 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { - Family uint16 - Pad_cgo_0 [2]byte - Ifindex int32 - Addr [8]byte + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte } type RawSockaddrALG struct { @@ -299,13 +338,13 @@ type PacketMreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 - Pad_cgo_1 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -347,7 +386,7 
@@ type TCPInfo struct { Probes uint8 Backoff uint8 Options uint8 - Pad_cgo_0 [2]byte + _ [2]byte Rto uint32 Ato uint32 Snd_mss uint32 @@ -382,6 +421,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -571,9 +611,9 @@ type SockFilter struct { } type SockFprog struct { - Len uint16 - Pad_cgo_0 [6]byte - Filter *SockFilter + Len uint16 + _ [6]byte + Filter *SockFilter } type InotifyEvent struct { @@ -616,12 +656,12 @@ type Sysinfo_t struct { Freeswap uint64 Procs uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Totalhigh uint64 Freehigh uint64 Unit uint32 X_f [0]uint8 - Pad_cgo_1 [4]byte + _ [4]byte } type Utsname struct { @@ -634,12 +674,12 @@ type Utsname struct { } type Ustat_t struct { - Tfree int32 - Pad_cgo_0 [4]byte - Tinode uint64 - Fname [6]uint8 - Fpack [6]uint8 - Pad_cgo_1 [4]byte + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + _ [4]byte } type EpollEvent struct { @@ -650,9 +690,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -701,11 +747,11 @@ type Winsize struct { type Taskstats struct { Version uint16 - Pad_cgo_0 [2]byte + _ [2]byte Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - Pad_cgo_1 [6]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -717,13 +763,13 @@ type Taskstats struct { Ac_comm [32]uint8 Ac_sched uint8 Ac_pad [3]uint8 - Pad_cgo_2 [4]byte + _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 - Pad_cgo_3 [4]byte + _ [4]byte Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 @@ -767,6 +813,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -799,3 +863,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 730fa8a6..64952cb7 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -132,6 +132,36 @@ type Statfs_t struct { _ [4]byte } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + _ int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + 
Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -227,6 +257,15 @@ type RawSockaddrHCI struct { Channel uint16 } +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -381,6 +420,7 @@ const ( SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -667,9 +707,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -784,6 +830,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -816,3 +880,16 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index e16c05a8..da70faa8 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -99,6 +99,10 @@ type Fsid struct { X__fsid_val [2]int32 } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -425,3 +429,11 @@ type Sysctlnode struct { X_sysctl_parent [8]byte X_sysctl_desc [8]byte } + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 9c374356..0963ab8c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -103,6 +103,10 @@ type Fsid struct { X__fsid_val [2]int32 } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -432,3 +436,11 @@ type Sysctlnode struct { X_sysctl_parent [8]byte X_sysctl_desc [8]byte } + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 13294231..211f6419 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ 
b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,6 +104,10 @@ type Fsid struct { X__fsid_val [2]int32 } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -430,3 +434,11 @@ type Sysctlnode struct { X_sysctl_parent [8]byte X_sysctl_desc [8]byte } + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 2cf08bf4..d5a2d75d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -140,6 +140,10 @@ type Fsid struct { Val [2]int32 } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -470,3 +474,11 @@ const ( POLLWRBAND = 0x100 POLLWRNORM = 0x4 ) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 7cfc61f6..d5314108 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -142,6 +142,10 @@ type Fsid struct { Val [2]int32 } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -477,3 +481,11 @@ const ( POLLWRBAND = 0x100 POLLWRNORM = 0x4 ) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 842c59c5..e35b13b6 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -140,6 +140,10 @@ type Fsid struct { Val [2]int32 } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -463,3 +467,11 @@ const ( POLLWRBAND = 0x100 POLLWRNORM = 0x4 ) + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/env_unset.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/env_unset.go deleted file mode 100644 index b712c660..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/env_unset.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows -// +build go1.4 - -package windows - -import "syscall" - -func Unsetenv(key string) error { - // This was added in Go 1.4. 
- return syscall.Unsetenv(key) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/env_windows.go index e8292386..bdc71e24 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/env_windows.go @@ -23,3 +23,7 @@ func Clearenv() { func Environ() []string { return syscall.Environ() } + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/key.go index f087ce5a..d0beb195 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/key.go @@ -57,11 +57,12 @@ const ( // An application can use these keys as entry points to the registry. // Normally these keys are used in OpenKey to open new keys, // but they can also be used anywhere a Key is required. - CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) - CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) - LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) - USERS = Key(syscall.HKEY_USERS) - CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) ) // Close closes open key k. @@ -87,6 +88,27 @@ func OpenKey(k Key, path string, access uint32) (Key, error) { return Key(subkey), nil } +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + // ReadSubKeyNames returns the names of subkeys of key k. // The parameter n controls the number of returned names, // analogous to the way os.File.Readdirnames works. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 00000000..0ac95ffe --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
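The registry changes above export PERFORMANCE_DATA and add OpenRemoteKey on top of RegConnectRegistryW. A short illustrative sketch (not part of the patch; the host name "fileserver01" and the key path are placeholders), using the long-standing OpenKey and GetStringValue helpers from the same package:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Connect to HKEY_LOCAL_MACHINE on another machine; an empty name
	// returns the local predefined key instead.
	remote, err := registry.OpenRemoteKey("fileserver01", registry.LOCAL_MACHINE)
	if err != nil {
		log.Fatal(err)
	}
	defer remote.Close()

	// Open a subkey relative to the remote predefined key.
	k, err := registry.OpenKey(remote, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	name, _, err := k.GetStringValue("ProductName")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("remote ProductName:", name)
}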
+ +package registry + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/syscall.go index 5426cae9..e66643cb 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/syscall.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -8,8 +8,6 @@ package registry import "syscall" -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go - const ( _REG_OPTION_NON_VOLATILE = 0 @@ -29,5 +27,6 @@ func LoadRegLoadMUIString() error { //sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW //sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW //sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW //sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go index 0fa24c6d..ceebdd77 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -3,13 +3,39 @@ package registry import ( - "golang.org/x/sys/windows" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) var _ unsafe.Pointer +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + var ( modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") modkernel32 = windows.NewLazySystemDLL("kernel32.dll") @@ -20,6 +46,7 @@ var ( procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") ) @@ -71,12 +98,20 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint return } +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { if e1 != 0 { - err = error(e1) + err = errnoErr(e1) } else { err = syscall.EINVAL } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/security_windows.go index d8e7ff2e..f1ec5dc4 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/security_windows.go @@ -132,6 +132,36 @@ const ( SECURITY_NT_NON_UNIQUE_RID = 0x15 ) +// Predefined domain-relative RIDs for local groups. 
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx +const ( + DOMAIN_ALIAS_RID_ADMINS = 0x220 + DOMAIN_ALIAS_RID_USERS = 0x221 + DOMAIN_ALIAS_RID_GUESTS = 0x222 + DOMAIN_ALIAS_RID_POWER_USERS = 0x223 + DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 + DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 + DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 + DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 + DOMAIN_ALIAS_RID_REPLICATOR = 0x228 + DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 + DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a + DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b + DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c + DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d + DOMAIN_ALIAS_RID_MONITORING_USERS = 0X22e + DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f + DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 + DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 + DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 + DOMAIN_ALIAS_RID_IUSERS = 0x238 + DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 + DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b + DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c + DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d + DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e +) + //sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW //sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW //sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW @@ -335,6 +365,8 @@ type Tokengroups struct { Groups [1]SIDAndAttributes } +// Authorization Functions +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership //sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken //sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation //sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW @@ -433,3 +465,12 @@ func (t Token) GetUserProfileDirectory() (string, error) { } } } + +// IsMember reports whether the access token t is a member of the provided SID. 
+func (t Token) IsMember(sid *SID) (bool, error) { + var b int32 + if e := checkTokenMembership(t, sid, &b); e != nil { + return false, e + } + return b != 0, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/debug/service.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/debug/service.go index d5ab94b2..123df989 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/debug/service.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/debug/service.go @@ -31,7 +31,7 @@ func Run(name string, handler svc.Handler) error { for { select { case <-sig: - cmds <- svc.ChangeRequest{svc.Stop, status} + cmds <- svc.ChangeRequest{svc.Stop, 0, 0, status} case status = <-changes: } } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go12.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go12.go index 6f0a924e..cd8b913c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go12.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go12.go @@ -1,4 +1,4 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go13.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go13.go index 432a9e79..9d7f3cec 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go13.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/go13.go @@ -1,4 +1,4 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/service.go index 9864f7a7..903cba3f 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/service.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/service.go @@ -35,11 +35,20 @@ const ( type Cmd uint32 const ( - Stop = Cmd(windows.SERVICE_CONTROL_STOP) - Pause = Cmd(windows.SERVICE_CONTROL_PAUSE) - Continue = Cmd(windows.SERVICE_CONTROL_CONTINUE) - Interrogate = Cmd(windows.SERVICE_CONTROL_INTERROGATE) - Shutdown = Cmd(windows.SERVICE_CONTROL_SHUTDOWN) + Stop = Cmd(windows.SERVICE_CONTROL_STOP) + Pause = Cmd(windows.SERVICE_CONTROL_PAUSE) + Continue = Cmd(windows.SERVICE_CONTROL_CONTINUE) + Interrogate = Cmd(windows.SERVICE_CONTROL_INTERROGATE) + Shutdown = Cmd(windows.SERVICE_CONTROL_SHUTDOWN) + ParamChange = Cmd(windows.SERVICE_CONTROL_PARAMCHANGE) + NetBindAdd = Cmd(windows.SERVICE_CONTROL_NETBINDADD) + NetBindRemove = Cmd(windows.SERVICE_CONTROL_NETBINDREMOVE) + NetBindEnable = Cmd(windows.SERVICE_CONTROL_NETBINDENABLE) + NetBindDisable = Cmd(windows.SERVICE_CONTROL_NETBINDDISABLE) + DeviceEvent = Cmd(windows.SERVICE_CONTROL_DEVICEEVENT) + HardwareProfileChange = Cmd(windows.SERVICE_CONTROL_HARDWAREPROFILECHANGE) + PowerEvent = Cmd(windows.SERVICE_CONTROL_POWEREVENT) + SessionChange = Cmd(windows.SERVICE_CONTROL_SESSIONCHANGE) ) // Accepted is used to describe commands accepted by the service. 
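Taken together, the new DOMAIN_ALIAS_RID_* constants, checkTokenMembership and Token.IsMember enable the common "is this process a member of BUILTIN\Administrators?" check. A hedged sketch (not part of the patch) that assumes SECURITY_NT_AUTHORITY, SECURITY_BUILTIN_DOMAIN_RID, AllocateAndInitializeSid and FreeSid are available in this vendored windows package, as they are upstream:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Build the well-known SID S-1-5-32-544 (BUILTIN\Administrators).
	var adminSID *windows.SID
	err := windows.AllocateAndInitializeSid(
		&windows.SECURITY_NT_AUTHORITY,
		2,
		windows.SECURITY_BUILTIN_DOMAIN_RID,
		windows.DOMAIN_ALIAS_RID_ADMINS,
		0, 0, 0, 0, 0, 0,
		&adminSID)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.FreeSid(adminSID)

	// A zero Token makes CheckTokenMembership use the calling thread's
	// effective token.
	isAdmin, err := windows.Token(0).IsMember(adminSID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("member of BUILTIN\\Administrators:", isAdmin)
}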
@@ -47,9 +56,14 @@ const ( type Accepted uint32 const ( - AcceptStop = Accepted(windows.SERVICE_ACCEPT_STOP) - AcceptShutdown = Accepted(windows.SERVICE_ACCEPT_SHUTDOWN) - AcceptPauseAndContinue = Accepted(windows.SERVICE_ACCEPT_PAUSE_CONTINUE) + AcceptStop = Accepted(windows.SERVICE_ACCEPT_STOP) + AcceptShutdown = Accepted(windows.SERVICE_ACCEPT_SHUTDOWN) + AcceptPauseAndContinue = Accepted(windows.SERVICE_ACCEPT_PAUSE_CONTINUE) + AcceptParamChange = Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE) + AcceptNetBindChange = Accepted(windows.SERVICE_ACCEPT_NETBINDCHANGE) + AcceptHardwareProfileChange = Accepted(windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE) + AcceptPowerEvent = Accepted(windows.SERVICE_ACCEPT_POWEREVENT) + AcceptSessionChange = Accepted(windows.SERVICE_ACCEPT_SESSIONCHANGE) ) // Status combines State and Accepted commands to fully describe running service. @@ -63,6 +77,8 @@ type Status struct { // ChangeRequest is sent to the service Handler to request service status change. type ChangeRequest struct { Cmd Cmd + EventType uint32 + EventData uintptr CurrentStatus Status } @@ -85,16 +101,16 @@ type Handler interface { var ( // These are used by asm code. - goWaitsH uintptr - cWaitsH uintptr - ssHandle uintptr - sName *uint16 - sArgc uintptr - sArgv **uint16 - ctlHandlerProc uintptr - cSetEvent uintptr - cWaitForSingleObject uintptr - cRegisterServiceCtrlHandlerW uintptr + goWaitsH uintptr + cWaitsH uintptr + ssHandle uintptr + sName *uint16 + sArgc uintptr + sArgv **uint16 + ctlHandlerExProc uintptr + cSetEvent uintptr + cWaitForSingleObject uintptr + cRegisterServiceCtrlHandlerExW uintptr ) func init() { @@ -102,12 +118,16 @@ func init() { cSetEvent = k.MustFindProc("SetEvent").Addr() cWaitForSingleObject = k.MustFindProc("WaitForSingleObject").Addr() a := syscall.MustLoadDLL("advapi32.dll") - cRegisterServiceCtrlHandlerW = a.MustFindProc("RegisterServiceCtrlHandlerW").Addr() + cRegisterServiceCtrlHandlerExW = a.MustFindProc("RegisterServiceCtrlHandlerExW").Addr() } +// The HandlerEx prototype also has a context pointer but since we don't use +// it at start-up time we don't have to pass it over either. type ctlEvent struct { - cmd Cmd - errno uint32 + cmd Cmd + eventType uint32 + eventData uintptr + errno uint32 } // service provides access to windows service api. 
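With ChangeRequest now carrying EventType and EventData, and with the extra Accept* flags, a handler can opt into notifications such as power events. A minimal sketch of a handler built on this API (not part of the patch; the service name "myservice" is a placeholder):

package main

import (
	"log"

	"golang.org/x/sys/windows/svc"
)

type handler struct{}

func (h *handler) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) {
	const accepts = svc.AcceptStop | svc.AcceptShutdown | svc.AcceptPowerEvent
	s <- svc.Status{State: svc.StartPending}
	s <- svc.Status{State: svc.Running, Accepts: accepts}
	for c := range r {
		switch c.Cmd {
		case svc.Interrogate:
			s <- c.CurrentStatus
		case svc.PowerEvent:
			// EventType/EventData are the fields added above; for power
			// notifications EventType carries the PBT_* code.
			log.Printf("power event: type=%#x data=%#x", c.EventType, c.EventData)
		case svc.Stop, svc.Shutdown:
			s <- svc.Status{State: svc.StopPending}
			return false, 0
		}
	}
	return false, 0
}

func main() {
	if err := svc.Run("myservice", &handler{}); err != nil {
		log.Fatal(err)
	}
}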
@@ -165,6 +185,21 @@ func (s *service) updateStatus(status *Status, ec *exitCode) error { if status.Accepts&AcceptPauseAndContinue != 0 { t.ControlsAccepted |= windows.SERVICE_ACCEPT_PAUSE_CONTINUE } + if status.Accepts&AcceptParamChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_PARAMCHANGE + } + if status.Accepts&AcceptNetBindChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_NETBINDCHANGE + } + if status.Accepts&AcceptHardwareProfileChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE + } + if status.Accepts&AcceptPowerEvent != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_POWEREVENT + } + if status.Accepts&AcceptSessionChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_SESSIONCHANGE + } if ec.errno == 0 { t.Win32ExitCode = windows.NO_ERROR t.ServiceSpecificExitCode = windows.NO_ERROR @@ -208,6 +243,8 @@ func (s *service) run() { var outch chan ChangeRequest inch := s.c var cmd Cmd + var evtype uint32 + var evdata uintptr loop: for { select { @@ -219,7 +256,9 @@ loop: inch = nil outch = cmdsToHandler cmd = r.cmd - case outch <- ChangeRequest{cmd, status}: + evtype = r.eventType + evdata = r.eventData + case outch <- ChangeRequest{cmd, evtype, evdata, status}: inch = s.c outch = nil case c := <-changesFromHandler: @@ -276,8 +315,8 @@ func Run(name string, handler Handler) error { return err } - ctlHandler := func(ctl uint32) uintptr { - e := ctlEvent{cmd: Cmd(ctl)} + ctlHandler := func(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr { + e := ctlEvent{cmd: Cmd(ctl), eventType: evtype, eventData: evdata} // We assume that this callback function is running on // the same thread as Run. Nowhere in MS documentation // I could find statement to guarantee that. So putting @@ -288,6 +327,7 @@ func Run(name string, handler Handler) error { e.errno = sysErrNewThreadInCallback } s.c <- e + // Always return NO_ERROR (0) for now. return 0 } @@ -301,7 +341,7 @@ func Run(name string, handler Handler) error { goWaitsH = uintptr(s.goWaits.h) cWaitsH = uintptr(s.cWaits.h) sName = t[0].ServiceName - ctlHandlerProc, err = newCallback(ctlHandler) + ctlHandlerExProc, err = newCallback(ctlHandler) if err != nil { return err } @@ -314,3 +354,10 @@ func Run(name string, handler Handler) error { } return nil } + +// StatusHandle returns service status handle. It is safe to call this function +// from inside the Handler.Execute because then it is guaranteed to be set. +// This code will have to change once multiple services are possible per process. +func StatusHandle() windows.Handle { + return windows.Handle(ssHandle) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_386.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_386.s index 5e11bfad..2c82a9d9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_386.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_386.s @@ -22,7 +22,8 @@ TEXT ·servicemain(SB),7,$0 MOVL AX, (SP) MOVL $·servicectlhandler(SB), AX MOVL AX, 4(SP) - MOVL ·cRegisterServiceCtrlHandlerW(SB), AX + MOVL $0, 8(SP) + MOVL ·cRegisterServiceCtrlHandlerExW(SB), AX MOVL SP, BP CALL AX MOVL BP, SP @@ -61,7 +62,7 @@ exit: // I do not know why, but this seems to be the only way to call // ctlHandlerProc on Windows 7. 
-// func servicectlhandler(ctl uint32) uintptr +// func servicectlhandler(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr { TEXT ·servicectlhandler(SB),7,$0 - MOVL ·ctlHandlerProc(SB), CX + MOVL ·ctlHandlerExProc(SB), CX JMP CX diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_amd64.s b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_amd64.s index 87dbec83..06b42590 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_amd64.s +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/svc/sys_amd64.s @@ -13,7 +13,8 @@ TEXT ·servicemain(SB),7,$0 MOVQ ·sName(SB), CX MOVQ $·servicectlhandler(SB), DX - MOVQ ·cRegisterServiceCtrlHandlerW(SB), AX + // BUG(pastarmovj): Figure out a way to pass in context in R8. + MOVQ ·cRegisterServiceCtrlHandlerExW(SB), AX CALL AX CMPQ AX, $0 JE exit @@ -35,7 +36,7 @@ exit: // I do not know why, but this seems to be the only way to call // ctlHandlerProc on Windows 7. -// func servicectlhandler(ctl uint32) uintptr +// func ·servicectlhandler(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr { TEXT ·servicectlhandler(SB),7,$0 - MOVQ ·ctlHandlerProc(SB), AX + MOVQ ·ctlHandlerExProc(SB), AX JMP AX diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/syscall_windows.go index e0da2aa0..1e9f4bb4 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -248,7 +248,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW //sys FindVolumeClose(findVolume Handle) (err error) //sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) -//sys GetDriveType(rootPathName *uint16) (driveType uint32) +//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW //sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] //sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW //sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 56cd15a8..c7b3b15e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -186,7 +186,7 @@ var ( procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") - procGetDriveType = modkernel32.NewProc("GetDriveType") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") 
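The GetDriveType declaration and its proc entry now point at the wide-character GetDriveTypeW export, which is the symbol kernel32 actually provides. A quick illustrative sketch (not part of the patch):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	root, err := windows.UTF16PtrFromString(`C:\`)
	if err != nil {
		log.Fatal(err)
	}
	// GetDriveTypeW does not fail; it reports DRIVE_UNKNOWN (0) or
	// DRIVE_NO_ROOT_DIR (1) for unusable input.
	fmt.Println("drive type code:", windows.GetDriveType(root))
}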
procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") @@ -246,6 +246,7 @@ var ( procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") procFreeSid = modadvapi32.NewProc("FreeSid") procEqualSid = modadvapi32.NewProc("EqualSid") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -1961,7 +1962,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveType.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) driveType = uint32(r0) return } @@ -2637,6 +2638,18 @@ func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { return } +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/AUTHORS b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/CONTRIBUTING.md new file mode 100644 index 00000000..88dff59b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. 
+ diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/README.md b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/README.md new file mode 100644 index 00000000..b3f365ee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/README.md @@ -0,0 +1,93 @@ +# Go Text + +This repository holds supplementary Go libraries for text processing, many involving Unicode. + +## Semantic Versioning +This repo uses Semantic versioning (http://semver.org/), so +1. MAJOR version when you make incompatible API changes, +1. MINOR version when you add functionality in a backwards-compatible manner, + and +1. PATCH version when you make backwards-compatible bug fixes. + +Until version 1.0.0 of x/text is reached, the minor version is considered a +major version. So going from 0.1.0 to 0.2.0 is considered to be a major version +bump. + +A major new CLDR version is mapped to a minor version increase in x/text. +Any other new CLDR version is mapped to a patch version increase in x/text. + +It is important that the Unicode version used in `x/text` matches the one used +by your Go compiler. The `x/text` repository supports multiple versions of +Unicode and will match the version of Unicode to that of the Go compiler. At the +moment this is supported for Go compilers from version 1.7. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/text`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/text`. + +## Contribute +To submit changes to this repository, see http://golang.org/doc/contribute.html. + +To generate the tables in this repository (except for the encoding tables), +run go generate from this directory. By default tables are generated for the +Unicode version in core and the CLDR version defined in +golang.org/x/text/unicode/cldr. + +Running go generate will as a side effect create a DATA subdirectory in this +directory, which holds all files that are used as a source for generating the +tables. This directory will also serve as a cache. + +## Testing +Run + + go test ./... + +from this directory to run all tests. Add the "-tags icu" flag to also run +ICU conformance tests (if available). This requires that you have the correct +ICU version installed on your system. + +TODO: +- updating unversioned source files. + +## Generating Tables + +To generate the tables in this repository (except for the encoding +tables), run `go generate` from this directory. By default tables are +generated for the Unicode version in core and the CLDR version defined in +golang.org/x/text/unicode/cldr. + +Running go generate will as a side effect create a DATA subdirectory in this +directory which holds all files that are used as a source for generating the +tables. This directory will also serve as a cache. + +## Versions +To update a Unicode version run + + UNICODE_VERSION=x.x.x go generate + +where `x.x.x` must correspond to a directory in http://www.unicode.org/Public/. 
+If this version is newer than the version in core it will also update the +relevant packages there. The idna package in x/net will always be updated. + +To update a CLDR version run + + CLDR_VERSION=version go generate + +where `version` must correspond to a directory in +http://www.unicode.org/Public/cldr/. + +Note that the code gets adapted over time to changes in the data and that +backwards compatibility is not maintained. +So updating to a different version may not work. + +The files in DATA/{iana|icu|w3|whatwg} are currently not versioned. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the image repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/image:" in the +subject line, so it is easy to find. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/codereview.cfg b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/codereview.cfg new file mode 100644 index 00000000..3f8b14b6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/doc.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/doc.go new file mode 100644 index 00000000..2e19a419 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// text is a repository of text-related packages related to internationalization +// (i18n) and localization (l10n), such as character encodings, text +// transformations, and locale-specific text handling. +// +// There is a 30 minute video, recorded on 2017-11-30, on the "State of +// golang.org/x/text" at https://www.youtube.com/watch?v=uYrDrMEGu58 +package text + +// TODO: more documentation on general concepts, such as Transformers, use +// of normalization, etc. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/charmap.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/charmap.go index 6e62a837..e89ff073 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/charmap.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/charmap.go @@ -33,32 +33,32 @@ var ( ISO8859_8I encoding.Encoding = &iso8859_8I iso8859_6E = internal.Encoding{ - ISO8859_6, - "ISO-8859-6E", - identifier.ISO88596E, + Encoding: ISO8859_6, + Name: "ISO-8859-6E", + MIB: identifier.ISO88596E, } iso8859_6I = internal.Encoding{ - ISO8859_6, - "ISO-8859-6I", - identifier.ISO88596I, + Encoding: ISO8859_6, + Name: "ISO-8859-6I", + MIB: identifier.ISO88596I, } iso8859_8E = internal.Encoding{ - ISO8859_8, - "ISO-8859-8E", - identifier.ISO88598E, + Encoding: ISO8859_8, + Name: "ISO-8859-8E", + MIB: identifier.ISO88598E, } iso8859_8I = internal.Encoding{ - ISO8859_8, - "ISO-8859-8I", - identifier.ISO88598I, + Encoding: ISO8859_8, + Name: "ISO-8859-8I", + MIB: identifier.ISO88598I, } ) // All is a list of all defined encodings in this package. -var All = listAll +var All []encoding.Encoding = listAll // TODO: implement these encodings, in order of importance. // ASCII, ISO8859_1: Rather common. 
Close to Windows 1252. @@ -70,8 +70,8 @@ type utf8Enc struct { data [3]byte } -// charmap describes an 8-bit character set encoding. -type charmap struct { +// Charmap is an 8-bit character set encoding. +type Charmap struct { // name is the encoding's name. name string // mib is the encoding type of this encoder. @@ -79,7 +79,7 @@ type charmap struct { // asciiSuperset states whether the encoding is a superset of ASCII. asciiSuperset bool // low is the lower bound of the encoded byte for a non-ASCII rune. If - // charmap.asciiSuperset is true then this will be 0x80, otherwise 0x00. + // Charmap.asciiSuperset is true then this will be 0x80, otherwise 0x00. low uint8 // replacement is the encoded replacement character. replacement byte @@ -91,26 +91,30 @@ type charmap struct { encode [256]uint32 } -func (m *charmap) NewDecoder() *encoding.Decoder { +// NewDecoder implements the encoding.Encoding interface. +func (m *Charmap) NewDecoder() *encoding.Decoder { return &encoding.Decoder{Transformer: charmapDecoder{charmap: m}} } -func (m *charmap) NewEncoder() *encoding.Encoder { +// NewEncoder implements the encoding.Encoding interface. +func (m *Charmap) NewEncoder() *encoding.Encoder { return &encoding.Encoder{Transformer: charmapEncoder{charmap: m}} } -func (m *charmap) String() string { +// String returns the Charmap's name. +func (m *Charmap) String() string { return m.name } -func (m *charmap) ID() (mib identifier.MIB, other string) { +// ID implements an internal interface. +func (m *Charmap) ID() (mib identifier.MIB, other string) { return m.mib, "" } // charmapDecoder implements transform.Transformer by decoding to UTF-8. type charmapDecoder struct { transform.NopResetter - charmap *charmap + charmap *Charmap } func (m charmapDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { @@ -142,10 +146,22 @@ func (m charmapDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, return nDst, nSrc, err } +// DecodeByte returns the Charmap's rune decoding of the byte b. +func (m *Charmap) DecodeByte(b byte) rune { + switch x := &m.decode[b]; x.len { + case 1: + return rune(x.data[0]) + case 2: + return rune(x.data[0]&0x1f)<<6 | rune(x.data[1]&0x3f) + default: + return rune(x.data[0]&0x0f)<<12 | rune(x.data[1]&0x3f)<<6 | rune(x.data[2]&0x3f) + } +} + // charmapEncoder implements transform.Transformer by encoding from UTF-8. type charmapEncoder struct { transform.NopResetter - charmap *charmap + charmap *Charmap } func (m charmapEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { @@ -207,3 +223,27 @@ loop: } return nDst, nSrc, err } + +// EncodeRune returns the Charmap's byte encoding of the rune r. ok is whether +// r is in the Charmap's repertoire. If not, b is set to the Charmap's +// replacement byte. This is often the ASCII substitute character '\x1a'. 
+func (m *Charmap) EncodeRune(r rune) (b byte, ok bool) { + if r < utf8.RuneSelf && m.asciiSuperset { + return byte(r), true + } + for low, high := int(m.low), 0x100; ; { + if low >= high { + return m.replacement, false + } + mid := (low + high) / 2 + got := m.encode[mid] + gotRune := rune(got & (1<<24 - 1)) + if gotRune < r { + low = mid + 1 + } else if gotRune > r { + high = mid + } else { + return byte(got >> 24), true + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/maketables.go deleted file mode 100644 index 09910812..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/maketables.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" - "unicode/utf8" - - "golang.org/x/text/encoding" - "golang.org/x/text/internal/gen" -) - -const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + - "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + - ` !"#$%&'()*+,-./0123456789:;<=>?` + - `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` + - "`abcdefghijklmnopqrstuvwxyz{|}~\u007f" - -var encodings = []struct { - name string - mib string - comment string - varName string - replacement byte - mapping string -}{ - { - "IBM Code Page 437", - "PC8CodePage437", - "", - "CodePage437", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm", - }, - { - "IBM Code Page 850", - "PC850Multilingual", - "", - "CodePage850", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm", - }, - { - "IBM Code Page 852", - "PCp852", - "", - "CodePage852", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm", - }, - { - "IBM Code Page 855", - "IBM855", - "", - "CodePage855", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm", - }, - { - "Windows Code Page 858", // PC latin1 with Euro - "IBM00858", - "", - "CodePage858", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm", - }, - { - "IBM Code Page 862", - "PC862LatinHebrew", - "", - "CodePage862", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm", - }, - { - "IBM Code Page 866", - "IBM866", - "", - "CodePage866", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-ibm866.txt", - }, - { - "ISO 8859-1", - "ISOLatin1", - "", - "ISO8859_1", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm", - }, - { - "ISO 8859-2", - "ISOLatin2", - "", - "ISO8859_2", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-2.txt", - }, - { - "ISO 8859-3", - "ISOLatin3", - "", - "ISO8859_3", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-3.txt", - }, - { - "ISO 8859-4", - "ISOLatin4", - "", - "ISO8859_4", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-4.txt", - }, - { - "ISO 8859-5", - "ISOLatinCyrillic", - "", - "ISO8859_5", - encoding.ASCIISub, - 
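// A minimal usage sketch of the Charmap API exported in the charmap.go hunk
// above (DecodeByte, EncodeRune, and the encoding.Encoding methods that the
// type keeps implementing). It assumes the vendored
// golang.org/x/text/encoding/charmap package resolves on the import path;
// the expected byte values follow the IBM Code Page 437 layout.
package main

import (
	"fmt"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	// Per-rune conversion via the newly exported methods.
	b, ok := charmap.CodePage437.EncodeRune('é') // 0x82, true in CP437
	fmt.Printf("%#x %v\n", b, ok)
	fmt.Printf("%c\n", charmap.CodePage437.DecodeByte(b)) // é again

	// Whole-string conversion still goes through the encoding.Encoding
	// interface, as before this change.
	out, err := charmap.CodePage437.NewEncoder().String("déjà vu")
	fmt.Printf("% x %v\n", out, err)
}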
"http://encoding.spec.whatwg.org/index-iso-8859-5.txt", - }, - { - "ISO 8859-6", - "ISOLatinArabic", - "", - "ISO8859_6,ISO8859_6E,ISO8859_6I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-6.txt", - }, - { - "ISO 8859-7", - "ISOLatinGreek", - "", - "ISO8859_7", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-7.txt", - }, - { - "ISO 8859-8", - "ISOLatinHebrew", - "", - "ISO8859_8,ISO8859_8E,ISO8859_8I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-8.txt", - }, - { - "ISO 8859-10", - "ISOLatin6", - "", - "ISO8859_10", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-10.txt", - }, - { - "ISO 8859-13", - "ISO885913", - "", - "ISO8859_13", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-13.txt", - }, - { - "ISO 8859-14", - "ISO885914", - "", - "ISO8859_14", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-14.txt", - }, - { - "ISO 8859-15", - "ISO885915", - "", - "ISO8859_15", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-15.txt", - }, - { - "ISO 8859-16", - "ISO885916", - "", - "ISO8859_16", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-16.txt", - }, - { - "KOI8-R", - "KOI8R", - "", - "KOI8R", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-r.txt", - }, - { - "KOI8-U", - "KOI8U", - "", - "KOI8U", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-u.txt", - }, - { - "Macintosh", - "Macintosh", - "", - "Macintosh", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-macintosh.txt", - }, - { - "Macintosh Cyrillic", - "MacintoshCyrillic", - "", - "MacintoshCyrillic", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt", - }, - { - "Windows 874", - "Windows874", - "", - "Windows874", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-874.txt", - }, - { - "Windows 1250", - "Windows1250", - "", - "Windows1250", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1250.txt", - }, - { - "Windows 1251", - "Windows1251", - "", - "Windows1251", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1251.txt", - }, - { - "Windows 1252", - "Windows1252", - "", - "Windows1252", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1252.txt", - }, - { - "Windows 1253", - "Windows1253", - "", - "Windows1253", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1253.txt", - }, - { - "Windows 1254", - "Windows1254", - "", - "Windows1254", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1254.txt", - }, - { - "Windows 1255", - "Windows1255", - "", - "Windows1255", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1255.txt", - }, - { - "Windows 1256", - "Windows1256", - "", - "Windows1256", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1256.txt", - }, - { - "Windows 1257", - "Windows1257", - "", - "Windows1257", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1257.txt", - }, - { - "Windows 1258", - "Windows1258", - "", - "Windows1258", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1258.txt", - }, - { - "X-User-Defined", - "XUserDefined", - "It is defined at http://encoding.spec.whatwg.org/#x-user-defined", - "XUserDefined", - encoding.ASCIISub, - ascii + - "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" + - "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" + 
- "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" + - "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" + - "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" + - "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" + - "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" + - "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" + - "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" + - "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" + - "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" + - "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" + - "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" + - "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" + - "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" + - "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff", - }, -} - -func getWHATWG(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 128) - for i := range mapping { - mapping[i] = '\ufffd' - } - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, 0 - if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 128 <= x { - log.Fatalf("code %d is out of range", x) - } - if 0x80 <= y && y < 0xa0 { - // We diverge from the WHATWG spec by mapping control characters - // in the range [0x80, 0xa0) to U+FFFD. - continue - } - mapping[x] = rune(y) - } - return ascii + string(mapping) -} - -func getUCM(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 256) - for i := range mapping { - mapping[i] = '\ufffd' - } - - charsFound := 0 - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - var c byte - var r rune - if _, err := fmt.Sscanf(s, ` \x%x |0`, &r, &c); err != nil { - continue - } - mapping[c] = r - charsFound++ - } - - if charsFound < 200 { - log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound) - } - - return string(mapping) -} - -func main() { - mibs := map[string]bool{} - all := []string{} - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "charmap") - - printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) } - - printf("import (\n") - printf("\t\"golang.org/x/text/encoding\"\n") - printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n") - printf(")\n\n") - for _, e := range encodings { - varNames := strings.Split(e.varName, ",") - all = append(all, varNames...) 
- varName := varNames[0] - switch { - case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"): - e.mapping = getWHATWG(e.mapping) - case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"): - e.mapping = getUCM(e.mapping) - } - - asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00 - if asciiSuperset { - low = 0x80 - } - lvn := 1 - if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") { - lvn = 3 - } - lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:] - printf("// %s is the %s encoding.\n", varName, e.name) - if e.comment != "" { - printf("//\n// %s\n", e.comment) - } - printf("var %s encoding.Encoding = &%s\n\nvar %s = charmap{\nname: %q,\n", - varName, lowerVarName, lowerVarName, e.name) - if mibs[e.mib] { - log.Fatalf("MIB type %q declared multiple times.", e.mib) - } - printf("mib: identifier.%s,\n", e.mib) - printf("asciiSuperset: %t,\n", asciiSuperset) - printf("low: 0x%02x,\n", low) - printf("replacement: 0x%02x,\n", e.replacement) - - printf("decode: [256]utf8Enc{\n") - i, backMapping := 0, map[rune]byte{} - for _, c := range e.mapping { - if _, ok := backMapping[c]; !ok && c != utf8.RuneError { - backMapping[c] = byte(i) - } - var buf [8]byte - n := utf8.EncodeRune(buf[:], c) - if n > 3 { - panic(fmt.Sprintf("rune %q (%U) is too long", c, c)) - } - printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2]) - if i%2 == 1 { - printf("\n") - } - i++ - } - printf("},\n") - - printf("encode: [256]uint32{\n") - encode := make([]uint32, 0, 256) - for c, i := range backMapping { - encode = append(encode, uint32(i)<<24|uint32(c)) - } - sort.Sort(byRune(encode)) - for len(encode) < cap(encode) { - encode = append(encode, encode[len(encode)-1]) - } - for i, enc := range encode { - printf("0x%08x,", enc) - if i%8 == 7 { - printf("\n") - } - } - printf("},\n}\n") - - // Add an estimate of the size of a single charmap{} struct value, which - // includes two 256 elem arrays of 4 bytes and some extra fields, which - // align to 3 uint64s on 64-bit architectures. - w.Size += 2*4*256 + 3*8 - } - // TODO: add proper line breaking. - printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n")) -} - -type byRune []uint32 - -func (b byRune) Len() int { return len(b) } -func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff } -func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/tables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/tables.go index 2bcef84d..cf7281e9 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/tables.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/charmap/tables.go @@ -1,4 +1,4 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package charmap @@ -7,10 +7,185 @@ import ( "golang.org/x/text/encoding/internal/identifier" ) +// CodePage037 is the IBM Code Page 037 encoding. 
+var CodePage037 *Charmap = &codePage037 + +var codePage037 = Charmap{ + name: "IBM Code Page 037", + mib: identifier.IBM037, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xac, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, 
[3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {1, [3]byte{0x5b, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 
0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xba00005b, 0xe000005c, 0xbb00005d, 0xb000005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 
0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0x9f0000a4, 0xb20000a5, 0x6a0000a6, 0xb50000a7, + 0xbd0000a8, 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0x5f0000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, + 0x900000b0, 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, + 0x9d0000b8, 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, + 0x640000c0, 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, + 0x740000c8, 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, + 0xac0000d0, 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, + 0x800000d8, 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xad0000dd, 0xae0000de, 0x590000df, + 0x440000e0, 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, + 0x540000e8, 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, + 0x8c0000f0, 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, + 0x700000f8, 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, + }, +} + // CodePage437 is the IBM Code Page 437 encoding. -var CodePage437 encoding.Encoding = &codePage437 +var CodePage437 *Charmap = &codePage437 -var codePage437 = charmap{ +var codePage437 = Charmap{ name: "IBM Code Page 437", mib: identifier.PC8CodePage437, asciiSuperset: true, @@ -183,9 +358,9 @@ var codePage437 = charmap{ } // CodePage850 is the IBM Code Page 850 encoding. -var CodePage850 encoding.Encoding = &codePage850 +var CodePage850 *Charmap = &codePage850 -var codePage850 = charmap{ +var codePage850 = Charmap{ name: "IBM Code Page 850", mib: identifier.PC850Multilingual, asciiSuperset: true, @@ -358,9 +533,9 @@ var codePage850 = charmap{ } // CodePage852 is the IBM Code Page 852 encoding. -var CodePage852 encoding.Encoding = &codePage852 +var CodePage852 *Charmap = &codePage852 -var codePage852 = charmap{ +var codePage852 = Charmap{ name: "IBM Code Page 852", mib: identifier.PCp852, asciiSuperset: true, @@ -533,9 +708,9 @@ var codePage852 = charmap{ } // CodePage855 is the IBM Code Page 855 encoding. -var CodePage855 encoding.Encoding = &codePage855 +var CodePage855 *Charmap = &codePage855 -var codePage855 = charmap{ +var codePage855 = Charmap{ name: "IBM Code Page 855", mib: identifier.IBM855, asciiSuperset: true, @@ -708,9 +883,9 @@ var codePage855 = charmap{ } // CodePage858 is the Windows Code Page 858 encoding. -var CodePage858 encoding.Encoding = &codePage858 +var CodePage858 *Charmap = &codePage858 -var codePage858 = charmap{ +var codePage858 = Charmap{ name: "Windows Code Page 858", mib: identifier.IBM00858, asciiSuperset: true, @@ -882,10 +1057,185 @@ var codePage858 = charmap{ }, } +// CodePage860 is the IBM Code Page 860 encoding. 
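// The generated encode tables above pack each entry into a uint32: the
// code-page byte sits in the top 8 bits and the Unicode code point in the
// low 24 bits, and EncodeRune binary-searches the rune-sorted array. A small
// sketch of unpacking one such entry, using the 0x5a000021 value from the
// CodePage037 table above ('!' encodes to EBCDIC 0x5a):
package main

import "fmt"

func main() {
	const entry uint32 = 0x5a000021
	b := byte(entry >> 24)         // encoded byte: 0x5a
	r := rune(entry & (1<<24 - 1)) // code point: '!' (U+0021)
	fmt.Printf("%q -> %#x\n", r, b)
}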
+var CodePage860 *Charmap = &codePage860 + +var codePage860 = Charmap{ + name: "IBM Code Page 860", + mib: identifier.IBM860, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0x8a, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 
0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9b0000a2, 0x9c0000a3, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf80000b0, + 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xaf0000bb, 0xac0000bc, 0xab0000bd, + 0xa80000bf, 0x910000c0, 0x860000c1, 0x8f0000c2, 0x8e0000c3, 0x800000c7, 0x920000c8, 0x900000c9, + 0x890000ca, 0x980000cc, 0x8b0000cd, 0xa50000d1, 0xa90000d2, 0x9f0000d3, 0x8c0000d4, 0x990000d5, + 
0x9d0000d9, 0x960000da, 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e3, + 0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x8d0000ec, 0xa10000ed, 0xa40000f1, 0x950000f2, + 0xa20000f3, 0x930000f4, 0x940000f5, 0xf60000f7, 0x970000f9, 0xa30000fa, 0x810000fc, 0xe2000393, + 0xe9000398, 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, + 0xe50003c3, 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, + 0xef002229, 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + // CodePage862 is the IBM Code Page 862 encoding. -var CodePage862 encoding.Encoding = &codePage862 +var CodePage862 *Charmap = &codePage862 -var codePage862 = charmap{ +var codePage862 = Charmap{ name: "IBM Code Page 862", mib: identifier.PC862LatinHebrew, asciiSuperset: true, @@ -1057,12 +1407,12 @@ var codePage862 = charmap{ }, } -// CodePage866 is the IBM Code Page 866 encoding. -var CodePage866 encoding.Encoding = &codePage866 +// CodePage863 is the IBM Code Page 863 encoding. +var CodePage863 *Charmap = &codePage863 -var codePage866 = charmap{ - name: "IBM Code Page 866", - mib: identifier.IBM866, +var codePage863 = Charmap{ + name: "IBM Code Page 863", + mib: identifier.IBM863, asciiSuperset: true, low: 0x80, replacement: 0x1a, @@ -1131,30 +1481,30 @@ var codePage866 = charmap{ {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, - {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, - {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, - {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, - {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, - {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, - {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, - {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, - {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, - {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, - {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, - {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, - {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, - {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, - {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, - {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, - {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, - {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, - {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, - {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, - {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, - {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 
0x00}}, - {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, - {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, - {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x97}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x88, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0x8b, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9b, 0x00}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, @@ -1179,63 +1529,763 @@ var codePage866 = charmap{ {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, - {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, - {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, - {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, - {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, - {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, - {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, - {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, - {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, - {2, [3]byte{0xd0, 0x81, 0x00}}, {2, [3]byte{0xd1, 0x91, 0x00}}, - {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x94, 0x00}}, - {2, [3]byte{0xd0, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, - {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x9e, 0x00}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, 
[3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, - {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0x9b0000a2, 0x9c0000a3, 0x980000a4, 0xa00000a6, 0x8f0000a7, 0xa40000a8, 0xae0000ab, + 0xaa0000ac, 0xa70000af, 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xa60000b3, 0xa10000b4, 0xe60000b5, + 0x860000b6, 0xfa0000b7, 0xa50000b8, 0xaf0000bb, 0xac0000bc, 0xab0000bd, 0xad0000be, 0x8e0000c0, + 0x840000c2, 0x800000c7, 0x910000c8, 0x900000c9, 0x920000ca, 0x940000cb, 0xa80000ce, 0x950000cf, + 0x990000d4, 0x9d0000d9, 0x9e0000db, 0x9a0000dc, 0xe10000df, 0x850000e0, 0x830000e2, 0x870000e7, + 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8c0000ee, 0x8b0000ef, 0xa20000f3, 0x930000f4, + 0xf60000f7, 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0x8d002017, 0xfc00207f, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 
0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage865 is the IBM Code Page 865 encoding. +var CodePage865 *Charmap = &codePage865 + +var codePage865 = Charmap{ + name: "IBM Code Page 865", + mib: identifier.IBM865, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 
0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, 
[3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9c0000a3, 0xaf0000a4, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf80000b0, + 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 
0xa70000ba, 0xac0000bc, 0xab0000bd, 0xa80000bf, + 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, 0x900000c9, 0xa50000d1, 0x990000d6, 0x9d0000d8, + 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e4, 0x860000e5, 0x910000e6, + 0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, 0x8c0000ee, + 0x8b0000ef, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0x940000f6, 0xf60000f7, 0x9b0000f8, + 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x980000ff, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage866 is the IBM Code Page 866 encoding. +var CodePage866 *Charmap = &codePage866 + +var codePage866 = Charmap{ + name: "IBM Code Page 866", + mib: identifier.IBM866, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, 
[3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 
0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {2, [3]byte{0xd0, 0x81, 0x00}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x94, 0x00}}, + {2, [3]byte{0xd0, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x9e, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, }, encode: [256]uint32{ - 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, - 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, - 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, - 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, - 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 
0x27000027, - 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, - 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, - 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, - 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, - 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, - 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, - 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, - 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, - 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, - 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, - 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, - 0xff0000a0, 0xfd0000a4, 0xf80000b0, 0xfa0000b7, 0xf0000401, 0xf2000404, 0xf4000407, 0xf600040e, - 0x80000410, 0x81000411, 0x82000412, 0x83000413, 0x84000414, 0x85000415, 0x86000416, 0x87000417, - 0x88000418, 0x89000419, 0x8a00041a, 0x8b00041b, 0x8c00041c, 0x8d00041d, 0x8e00041e, 0x8f00041f, - 0x90000420, 0x91000421, 0x92000422, 0x93000423, 0x94000424, 0x95000425, 0x96000426, 0x97000427, - 0x98000428, 0x99000429, 0x9a00042a, 0x9b00042b, 0x9c00042c, 0x9d00042d, 0x9e00042e, 0x9f00042f, - 0xa0000430, 0xa1000431, 0xa2000432, 0xa3000433, 0xa4000434, 0xa5000435, 0xa6000436, 0xa7000437, - 0xa8000438, 0xa9000439, 0xaa00043a, 0xab00043b, 0xac00043c, 0xad00043d, 0xae00043e, 0xaf00043f, - 0xe0000440, 0xe1000441, 0xe2000442, 0xe3000443, 0xe4000444, 0xe5000445, 0xe6000446, 0xe7000447, - 0xe8000448, 0xe9000449, 0xea00044a, 0xeb00044b, 0xec00044c, 0xed00044d, 0xee00044e, 0xef00044f, - 0xf1000451, 0xf3000454, 0xf5000457, 0xf700045e, 0xfc002116, 0xf9002219, 0xfb00221a, 0xc4002500, - 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, - 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, - 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, - 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, - 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, - 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 
0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xfd0000a4, 0xf80000b0, 0xfa0000b7, 0xf0000401, 0xf2000404, 0xf4000407, 0xf600040e, + 0x80000410, 0x81000411, 0x82000412, 0x83000413, 0x84000414, 0x85000415, 0x86000416, 0x87000417, + 0x88000418, 0x89000419, 0x8a00041a, 0x8b00041b, 0x8c00041c, 0x8d00041d, 0x8e00041e, 0x8f00041f, + 0x90000420, 0x91000421, 0x92000422, 0x93000423, 0x94000424, 0x95000425, 0x96000426, 0x97000427, + 0x98000428, 0x99000429, 0x9a00042a, 0x9b00042b, 0x9c00042c, 0x9d00042d, 0x9e00042e, 0x9f00042f, + 0xa0000430, 0xa1000431, 0xa2000432, 0xa3000433, 0xa4000434, 0xa5000435, 0xa6000436, 0xa7000437, + 0xa8000438, 0xa9000439, 0xaa00043a, 0xab00043b, 0xac00043c, 0xad00043d, 0xae00043e, 0xaf00043f, + 0xe0000440, 0xe1000441, 0xe2000442, 0xe3000443, 0xe4000444, 0xe5000445, 0xe6000446, 0xe7000447, + 0xe8000448, 0xe9000449, 0xea00044a, 0xeb00044b, 0xec00044c, 0xed00044d, 0xee00044e, 0xef00044f, + 0xf1000451, 0xf3000454, 0xf5000457, 0xf700045e, 0xfc002116, 0xf9002219, 0xfb00221a, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage1047 is the IBM Code Page 1047 encoding. 
+var CodePage1047 *Charmap = &codePage1047 + +var codePage1047 = Charmap{ + name: "IBM Code Page 1047", + mib: identifier.IBM1047, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {1, [3]byte{0x5e, 0x00, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, 
[3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc3, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 
0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xad00005b, 0xe000005c, 0xbd00005d, 0x5f00005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 
0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0x9f0000a4, 0xb20000a5, 0x6a0000a6, 0xb50000a7, + 0xbb0000a8, 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0xb00000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, + 0x900000b0, 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, + 0x9d0000b8, 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, + 0x640000c0, 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, + 0x740000c8, 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, + 0xac0000d0, 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, + 0x800000d8, 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xba0000dd, 0xae0000de, 0x590000df, + 0x440000e0, 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, + 0x540000e8, 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, + 0x8c0000f0, 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, + 0x700000f8, 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, + }, +} + +// CodePage1140 is the IBM Code Page 1140 encoding. +var CodePage1140 *Charmap = &codePage1140 + +var codePage1140 = Charmap{ + name: "IBM Code Page 1140", + mib: identifier.IBM01140, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 
0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xac, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, 
[3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {1, [3]byte{0x5b, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 
0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xba00005b, 0xe000005c, 0xbb00005d, 0xb000005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0xb20000a5, 0x6a0000a6, 0xb50000a7, 0xbd0000a8, + 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0x5f0000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, 0x900000b0, + 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, 0x9d0000b8, + 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, 0x640000c0, + 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, 0x740000c8, + 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, 0xac0000d0, + 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, 0x800000d8, + 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xad0000dd, 0xae0000de, 0x590000df, 0x440000e0, + 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, 0x540000e8, + 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, 0x8c0000f0, + 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, 0x700000f8, + 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, 0x9f0020ac, }, } // ISO8859_1 is the ISO 8859-1 encoding. -var ISO8859_1 encoding.Encoding = &iso8859_1 +var ISO8859_1 *Charmap = &iso8859_1 -var iso8859_1 = charmap{ +var iso8859_1 = Charmap{ name: "ISO 8859-1", mib: identifier.ISOLatin1, asciiSuperset: true, @@ -1408,9 +2458,9 @@ var iso8859_1 = charmap{ } // ISO8859_2 is the ISO 8859-2 encoding. -var ISO8859_2 encoding.Encoding = &iso8859_2 +var ISO8859_2 *Charmap = &iso8859_2 -var iso8859_2 = charmap{ +var iso8859_2 = Charmap{ name: "ISO 8859-2", mib: identifier.ISOLatin2, asciiSuperset: true, @@ -1583,9 +2633,9 @@ var iso8859_2 = charmap{ } // ISO8859_3 is the ISO 8859-3 encoding. -var ISO8859_3 encoding.Encoding = &iso8859_3 +var ISO8859_3 *Charmap = &iso8859_3 -var iso8859_3 = charmap{ +var iso8859_3 = Charmap{ name: "ISO 8859-3", mib: identifier.ISOLatin3, asciiSuperset: true, @@ -1758,9 +2808,9 @@ var iso8859_3 = charmap{ } // ISO8859_4 is the ISO 8859-4 encoding. 
-var ISO8859_4 encoding.Encoding = &iso8859_4 +var ISO8859_4 *Charmap = &iso8859_4 -var iso8859_4 = charmap{ +var iso8859_4 = Charmap{ name: "ISO 8859-4", mib: identifier.ISOLatin4, asciiSuperset: true, @@ -1933,9 +2983,9 @@ var iso8859_4 = charmap{ } // ISO8859_5 is the ISO 8859-5 encoding. -var ISO8859_5 encoding.Encoding = &iso8859_5 +var ISO8859_5 *Charmap = &iso8859_5 -var iso8859_5 = charmap{ +var iso8859_5 = Charmap{ name: "ISO 8859-5", mib: identifier.ISOLatinCyrillic, asciiSuperset: true, @@ -2108,9 +3158,9 @@ var iso8859_5 = charmap{ } // ISO8859_6 is the ISO 8859-6 encoding. -var ISO8859_6 encoding.Encoding = &iso8859_6 +var ISO8859_6 *Charmap = &iso8859_6 -var iso8859_6 = charmap{ +var iso8859_6 = Charmap{ name: "ISO 8859-6", mib: identifier.ISOLatinArabic, asciiSuperset: true, @@ -2283,9 +3333,9 @@ var iso8859_6 = charmap{ } // ISO8859_7 is the ISO 8859-7 encoding. -var ISO8859_7 encoding.Encoding = &iso8859_7 +var ISO8859_7 *Charmap = &iso8859_7 -var iso8859_7 = charmap{ +var iso8859_7 = Charmap{ name: "ISO 8859-7", mib: identifier.ISOLatinGreek, asciiSuperset: true, @@ -2458,9 +3508,9 @@ var iso8859_7 = charmap{ } // ISO8859_8 is the ISO 8859-8 encoding. -var ISO8859_8 encoding.Encoding = &iso8859_8 +var ISO8859_8 *Charmap = &iso8859_8 -var iso8859_8 = charmap{ +var iso8859_8 = Charmap{ name: "ISO 8859-8", mib: identifier.ISOLatinHebrew, asciiSuperset: true, @@ -2632,10 +3682,185 @@ var iso8859_8 = charmap{ }, } +// ISO8859_9 is the ISO 8859-9 encoding. +var ISO8859_9 *Charmap = &iso8859_9 + +var iso8859_9 = Charmap{ + name: "ISO 8859-9", + mib: identifier.ISOLatin5, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, 
[3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {2, [3]byte{0xc2, 0x97, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 
0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc4, 0xb0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 
0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x80000080, 0x81000081, 0x82000082, 0x83000083, 0x84000084, 0x85000085, 0x86000086, 0x87000087, + 0x88000088, 0x89000089, 0x8a00008a, 0x8b00008b, 0x8c00008c, 0x8d00008d, 0x8e00008e, 0x8f00008f, + 0x90000090, 0x91000091, 0x92000092, 0x93000093, 0x94000094, 0x95000095, 0x96000096, 0x97000097, + 0x98000098, 0x99000099, 0x9a00009a, 0x9b00009b, 0x9c00009c, 0x9d00009d, 0x9e00009e, 0x9f00009f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xd80000d8, + 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, + 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, + 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, + 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, + 0xfc0000fc, 0xff0000ff, 0xd000011e, 0xf000011f, 0xdd000130, 0xfd000131, 0xde00015e, 0xfe00015f, + }, +} + // ISO8859_10 is the ISO 8859-10 encoding. -var ISO8859_10 encoding.Encoding = &iso8859_10 +var ISO8859_10 *Charmap = &iso8859_10 -var iso8859_10 = charmap{ +var iso8859_10 = Charmap{ name: "ISO 8859-10", mib: identifier.ISOLatin6, asciiSuperset: true, @@ -2808,9 +4033,9 @@ var iso8859_10 = charmap{ } // ISO8859_13 is the ISO 8859-13 encoding. -var ISO8859_13 encoding.Encoding = &iso8859_13 +var ISO8859_13 *Charmap = &iso8859_13 -var iso8859_13 = charmap{ +var iso8859_13 = Charmap{ name: "ISO 8859-13", mib: identifier.ISO885913, asciiSuperset: true, @@ -2983,9 +4208,9 @@ var iso8859_13 = charmap{ } // ISO8859_14 is the ISO 8859-14 encoding. 
-var ISO8859_14 encoding.Encoding = &iso8859_14 +var ISO8859_14 *Charmap = &iso8859_14 -var iso8859_14 = charmap{ +var iso8859_14 = Charmap{ name: "ISO 8859-14", mib: identifier.ISO885914, asciiSuperset: true, @@ -3158,9 +4383,9 @@ var iso8859_14 = charmap{ } // ISO8859_15 is the ISO 8859-15 encoding. -var ISO8859_15 encoding.Encoding = &iso8859_15 +var ISO8859_15 *Charmap = &iso8859_15 -var iso8859_15 = charmap{ +var iso8859_15 = Charmap{ name: "ISO 8859-15", mib: identifier.ISO885915, asciiSuperset: true, @@ -3333,9 +4558,9 @@ var iso8859_15 = charmap{ } // ISO8859_16 is the ISO 8859-16 encoding. -var ISO8859_16 encoding.Encoding = &iso8859_16 +var ISO8859_16 *Charmap = &iso8859_16 -var iso8859_16 = charmap{ +var iso8859_16 = Charmap{ name: "ISO 8859-16", mib: identifier.ISO885916, asciiSuperset: true, @@ -3508,9 +4733,9 @@ var iso8859_16 = charmap{ } // KOI8R is the KOI8-R encoding. -var KOI8R encoding.Encoding = &koi8R +var KOI8R *Charmap = &koi8R -var koi8R = charmap{ +var koi8R = Charmap{ name: "KOI8-R", mib: identifier.KOI8R, asciiSuperset: true, @@ -3683,9 +4908,9 @@ var koi8R = charmap{ } // KOI8U is the KOI8-U encoding. -var KOI8U encoding.Encoding = &koi8U +var KOI8U *Charmap = &koi8U -var koi8U = charmap{ +var koi8U = Charmap{ name: "KOI8-U", mib: identifier.KOI8U, asciiSuperset: true, @@ -3858,9 +5083,9 @@ var koi8U = charmap{ } // Macintosh is the Macintosh encoding. -var Macintosh encoding.Encoding = &macintosh +var Macintosh *Charmap = &macintosh -var macintosh = charmap{ +var macintosh = Charmap{ name: "Macintosh", mib: identifier.Macintosh, asciiSuperset: true, @@ -4033,9 +5258,9 @@ var macintosh = charmap{ } // MacintoshCyrillic is the Macintosh Cyrillic encoding. -var MacintoshCyrillic encoding.Encoding = &macintoshCyrillic +var MacintoshCyrillic *Charmap = &macintoshCyrillic -var macintoshCyrillic = charmap{ +var macintoshCyrillic = Charmap{ name: "Macintosh Cyrillic", mib: identifier.MacintoshCyrillic, asciiSuperset: true, @@ -4208,9 +5433,9 @@ var macintoshCyrillic = charmap{ } // Windows874 is the Windows 874 encoding. -var Windows874 encoding.Encoding = &windows874 +var Windows874 *Charmap = &windows874 -var windows874 = charmap{ +var windows874 = Charmap{ name: "Windows 874", mib: identifier.Windows874, asciiSuperset: true, @@ -4383,9 +5608,9 @@ var windows874 = charmap{ } // Windows1250 is the Windows 1250 encoding. -var Windows1250 encoding.Encoding = &windows1250 +var Windows1250 *Charmap = &windows1250 -var windows1250 = charmap{ +var windows1250 = Charmap{ name: "Windows 1250", mib: identifier.Windows1250, asciiSuperset: true, @@ -4558,9 +5783,9 @@ var windows1250 = charmap{ } // Windows1251 is the Windows 1251 encoding. -var Windows1251 encoding.Encoding = &windows1251 +var Windows1251 *Charmap = &windows1251 -var windows1251 = charmap{ +var windows1251 = Charmap{ name: "Windows 1251", mib: identifier.Windows1251, asciiSuperset: true, @@ -4733,9 +5958,9 @@ var windows1251 = charmap{ } // Windows1252 is the Windows 1252 encoding. -var Windows1252 encoding.Encoding = &windows1252 +var Windows1252 *Charmap = &windows1252 -var windows1252 = charmap{ +var windows1252 = Charmap{ name: "Windows 1252", mib: identifier.Windows1252, asciiSuperset: true, @@ -4908,9 +6133,9 @@ var windows1252 = charmap{ } // Windows1253 is the Windows 1253 encoding. 
-var Windows1253 encoding.Encoding = &windows1253 +var Windows1253 *Charmap = &windows1253 -var windows1253 = charmap{ +var windows1253 = Charmap{ name: "Windows 1253", mib: identifier.Windows1253, asciiSuperset: true, @@ -5083,9 +6308,9 @@ var windows1253 = charmap{ } // Windows1254 is the Windows 1254 encoding. -var Windows1254 encoding.Encoding = &windows1254 +var Windows1254 *Charmap = &windows1254 -var windows1254 = charmap{ +var windows1254 = Charmap{ name: "Windows 1254", mib: identifier.Windows1254, asciiSuperset: true, @@ -5258,9 +6483,9 @@ var windows1254 = charmap{ } // Windows1255 is the Windows 1255 encoding. -var Windows1255 encoding.Encoding = &windows1255 +var Windows1255 *Charmap = &windows1255 -var windows1255 = charmap{ +var windows1255 = Charmap{ name: "Windows 1255", mib: identifier.Windows1255, asciiSuperset: true, @@ -5368,7 +6593,7 @@ var windows1255 = charmap{ {2, [3]byte{0xd6, 0xb4, 0x00}}, {2, [3]byte{0xd6, 0xb5, 0x00}}, {2, [3]byte{0xd6, 0xb6, 0x00}}, {2, [3]byte{0xd6, 0xb7, 0x00}}, {2, [3]byte{0xd6, 0xb8, 0x00}}, {2, [3]byte{0xd6, 0xb9, 0x00}}, - {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd6, 0xbb, 0x00}}, + {2, [3]byte{0xd6, 0xba, 0x00}}, {2, [3]byte{0xd6, 0xbb, 0x00}}, {2, [3]byte{0xd6, 0xbc, 0x00}}, {2, [3]byte{0xd6, 0xbd, 0x00}}, {2, [3]byte{0xd6, 0xbe, 0x00}}, {2, [3]byte{0xd6, 0xbf, 0x00}}, {2, [3]byte{0xd7, 0x80, 0x00}}, {2, [3]byte{0xd7, 0x81, 0x00}}, @@ -5418,24 +6643,24 @@ var windows1255 = charmap{ 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, 0xaa0000d7, 0xba0000f7, 0x83000192, 0x880002c6, 0x980002dc, 0xc00005b0, 0xc10005b1, 0xc20005b2, 0xc30005b3, 0xc40005b4, 0xc50005b5, - 0xc60005b6, 0xc70005b7, 0xc80005b8, 0xc90005b9, 0xcb0005bb, 0xcc0005bc, 0xcd0005bd, 0xce0005be, - 0xcf0005bf, 0xd00005c0, 0xd10005c1, 0xd20005c2, 0xd30005c3, 0xe00005d0, 0xe10005d1, 0xe20005d2, - 0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9, 0xea0005da, - 0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1, 0xf20005e2, - 0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9, 0xfa0005ea, - 0xd40005f0, 0xd50005f1, 0xd60005f2, 0xd70005f3, 0xd80005f4, 0xfd00200e, 0xfe00200f, 0x96002013, - 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, - 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xa40020aa, 0x800020ac, - 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0xc60005b6, 0xc70005b7, 0xc80005b8, 0xc90005b9, 0xca0005ba, 0xcb0005bb, 0xcc0005bc, 0xcd0005bd, + 0xce0005be, 0xcf0005bf, 0xd00005c0, 0xd10005c1, 0xd20005c2, 0xd30005c3, 0xe00005d0, 0xe10005d1, + 0xe20005d2, 0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9, + 0xea0005da, 0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1, + 0xf20005e2, 0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9, + 0xfa0005ea, 0xd40005f0, 0xd50005f1, 0xd60005f2, 0xd70005f3, 0xd80005f4, 0xfd00200e, 0xfe00200f, + 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, + 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xa40020aa, + 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 
0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, }, } // Windows1256 is the Windows 1256 encoding. -var Windows1256 encoding.Encoding = &windows1256 +var Windows1256 *Charmap = &windows1256 -var windows1256 = charmap{ +var windows1256 = Charmap{ name: "Windows 1256", mib: identifier.Windows1256, asciiSuperset: true, @@ -5608,9 +6833,9 @@ var windows1256 = charmap{ } // Windows1257 is the Windows 1257 encoding. -var Windows1257 encoding.Encoding = &windows1257 +var Windows1257 *Charmap = &windows1257 -var windows1257 = charmap{ +var windows1257 = Charmap{ name: "Windows 1257", mib: identifier.Windows1257, asciiSuperset: true, @@ -5783,9 +7008,9 @@ var windows1257 = charmap{ } // Windows1258 is the Windows 1258 encoding. -var Windows1258 encoding.Encoding = &windows1258 +var Windows1258 *Charmap = &windows1258 -var windows1258 = charmap{ +var windows1258 = Charmap{ name: "Windows 1258", mib: identifier.Windows1258, asciiSuperset: true, @@ -5960,9 +7185,9 @@ var windows1258 = charmap{ // XUserDefined is the X-User-Defined encoding. // // It is defined at http://encoding.spec.whatwg.org/#x-user-defined -var XUserDefined encoding.Encoding = &xUserDefined +var XUserDefined *Charmap = &xUserDefined -var xUserDefined = charmap{ +var xUserDefined = Charmap{ name: "X-User-Defined", mib: identifier.XUserDefined, asciiSuperset: true, @@ -6134,13 +7359,19 @@ var xUserDefined = charmap{ }, } var listAll = []encoding.Encoding{ + CodePage037, CodePage437, CodePage850, CodePage852, CodePage855, CodePage858, + CodePage860, CodePage862, + CodePage863, + CodePage865, CodePage866, + CodePage1047, + CodePage1140, ISO8859_1, ISO8859_2, ISO8859_3, @@ -6153,6 +7384,7 @@ var listAll = []encoding.Encoding{ ISO8859_8, ISO8859_8E, ISO8859_8I, + ISO8859_9, ISO8859_10, ISO8859_13, ISO8859_14, @@ -6175,4 +7407,4 @@ var listAll = []encoding.Encoding{ XUserDefined, } -// Total table size 72520 bytes (70KiB); checksum: 811C9DC5 +// Total table size 87024 bytes (84KiB); checksum: 811C9DC5 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/encoding.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/encoding.go index 2a7d9529..221f175c 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/encoding.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/encoding.go @@ -52,7 +52,7 @@ type Decoder struct { } // Bytes converts the given encoded bytes to UTF-8. It returns the converted -// bytes or 0, err if any error occurred. +// bytes or nil, err if any error occurred. func (d *Decoder) Bytes(b []byte) ([]byte, error) { b, _, err := transform.Bytes(d, b) if err != nil { @@ -62,7 +62,7 @@ func (d *Decoder) Bytes(b []byte) ([]byte, error) { } // String converts the given encoded string to UTF-8. It returns the converted -// string or 0, err if any error occurred. +// string or "", err if any error occurred. func (d *Decoder) String(s string) (string, error) { s, _, err := transform.String(d, s) if err != nil { @@ -95,7 +95,7 @@ type Encoder struct { _ struct{} } -// Bytes converts bytes from UTF-8. It returns the converted bytes or 0, err if +// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if // any error occurred. 
func (e *Encoder) Bytes(b []byte) ([]byte, error) { b, _, err := transform.Bytes(e, b) @@ -106,7 +106,7 @@ func (e *Encoder) Bytes(b []byte) ([]byte, error) { } // String converts a string from UTF-8. It returns the converted string or -// 0, err if any error occurred. +// "", err if any error occurred. func (e *Encoder) String(s string) (string, error) { s, _, err := transform.String(e, s) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/gen.go deleted file mode 100644 index d10e5e09..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/gen.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type group struct { - Encodings []struct { - Labels []string - Name string - } -} - -func main() { - gen.Init() - - r := gen.Open("http://www.w3.org/TR", "w3", "encoding/indexes/encodings.json") - var groups []group - if err := json.NewDecoder(r).Decode(&groups); err != nil { - log.Fatalf("Error reading encodings.json: %v", err) - } - - w := &bytes.Buffer{} - fmt.Fprintln(w, "type htmlEncoding byte") - fmt.Fprintln(w, "const (") - for i, g := range groups { - for _, e := range g.Encodings { - name := consts[e.Name] - if name == "" { - log.Fatalf("No const defined for %s.", e.Name) - } - if i == 0 { - fmt.Fprintf(w, "%s htmlEncoding = iota\n", name) - } else { - fmt.Fprintf(w, "%s\n", name) - } - } - } - fmt.Fprintln(w, "numEncodings") - fmt.Fprint(w, ")\n\n") - - fmt.Fprintln(w, "var canonical = [numEncodings]string{") - for _, g := range groups { - for _, e := range g.Encodings { - fmt.Fprintf(w, "%q,\n", e.Name) - } - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{") - for _, g := range groups { - for _, e := range g.Encodings { - for _, l := range e.Labels { - fmt.Fprintf(w, "%q: %s,\n", l, consts[e.Name]) - } - } - } - fmt.Fprint(w, "}\n\n") - - var tags []string - fmt.Fprintln(w, "var localeMap = []htmlEncoding{") - for _, loc := range locales { - tags = append(tags, loc.tag) - fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag) - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " ")) - - gen.WriteGoFile("tables.go", "htmlindex", w.Bytes()) -} - -// consts maps canonical encoding name to internal constant. 
-var consts = map[string]string{ - "utf-8": "utf8", - "ibm866": "ibm866", - "iso-8859-2": "iso8859_2", - "iso-8859-3": "iso8859_3", - "iso-8859-4": "iso8859_4", - "iso-8859-5": "iso8859_5", - "iso-8859-6": "iso8859_6", - "iso-8859-7": "iso8859_7", - "iso-8859-8": "iso8859_8", - "iso-8859-8-i": "iso8859_8I", - "iso-8859-10": "iso8859_10", - "iso-8859-13": "iso8859_13", - "iso-8859-14": "iso8859_14", - "iso-8859-15": "iso8859_15", - "iso-8859-16": "iso8859_16", - "koi8-r": "koi8r", - "koi8-u": "koi8u", - "macintosh": "macintosh", - "windows-874": "windows874", - "windows-1250": "windows1250", - "windows-1251": "windows1251", - "windows-1252": "windows1252", - "windows-1253": "windows1253", - "windows-1254": "windows1254", - "windows-1255": "windows1255", - "windows-1256": "windows1256", - "windows-1257": "windows1257", - "windows-1258": "windows1258", - "x-mac-cyrillic": "macintoshCyrillic", - "gbk": "gbk", - "gb18030": "gb18030", - // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG - "big5": "big5", - "euc-jp": "eucjp", - "iso-2022-jp": "iso2022jp", - "shift_jis": "shiftJIS", - "euc-kr": "euckr", - "replacement": "replacement", - "utf-16be": "utf16be", - "utf-16le": "utf16le", - "x-user-defined": "xUserDefined", -} - -// locales is taken from -// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm. -var locales = []struct{ tag, name string }{ - {"und", "windows-1252"}, // The default value. - {"ar", "windows-1256"}, - {"ba", "windows-1251"}, - {"be", "windows-1251"}, - {"bg", "windows-1251"}, - {"cs", "windows-1250"}, - {"el", "iso-8859-7"}, - {"et", "windows-1257"}, - {"fa", "windows-1256"}, - {"he", "windows-1255"}, - {"hr", "windows-1250"}, - {"hu", "iso-8859-2"}, - {"ja", "shift_jis"}, - {"kk", "windows-1251"}, - {"ko", "euc-kr"}, - {"ku", "windows-1254"}, - {"ky", "windows-1251"}, - {"lt", "windows-1257"}, - {"lv", "windows-1257"}, - {"mk", "windows-1251"}, - {"pl", "iso-8859-2"}, - {"ru", "windows-1251"}, - {"sah", "windows-1251"}, - {"sk", "windows-1250"}, - {"sl", "iso-8859-2"}, - {"sr", "windows-1251"}, - {"tg", "windows-1251"}, - {"th", "windows-874"}, - {"tr", "windows-1254"}, - {"tt", "windows-1251"}, - {"uk", "windows-1251"}, - {"vi", "windows-1258"}, - {"zh-hans", "gb18030"}, - {"zh-hant", "big5"}, -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go index 70f2ac4b..bdc7d15d 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go @@ -50,7 +50,7 @@ func LanguageDefault(tag language.Tag) string { for _, t := range strings.Split(locales, " ") { tags = append(tags, language.MustParse(t)) } - matcher = language.NewMatcher(tags) + matcher = language.NewMatcher(tags, language.PreferSameScript(true)) }) _, i, _ := matcher.Match(tag) return canonical[localeMap[i]] // Default is Windows-1252. 
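The hunks above update the vendored x/text charmap tables: the single-byte encodings (the ISO 8859 family, KOI8-R/U, the Macintosh and Windows code pages, and X-User-Defined) are now declared with the exported *Charmap type instead of being hidden behind encoding.Encoding variables, several EBCDIC/DOS code pages and ISO 8859-9 are added to listAll, and htmlindex now builds its language matcher with language.PreferSameScript(true). The sketch below is not part of the diff; it is a minimal illustration, assuming the vendored golang.org/x/text packages are importable under their canonical paths, of how callers consume these tables — the exported *Charmap values still satisfy encoding.Encoding, so existing decoder code keeps working. The sample bytes and printed values are illustrative only.

package main

import (
	"fmt"

	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/encoding/htmlindex"
	"golang.org/x/text/language"
)

func main() {
	// charmap.Windows1254 is now typed *charmap.Charmap but still implements
	// encoding.Encoding, so NewDecoder works exactly as before.
	dec := charmap.Windows1254.NewDecoder()
	out, err := dec.Bytes([]byte{0xde, 0x65, 0x6e}) // 0xDE maps to U+015E (Ş) in Windows-1254
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(string(out)) // "Şen"

	// htmlindex.LanguageDefault reports the default legacy encoding for a
	// locale; after this change its matcher prefers tags in the same script.
	fmt.Println(htmlindex.LanguageDefault(language.MustParse("tr"))) // "windows-1254"
}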
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/tables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/tables.go index 78950d3c..9d6b4315 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/tables.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/htmlindex/tables.go @@ -1,4 +1,4 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package htmlindex @@ -313,7 +313,7 @@ var nameMap = map[string]htmlEncoding{ } var localeMap = []htmlEncoding{ - windows1252, // und + windows1252, // und_Latn windows1256, // ar windows1251, // ba windows1251, // be @@ -349,4 +349,4 @@ var localeMap = []htmlEncoding{ big5, // zh-hant } -const locales = "und ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant" +const locales = "und_Latn ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant" diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/gen.go deleted file mode 100644 index 0c8eba7e..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type registry struct { - XMLName xml.Name `xml:"registry"` - Updated string `xml:"updated"` - Registry []struct { - ID string `xml:"id,attr"` - Record []struct { - Name string `xml:"name"` - Xref []struct { - Type string `xml:"type,attr"` - Data string `xml:"data,attr"` - } `xml:"xref"` - Desc struct { - Data string `xml:",innerxml"` - // Any []struct { - // Data string `xml:",chardata"` - // } `xml:",any"` - // Data string `xml:",chardata"` - } `xml:"description,"` - MIB string `xml:"value"` - Alias []string `xml:"alias"` - MIME string `xml:"preferred_alias"` - } `xml:"record"` - } `xml:"registry"` -} - -func main() { - r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") - reg := ®istry{} - if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { - log.Fatalf("Error decoding charset registry: %v", err) - } - if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { - log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) - } - - w := &bytes.Buffer{} - fmt.Fprintf(w, "const (\n") - for _, rec := range reg.Registry[0].Record { - constName := "" - for _, a := range rec.Alias { - if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { - // Some of the constant definitions have comments in them. Strip those. - constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) - } - } - if constName == "" { - switch rec.MIB { - case "2085": - constName = "HZGB2312" // Not listed as alias for some reason. 
- default: - log.Fatalf("No cs alias defined for %s.", rec.MIB) - } - } - if rec.MIME != "" { - rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) - } - fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) - if len(rec.Desc.Data) > 0 { - fmt.Fprint(w, "// ") - d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) - inElem := true - attr := "" - for { - t, err := d.Token() - if err != nil { - if err != io.EOF { - log.Fatal(err) - } - break - } - switch x := t.(type) { - case xml.CharData: - attr = "" // Don't need attribute info. - a := bytes.Split([]byte(x), []byte("\n")) - for i, b := range a { - if b = bytes.TrimSpace(b); len(b) != 0 { - if !inElem && i > 0 { - fmt.Fprint(w, "\n// ") - } - inElem = false - fmt.Fprintf(w, "%s ", string(b)) - } - } - case xml.StartElement: - if x.Name.Local == "xref" { - inElem = true - use := false - for _, a := range x.Attr { - if a.Name.Local == "type" { - use = use || a.Value != "person" - } - if a.Name.Local == "data" && use { - attr = a.Value + " " - } - } - } - case xml.EndElement: - inElem = false - fmt.Fprint(w, attr) - } - } - fmt.Fprint(w, "\n") - } - for _, x := range rec.Xref { - switch x.Type { - case "rfc": - fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) - case "uri": - fmt.Fprintf(w, "// Reference: %s\n", x.Data) - } - } - fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) - fmt.Fprintln(w) - } - fmt.Fprintln(w, ")") - - gen.WriteGoFile("mib.go", "identifier", w.Bytes()) -} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go index 2a2da0ef..7351b4ef 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go @@ -36,8 +36,8 @@ package identifier // - http://www.ietf.org/rfc/rfc2978.txt // - http://www.unicode.org/reports/tr22/ // - http://www.w3.org/TR/encoding/ -// - http://www.w3.org/TR/encoding/indexes/encodings.json // - https://encoding.spec.whatwg.org/ +// - https://encoding.spec.whatwg.org/encodings.json // - https://tools.ietf.org/html/rfc6657#section-5 // Interface can be implemented by Encodings to define the CCS or CES for which diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/mib.go index 915abfa2..768842b0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/mib.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -1,4 +1,4 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
package identifier diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/eucjp.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/eucjp.go index 40f9b05f..79313fa5 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/eucjp.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/eucjp.go @@ -5,7 +5,6 @@ package japanese import ( - "errors" "unicode/utf8" "golang.org/x/text/encoding" @@ -23,10 +22,9 @@ var eucJP = internal.Encoding{ identifier.EUCPkdFmtJapanese, } -var errInvalidEUCJP = errors.New("japanese: invalid EUC-JP encoding") - type eucJPDecoder struct{ transform.NopResetter } +// See https://encoding.spec.whatwg.org/#euc-jp-decoder. func (eucJPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { r, size := rune(0), 0 loop: @@ -37,60 +35,79 @@ loop: case c0 == 0x8e: if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = utf8.RuneError, 1 + break } c1 := src[nSrc+1] - if c1 < 0xa1 || 0xdf < c1 { - err = errInvalidEUCJP - break loop + switch { + case c1 < 0xa1: + r, size = utf8.RuneError, 1 + case c1 > 0xdf: + r, size = utf8.RuneError, 2 + if c1 == 0xff { + size = 1 + } + default: + r, size = rune(c1)+(0xff61-0xa1), 2 } - r, size = rune(c1)+(0xff61-0xa1), 2 - case c0 == 0x8f: if nSrc+2 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = utf8.RuneError, 1 + if p := nSrc + 1; p < len(src) && 0xa1 <= src[p] && src[p] < 0xfe { + size = 2 + } + break } c1 := src[nSrc+1] if c1 < 0xa1 || 0xfe < c1 { - err = errInvalidEUCJP - break loop + r, size = utf8.RuneError, 1 + break } c2 := src[nSrc+2] if c2 < 0xa1 || 0xfe < c2 { - err = errInvalidEUCJP - break loop + r, size = utf8.RuneError, 2 + break } - r, size = '\ufffd', 3 + r, size = utf8.RuneError, 3 if i := int(c1-0xa1)*94 + int(c2-0xa1); i < len(jis0212Decode) { r = rune(jis0212Decode[i]) if r == 0 { - r = '\ufffd' + r = utf8.RuneError } } case 0xa1 <= c0 && c0 <= 0xfe: if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = utf8.RuneError, 1 + break } c1 := src[nSrc+1] if c1 < 0xa1 || 0xfe < c1 { - err = errInvalidEUCJP - break loop + r, size = utf8.RuneError, 1 + break } - r, size = '\ufffd', 2 + r, size = utf8.RuneError, 2 if i := int(c0-0xa1)*94 + int(c1-0xa1); i < len(jis0208Decode) { r = rune(jis0208Decode[i]) if r == 0 { - r = '\ufffd' + r = utf8.RuneError } } default: - err = errInvalidEUCJP - break loop + r, size = utf8.RuneError, 1 } if nDst+utf8.RuneLen(r) > len(dst) { @@ -99,9 +116,6 @@ loop: } nDst += utf8.EncodeRune(dst[nDst:], r) } - if atEOF && err == transform.ErrShortSrc { - err = errInvalidEUCJP - } return nDst, nSrc, err } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go index b63e7d5d..613226df 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go @@ -5,7 +5,6 @@ package japanese import ( - "errors" "unicode/utf8" "golang.org/x/text/encoding" @@ -31,8 +30,6 @@ func iso2022JPNewEncoder() transform.Transformer { return new(iso2022JPEncoder) } -var 
errInvalidISO2022JP = errors.New("japanese: invalid ISO-2022-JP encoding") - const ( asciiState = iota katakanaState @@ -50,45 +47,51 @@ func (d *iso2022JPDecoder) Reset() { func (d *iso2022JPDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { r, size := rune(0), 0 -loop: for ; nSrc < len(src); nSrc += size { c0 := src[nSrc] if c0 >= utf8.RuneSelf { - err = errInvalidISO2022JP - break loop + r, size = '\ufffd', 1 + goto write } if c0 == asciiEsc { if nSrc+2 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + // TODO: is it correct to only skip 1?? + r, size = '\ufffd', 1 + goto write } size = 3 c1 := src[nSrc+1] c2 := src[nSrc+2] switch { - case c1 == '$' && (c2 == '@' || c2 == 'B'): + case c1 == '$' && (c2 == '@' || c2 == 'B'): // 0x24 {0x40, 0x42} *d = jis0208State continue - case c1 == '$' && c2 == '(': + case c1 == '$' && c2 == '(': // 0x24 0x28 if nSrc+3 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + r, size = '\ufffd', 1 + goto write } size = 4 - if src[nSrc]+3 == 'D' { + if src[nSrc+3] == 'D' { *d = jis0212State continue } - case c1 == '(' && (c2 == 'B' || c2 == 'J'): + case c1 == '(' && (c2 == 'B' || c2 == 'J'): // 0x28 {0x42, 0x4A} *d = asciiState continue - case c1 == '(' && c2 == 'I': + case c1 == '(' && c2 == 'I': // 0x28 0x49 *d = katakanaState continue } - err = errInvalidISO2022JP - break loop + r, size = '\ufffd', 1 + goto write } switch *d { @@ -97,8 +100,8 @@ loop: case katakanaState: if c0 < 0x21 || 0x60 <= c0 { - err = errInvalidISO2022JP - break loop + r, size = '\ufffd', 1 + goto write } r, size = rune(c0)+(0xff61-0x21), 1 @@ -106,11 +109,14 @@ loop: if c0 == 0x0a { *d = asciiState r, size = rune(c0), 1 - break + goto write } if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + r, size = '\ufffd', 1 + goto write } size = 2 c1 := src[nSrc+1] @@ -121,22 +127,19 @@ loop: r = rune(jis0212Decode[i]) } else { r = '\ufffd' - break + goto write } if r == 0 { r = '\ufffd' } } + write: if nDst+utf8.RuneLen(r) > len(dst) { - err = transform.ErrShortDst - break loop + return nDst, nSrc, transform.ErrShortDst } nDst += utf8.EncodeRune(dst[nDst:], r) } - if atEOF && err == transform.ErrShortSrc { - err = errInvalidISO2022JP - } return nDst, nSrc, err } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/maketables.go deleted file mode 100644 index d6c10deb..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -// TODO: Emoji extensions? 
-// http://www.unicode.org/faq/emoji_dingbats.html -// http://www.unicode.org/Public/UNIDATA/EmojiSources.txt - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -type entry struct { - jisCode, table int -} - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n") - fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n") - - reverse := [65536]entry{} - for i := range reverse { - reverse[i].table = -1 - } - - tables := []struct { - url string - name string - }{ - {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"}, - {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"}, - } - for i, table := range tables { - res, err := http.Get(table.url) - if err != nil { - log.Fatalf("%q: Get: %v", table.url, err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("%q: could not parse %q", table.url, s) - } - if x < 0 || 120*94 <= x { - log.Fatalf("%q: JIS code %d is out of range", table.url, x) - } - mapping[x] = y - if reverse[y].table == -1 { - reverse[y] = entry{jisCode: x, table: i} - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("%q: scanner error: %v", table.url, err) - } - - fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n", - table.name, table.name, table.url) - fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name) - for i, m := range mapping { - if m != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, m) - } - } - fmt.Printf("}\n\n") - } - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v.table == -1 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const (\n") - fmt.Printf("\tjis0208 = 1\n") - fmt.Printf("\tjis0212 = 2\n") - fmt.Printf("\tcodeMask = 0x7f\n") - fmt.Printf("\tcodeShift = 7\n") - fmt.Printf("\ttableShift = 14\n") - fmt.Printf(")\n\n") - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("//\n") - fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n") - fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n") - fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n") - fmt.Printf("// JIS code (94*j1 + j2) within that table.\n") - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x.table == -1 { - continue - } - fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n", - j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. 
-type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/shiftjis.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/shiftjis.go index 099aecc3..16fd8a6e 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/shiftjis.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/japanese/shiftjis.go @@ -5,7 +5,6 @@ package japanese import ( - "errors" "unicode/utf8" "golang.org/x/text/encoding" @@ -24,8 +23,6 @@ var shiftJIS = internal.Encoding{ identifier.ShiftJIS, } -var errInvalidShiftJIS = errors.New("japanese: invalid Shift JIS encoding") - type shiftJISDecoder struct{ transform.NopResetter } func (shiftJISDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { @@ -48,28 +45,32 @@ loop: c0 = 2*c0 - 0x21 if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = '\ufffd', 1 + goto write } c1 := src[nSrc+1] switch { case c1 < 0x40: - err = errInvalidShiftJIS - break loop + r, size = '\ufffd', 1 // c1 is ASCII so output on next round + goto write case c1 < 0x7f: c0-- c1 -= 0x40 case c1 == 0x7f: - err = errInvalidShiftJIS - break loop + r, size = '\ufffd', 1 // c1 is ASCII so output on next round + goto write case c1 < 0x9f: c0-- c1 -= 0x41 case c1 < 0xfd: c1 -= 0x9f default: - err = errInvalidShiftJIS - break loop + r, size = '\ufffd', 2 + goto write } r, size = '\ufffd', 2 if i := int(c0)*94 + int(c1); i < len(jis0208Decode) { @@ -79,20 +80,19 @@ loop: } } + case c0 == 0x80: + r, size = 0x80, 1 + default: - err = errInvalidShiftJIS - break loop + r, size = '\ufffd', 1 } - + write: if nDst+utf8.RuneLen(r) > len(dst) { err = transform.ErrShortDst break loop } nDst += utf8.EncodeRune(dst[nDst:], r) } - if atEOF && err == transform.ErrShortSrc { - err = errInvalidShiftJIS - } return nDst, nSrc, err } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/korean/euckr.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/korean/euckr.go index a4b9ff17..034337f5 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/korean/euckr.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/korean/euckr.go @@ -5,7 +5,6 @@ package korean import ( - "errors" "unicode/utf8" "golang.org/x/text/encoding" @@ -26,8 +25,6 @@ var eucKR = internal.Encoding{ identifier.EUCKR, } -var errInvalidEUCKR = errors.New("korean: invalid EUC-KR encoding") - type eucKRDecoder struct{ transform.NopResetter } func (eucKRDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { @@ -40,10 +37,15 @@ loop: case 0x81 <= c0 && c0 < 0xff: if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = utf8.RuneError, 1 + break } c1 := src[nSrc+1] + size = 2 if c0 < 0xc7 { r = 178 * rune(c0-0x81) switch { @@ -54,39 +56,36 @@ loop: case 0x81 <= c1 && c1 < 0xff: r += rune(c1) - (0x81 - 2*26) default: - err = errInvalidEUCKR - break loop + goto decError } } else if 0xa1 <= c1 && c1 < 0xff { r = 178*(0xc7-0x81) + rune(c0-0xc7)*94 + rune(c1-0xa1) } else { - err = errInvalidEUCKR - break loop + goto decError 
} if int(r) < len(decode) { r = rune(decode[r]) - if r == 0 { - r = '\ufffd' + if r != 0 { + break } - } else { - r = '\ufffd' } - size = 2 + decError: + r = utf8.RuneError + if c1 < utf8.RuneSelf { + size = 1 + } default: - err = errInvalidEUCKR - break loop + r, size = utf8.RuneError, 1 + break } if nDst+utf8.RuneLen(r) > len(dst) { err = transform.ErrShortDst - break loop + break } nDst += utf8.EncodeRune(dst[nDst:], r) } - if atEOF && err == transform.ErrShortSrc { - err = errInvalidEUCKR - } return nDst, nSrc, err } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/korean/maketables.go deleted file mode 100644 index c84034fb..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/korean/maketables.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n") - fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x { - log.Fatalf("EUC-KR code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := uint16(0), uint16(0) - if x < 178*(0xc7-0x81) { - c0 = uint16(x/178) + 0x81 - c1 = uint16(x % 178) - switch { - case c1 < 1*26: - c1 += 0x41 - case c1 < 2*26: - c1 += 0x47 - default: - c1 += 0x4d - } - } else { - x -= 178 * (0xc7 - 0x81) - c0 = uint16(x/94) + 0xc7 - c1 = uint16(x%94) + 0xa1 - } - reverse[y] = c0<<8 | c1 - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go index e0b15bbc..b89c45b0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go @@ -5,7 +5,6 @@ package simplifiedchinese import ( - "errors" "unicode/utf8" "golang.org/x/text/encoding" @@ -40,11 +39,6 @@ var gbk18030 = internal.Encoding{ identifier.GB18030, } -var ( - errInvalidGB18030 = errors.New("simplifiedchinese: invalid GB18030 encoding") - errInvalidGBK = errors.New("simplifiedchinese: invalid GBK encoding") -) - type gbkDecoder struct { transform.NopResetter gb18030 bool @@ -66,8 +60,12 @@ loop: case c0 < 0xff: if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = utf8.RuneError, 1 + goto write } c1 := src[nSrc+1] switch { @@ -77,18 +75,24 @@ loop: c1 -= 0x41 case d.gb18030 && 0x30 <= c1 && c1 < 0x40: if nSrc+3 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + // The second byte here is always ASCII, so we can set size + // to 1 in all cases. 
+ r, size = utf8.RuneError, 1 + goto write } c2 := src[nSrc+2] if c2 < 0x81 || 0xff <= c2 { - err = errInvalidGB18030 - break loop + r, size = utf8.RuneError, 1 + goto write } c3 := src[nSrc+3] if c3 < 0x30 || 0x3a <= c3 { - err = errInvalidGB18030 - break loop + r, size = utf8.RuneError, 1 + goto write } size = 4 r = ((rune(c0-0x81)*10+rune(c1-0x30))*126+rune(c2-0x81))*10 + rune(c3-0x30) @@ -109,17 +113,13 @@ loop: r -= 189000 if 0 <= r && r < 0x100000 { r += 0x10000 - goto write - } - err = errInvalidGB18030 - break loop - default: - if d.gb18030 { - err = errInvalidGB18030 } else { - err = errInvalidGBK + r, size = utf8.RuneError, 1 } - break loop + goto write + default: + r, size = utf8.RuneError, 1 + goto write } r, size = '\ufffd', 2 if i := int(c0-0x81)*190 + int(c1); i < len(decode) { @@ -130,12 +130,7 @@ loop: } default: - if d.gb18030 { - err = errInvalidGB18030 - } else { - err = errInvalidGBK - } - break loop + r, size = utf8.RuneError, 1 } write: @@ -145,13 +140,6 @@ loop: } nDst += utf8.EncodeRune(dst[nDst:], r) } - if atEOF && err == transform.ErrShortSrc { - if d.gb18030 { - err = errInvalidGB18030 - } else { - err = errInvalidGBK - } - } return nDst, nSrc, err } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go index 85de6b1e..eb3157f0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go @@ -5,7 +5,6 @@ package simplifiedchinese import ( - "errors" "unicode/utf8" "golang.org/x/text/encoding" @@ -31,8 +30,6 @@ func hzGB2312NewEncoder() transform.Transformer { return new(hzGB2312Encoder) } -var errInvalidHZGB2312 = errors.New("simplifiedchinese: invalid HZ-GB2312 encoding") - const ( asciiState = iota gbState @@ -50,14 +47,18 @@ loop: for ; nSrc < len(src); nSrc += size { c0 := src[nSrc] if c0 >= utf8.RuneSelf { - err = errInvalidHZGB2312 - break loop + r, size = utf8.RuneError, 1 + goto write } if c0 == '~' { if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r = utf8.RuneError + goto write } size = 2 switch src[nSrc+1] { @@ -78,8 +79,8 @@ loop: case '\n': continue default: - err = errInvalidHZGB2312 - break loop + r = utf8.RuneError + goto write } } @@ -87,33 +88,37 @@ loop: r, size = rune(c0), 1 } else { if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = utf8.RuneError, 1 + goto write } + size = 2 c1 := src[nSrc+1] if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 { - err = errInvalidHZGB2312 - break loop - } - - r, size = '\ufffd', 2 - if i := int(c0-0x01)*190 + int(c1+0x3f); i < len(decode) { + // error + } else if i := int(c0-0x01)*190 + int(c1+0x3f); i < len(decode) { r = rune(decode[i]) - if r == 0 { - r = '\ufffd' + if r != 0 { + goto write } } + if c1 > utf8.RuneSelf { + // Be consistent and always treat non-ASCII as a single error. 
+ size = 1 + } + r = utf8.RuneError } + write: if nDst+utf8.RuneLen(r) > len(dst) { err = transform.ErrShortDst break loop } nDst += utf8.EncodeRune(dst[nDst:], r) } - if atEOF && err == transform.ErrShortSrc { - err = errInvalidHZGB2312 - } return nDst, nSrc, err } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go deleted file mode 100644 index 55016c78..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n") - fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n") - - printGB18030() - printGBK() -} - -func printGB18030() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n") - fmt.Printf("var gb18030 = [...][2]uint16{\n") - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint32(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0x10000 && y < 0x10000 { - fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y) - } - } - fmt.Printf("}\n\n") -} - -func printGBK() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*190 <= x { - log.Fatalf("GBK code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := x/190, x%190 - if c1 >= 0x3f { - c1++ - } - reverse[y] = (0x81+c0)<<8 | (0x40 + c1) - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go index 275821f5..1fcddde0 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go @@ -5,7 +5,6 @@ package traditionalchinese import ( - "errors" "unicode/utf8" "golang.org/x/text/encoding" @@ -26,8 +25,6 @@ var big5 = internal.Encoding{ identifier.Big5, } -var errInvalidBig5 = errors.New("traditionalchinese: invalid Big5 encoding") - type big5Decoder struct{ transform.NopResetter } func (big5Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { @@ -40,8 +37,12 @@ loop: case 0x81 <= c0 && c0 < 0xff: if nSrc+1 >= len(src) { - err = transform.ErrShortSrc - break loop + if !atEOF { + err = transform.ErrShortSrc + break loop + } + r, size = utf8.RuneError, 1 + goto write } c1 := src[nSrc+1] switch { @@ -49,9 +50,12 @@ loop: c1 -= 0x40 case 0xa1 <= c1 && c1 < 0xff: c1 -= 0x62 + case c1 < 0x40: + r, size = utf8.RuneError, 1 + goto write default: - err = errInvalidBig5 - break loop + r, size = utf8.RuneError, 2 + goto write } r, size = '\ufffd', 2 if i := int(c0-0x81)*157 + int(c1); i < len(decode) { @@ -80,10 +84,10 @@ loop: } default: - err = errInvalidBig5 - break loop + r, size = utf8.RuneError, 1 } + write: if nDst+utf8.RuneLen(r) > len(dst) { err = transform.ErrShortDst break loop @@ -99,9 +103,6 @@ loop: nDst += copy(dst[nDst:], s) continue loop } - if atEOF && err == transform.ErrShortSrc { - err = errInvalidBig5 - } return nDst, nSrc, err } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go deleted file mode 100644 index 
cf7fdb31..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n") - fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint32{} - reverse := [65536 * 4]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*157 <= x { - log.Fatalf("Big5 code %d is out of range", x) - } - mapping[x] = y - - // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that - // "The index pointer for code point in index is the first pointer - // corresponding to code point in index", which would normally mean - // that the code below should be guarded by "if reverse[y] == 0", but - // last instead of first seems to match the behavior of - // "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in - // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148 - // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc") - // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc". - c0, c1 := x/157, x%157 - if c1 < 0x3f { - c1 += 0x40 - } else { - c1 += 0x62 - } - reverse[y] = (0x81+c0)<<8 | c1 - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n") - fmt.Printf("var decode = [...]uint32{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%08X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/common.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/common.go index a255bb0a..9d86e185 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/common.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/common.go @@ -1,4 +1,4 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package language diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/doc.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/doc.go new file mode 100644 index 00000000..8afecd50 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/doc.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package language implements BCP 47 language tags and related functionality. +// +// The most important function of package language is to match a list of +// user-preferred languages to a list of supported languages. +// It alleviates the developer of dealing with the complexity of this process +// and provides the user with the best experience +// (see https://blog.golang.org/matchlang). +// +// +// Matching preferred against supported languages +// +// A Matcher for an application that supports English, Australian English, +// Danish, and standard Mandarin can be created as follows: +// +// var matcher = language.NewMatcher([]language.Tag{ +// language.English, // The first language is used as fallback. +// language.MustParse("en-AU"), +// language.Danish, +// language.Chinese, +// }) +// +// This list of supported languages is typically implied by the languages for +// which there exists translations of the user interface. 
+// +// User-preferred languages usually come as a comma-separated list of BCP 47 +// language tags. +// The MatchString finds best matches for such strings: +// +// handler(w http.ResponseWriter, r *http.Request) { +// lang, _ := r.Cookie("lang") +// accept := r.Header.Get("Accept-Language") +// tag, _ := language.MatchStrings(matcher, lang.String(), accept) +// +// // tag should now be used for the initialization of any +// // locale-specific service. +// } +// +// The Matcher's Match method can be used to match Tags directly. +// +// Matchers are aware of the intricacies of equivalence between languages, such +// as deprecated subtags, legacy tags, macro languages, mutual +// intelligibility between scripts and languages, and transparently passing +// BCP 47 user configuration. +// For instance, it will know that a reader of Bokmål Danish can read Norwegian +// and will know that Cantonese ("yue") is a good match for "zh-HK". +// +// +// Using match results +// +// To guarantee a consistent user experience to the user it is important to +// use the same language tag for the selection of any locale-specific services. +// For example, it is utterly confusing to substitute spelled-out numbers +// or dates in one language in text of another language. +// More subtly confusing is using the wrong sorting order or casing +// algorithm for a certain language. +// +// All the packages in x/text that provide locale-specific services +// (e.g. collate, cases) should be initialized with the tag that was +// obtained at the start of an interaction with the user. +// +// Note that Tag that is returned by Match and MatchString may differ from any +// of the supported languages, as it may contain carried over settings from +// the user tags. +// This may be inconvenient when your application has some additional +// locale-specific data for your supported languages. +// Match and MatchString both return the index of the matched supported tag +// to simplify associating such data with the matched tag. +// +// +// Canonicalization +// +// If one uses the Matcher to compare languages one does not need to +// worry about canonicalization. +// +// The meaning of a Tag varies per application. The language package +// therefore delays canonicalization and preserves information as much +// as possible. The Matcher, however, will always take into account that +// two different tags may represent the same language. +// +// By default, only legacy and deprecated tags are converted into their +// canonical equivalent. All other information is preserved. This approach makes +// the confidence scores more accurate and allows matchers to distinguish +// between variants that are otherwise lost. +// +// As a consequence, two tags that should be treated as identical according to +// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The +// Matcher handles such distinctions, though, and is aware of the +// equivalence relations. The CanonType type can be used to alter the +// canonicalization form. +// +// References +// +// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47 +// +package language // import "golang.org/x/text/language" + +// TODO: explanation on how to match languages for your own locale-specific +// service. 
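Note (not part of the vendored diff): the doc.go added above describes matching user-preferred languages against supported ones via NewMatcher and MatchStrings. A minimal, illustrative sketch of that usage follows; the Accept-Language value and the supported-tag list are assumed examples, not anything taken from this change:

    package main

    import (
    	"fmt"

    	"golang.org/x/text/language"
    )

    // The first tag is used as the fallback, mirroring the doc.go example.
    var matcher = language.NewMatcher([]language.Tag{
    	language.English,
    	language.MustParse("en-AU"),
    	language.Danish,
    	language.Chinese,
    })

    func main() {
    	// A hypothetical Accept-Language header value; MatchStrings accepts one
    	// or more comma-separated BCP 47 lists, in order of preference.
    	accept := "nn;q=0.3, en-us;q=0.8, en,"

    	// tag may carry over user settings (e.g. the -US region) and index
    	// points at the matched entry in the supported list above.
    	tag, index := language.MatchStrings(matcher, accept)
    	fmt.Println(tag, index)
    }

The returned tag, rather than the raw user input, is what should be passed to other locale-aware x/text packages, as the doc comment above explains.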
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/gen_common.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/gen_common.go deleted file mode 100644 index 83ce1801..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/gen_common.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file contains code common to the maketables.go and the package code. - -// langAliasType is the type of an alias in langAliasMap. -type langAliasType int8 - -const ( - langDeprecated langAliasType = iota - langMacro - langLegacy - - langAliasTypeUnknown langAliasType = -1 -) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/gen_index.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/gen_index.go deleted file mode 100644 index eef555cd..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/gen_index.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file generates derivative tables based on the language package itself. - -import ( - "bytes" - "flag" - "fmt" - "io/ioutil" - "log" - "reflect" - "sort" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/language" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", false, - "test existing tables; can be used to compare web data with package data.") - - draft = flag.String("draft", - "contributed", - `Minimal draft requirements (approved, contributed, provisional, unconfirmed).`) -) - -func main() { - gen.Init() - - // Read the CLDR zip file. - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer func() { - buf := &bytes.Buffer{} - - if _, err = w.WriteGo(buf, "language"); err != nil { - log.Fatalf("Error formatting file index.go: %v", err) - } - - // Since we're generating a table for our own package we need to rewrite - // doing the equivalent of go fmt -r 'language.b -> b'. Using - // bytes.Replace will do. - out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1) - if err := ioutil.WriteFile("index.go", out, 0600); err != nil { - log.Fatalf("Could not create file index.go: %v", err) - } - }() - - m := map[language.Tag]bool{} - for _, lang := range data.Locales() { - // We include all locales unconditionally to be consistent with en_US. - // We want en_US, even though it has no data associated with it. - - // TODO: put any of the languages for which no data exists at the end - // of the index. This allows all components based on ICU to use that - // as the cutoff point. - // if x := data.RawLDML(lang); false || - // x.LocaleDisplayNames != nil || - // x.Characters != nil || - // x.Delimiters != nil || - // x.Measurement != nil || - // x.Dates != nil || - // x.Numbers != nil || - // x.Units != nil || - // x.ListPatterns != nil || - // x.Collations != nil || - // x.Segmentations != nil || - // x.Rbnf != nil || - // x.Annotations != nil || - // x.Metadata != nil { - - // TODO: support POSIX natively, albeit non-standard. 
- tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) - m[tag] = true - // } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - m[language.Make(lang)] = true - } - } - } - - var core, special []language.Tag - - for t := range m { - if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { - log.Fatalf("Unexpected extension %v in %v", x, t) - } - if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { - core = append(core, t) - } else { - special = append(special, t) - } - } - - w.WriteComment(` - NumCompactTags is the number of common tags. The maximum tag is - NumCompactTags-1.`) - w.WriteConst("NumCompactTags", len(core)+len(special)) - - sort.Sort(byAlpha(special)) - w.WriteVar("specialTags", special) - - // TODO: order by frequency? - sort.Sort(byAlpha(core)) - - // Size computations are just an estimate. - w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size()) - w.Size += len(core) * 6 // size of uint32 and uint16 - - fmt.Fprintln(w) - fmt.Fprintln(w, "var coreTags = map[uint32]uint16{") - fmt.Fprintln(w, "0x0: 0, // und") - i := len(special) + 1 // Und and special tags already written. - for _, t := range core { - if t == language.Und { - continue - } - fmt.Fprint(w.Hash, t, i) - b, s, r := t.Raw() - fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n", - getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number - getIndex(s, 2), - getIndex(r, 3), - i, t) - i++ - } - fmt.Fprintln(w, "}") -} - -// getIndex prints the subtag type and extracts its index of size nibble. -// If the index is less than n nibbles, the result is prefixed with 0s. -func getIndex(x interface{}, n int) string { - s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00} - s = s[strings.Index(s, "0x")+2 : len(s)-1] - return strings.Repeat("0", n-len(s)) + s -} - -type byAlpha []language.Tag - -func (a byAlpha) Len() int { return len(a) } -func (a byAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() } diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/index.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/index.go index 7fa9cc82..5311e5cb 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/index.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/index.go @@ -1,762 +1,783 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package language // NumCompactTags is the number of common tags. The maximum tag is // NumCompactTags-1. 
-const NumCompactTags = 747 +const NumCompactTags = 768 var specialTags = []Tag{ // 2 elements - 0: {lang: 0x61, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"}, - 1: {lang: 0x9b, region: 0x132, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"}, + 0: {lang: 0xd7, region: 0x6e, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"}, + 1: {lang: 0x139, region: 0x135, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"}, } // Size: 72 bytes var coreTags = map[uint32]uint16{ 0x0: 0, // und - 0x00a00000: 3, // af - 0x00a000d0: 4, // af-NA - 0x00a0015e: 5, // af-ZA - 0x00b00000: 6, // agq - 0x00b00051: 7, // agq-CM - 0x00d00000: 8, // ak - 0x00d0007e: 9, // ak-GH - 0x01100000: 10, // am - 0x0110006e: 11, // am-ET - 0x01500000: 12, // ar - 0x01500001: 13, // ar-001 - 0x01500022: 14, // ar-AE - 0x01500038: 15, // ar-BH - 0x01500061: 16, // ar-DJ - 0x01500066: 17, // ar-DZ - 0x0150006a: 18, // ar-EG - 0x0150006b: 19, // ar-EH - 0x0150006c: 20, // ar-ER - 0x01500095: 21, // ar-IL - 0x01500099: 22, // ar-IQ - 0x0150009f: 23, // ar-JO - 0x015000a6: 24, // ar-KM - 0x015000aa: 25, // ar-KW - 0x015000ae: 26, // ar-LB - 0x015000b7: 27, // ar-LY - 0x015000b8: 28, // ar-MA - 0x015000c7: 29, // ar-MR - 0x015000df: 30, // ar-OM - 0x015000eb: 31, // ar-PS - 0x015000f1: 32, // ar-QA - 0x01500106: 33, // ar-SA - 0x01500109: 34, // ar-SD - 0x01500113: 35, // ar-SO - 0x01500115: 36, // ar-SS - 0x0150011a: 37, // ar-SY - 0x0150011e: 38, // ar-TD - 0x01500126: 39, // ar-TN - 0x0150015b: 40, // ar-YE - 0x01c00000: 41, // as - 0x01c00097: 42, // as-IN - 0x01d00000: 43, // asa - 0x01d0012d: 44, // asa-TZ - 0x01f00000: 45, // ast - 0x01f0006d: 46, // ast-ES - 0x02400000: 47, // az - 0x0241e000: 48, // az-Cyrl - 0x0241e031: 49, // az-Cyrl-AZ - 0x02452000: 50, // az-Latn - 0x02452031: 51, // az-Latn-AZ - 0x02a00000: 52, // bas - 0x02a00051: 53, // bas-CM - 0x02f00000: 54, // be - 0x02f00046: 55, // be-BY - 0x03100000: 56, // bem - 0x0310015f: 57, // bem-ZM - 0x03300000: 58, // bez - 0x0330012d: 59, // bez-TZ - 0x03800000: 60, // bg - 0x03800037: 61, // bg-BG - 0x03c00000: 62, // bh - 0x04900000: 63, // bm - 0x049000c1: 64, // bm-ML - 0x04b00000: 65, // bn - 0x04b00034: 66, // bn-BD - 0x04b00097: 67, // bn-IN - 0x04c00000: 68, // bo - 0x04c00052: 69, // bo-CN - 0x04c00097: 70, // bo-IN - 0x05000000: 71, // br - 0x05000076: 72, // br-FR - 0x05300000: 73, // brx - 0x05300097: 74, // brx-IN - 0x05400000: 75, // bs - 0x0541e000: 76, // bs-Cyrl - 0x0541e032: 77, // bs-Cyrl-BA - 0x05452000: 78, // bs-Latn - 0x05452032: 79, // bs-Latn-BA - 0x06100000: 80, // ca - 0x06100021: 81, // ca-AD - 0x0610006d: 82, // ca-ES - 0x06100076: 83, // ca-FR - 0x0610009c: 84, // ca-IT - 0x06400000: 85, // ce - 0x06400104: 86, // ce-RU - 0x06600000: 87, // cgg - 0x0660012f: 88, // cgg-UG - 0x06c00000: 89, // chr - 0x06c00132: 90, // chr-US - 0x06f00000: 91, // ckb - 0x06f00099: 92, // ckb-IQ - 0x06f0009a: 93, // ckb-IR - 0x07900000: 94, // cs - 0x0790005d: 95, // cs-CZ - 0x07d00000: 96, // cu - 0x07d00104: 97, // cu-RU - 0x07f00000: 98, // cy - 0x07f00079: 99, // cy-GB - 0x08000000: 100, // da - 0x08000062: 101, // da-DK - 0x08000080: 102, // da-GL - 0x08300000: 103, // dav - 0x083000a2: 104, // dav-KE - 0x08500000: 105, // de - 0x0850002d: 106, // de-AT - 0x08500035: 107, // de-BE - 0x0850004d: 108, // de-CH - 0x0850005f: 109, // de-DE - 0x085000b0: 110, // de-LI - 0x085000b5: 111, // de-LU - 0x08800000: 112, // dje - 0x088000d2: 113, // dje-NE - 0x08b00000: 114, // dsb - 0x08b0005f: 115, 
// dsb-DE - 0x08f00000: 116, // dua - 0x08f00051: 117, // dua-CM - 0x09000000: 118, // dv - 0x09100000: 119, // dyo - 0x09100112: 120, // dyo-SN - 0x09300000: 121, // dz - 0x09300042: 122, // dz-BT - 0x09400000: 123, // ebu - 0x094000a2: 124, // ebu-KE - 0x09500000: 125, // ee - 0x0950007e: 126, // ee-GH - 0x09500120: 127, // ee-TG - 0x09a00000: 128, // el - 0x09a0005c: 129, // el-CY - 0x09a00085: 130, // el-GR - 0x09b00000: 131, // en - 0x09b00001: 132, // en-001 - 0x09b0001a: 133, // en-150 - 0x09b00024: 134, // en-AG - 0x09b00025: 135, // en-AI - 0x09b0002c: 136, // en-AS - 0x09b0002d: 137, // en-AT - 0x09b0002e: 138, // en-AU - 0x09b00033: 139, // en-BB - 0x09b00035: 140, // en-BE - 0x09b00039: 141, // en-BI - 0x09b0003c: 142, // en-BM - 0x09b00041: 143, // en-BS - 0x09b00045: 144, // en-BW - 0x09b00047: 145, // en-BZ - 0x09b00048: 146, // en-CA - 0x09b00049: 147, // en-CC - 0x09b0004d: 148, // en-CH - 0x09b0004f: 149, // en-CK - 0x09b00051: 150, // en-CM - 0x09b0005b: 151, // en-CX - 0x09b0005c: 152, // en-CY - 0x09b0005f: 153, // en-DE - 0x09b00060: 154, // en-DG - 0x09b00062: 155, // en-DK - 0x09b00063: 156, // en-DM - 0x09b0006c: 157, // en-ER - 0x09b00070: 158, // en-FI - 0x09b00071: 159, // en-FJ - 0x09b00072: 160, // en-FK - 0x09b00073: 161, // en-FM - 0x09b00079: 162, // en-GB - 0x09b0007a: 163, // en-GD - 0x09b0007d: 164, // en-GG - 0x09b0007e: 165, // en-GH - 0x09b0007f: 166, // en-GI - 0x09b00081: 167, // en-GM - 0x09b00088: 168, // en-GU - 0x09b0008a: 169, // en-GY - 0x09b0008b: 170, // en-HK - 0x09b00094: 171, // en-IE - 0x09b00095: 172, // en-IL - 0x09b00096: 173, // en-IM - 0x09b00097: 174, // en-IN - 0x09b00098: 175, // en-IO - 0x09b0009d: 176, // en-JE - 0x09b0009e: 177, // en-JM - 0x09b000a2: 178, // en-KE - 0x09b000a5: 179, // en-KI - 0x09b000a7: 180, // en-KN - 0x09b000ab: 181, // en-KY - 0x09b000af: 182, // en-LC - 0x09b000b2: 183, // en-LR - 0x09b000b3: 184, // en-LS - 0x09b000bd: 185, // en-MG - 0x09b000be: 186, // en-MH - 0x09b000c4: 187, // en-MO - 0x09b000c5: 188, // en-MP - 0x09b000c8: 189, // en-MS - 0x09b000c9: 190, // en-MT - 0x09b000ca: 191, // en-MU - 0x09b000cc: 192, // en-MW - 0x09b000ce: 193, // en-MY - 0x09b000d0: 194, // en-NA - 0x09b000d3: 195, // en-NF - 0x09b000d4: 196, // en-NG - 0x09b000d7: 197, // en-NL - 0x09b000db: 198, // en-NR - 0x09b000dd: 199, // en-NU - 0x09b000de: 200, // en-NZ - 0x09b000e4: 201, // en-PG - 0x09b000e5: 202, // en-PH - 0x09b000e6: 203, // en-PK - 0x09b000e9: 204, // en-PN - 0x09b000ea: 205, // en-PR - 0x09b000ee: 206, // en-PW - 0x09b00105: 207, // en-RW - 0x09b00107: 208, // en-SB - 0x09b00108: 209, // en-SC - 0x09b00109: 210, // en-SD - 0x09b0010a: 211, // en-SE - 0x09b0010b: 212, // en-SG - 0x09b0010c: 213, // en-SH - 0x09b0010d: 214, // en-SI - 0x09b00110: 215, // en-SL - 0x09b00115: 216, // en-SS - 0x09b00119: 217, // en-SX - 0x09b0011b: 218, // en-SZ - 0x09b0011d: 219, // en-TC - 0x09b00123: 220, // en-TK - 0x09b00127: 221, // en-TO - 0x09b0012a: 222, // en-TT - 0x09b0012b: 223, // en-TV - 0x09b0012d: 224, // en-TZ - 0x09b0012f: 225, // en-UG - 0x09b00131: 226, // en-UM - 0x09b00132: 227, // en-US - 0x09b00136: 228, // en-VC - 0x09b00139: 229, // en-VG - 0x09b0013a: 230, // en-VI - 0x09b0013c: 231, // en-VU - 0x09b0013f: 232, // en-WS - 0x09b0015e: 233, // en-ZA - 0x09b0015f: 234, // en-ZM - 0x09b00161: 235, // en-ZW - 0x09c00000: 236, // eo - 0x09c00001: 237, // eo-001 - 0x09d00000: 238, // es - 0x09d0001e: 239, // es-419 - 0x09d0002b: 240, // es-AR - 0x09d0003e: 241, // es-BO - 0x09d00040: 242, // es-BR - 
0x09d00050: 243, // es-CL - 0x09d00053: 244, // es-CO - 0x09d00055: 245, // es-CR - 0x09d00058: 246, // es-CU - 0x09d00064: 247, // es-DO - 0x09d00067: 248, // es-EA - 0x09d00068: 249, // es-EC - 0x09d0006d: 250, // es-ES - 0x09d00084: 251, // es-GQ - 0x09d00087: 252, // es-GT - 0x09d0008d: 253, // es-HN - 0x09d00092: 254, // es-IC - 0x09d000cd: 255, // es-MX - 0x09d000d6: 256, // es-NI - 0x09d000e0: 257, // es-PA - 0x09d000e2: 258, // es-PE - 0x09d000e5: 259, // es-PH - 0x09d000ea: 260, // es-PR - 0x09d000ef: 261, // es-PY - 0x09d00118: 262, // es-SV - 0x09d00132: 263, // es-US - 0x09d00133: 264, // es-UY - 0x09d00138: 265, // es-VE - 0x09f00000: 266, // et - 0x09f00069: 267, // et-EE - 0x0a100000: 268, // eu - 0x0a10006d: 269, // eu-ES - 0x0a200000: 270, // ewo - 0x0a200051: 271, // ewo-CM - 0x0a400000: 272, // fa - 0x0a400023: 273, // fa-AF - 0x0a40009a: 274, // fa-IR - 0x0a600000: 275, // ff - 0x0a600051: 276, // ff-CM - 0x0a600082: 277, // ff-GN - 0x0a6000c7: 278, // ff-MR - 0x0a600112: 279, // ff-SN - 0x0a800000: 280, // fi - 0x0a800070: 281, // fi-FI - 0x0aa00000: 282, // fil - 0x0aa000e5: 283, // fil-PH - 0x0ad00000: 284, // fo - 0x0ad00062: 285, // fo-DK - 0x0ad00074: 286, // fo-FO - 0x0af00000: 287, // fr - 0x0af00035: 288, // fr-BE - 0x0af00036: 289, // fr-BF - 0x0af00039: 290, // fr-BI - 0x0af0003a: 291, // fr-BJ - 0x0af0003b: 292, // fr-BL - 0x0af00048: 293, // fr-CA - 0x0af0004a: 294, // fr-CD - 0x0af0004b: 295, // fr-CF - 0x0af0004c: 296, // fr-CG - 0x0af0004d: 297, // fr-CH - 0x0af0004e: 298, // fr-CI - 0x0af00051: 299, // fr-CM - 0x0af00061: 300, // fr-DJ - 0x0af00066: 301, // fr-DZ - 0x0af00076: 302, // fr-FR - 0x0af00078: 303, // fr-GA - 0x0af0007c: 304, // fr-GF - 0x0af00082: 305, // fr-GN - 0x0af00083: 306, // fr-GP - 0x0af00084: 307, // fr-GQ - 0x0af0008f: 308, // fr-HT - 0x0af000a6: 309, // fr-KM - 0x0af000b5: 310, // fr-LU - 0x0af000b8: 311, // fr-MA - 0x0af000b9: 312, // fr-MC - 0x0af000bc: 313, // fr-MF - 0x0af000bd: 314, // fr-MG - 0x0af000c1: 315, // fr-ML - 0x0af000c6: 316, // fr-MQ - 0x0af000c7: 317, // fr-MR - 0x0af000ca: 318, // fr-MU - 0x0af000d1: 319, // fr-NC - 0x0af000d2: 320, // fr-NE - 0x0af000e3: 321, // fr-PF - 0x0af000e8: 322, // fr-PM - 0x0af00100: 323, // fr-RE - 0x0af00105: 324, // fr-RW - 0x0af00108: 325, // fr-SC - 0x0af00112: 326, // fr-SN - 0x0af0011a: 327, // fr-SY - 0x0af0011e: 328, // fr-TD - 0x0af00120: 329, // fr-TG - 0x0af00126: 330, // fr-TN - 0x0af0013c: 331, // fr-VU - 0x0af0013d: 332, // fr-WF - 0x0af0015c: 333, // fr-YT - 0x0b600000: 334, // fur - 0x0b60009c: 335, // fur-IT - 0x0b900000: 336, // fy - 0x0b9000d7: 337, // fy-NL - 0x0ba00000: 338, // ga - 0x0ba00094: 339, // ga-IE - 0x0c200000: 340, // gd - 0x0c200079: 341, // gd-GB - 0x0c800000: 342, // gl - 0x0c80006d: 343, // gl-ES - 0x0d200000: 344, // gsw - 0x0d20004d: 345, // gsw-CH - 0x0d200076: 346, // gsw-FR - 0x0d2000b0: 347, // gsw-LI - 0x0d300000: 348, // gu - 0x0d300097: 349, // gu-IN - 0x0d700000: 350, // guw - 0x0d800000: 351, // guz - 0x0d8000a2: 352, // guz-KE - 0x0d900000: 353, // gv - 0x0d900096: 354, // gv-IM - 0x0dc00000: 355, // ha - 0x0dc0007e: 356, // ha-GH - 0x0dc000d2: 357, // ha-NE - 0x0dc000d4: 358, // ha-NG - 0x0de00000: 359, // haw - 0x0de00132: 360, // haw-US - 0x0e000000: 361, // he - 0x0e000095: 362, // he-IL - 0x0e100000: 363, // hi - 0x0e100097: 364, // hi-IN - 0x0ee00000: 365, // hr - 0x0ee00032: 366, // hr-BA - 0x0ee0008e: 367, // hr-HR - 0x0ef00000: 368, // hsb - 0x0ef0005f: 369, // hsb-DE - 0x0f200000: 370, // hu - 0x0f200090: 371, // hu-HU - 
0x0f300000: 372, // hy - 0x0f300027: 373, // hy-AM - 0x0f800000: 374, // id - 0x0f800093: 375, // id-ID - 0x0fa00000: 376, // ig - 0x0fa000d4: 377, // ig-NG - 0x0fb00000: 378, // ii - 0x0fb00052: 379, // ii-CN - 0x10200000: 380, // is - 0x1020009b: 381, // is-IS - 0x10300000: 382, // it - 0x1030004d: 383, // it-CH - 0x1030009c: 384, // it-IT - 0x10300111: 385, // it-SM - 0x10400000: 386, // iu - 0x10700000: 387, // ja - 0x107000a0: 388, // ja-JP - 0x10900000: 389, // jbo - 0x10a00000: 390, // jgo - 0x10a00051: 391, // jgo-CM - 0x10c00000: 392, // jmc - 0x10c0012d: 393, // jmc-TZ - 0x10f00000: 394, // jv - 0x11100000: 395, // ka - 0x1110007b: 396, // ka-GE - 0x11300000: 397, // kab - 0x11300066: 398, // kab-DZ - 0x11500000: 399, // kaj - 0x11600000: 400, // kam - 0x116000a2: 401, // kam-KE - 0x11900000: 402, // kcg - 0x11b00000: 403, // kde - 0x11b0012d: 404, // kde-TZ - 0x11d00000: 405, // kea - 0x11d00059: 406, // kea-CV - 0x12800000: 407, // khq - 0x128000c1: 408, // khq-ML - 0x12b00000: 409, // ki - 0x12b000a2: 410, // ki-KE - 0x12f00000: 411, // kk - 0x12f000ac: 412, // kk-KZ - 0x13000000: 413, // kkj - 0x13000051: 414, // kkj-CM - 0x13100000: 415, // kl - 0x13100080: 416, // kl-GL - 0x13200000: 417, // kln - 0x132000a2: 418, // kln-KE - 0x13300000: 419, // km - 0x133000a4: 420, // km-KH - 0x13500000: 421, // kn - 0x13500097: 422, // kn-IN - 0x13600000: 423, // ko - 0x136000a8: 424, // ko-KP - 0x136000a9: 425, // ko-KR - 0x13800000: 426, // kok - 0x13800097: 427, // kok-IN - 0x14100000: 428, // ks - 0x14100097: 429, // ks-IN - 0x14200000: 430, // ksb - 0x1420012d: 431, // ksb-TZ - 0x14300000: 432, // ksf - 0x14300051: 433, // ksf-CM - 0x14400000: 434, // ksh - 0x1440005f: 435, // ksh-DE - 0x14500000: 436, // ku - 0x14a00000: 437, // kw - 0x14a00079: 438, // kw-GB - 0x14d00000: 439, // ky - 0x14d000a3: 440, // ky-KG - 0x15100000: 441, // lag - 0x1510012d: 442, // lag-TZ - 0x15400000: 443, // lb - 0x154000b5: 444, // lb-LU - 0x15a00000: 445, // lg - 0x15a0012f: 446, // lg-UG - 0x16100000: 447, // lkt - 0x16100132: 448, // lkt-US - 0x16400000: 449, // ln - 0x16400029: 450, // ln-AO - 0x1640004a: 451, // ln-CD - 0x1640004b: 452, // ln-CF - 0x1640004c: 453, // ln-CG - 0x16500000: 454, // lo - 0x165000ad: 455, // lo-LA - 0x16800000: 456, // lrc - 0x16800099: 457, // lrc-IQ - 0x1680009a: 458, // lrc-IR - 0x16900000: 459, // lt - 0x169000b4: 460, // lt-LT - 0x16b00000: 461, // lu - 0x16b0004a: 462, // lu-CD - 0x16d00000: 463, // luo - 0x16d000a2: 464, // luo-KE - 0x16e00000: 465, // luy - 0x16e000a2: 466, // luy-KE - 0x17000000: 467, // lv - 0x170000b6: 468, // lv-LV - 0x17a00000: 469, // mas - 0x17a000a2: 470, // mas-KE - 0x17a0012d: 471, // mas-TZ - 0x18000000: 472, // mer - 0x180000a2: 473, // mer-KE - 0x18200000: 474, // mfe - 0x182000ca: 475, // mfe-MU - 0x18300000: 476, // mg - 0x183000bd: 477, // mg-MG - 0x18400000: 478, // mgh - 0x184000cf: 479, // mgh-MZ - 0x18500000: 480, // mgo - 0x18500051: 481, // mgo-CM - 0x18c00000: 482, // mk - 0x18c000c0: 483, // mk-MK - 0x18d00000: 484, // ml - 0x18d00097: 485, // ml-IN - 0x18f00000: 486, // mn - 0x18f000c3: 487, // mn-MN - 0x19600000: 488, // mr - 0x19600097: 489, // mr-IN - 0x19a00000: 490, // ms - 0x19a0003d: 491, // ms-BN - 0x19a000ce: 492, // ms-MY - 0x19a0010b: 493, // ms-SG - 0x19b00000: 494, // mt - 0x19b000c9: 495, // mt-MT - 0x19d00000: 496, // mua - 0x19d00051: 497, // mua-CM - 0x1a500000: 498, // my - 0x1a5000c2: 499, // my-MM - 0x1a900000: 500, // mzn - 0x1a90009a: 501, // mzn-IR - 0x1ab00000: 502, // nah - 0x1ae00000: 503, // 
naq - 0x1ae000d0: 504, // naq-NA - 0x1af00000: 505, // nb - 0x1af000d8: 506, // nb-NO - 0x1af0010e: 507, // nb-SJ - 0x1b100000: 508, // nd - 0x1b100161: 509, // nd-ZW - 0x1b400000: 510, // ne - 0x1b400097: 511, // ne-IN - 0x1b4000d9: 512, // ne-NP - 0x1bd00000: 513, // nl - 0x1bd0002f: 514, // nl-AW - 0x1bd00035: 515, // nl-BE - 0x1bd0003f: 516, // nl-BQ - 0x1bd0005a: 517, // nl-CW - 0x1bd000d7: 518, // nl-NL - 0x1bd00114: 519, // nl-SR - 0x1bd00119: 520, // nl-SX - 0x1be00000: 521, // nmg - 0x1be00051: 522, // nmg-CM - 0x1bf00000: 523, // nn - 0x1bf000d8: 524, // nn-NO - 0x1c000000: 525, // nnh - 0x1c000051: 526, // nnh-CM - 0x1c100000: 527, // no - 0x1c500000: 528, // nqo - 0x1c600000: 529, // nr - 0x1c800000: 530, // nso - 0x1c900000: 531, // nus - 0x1c900115: 532, // nus-SS - 0x1cc00000: 533, // ny - 0x1ce00000: 534, // nyn - 0x1ce0012f: 535, // nyn-UG - 0x1d200000: 536, // om - 0x1d20006e: 537, // om-ET - 0x1d2000a2: 538, // om-KE - 0x1d300000: 539, // or - 0x1d300097: 540, // or-IN - 0x1d400000: 541, // os - 0x1d40007b: 542, // os-GE - 0x1d400104: 543, // os-RU - 0x1d700000: 544, // pa - 0x1d705000: 545, // pa-Arab - 0x1d7050e6: 546, // pa-Arab-PK - 0x1d72f000: 547, // pa-Guru - 0x1d72f097: 548, // pa-Guru-IN - 0x1db00000: 549, // pap - 0x1e700000: 550, // pl - 0x1e7000e7: 551, // pl-PL - 0x1ed00000: 552, // prg - 0x1ed00001: 553, // prg-001 - 0x1ee00000: 554, // ps - 0x1ee00023: 555, // ps-AF - 0x1ef00000: 556, // pt - 0x1ef00029: 557, // pt-AO - 0x1ef00040: 558, // pt-BR - 0x1ef0004d: 559, // pt-CH - 0x1ef00059: 560, // pt-CV - 0x1ef00084: 561, // pt-GQ - 0x1ef00089: 562, // pt-GW - 0x1ef000b5: 563, // pt-LU - 0x1ef000c4: 564, // pt-MO - 0x1ef000cf: 565, // pt-MZ - 0x1ef000ec: 566, // pt-PT - 0x1ef00116: 567, // pt-ST - 0x1ef00124: 568, // pt-TL - 0x1f100000: 569, // qu - 0x1f10003e: 570, // qu-BO - 0x1f100068: 571, // qu-EC - 0x1f1000e2: 572, // qu-PE - 0x1fc00000: 573, // rm - 0x1fc0004d: 574, // rm-CH - 0x20100000: 575, // rn - 0x20100039: 576, // rn-BI - 0x20300000: 577, // ro - 0x203000ba: 578, // ro-MD - 0x20300102: 579, // ro-RO - 0x20500000: 580, // rof - 0x2050012d: 581, // rof-TZ - 0x20700000: 582, // ru - 0x20700046: 583, // ru-BY - 0x207000a3: 584, // ru-KG - 0x207000ac: 585, // ru-KZ - 0x207000ba: 586, // ru-MD - 0x20700104: 587, // ru-RU - 0x2070012e: 588, // ru-UA - 0x20a00000: 589, // rw - 0x20a00105: 590, // rw-RW - 0x20b00000: 591, // rwk - 0x20b0012d: 592, // rwk-TZ - 0x20f00000: 593, // sah - 0x20f00104: 594, // sah-RU - 0x21000000: 595, // saq - 0x210000a2: 596, // saq-KE - 0x21400000: 597, // sbp - 0x2140012d: 598, // sbp-TZ - 0x21c00000: 599, // sdh - 0x21d00000: 600, // se - 0x21d00070: 601, // se-FI - 0x21d000d8: 602, // se-NO - 0x21d0010a: 603, // se-SE - 0x21f00000: 604, // seh - 0x21f000cf: 605, // seh-MZ - 0x22100000: 606, // ses - 0x221000c1: 607, // ses-ML - 0x22200000: 608, // sg - 0x2220004b: 609, // sg-CF - 0x22600000: 610, // shi - 0x22652000: 611, // shi-Latn - 0x226520b8: 612, // shi-Latn-MA - 0x226d2000: 613, // shi-Tfng - 0x226d20b8: 614, // shi-Tfng-MA - 0x22800000: 615, // si - 0x228000b1: 616, // si-LK - 0x22a00000: 617, // sk - 0x22a0010f: 618, // sk-SK - 0x22c00000: 619, // sl - 0x22c0010d: 620, // sl-SI - 0x23000000: 621, // sma - 0x23100000: 622, // smi - 0x23200000: 623, // smj - 0x23300000: 624, // smn - 0x23300070: 625, // smn-FI - 0x23500000: 626, // sms - 0x23600000: 627, // sn - 0x23600161: 628, // sn-ZW - 0x23800000: 629, // so - 0x23800061: 630, // so-DJ - 0x2380006e: 631, // so-ET - 0x238000a2: 632, // so-KE - 0x23800113: 633, 
// so-SO - 0x23a00000: 634, // sq - 0x23a00026: 635, // sq-AL - 0x23a000c0: 636, // sq-MK - 0x23a0014a: 637, // sq-XK - 0x23b00000: 638, // sr - 0x23b1e000: 639, // sr-Cyrl - 0x23b1e032: 640, // sr-Cyrl-BA - 0x23b1e0bb: 641, // sr-Cyrl-ME - 0x23b1e103: 642, // sr-Cyrl-RS - 0x23b1e14a: 643, // sr-Cyrl-XK - 0x23b52000: 644, // sr-Latn - 0x23b52032: 645, // sr-Latn-BA - 0x23b520bb: 646, // sr-Latn-ME - 0x23b52103: 647, // sr-Latn-RS - 0x23b5214a: 648, // sr-Latn-XK - 0x24000000: 649, // ss - 0x24100000: 650, // ssy - 0x24200000: 651, // st - 0x24700000: 652, // sv - 0x24700030: 653, // sv-AX - 0x24700070: 654, // sv-FI - 0x2470010a: 655, // sv-SE - 0x24800000: 656, // sw - 0x2480004a: 657, // sw-CD - 0x248000a2: 658, // sw-KE - 0x2480012d: 659, // sw-TZ - 0x2480012f: 660, // sw-UG - 0x24f00000: 661, // syr - 0x25100000: 662, // ta - 0x25100097: 663, // ta-IN - 0x251000b1: 664, // ta-LK - 0x251000ce: 665, // ta-MY - 0x2510010b: 666, // ta-SG - 0x25800000: 667, // te - 0x25800097: 668, // te-IN - 0x25a00000: 669, // teo - 0x25a000a2: 670, // teo-KE - 0x25a0012f: 671, // teo-UG - 0x25d00000: 672, // th - 0x25d00121: 673, // th-TH - 0x26100000: 674, // ti - 0x2610006c: 675, // ti-ER - 0x2610006e: 676, // ti-ET - 0x26200000: 677, // tig - 0x26400000: 678, // tk - 0x26400125: 679, // tk-TM - 0x26b00000: 680, // tn - 0x26c00000: 681, // to - 0x26c00127: 682, // to-TO - 0x26f00000: 683, // tr - 0x26f0005c: 684, // tr-CY - 0x26f00129: 685, // tr-TR - 0x27200000: 686, // ts - 0x27e00000: 687, // twq - 0x27e000d2: 688, // twq-NE - 0x28200000: 689, // tzm - 0x282000b8: 690, // tzm-MA - 0x28400000: 691, // ug - 0x28400052: 692, // ug-CN - 0x28600000: 693, // uk - 0x2860012e: 694, // uk-UA - 0x28c00000: 695, // ur - 0x28c00097: 696, // ur-IN - 0x28c000e6: 697, // ur-PK - 0x28d00000: 698, // uz - 0x28d05000: 699, // uz-Arab - 0x28d05023: 700, // uz-Arab-AF - 0x28d1e000: 701, // uz-Cyrl - 0x28d1e134: 702, // uz-Cyrl-UZ - 0x28d52000: 703, // uz-Latn - 0x28d52134: 704, // uz-Latn-UZ - 0x28e00000: 705, // vai - 0x28e52000: 706, // vai-Latn - 0x28e520b2: 707, // vai-Latn-LR - 0x28ed9000: 708, // vai-Vaii - 0x28ed90b2: 709, // vai-Vaii-LR - 0x28f00000: 710, // ve - 0x29200000: 711, // vi - 0x2920013b: 712, // vi-VN - 0x29700000: 713, // vo - 0x29700001: 714, // vo-001 - 0x29a00000: 715, // vun - 0x29a0012d: 716, // vun-TZ - 0x29b00000: 717, // wa - 0x29c00000: 718, // wae - 0x29c0004d: 719, // wae-CH - 0x2a400000: 720, // wo - 0x2a900000: 721, // xh - 0x2b100000: 722, // xog - 0x2b10012f: 723, // xog-UG - 0x2b700000: 724, // yav - 0x2b700051: 725, // yav-CM - 0x2b900000: 726, // yi - 0x2b900001: 727, // yi-001 - 0x2ba00000: 728, // yo - 0x2ba0003a: 729, // yo-BJ - 0x2ba000d4: 730, // yo-NG - 0x2bd00000: 731, // yue - 0x2bd0008b: 732, // yue-HK - 0x2c300000: 733, // zgh - 0x2c3000b8: 734, // zgh-MA - 0x2c400000: 735, // zh - 0x2c434000: 736, // zh-Hans - 0x2c434052: 737, // zh-Hans-CN - 0x2c43408b: 738, // zh-Hans-HK - 0x2c4340c4: 739, // zh-Hans-MO - 0x2c43410b: 740, // zh-Hans-SG - 0x2c435000: 741, // zh-Hant - 0x2c43508b: 742, // zh-Hant-HK - 0x2c4350c4: 743, // zh-Hant-MO - 0x2c43512c: 744, // zh-Hant-TW - 0x2c600000: 745, // zu - 0x2c60015e: 746, // zu-ZA + 0x01600000: 3, // af + 0x016000d2: 4, // af-NA + 0x01600161: 5, // af-ZA + 0x01c00000: 6, // agq + 0x01c00052: 7, // agq-CM + 0x02100000: 8, // ak + 0x02100080: 9, // ak-GH + 0x02700000: 10, // am + 0x0270006f: 11, // am-ET + 0x03a00000: 12, // ar + 0x03a00001: 13, // ar-001 + 0x03a00023: 14, // ar-AE + 0x03a00039: 15, // ar-BH + 0x03a00062: 16, // ar-DJ + 
0x03a00067: 17, // ar-DZ + 0x03a0006b: 18, // ar-EG + 0x03a0006c: 19, // ar-EH + 0x03a0006d: 20, // ar-ER + 0x03a00097: 21, // ar-IL + 0x03a0009b: 22, // ar-IQ + 0x03a000a1: 23, // ar-JO + 0x03a000a8: 24, // ar-KM + 0x03a000ac: 25, // ar-KW + 0x03a000b0: 26, // ar-LB + 0x03a000b9: 27, // ar-LY + 0x03a000ba: 28, // ar-MA + 0x03a000c9: 29, // ar-MR + 0x03a000e1: 30, // ar-OM + 0x03a000ed: 31, // ar-PS + 0x03a000f3: 32, // ar-QA + 0x03a00108: 33, // ar-SA + 0x03a0010b: 34, // ar-SD + 0x03a00115: 35, // ar-SO + 0x03a00117: 36, // ar-SS + 0x03a0011c: 37, // ar-SY + 0x03a00120: 38, // ar-TD + 0x03a00128: 39, // ar-TN + 0x03a0015e: 40, // ar-YE + 0x04000000: 41, // ars + 0x04300000: 42, // as + 0x04300099: 43, // as-IN + 0x04400000: 44, // asa + 0x0440012f: 45, // asa-TZ + 0x04800000: 46, // ast + 0x0480006e: 47, // ast-ES + 0x05800000: 48, // az + 0x0581f000: 49, // az-Cyrl + 0x0581f032: 50, // az-Cyrl-AZ + 0x05857000: 51, // az-Latn + 0x05857032: 52, // az-Latn-AZ + 0x05e00000: 53, // bas + 0x05e00052: 54, // bas-CM + 0x07100000: 55, // be + 0x07100047: 56, // be-BY + 0x07500000: 57, // bem + 0x07500162: 58, // bem-ZM + 0x07900000: 59, // bez + 0x0790012f: 60, // bez-TZ + 0x07e00000: 61, // bg + 0x07e00038: 62, // bg-BG + 0x08200000: 63, // bh + 0x0a000000: 64, // bm + 0x0a0000c3: 65, // bm-ML + 0x0a500000: 66, // bn + 0x0a500035: 67, // bn-BD + 0x0a500099: 68, // bn-IN + 0x0a900000: 69, // bo + 0x0a900053: 70, // bo-CN + 0x0a900099: 71, // bo-IN + 0x0b200000: 72, // br + 0x0b200078: 73, // br-FR + 0x0b500000: 74, // brx + 0x0b500099: 75, // brx-IN + 0x0b700000: 76, // bs + 0x0b71f000: 77, // bs-Cyrl + 0x0b71f033: 78, // bs-Cyrl-BA + 0x0b757000: 79, // bs-Latn + 0x0b757033: 80, // bs-Latn-BA + 0x0d700000: 81, // ca + 0x0d700022: 82, // ca-AD + 0x0d70006e: 83, // ca-ES + 0x0d700078: 84, // ca-FR + 0x0d70009e: 85, // ca-IT + 0x0db00000: 86, // ccp + 0x0db00035: 87, // ccp-BD + 0x0db00099: 88, // ccp-IN + 0x0dc00000: 89, // ce + 0x0dc00106: 90, // ce-RU + 0x0df00000: 91, // cgg + 0x0df00131: 92, // cgg-UG + 0x0e500000: 93, // chr + 0x0e500135: 94, // chr-US + 0x0e900000: 95, // ckb + 0x0e90009b: 96, // ckb-IQ + 0x0e90009c: 97, // ckb-IR + 0x0fa00000: 98, // cs + 0x0fa0005e: 99, // cs-CZ + 0x0fe00000: 100, // cu + 0x0fe00106: 101, // cu-RU + 0x10000000: 102, // cy + 0x1000007b: 103, // cy-GB + 0x10100000: 104, // da + 0x10100063: 105, // da-DK + 0x10100082: 106, // da-GL + 0x10800000: 107, // dav + 0x108000a4: 108, // dav-KE + 0x10d00000: 109, // de + 0x10d0002e: 110, // de-AT + 0x10d00036: 111, // de-BE + 0x10d0004e: 112, // de-CH + 0x10d00060: 113, // de-DE + 0x10d0009e: 114, // de-IT + 0x10d000b2: 115, // de-LI + 0x10d000b7: 116, // de-LU + 0x11700000: 117, // dje + 0x117000d4: 118, // dje-NE + 0x11f00000: 119, // dsb + 0x11f00060: 120, // dsb-DE + 0x12400000: 121, // dua + 0x12400052: 122, // dua-CM + 0x12800000: 123, // dv + 0x12b00000: 124, // dyo + 0x12b00114: 125, // dyo-SN + 0x12d00000: 126, // dz + 0x12d00043: 127, // dz-BT + 0x12f00000: 128, // ebu + 0x12f000a4: 129, // ebu-KE + 0x13000000: 130, // ee + 0x13000080: 131, // ee-GH + 0x13000122: 132, // ee-TG + 0x13600000: 133, // el + 0x1360005d: 134, // el-CY + 0x13600087: 135, // el-GR + 0x13900000: 136, // en + 0x13900001: 137, // en-001 + 0x1390001a: 138, // en-150 + 0x13900025: 139, // en-AG + 0x13900026: 140, // en-AI + 0x1390002d: 141, // en-AS + 0x1390002e: 142, // en-AT + 0x1390002f: 143, // en-AU + 0x13900034: 144, // en-BB + 0x13900036: 145, // en-BE + 0x1390003a: 146, // en-BI + 0x1390003d: 147, // en-BM + 0x13900042: 148, // 
en-BS + 0x13900046: 149, // en-BW + 0x13900048: 150, // en-BZ + 0x13900049: 151, // en-CA + 0x1390004a: 152, // en-CC + 0x1390004e: 153, // en-CH + 0x13900050: 154, // en-CK + 0x13900052: 155, // en-CM + 0x1390005c: 156, // en-CX + 0x1390005d: 157, // en-CY + 0x13900060: 158, // en-DE + 0x13900061: 159, // en-DG + 0x13900063: 160, // en-DK + 0x13900064: 161, // en-DM + 0x1390006d: 162, // en-ER + 0x13900072: 163, // en-FI + 0x13900073: 164, // en-FJ + 0x13900074: 165, // en-FK + 0x13900075: 166, // en-FM + 0x1390007b: 167, // en-GB + 0x1390007c: 168, // en-GD + 0x1390007f: 169, // en-GG + 0x13900080: 170, // en-GH + 0x13900081: 171, // en-GI + 0x13900083: 172, // en-GM + 0x1390008a: 173, // en-GU + 0x1390008c: 174, // en-GY + 0x1390008d: 175, // en-HK + 0x13900096: 176, // en-IE + 0x13900097: 177, // en-IL + 0x13900098: 178, // en-IM + 0x13900099: 179, // en-IN + 0x1390009a: 180, // en-IO + 0x1390009f: 181, // en-JE + 0x139000a0: 182, // en-JM + 0x139000a4: 183, // en-KE + 0x139000a7: 184, // en-KI + 0x139000a9: 185, // en-KN + 0x139000ad: 186, // en-KY + 0x139000b1: 187, // en-LC + 0x139000b4: 188, // en-LR + 0x139000b5: 189, // en-LS + 0x139000bf: 190, // en-MG + 0x139000c0: 191, // en-MH + 0x139000c6: 192, // en-MO + 0x139000c7: 193, // en-MP + 0x139000ca: 194, // en-MS + 0x139000cb: 195, // en-MT + 0x139000cc: 196, // en-MU + 0x139000ce: 197, // en-MW + 0x139000d0: 198, // en-MY + 0x139000d2: 199, // en-NA + 0x139000d5: 200, // en-NF + 0x139000d6: 201, // en-NG + 0x139000d9: 202, // en-NL + 0x139000dd: 203, // en-NR + 0x139000df: 204, // en-NU + 0x139000e0: 205, // en-NZ + 0x139000e6: 206, // en-PG + 0x139000e7: 207, // en-PH + 0x139000e8: 208, // en-PK + 0x139000eb: 209, // en-PN + 0x139000ec: 210, // en-PR + 0x139000f0: 211, // en-PW + 0x13900107: 212, // en-RW + 0x13900109: 213, // en-SB + 0x1390010a: 214, // en-SC + 0x1390010b: 215, // en-SD + 0x1390010c: 216, // en-SE + 0x1390010d: 217, // en-SG + 0x1390010e: 218, // en-SH + 0x1390010f: 219, // en-SI + 0x13900112: 220, // en-SL + 0x13900117: 221, // en-SS + 0x1390011b: 222, // en-SX + 0x1390011d: 223, // en-SZ + 0x1390011f: 224, // en-TC + 0x13900125: 225, // en-TK + 0x13900129: 226, // en-TO + 0x1390012c: 227, // en-TT + 0x1390012d: 228, // en-TV + 0x1390012f: 229, // en-TZ + 0x13900131: 230, // en-UG + 0x13900133: 231, // en-UM + 0x13900135: 232, // en-US + 0x13900139: 233, // en-VC + 0x1390013c: 234, // en-VG + 0x1390013d: 235, // en-VI + 0x1390013f: 236, // en-VU + 0x13900142: 237, // en-WS + 0x13900161: 238, // en-ZA + 0x13900162: 239, // en-ZM + 0x13900164: 240, // en-ZW + 0x13c00000: 241, // eo + 0x13c00001: 242, // eo-001 + 0x13e00000: 243, // es + 0x13e0001f: 244, // es-419 + 0x13e0002c: 245, // es-AR + 0x13e0003f: 246, // es-BO + 0x13e00041: 247, // es-BR + 0x13e00048: 248, // es-BZ + 0x13e00051: 249, // es-CL + 0x13e00054: 250, // es-CO + 0x13e00056: 251, // es-CR + 0x13e00059: 252, // es-CU + 0x13e00065: 253, // es-DO + 0x13e00068: 254, // es-EA + 0x13e00069: 255, // es-EC + 0x13e0006e: 256, // es-ES + 0x13e00086: 257, // es-GQ + 0x13e00089: 258, // es-GT + 0x13e0008f: 259, // es-HN + 0x13e00094: 260, // es-IC + 0x13e000cf: 261, // es-MX + 0x13e000d8: 262, // es-NI + 0x13e000e2: 263, // es-PA + 0x13e000e4: 264, // es-PE + 0x13e000e7: 265, // es-PH + 0x13e000ec: 266, // es-PR + 0x13e000f1: 267, // es-PY + 0x13e0011a: 268, // es-SV + 0x13e00135: 269, // es-US + 0x13e00136: 270, // es-UY + 0x13e0013b: 271, // es-VE + 0x14000000: 272, // et + 0x1400006a: 273, // et-EE + 0x14500000: 274, // eu + 0x1450006e: 275, // eu-ES + 
0x14600000: 276, // ewo + 0x14600052: 277, // ewo-CM + 0x14800000: 278, // fa + 0x14800024: 279, // fa-AF + 0x1480009c: 280, // fa-IR + 0x14e00000: 281, // ff + 0x14e00052: 282, // ff-CM + 0x14e00084: 283, // ff-GN + 0x14e000c9: 284, // ff-MR + 0x14e00114: 285, // ff-SN + 0x15100000: 286, // fi + 0x15100072: 287, // fi-FI + 0x15300000: 288, // fil + 0x153000e7: 289, // fil-PH + 0x15800000: 290, // fo + 0x15800063: 291, // fo-DK + 0x15800076: 292, // fo-FO + 0x15e00000: 293, // fr + 0x15e00036: 294, // fr-BE + 0x15e00037: 295, // fr-BF + 0x15e0003a: 296, // fr-BI + 0x15e0003b: 297, // fr-BJ + 0x15e0003c: 298, // fr-BL + 0x15e00049: 299, // fr-CA + 0x15e0004b: 300, // fr-CD + 0x15e0004c: 301, // fr-CF + 0x15e0004d: 302, // fr-CG + 0x15e0004e: 303, // fr-CH + 0x15e0004f: 304, // fr-CI + 0x15e00052: 305, // fr-CM + 0x15e00062: 306, // fr-DJ + 0x15e00067: 307, // fr-DZ + 0x15e00078: 308, // fr-FR + 0x15e0007a: 309, // fr-GA + 0x15e0007e: 310, // fr-GF + 0x15e00084: 311, // fr-GN + 0x15e00085: 312, // fr-GP + 0x15e00086: 313, // fr-GQ + 0x15e00091: 314, // fr-HT + 0x15e000a8: 315, // fr-KM + 0x15e000b7: 316, // fr-LU + 0x15e000ba: 317, // fr-MA + 0x15e000bb: 318, // fr-MC + 0x15e000be: 319, // fr-MF + 0x15e000bf: 320, // fr-MG + 0x15e000c3: 321, // fr-ML + 0x15e000c8: 322, // fr-MQ + 0x15e000c9: 323, // fr-MR + 0x15e000cc: 324, // fr-MU + 0x15e000d3: 325, // fr-NC + 0x15e000d4: 326, // fr-NE + 0x15e000e5: 327, // fr-PF + 0x15e000ea: 328, // fr-PM + 0x15e00102: 329, // fr-RE + 0x15e00107: 330, // fr-RW + 0x15e0010a: 331, // fr-SC + 0x15e00114: 332, // fr-SN + 0x15e0011c: 333, // fr-SY + 0x15e00120: 334, // fr-TD + 0x15e00122: 335, // fr-TG + 0x15e00128: 336, // fr-TN + 0x15e0013f: 337, // fr-VU + 0x15e00140: 338, // fr-WF + 0x15e0015f: 339, // fr-YT + 0x16900000: 340, // fur + 0x1690009e: 341, // fur-IT + 0x16d00000: 342, // fy + 0x16d000d9: 343, // fy-NL + 0x16e00000: 344, // ga + 0x16e00096: 345, // ga-IE + 0x17e00000: 346, // gd + 0x17e0007b: 347, // gd-GB + 0x19000000: 348, // gl + 0x1900006e: 349, // gl-ES + 0x1a300000: 350, // gsw + 0x1a30004e: 351, // gsw-CH + 0x1a300078: 352, // gsw-FR + 0x1a3000b2: 353, // gsw-LI + 0x1a400000: 354, // gu + 0x1a400099: 355, // gu-IN + 0x1a900000: 356, // guw + 0x1ab00000: 357, // guz + 0x1ab000a4: 358, // guz-KE + 0x1ac00000: 359, // gv + 0x1ac00098: 360, // gv-IM + 0x1b400000: 361, // ha + 0x1b400080: 362, // ha-GH + 0x1b4000d4: 363, // ha-NE + 0x1b4000d6: 364, // ha-NG + 0x1b800000: 365, // haw + 0x1b800135: 366, // haw-US + 0x1bc00000: 367, // he + 0x1bc00097: 368, // he-IL + 0x1be00000: 369, // hi + 0x1be00099: 370, // hi-IN + 0x1d100000: 371, // hr + 0x1d100033: 372, // hr-BA + 0x1d100090: 373, // hr-HR + 0x1d200000: 374, // hsb + 0x1d200060: 375, // hsb-DE + 0x1d500000: 376, // hu + 0x1d500092: 377, // hu-HU + 0x1d700000: 378, // hy + 0x1d700028: 379, // hy-AM + 0x1e100000: 380, // id + 0x1e100095: 381, // id-ID + 0x1e700000: 382, // ig + 0x1e7000d6: 383, // ig-NG + 0x1ea00000: 384, // ii + 0x1ea00053: 385, // ii-CN + 0x1f500000: 386, // io + 0x1f800000: 387, // is + 0x1f80009d: 388, // is-IS + 0x1f900000: 389, // it + 0x1f90004e: 390, // it-CH + 0x1f90009e: 391, // it-IT + 0x1f900113: 392, // it-SM + 0x1f900138: 393, // it-VA + 0x1fa00000: 394, // iu + 0x20000000: 395, // ja + 0x200000a2: 396, // ja-JP + 0x20300000: 397, // jbo + 0x20700000: 398, // jgo + 0x20700052: 399, // jgo-CM + 0x20a00000: 400, // jmc + 0x20a0012f: 401, // jmc-TZ + 0x20e00000: 402, // jv + 0x21000000: 403, // ka + 0x2100007d: 404, // ka-GE + 0x21200000: 405, // kab + 
0x21200067: 406, // kab-DZ + 0x21600000: 407, // kaj + 0x21700000: 408, // kam + 0x217000a4: 409, // kam-KE + 0x21f00000: 410, // kcg + 0x22300000: 411, // kde + 0x2230012f: 412, // kde-TZ + 0x22700000: 413, // kea + 0x2270005a: 414, // kea-CV + 0x23400000: 415, // khq + 0x234000c3: 416, // khq-ML + 0x23900000: 417, // ki + 0x239000a4: 418, // ki-KE + 0x24200000: 419, // kk + 0x242000ae: 420, // kk-KZ + 0x24400000: 421, // kkj + 0x24400052: 422, // kkj-CM + 0x24500000: 423, // kl + 0x24500082: 424, // kl-GL + 0x24600000: 425, // kln + 0x246000a4: 426, // kln-KE + 0x24a00000: 427, // km + 0x24a000a6: 428, // km-KH + 0x25100000: 429, // kn + 0x25100099: 430, // kn-IN + 0x25400000: 431, // ko + 0x254000aa: 432, // ko-KP + 0x254000ab: 433, // ko-KR + 0x25600000: 434, // kok + 0x25600099: 435, // kok-IN + 0x26a00000: 436, // ks + 0x26a00099: 437, // ks-IN + 0x26b00000: 438, // ksb + 0x26b0012f: 439, // ksb-TZ + 0x26d00000: 440, // ksf + 0x26d00052: 441, // ksf-CM + 0x26e00000: 442, // ksh + 0x26e00060: 443, // ksh-DE + 0x27400000: 444, // ku + 0x28100000: 445, // kw + 0x2810007b: 446, // kw-GB + 0x28a00000: 447, // ky + 0x28a000a5: 448, // ky-KG + 0x29100000: 449, // lag + 0x2910012f: 450, // lag-TZ + 0x29500000: 451, // lb + 0x295000b7: 452, // lb-LU + 0x2a300000: 453, // lg + 0x2a300131: 454, // lg-UG + 0x2af00000: 455, // lkt + 0x2af00135: 456, // lkt-US + 0x2b500000: 457, // ln + 0x2b50002a: 458, // ln-AO + 0x2b50004b: 459, // ln-CD + 0x2b50004c: 460, // ln-CF + 0x2b50004d: 461, // ln-CG + 0x2b800000: 462, // lo + 0x2b8000af: 463, // lo-LA + 0x2bf00000: 464, // lrc + 0x2bf0009b: 465, // lrc-IQ + 0x2bf0009c: 466, // lrc-IR + 0x2c000000: 467, // lt + 0x2c0000b6: 468, // lt-LT + 0x2c200000: 469, // lu + 0x2c20004b: 470, // lu-CD + 0x2c400000: 471, // luo + 0x2c4000a4: 472, // luo-KE + 0x2c500000: 473, // luy + 0x2c5000a4: 474, // luy-KE + 0x2c700000: 475, // lv + 0x2c7000b8: 476, // lv-LV + 0x2d100000: 477, // mas + 0x2d1000a4: 478, // mas-KE + 0x2d10012f: 479, // mas-TZ + 0x2e900000: 480, // mer + 0x2e9000a4: 481, // mer-KE + 0x2ed00000: 482, // mfe + 0x2ed000cc: 483, // mfe-MU + 0x2f100000: 484, // mg + 0x2f1000bf: 485, // mg-MG + 0x2f200000: 486, // mgh + 0x2f2000d1: 487, // mgh-MZ + 0x2f400000: 488, // mgo + 0x2f400052: 489, // mgo-CM + 0x2ff00000: 490, // mk + 0x2ff000c2: 491, // mk-MK + 0x30400000: 492, // ml + 0x30400099: 493, // ml-IN + 0x30b00000: 494, // mn + 0x30b000c5: 495, // mn-MN + 0x31b00000: 496, // mr + 0x31b00099: 497, // mr-IN + 0x31f00000: 498, // ms + 0x31f0003e: 499, // ms-BN + 0x31f000d0: 500, // ms-MY + 0x31f0010d: 501, // ms-SG + 0x32000000: 502, // mt + 0x320000cb: 503, // mt-MT + 0x32500000: 504, // mua + 0x32500052: 505, // mua-CM + 0x33100000: 506, // my + 0x331000c4: 507, // my-MM + 0x33a00000: 508, // mzn + 0x33a0009c: 509, // mzn-IR + 0x34100000: 510, // nah + 0x34500000: 511, // naq + 0x345000d2: 512, // naq-NA + 0x34700000: 513, // nb + 0x347000da: 514, // nb-NO + 0x34700110: 515, // nb-SJ + 0x34e00000: 516, // nd + 0x34e00164: 517, // nd-ZW + 0x35000000: 518, // nds + 0x35000060: 519, // nds-DE + 0x350000d9: 520, // nds-NL + 0x35100000: 521, // ne + 0x35100099: 522, // ne-IN + 0x351000db: 523, // ne-NP + 0x36700000: 524, // nl + 0x36700030: 525, // nl-AW + 0x36700036: 526, // nl-BE + 0x36700040: 527, // nl-BQ + 0x3670005b: 528, // nl-CW + 0x367000d9: 529, // nl-NL + 0x36700116: 530, // nl-SR + 0x3670011b: 531, // nl-SX + 0x36800000: 532, // nmg + 0x36800052: 533, // nmg-CM + 0x36a00000: 534, // nn + 0x36a000da: 535, // nn-NO + 0x36c00000: 536, // nnh + 
0x36c00052: 537, // nnh-CM + 0x36f00000: 538, // no + 0x37500000: 539, // nqo + 0x37600000: 540, // nr + 0x37a00000: 541, // nso + 0x38000000: 542, // nus + 0x38000117: 543, // nus-SS + 0x38700000: 544, // ny + 0x38900000: 545, // nyn + 0x38900131: 546, // nyn-UG + 0x39000000: 547, // om + 0x3900006f: 548, // om-ET + 0x390000a4: 549, // om-KE + 0x39500000: 550, // or + 0x39500099: 551, // or-IN + 0x39800000: 552, // os + 0x3980007d: 553, // os-GE + 0x39800106: 554, // os-RU + 0x39d00000: 555, // pa + 0x39d05000: 556, // pa-Arab + 0x39d050e8: 557, // pa-Arab-PK + 0x39d33000: 558, // pa-Guru + 0x39d33099: 559, // pa-Guru-IN + 0x3a100000: 560, // pap + 0x3b300000: 561, // pl + 0x3b3000e9: 562, // pl-PL + 0x3bd00000: 563, // prg + 0x3bd00001: 564, // prg-001 + 0x3be00000: 565, // ps + 0x3be00024: 566, // ps-AF + 0x3c000000: 567, // pt + 0x3c00002a: 568, // pt-AO + 0x3c000041: 569, // pt-BR + 0x3c00004e: 570, // pt-CH + 0x3c00005a: 571, // pt-CV + 0x3c000086: 572, // pt-GQ + 0x3c00008b: 573, // pt-GW + 0x3c0000b7: 574, // pt-LU + 0x3c0000c6: 575, // pt-MO + 0x3c0000d1: 576, // pt-MZ + 0x3c0000ee: 577, // pt-PT + 0x3c000118: 578, // pt-ST + 0x3c000126: 579, // pt-TL + 0x3c400000: 580, // qu + 0x3c40003f: 581, // qu-BO + 0x3c400069: 582, // qu-EC + 0x3c4000e4: 583, // qu-PE + 0x3d400000: 584, // rm + 0x3d40004e: 585, // rm-CH + 0x3d900000: 586, // rn + 0x3d90003a: 587, // rn-BI + 0x3dc00000: 588, // ro + 0x3dc000bc: 589, // ro-MD + 0x3dc00104: 590, // ro-RO + 0x3de00000: 591, // rof + 0x3de0012f: 592, // rof-TZ + 0x3e200000: 593, // ru + 0x3e200047: 594, // ru-BY + 0x3e2000a5: 595, // ru-KG + 0x3e2000ae: 596, // ru-KZ + 0x3e2000bc: 597, // ru-MD + 0x3e200106: 598, // ru-RU + 0x3e200130: 599, // ru-UA + 0x3e500000: 600, // rw + 0x3e500107: 601, // rw-RW + 0x3e600000: 602, // rwk + 0x3e60012f: 603, // rwk-TZ + 0x3eb00000: 604, // sah + 0x3eb00106: 605, // sah-RU + 0x3ec00000: 606, // saq + 0x3ec000a4: 607, // saq-KE + 0x3f300000: 608, // sbp + 0x3f30012f: 609, // sbp-TZ + 0x3fa00000: 610, // sd + 0x3fa000e8: 611, // sd-PK + 0x3fc00000: 612, // sdh + 0x3fd00000: 613, // se + 0x3fd00072: 614, // se-FI + 0x3fd000da: 615, // se-NO + 0x3fd0010c: 616, // se-SE + 0x3ff00000: 617, // seh + 0x3ff000d1: 618, // seh-MZ + 0x40100000: 619, // ses + 0x401000c3: 620, // ses-ML + 0x40200000: 621, // sg + 0x4020004c: 622, // sg-CF + 0x40800000: 623, // shi + 0x40857000: 624, // shi-Latn + 0x408570ba: 625, // shi-Latn-MA + 0x408dc000: 626, // shi-Tfng + 0x408dc0ba: 627, // shi-Tfng-MA + 0x40c00000: 628, // si + 0x40c000b3: 629, // si-LK + 0x41200000: 630, // sk + 0x41200111: 631, // sk-SK + 0x41600000: 632, // sl + 0x4160010f: 633, // sl-SI + 0x41c00000: 634, // sma + 0x41d00000: 635, // smi + 0x41e00000: 636, // smj + 0x41f00000: 637, // smn + 0x41f00072: 638, // smn-FI + 0x42200000: 639, // sms + 0x42300000: 640, // sn + 0x42300164: 641, // sn-ZW + 0x42900000: 642, // so + 0x42900062: 643, // so-DJ + 0x4290006f: 644, // so-ET + 0x429000a4: 645, // so-KE + 0x42900115: 646, // so-SO + 0x43100000: 647, // sq + 0x43100027: 648, // sq-AL + 0x431000c2: 649, // sq-MK + 0x4310014d: 650, // sq-XK + 0x43200000: 651, // sr + 0x4321f000: 652, // sr-Cyrl + 0x4321f033: 653, // sr-Cyrl-BA + 0x4321f0bd: 654, // sr-Cyrl-ME + 0x4321f105: 655, // sr-Cyrl-RS + 0x4321f14d: 656, // sr-Cyrl-XK + 0x43257000: 657, // sr-Latn + 0x43257033: 658, // sr-Latn-BA + 0x432570bd: 659, // sr-Latn-ME + 0x43257105: 660, // sr-Latn-RS + 0x4325714d: 661, // sr-Latn-XK + 0x43700000: 662, // ss + 0x43a00000: 663, // ssy + 0x43b00000: 664, // st + 
0x44400000: 665, // sv + 0x44400031: 666, // sv-AX + 0x44400072: 667, // sv-FI + 0x4440010c: 668, // sv-SE + 0x44500000: 669, // sw + 0x4450004b: 670, // sw-CD + 0x445000a4: 671, // sw-KE + 0x4450012f: 672, // sw-TZ + 0x44500131: 673, // sw-UG + 0x44e00000: 674, // syr + 0x45000000: 675, // ta + 0x45000099: 676, // ta-IN + 0x450000b3: 677, // ta-LK + 0x450000d0: 678, // ta-MY + 0x4500010d: 679, // ta-SG + 0x46100000: 680, // te + 0x46100099: 681, // te-IN + 0x46400000: 682, // teo + 0x464000a4: 683, // teo-KE + 0x46400131: 684, // teo-UG + 0x46700000: 685, // tg + 0x46700124: 686, // tg-TJ + 0x46b00000: 687, // th + 0x46b00123: 688, // th-TH + 0x46f00000: 689, // ti + 0x46f0006d: 690, // ti-ER + 0x46f0006f: 691, // ti-ET + 0x47100000: 692, // tig + 0x47600000: 693, // tk + 0x47600127: 694, // tk-TM + 0x48000000: 695, // tn + 0x48200000: 696, // to + 0x48200129: 697, // to-TO + 0x48a00000: 698, // tr + 0x48a0005d: 699, // tr-CY + 0x48a0012b: 700, // tr-TR + 0x48e00000: 701, // ts + 0x49400000: 702, // tt + 0x49400106: 703, // tt-RU + 0x4a400000: 704, // twq + 0x4a4000d4: 705, // twq-NE + 0x4a900000: 706, // tzm + 0x4a9000ba: 707, // tzm-MA + 0x4ac00000: 708, // ug + 0x4ac00053: 709, // ug-CN + 0x4ae00000: 710, // uk + 0x4ae00130: 711, // uk-UA + 0x4b400000: 712, // ur + 0x4b400099: 713, // ur-IN + 0x4b4000e8: 714, // ur-PK + 0x4bc00000: 715, // uz + 0x4bc05000: 716, // uz-Arab + 0x4bc05024: 717, // uz-Arab-AF + 0x4bc1f000: 718, // uz-Cyrl + 0x4bc1f137: 719, // uz-Cyrl-UZ + 0x4bc57000: 720, // uz-Latn + 0x4bc57137: 721, // uz-Latn-UZ + 0x4be00000: 722, // vai + 0x4be57000: 723, // vai-Latn + 0x4be570b4: 724, // vai-Latn-LR + 0x4bee3000: 725, // vai-Vaii + 0x4bee30b4: 726, // vai-Vaii-LR + 0x4c000000: 727, // ve + 0x4c300000: 728, // vi + 0x4c30013e: 729, // vi-VN + 0x4c900000: 730, // vo + 0x4c900001: 731, // vo-001 + 0x4cc00000: 732, // vun + 0x4cc0012f: 733, // vun-TZ + 0x4ce00000: 734, // wa + 0x4cf00000: 735, // wae + 0x4cf0004e: 736, // wae-CH + 0x4e500000: 737, // wo + 0x4e500114: 738, // wo-SN + 0x4f200000: 739, // xh + 0x4fb00000: 740, // xog + 0x4fb00131: 741, // xog-UG + 0x50900000: 742, // yav + 0x50900052: 743, // yav-CM + 0x51200000: 744, // yi + 0x51200001: 745, // yi-001 + 0x51800000: 746, // yo + 0x5180003b: 747, // yo-BJ + 0x518000d6: 748, // yo-NG + 0x51f00000: 749, // yue + 0x51f38000: 750, // yue-Hans + 0x51f38053: 751, // yue-Hans-CN + 0x51f39000: 752, // yue-Hant + 0x51f3908d: 753, // yue-Hant-HK + 0x52800000: 754, // zgh + 0x528000ba: 755, // zgh-MA + 0x52900000: 756, // zh + 0x52938000: 757, // zh-Hans + 0x52938053: 758, // zh-Hans-CN + 0x5293808d: 759, // zh-Hans-HK + 0x529380c6: 760, // zh-Hans-MO + 0x5293810d: 761, // zh-Hans-SG + 0x52939000: 762, // zh-Hant + 0x5293908d: 763, // zh-Hant-HK + 0x529390c6: 764, // zh-Hant-MO + 0x5293912e: 765, // zh-Hant-TW + 0x52f00000: 766, // zu + 0x52f00161: 767, // zu-ZA } -// Total table size 4550 bytes (4KiB); checksum: B6D49547 +// Total table size 4676 bytes (4KiB); checksum: 17BE3673 diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/language.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/language.go index 3c190414..b65e213f 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/language.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/language.go @@ -2,105 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:generate go run maketables.go gen_common.go -output tables.go +//go:generate go run gen.go gen_common.go -output tables.go //go:generate go run gen_index.go -// Package language implements BCP 47 language tags and related functionality. -// -// The Tag type, which is used to represent languages, is agnostic to the -// meaning of its subtags. Tags are not fully canonicalized to preserve -// information that may be valuable in certain contexts. As a consequence, two -// different tags may represent identical languages. -// -// Initializing language- or locale-specific components usually consists of -// two steps. The first step is to select a display language based on the -// preferred languages of the user and the languages supported by an application. -// The second step is to create the language-specific services based on -// this selection. Each is discussed in more details below. -// -// Matching preferred against supported languages -// -// An application may support various languages. This list is typically limited -// by the languages for which there exists translations of the user interface. -// Similarly, a user may provide a list of preferred languages which is limited -// by the languages understood by this user. -// An application should use a Matcher to find the best supported language based -// on the user's preferred list. -// Matchers are aware of the intricacies of equivalence between languages. -// The default Matcher implementation takes into account things such as -// deprecated subtags, legacy tags, and mutual intelligibility between scripts -// and languages. -// -// A Matcher for English, Australian English, Danish, and standard Mandarin can -// be defined as follows: -// -// var matcher = language.NewMatcher([]language.Tag{ -// language.English, // The first language is used as fallback. -// language.MustParse("en-AU"), -// language.Danish, -// language.Chinese, -// }) -// -// The following code selects the best match for someone speaking Spanish and -// Norwegian: -// -// preferred := []language.Tag{ language.Spanish, language.Norwegian } -// tag, _, _ := matcher.Match(preferred...) -// -// In this case, the best match is Danish, as Danish is sufficiently a match to -// Norwegian to not have to fall back to the default. -// See ParseAcceptLanguage on how to handle the Accept-Language HTTP header. -// -// Selecting language-specific services -// -// One should always use the Tag returned by the Matcher to create an instance -// of any of the language-specific services provided by the text repository. -// This prevents the mixing of languages, such as having a different language for -// messages and display names, as well as improper casing or sorting order for -// the selected language. -// Using the returned Tag also allows user-defined settings, such as collation -// order or numbering system to be transparently passed as options. -// -// If you have language-specific data in your application, however, it will in -// most cases suffice to use the index returned by the matcher to identify -// the user language. -// The following loop provides an alternative in case this is not sufficient: -// -// supported := map[language.Tag]data{ -// language.English: enData, -// language.MustParse("en-AU"): enAUData, -// language.Danish: daData, -// language.Chinese: zhData, -// } -// tag, _, _ := matcher.Match(preferred...) 
-// for ; tag != language.Und; tag = tag.Parent() { -// if v, ok := supported[tag]; ok { -// return v -// } -// } -// return enData // should not reach here -// -// Repeatedly taking the Parent of the tag returned by Match will eventually -// match one of the tags used to initialize the Matcher. -// -// Canonicalization -// -// By default, only legacy and deprecated tags are converted into their -// canonical equivalent. All other information is preserved. This approach makes -// the confidence scores more accurate and allows matchers to distinguish -// between variants that are otherwise lost. -// -// As a consequence, two tags that should be treated as identical according to -// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The -// Matchers will handle such distinctions, though, and are aware of the -// equivalence relations. The CanonType type can be used to alter the -// canonicalization form. -// -// References -// -// BCP 47 - Tags for Identifying Languages -// http://tools.ietf.org/html/bcp47 -package language // import "golang.org/x/text/language" +package language // TODO: Remove above NOTE after: // - verifying that tables are dropped correctly (most notably matcher tables). @@ -129,8 +34,15 @@ const ( // specific language or locale. All language tag values are guaranteed to be // well-formed. type Tag struct { - lang langID - region regionID + lang langID + region regionID + // TODO: we will soon run out of positions for script. Idea: instead of + // storing lang, region, and script codes, store only the compact index and + // have a lookup table from this code to its expansion. This greatly speeds + // up table lookup, speed up common variant cases. + // This will also immediately free up 3 extra bytes. Also, the pVariant + // field can now be moved to the lookup table, as the compact index uniquely + // determines the offset of a possible variant. script scriptID pVariant byte // offset in str, includes preceding '-' pExt uint16 // offset of first extension, includes preceding '-' @@ -387,6 +299,26 @@ func (t Tag) String() string { return string(buf[:t.genCoreBytes(buf[:])]) } +// MarshalText implements encoding.TextMarshaler. +func (t Tag) MarshalText() (text []byte, err error) { + if t.str != "" { + text = append(text, t.str...) + } else if t.script == 0 && t.region == 0 { + text = append(text, t.lang.String()...) + } else { + buf := [maxCoreSize]byte{} + text = buf[:t.genCoreBytes(buf[:])] + } + return text, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (t *Tag) UnmarshalText(text []byte) error { + tag, err := Raw.Parse(string(text)) + *t = tag + return err +} + // Base returns the base language of the language tag. If the base language is // unspecified, an attempt will be made to infer it from the context. // It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. @@ -593,7 +525,7 @@ func (t Tag) Extension(x byte) (ext Extension, ok bool) { return Extension{ext}, true } } - return Extension{string(x)}, false + return Extension{}, false } // Extensions returns all extensions of t. diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/maketables.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/maketables.go deleted file mode 100644 index 2cc995b3..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/language/maketables.go +++ /dev/null @@ -1,1635 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. - -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/tag" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -var comment = []string{ - ` -lang holds an alphabetically sorted list of ISO-639 language identifiers. -All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. -For 2-byte language identifiers, the two successive bytes have the following meaning: - - if the first letter of the 2- and 3-letter ISO codes are the same: - the second and third letter of the 3-letter ISO code. - - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. -For 3-byte language identifiers the 4th byte is 0.`, - ` -langNoIndex is a bit vector of all 3-letter language codes that are not used as an index -in lookup tables. The language ids for these language codes are derived directly -from the letters and are not consecutive.`, - ` -altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives -to 2-letter language codes that cannot be derived using the method described above. -Each 3-letter code is followed by its 1-byte langID.`, - ` -altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, - ` -langAliasMap maps langIDs to their suggested replacements.`, - ` -script is an alphabetically sorted list of ISO 15924 codes. The index -of the script in the string, divided by 4, is the internal scriptID.`, - ` -isoRegionOffset needs to be added to the index of regionISO to obtain the regionID -for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for -the UN.M49 codes used for groups.)`, - ` -regionISO holds a list of alphabetically sorted 2-letter ISO region codes. -Each 2-letter codes is followed by two bytes with the following meaning: - - [A-Z}{2}: the first letter of the 2-letter code plus these two - letters form the 3-letter ISO code. - - 0, n: index into altRegionISO3.`, - ` -regionTypes defines the status of a region for various standards.`, - ` -m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are -codes indicating collections of regions.`, - ` -m49Index gives indexes into fromM49 based on the three most significant bits -of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in - fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] -for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. -The region code is stored in the 9 lsb of the indexed value.`, - ` -fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, - ` -altRegionISO3 holds a list of 3-letter region codes that cannot be -mapped to 2-letter codes using the default algorithm. This is a short list.`, - ` -altRegionIDs holds a list of regionIDs the positions of which match those -of the 3-letter ISO codes in altRegionISO3.`, - ` -variantNumSpecialized is the number of specialized variants in variants.`, - ` -suppressScript is an index from langID to the dominant script for that language, -if it exists. 
If a script is given, it should be suppressed from the language tag.`, - ` -likelyLang is a lookup table, indexed by langID, for the most likely -scripts and regions given incomplete information. If more entries exist for a -given language, region and script are the index and size respectively -of the list in likelyLangList.`, - ` -likelyLangList holds lists info associated with likelyLang.`, - ` -likelyRegion is a lookup table, indexed by regionID, for the most likely -languages and scripts given incomplete information. If more entries exist -for a given regionID, lang and script are the index and size respectively -of the list in likelyRegionList. -TODO: exclude containers and user-definable regions from the list.`, - ` -likelyRegionList holds lists info associated with likelyRegion.`, - ` -likelyScript is a lookup table, indexed by scriptID, for the most likely -languages and regions given a script.`, - ` -matchLang holds pairs of langIDs of base languages that are typically -mutually intelligible. Each pair is associated with a confidence and -whether the intelligibility goes one or both ways.`, - ` -matchScript holds pairs of scriptIDs where readers of one script -can typically also read the other. Each is associated with a confidence.`, - ` -nRegionGroups is the number of region groups.`, - ` -regionInclusion maps region identifiers to sets of regions in regionInclusionBits, -where each set holds all groupings that are directly connected in a region -containment graph.`, - ` -regionInclusionBits is an array of bit vectors where every vector represents -a set of region groupings. These sets are used to compute the distance -between two regions for the purpose of language matching.`, - ` -regionInclusionNext marks, for each entry in regionInclusionBits, the set of -all groups that are reachable from the groups set in the respective entry.`, -} - -// TODO: consider changing some of these structures to tries. This can reduce -// memory, but may increase the need for memory allocations. This could be -// mitigated if we can piggyback on language tags for common cases. - -func failOnError(e error) { - if e != nil { - log.Panic(e) - } -} - -type setType int - -const ( - Indexed setType = 1 + iota // all elements must be of same size - Linear -) - -type stringSet struct { - s []string - sorted, frozen bool - - // We often need to update values after the creation of an index is completed. - // We include a convenience map for keeping track of this. - update map[string]string - typ setType // used for checking. -} - -func (ss *stringSet) clone() stringSet { - c := *ss - c.s = append([]string(nil), c.s...) - return c -} - -func (ss *stringSet) setType(t setType) { - if ss.typ != t && ss.typ != 0 { - log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) - } -} - -// parse parses a whitespace-separated string and initializes ss with its -// components. 
-func (ss *stringSet) parse(s string) { - scan := bufio.NewScanner(strings.NewReader(s)) - scan.Split(bufio.ScanWords) - for scan.Scan() { - ss.add(scan.Text()) - } -} - -func (ss *stringSet) assertChangeable() { - if ss.frozen { - log.Panic("attempt to modify a frozen stringSet") - } -} - -func (ss *stringSet) add(s string) { - ss.assertChangeable() - ss.s = append(ss.s, s) - ss.sorted = ss.frozen -} - -func (ss *stringSet) freeze() { - ss.compact() - ss.frozen = true -} - -func (ss *stringSet) compact() { - if ss.sorted { - return - } - a := ss.s - sort.Strings(a) - k := 0 - for i := 1; i < len(a); i++ { - if a[k] != a[i] { - a[k+1] = a[i] - k++ - } - } - ss.s = a[:k+1] - ss.sorted = ss.frozen -} - -type funcSorter struct { - fn func(a, b string) bool - sort.StringSlice -} - -func (s funcSorter) Less(i, j int) bool { - return s.fn(s.StringSlice[i], s.StringSlice[j]) -} - -func (ss *stringSet) sortFunc(f func(a, b string) bool) { - ss.compact() - sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) -} - -func (ss *stringSet) remove(s string) { - ss.assertChangeable() - if i, ok := ss.find(s); ok { - copy(ss.s[i:], ss.s[i+1:]) - ss.s = ss.s[:len(ss.s)-1] - } -} - -func (ss *stringSet) replace(ol, nu string) { - ss.s[ss.index(ol)] = nu - ss.sorted = ss.frozen -} - -func (ss *stringSet) index(s string) int { - ss.setType(Indexed) - i, ok := ss.find(s) - if !ok { - if i < len(ss.s) { - log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) - } - log.Panicf("find: item %q is not in list", s) - - } - return i -} - -func (ss *stringSet) find(s string) (int, bool) { - ss.compact() - i := sort.SearchStrings(ss.s, s) - return i, i != len(ss.s) && ss.s[i] == s -} - -func (ss *stringSet) slice() []string { - ss.compact() - return ss.s -} - -func (ss *stringSet) updateLater(v, key string) { - if ss.update == nil { - ss.update = map[string]string{} - } - ss.update[v] = key -} - -// join joins the string and ensures that all entries are of the same length. -func (ss *stringSet) join() string { - ss.setType(Indexed) - n := len(ss.s[0]) - for _, s := range ss.s { - if len(s) != n { - log.Panicf("join: not all entries are of the same length: %q", s) - } - } - ss.s = append(ss.s, strings.Repeat("\xff", n)) - return strings.Join(ss.s, "") -} - -// ianaEntry holds information for an entry in the IANA Language Subtag Repository. -// All types use the same entry. -// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various -// fields. -type ianaEntry struct { - typ string - description []string - scope string - added string - preferred string - deprecated string - suppressScript string - macro string - prefix []string -} - -type builder struct { - w *gen.CodeWriter - hw io.Writer // MultiWriter for w and w.Hash - data *cldr.CLDR - supp *cldr.SupplementalData - - // indices - locale stringSet // common locales - lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data - langNoIndex stringSet // 3-letter ISO codes with no associated data - script stringSet // 4-letter ISO codes - region stringSet // 2-letter ISO or 3-digit UN M49 codes - variant stringSet // 4-8-alphanumeric variant code. - - // Region codes that are groups with their corresponding group IDs. 
- groups map[int]index - - // langInfo - registry map[string]*ianaEntry -} - -type index uint - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - failOnError(err) - b := builder{ - w: w, - hw: io.MultiWriter(w, w.Hash), - data: data, - supp: data.Supplemental(), - } - b.parseRegistry() - return &b -} - -func (b *builder) parseRegistry() { - r := gen.OpenIANAFile("assignments/language-subtag-registry") - defer r.Close() - b.registry = make(map[string]*ianaEntry) - - scan := bufio.NewScanner(r) - scan.Split(bufio.ScanWords) - var record *ianaEntry - for more := scan.Scan(); more; { - key := scan.Text() - more = scan.Scan() - value := scan.Text() - switch key { - case "Type:": - record = &ianaEntry{typ: value} - case "Subtag:", "Tag:": - if s := strings.SplitN(value, "..", 2); len(s) > 1 { - for a := s[0]; a <= s[1]; a = inc(a) { - b.addToRegistry(a, record) - } - } else { - b.addToRegistry(value, record) - } - case "Suppress-Script:": - record.suppressScript = value - case "Added:": - record.added = value - case "Deprecated:": - record.deprecated = value - case "Macrolanguage:": - record.macro = value - case "Preferred-Value:": - record.preferred = value - case "Prefix:": - record.prefix = append(record.prefix, value) - case "Scope:": - record.scope = value - case "Description:": - buf := []byte(value) - for more = scan.Scan(); more; more = scan.Scan() { - b := scan.Bytes() - if b[0] == '%' || b[len(b)-1] == ':' { - break - } - buf = append(buf, ' ') - buf = append(buf, b...) - } - record.description = append(record.description, string(buf)) - continue - default: - continue - } - more = scan.Scan() - } - if scan.Err() != nil { - log.Panic(scan.Err()) - } -} - -func (b *builder) addToRegistry(key string, entry *ianaEntry) { - if info, ok := b.registry[key]; ok { - if info.typ != "language" || entry.typ != "extlang" { - log.Fatalf("parseRegistry: tag %q already exists", key) - } - } else { - b.registry[key] = entry - } -} - -var commentIndex = make(map[string]string) - -func init() { - for _, s := range comment { - key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) - commentIndex[key] = s - } -} - -func (b *builder) comment(name string) { - if s := commentIndex[name]; len(s) > 0 { - b.w.WriteComment(s) - } else { - fmt.Fprintln(b.w) - } -} - -func (b *builder) pf(f string, x ...interface{}) { - fmt.Fprintf(b.hw, f, x...) - fmt.Fprint(b.hw, "\n") -} - -func (b *builder) p(x ...interface{}) { - fmt.Fprintln(b.hw, x...) -} - -func (b *builder) addSize(s int) { - b.w.Size += s - b.pf("// Size: %d bytes", s) -} - -func (b *builder) writeConst(name string, x interface{}) { - b.comment(name) - b.w.WriteConst(name, x) -} - -// writeConsts computes f(v) for all v in values and writes the results -// as constants named _v to a single constant block. -func (b *builder) writeConsts(f func(string) int, values ...string) { - b.pf("const (") - for _, v := range values { - b.pf("\t_%s = %v", v, f(v)) - } - b.pf(")") -} - -// writeType writes the type of the given value, which must be a struct. 
-func (b *builder) writeType(value interface{}) { - b.comment(reflect.TypeOf(value).Name()) - b.w.WriteType(value) -} - -func (b *builder) writeSlice(name string, ss interface{}) { - b.writeSliceAddSize(name, 0, ss) -} - -func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { - b.comment(name) - b.w.Size += extraSize - v := reflect.ValueOf(ss) - t := v.Type().Elem() - b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) - - fmt.Fprintf(b.w, "var %s = ", name) - b.w.WriteArray(ss) - b.p() -} - -type fromTo struct { - from, to uint16 -} - -func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { - ss.sortFunc(func(a, b string) bool { - return index(a) < index(b) - }) - m := []fromTo{} - for _, s := range ss.s { - m = append(m, fromTo{index(s), index(ss.update[s])}) - } - b.writeSlice(name, m) -} - -const base = 'z' - 'a' + 1 - -func strToInt(s string) uint { - v := uint(0) - for i := 0; i < len(s); i++ { - v *= base - v += uint(s[i] - 'a') - } - return v -} - -// converts the given integer to the original ASCII string passed to strToInt. -// len(s) must match the number of characters obtained. -func intToStr(v uint, s []byte) { - for i := len(s) - 1; i >= 0; i-- { - s[i] = byte(v%base) + 'a' - v /= base - } -} - -func (b *builder) writeBitVector(name string, ss []string) { - vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) - for _, s := range ss { - v := strToInt(s) - vec[v/8] |= 1 << (v % 8) - } - b.writeSlice(name, vec) -} - -// TODO: convert this type into a list or two-stage trie. -func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size())) - for _, k := range m { - sz += len(k) - } - b.addSize(sz) - keys := []string{} - b.pf(`var %s = map[string]uint16{`, name) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - b.pf("\t%q: %v,", k, f(m[k])) - } - b.p("}") -} - -func (b *builder) writeMap(name string, m interface{}) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) - b.addSize(sz) - f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { - return strings.IndexRune("{}, ", r) != -1 - }) - sort.Strings(f[1:]) - b.pf(`var %s = %s{`, name, f[0]) - for _, kv := range f[1:] { - b.pf("\t%s,", kv) - } - b.p("}") -} - -func (b *builder) langIndex(s string) uint16 { - if s == "und" { - return 0 - } - if i, ok := b.lang.find(s); ok { - return uint16(i) - } - return uint16(strToInt(s)) + uint16(len(b.lang.s)) -} - -// inc advances the string to its lexicographical successor. -func inc(s string) string { - const maxTagLength = 4 - var buf [maxTagLength]byte - intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) - for i := 0; i < len(s); i++ { - if s[i] <= 'Z' { - buf[i] -= 'a' - 'A' - } - } - return string(buf[:len(s)]) -} - -func (b *builder) parseIndices() { - meta := b.supp.Metadata - - for k, v := range b.registry { - var ss *stringSet - switch v.typ { - case "language": - if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { - b.lang.add(k) - continue - } else { - ss = &b.langNoIndex - } - case "region": - ss = &b.region - case "script": - ss = &b.script - case "variant": - ss = &b.variant - default: - continue - } - ss.add(k) - } - // Include any language for which there is data. 
- for _, lang := range b.data.Locales() { - if x := b.data.RawLDML(lang); false || - x.LocaleDisplayNames != nil || - x.Characters != nil || - x.Delimiters != nil || - x.Measurement != nil || - x.Dates != nil || - x.Numbers != nil || - x.Units != nil || - x.ListPatterns != nil || - x.Collations != nil || - x.Segmentations != nil || - x.Rbnf != nil || - x.Annotations != nil || - x.Metadata != nil { - - from := strings.Split(lang, "_") - if lang := from[0]; lang != "root" { - b.lang.add(lang) - } - } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range b.data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - if lang = strings.Split(lang, "_")[0]; lang != "root" { - b.lang.add(lang) - } - } - } - } - // Include languages in likely subtags. - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - b.lang.add(from[0]) - } - // Include ISO-639 alpha-3 bibliographic entries. - for _, a := range meta.Alias.LanguageAlias { - if a.Reason == "bibliographic" { - b.langNoIndex.add(a.Type) - } - } - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 { - b.region.add(reg.Type) - } - } - - for _, s := range b.lang.s { - if len(s) == 3 { - b.langNoIndex.remove(s) - } - } - b.writeConst("numLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) - b.writeConst("numScripts", len(b.script.slice())) - b.writeConst("numRegions", len(b.region.slice())) - - // Add dummy codes at the start of each list to represent "unspecified". - b.lang.add("---") - b.script.add("----") - b.region.add("---") - - // common locales - b.locale.parse(meta.DefaultContent.Locales) -} - -func (b *builder) computeRegionGroups() { - b.groups = make(map[int]index) - - // Create group indices. - for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. - b.groups[i] = index(len(b.groups)) - } - for _, g := range b.supp.TerritoryContainment.Group { - group := b.region.index(g.Type) - if _, ok := b.groups[group]; !ok { - b.groups[group] = index(len(b.groups)) - } - } - if len(b.groups) > 32 { - log.Fatalf("only 32 groups supported, found %d", len(b.groups)) - } - b.writeConst("nRegionGroups", len(b.groups)) -} - -var langConsts = []string{ - "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", - "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", - "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", - "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", - "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", - "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", - - // constants for grandfathered tags (if not already defined) - "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", - "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", -} - -// writeLanguage generates all tables needed for language canonicalization. -func (b *builder) writeLanguage() { - meta := b.supp.Metadata - - b.writeConst("nonCanonicalUnd", b.lang.index("und")) - b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) 
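For readers following langIndex above: codes not present in the indexed lang table are packed into an integer using the base-26 scheme of strToInt/intToStr. A tiny standalone restatement of that scheme (mirroring the deleted helpers, lowercase ASCII assumed; the numbers are worked out by hand, not taken from the tables):

package main

import "fmt"

const base = 'z' - 'a' + 1 // 26

// strToInt packs a lowercase ASCII code into a base-26 integer,
// as in the deleted generator.
func strToInt(s string) uint {
	v := uint(0)
	for i := 0; i < len(s); i++ {
		v = v*base + uint(s[i]-'a')
	}
	return v
}

// intToStr is the inverse for a code of known length n.
func intToStr(v uint, n int) string {
	b := make([]byte, n)
	for i := n - 1; i >= 0; i-- {
		b[i] = byte(v%base) + 'a'
		v /= base
	}
	return string(b)
}

func main() {
	fmt.Println(strToInt("aaa"))    // 0
	fmt.Println(strToInt("und"))    // 13861 = (20*26+13)*26+3
	fmt.Println(intToStr(13861, 3)) // und
}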
- b.writeConst("langPrivateStart", b.langIndex("qaa")) - b.writeConst("langPrivateEnd", b.langIndex("qtz")) - - // Get language codes that need to be mapped (overlong 3-letter codes, - // deprecated 2-letter codes, legacy and grandfathered tags.) - langAliasMap := stringSet{} - aliasTypeMap := map[string]langAliasType{} - - // altLangISO3 get the alternative ISO3 names that need to be mapped. - altLangISO3 := stringSet{} - // Add dummy start to avoid the use of index 0. - altLangISO3.add("---") - altLangISO3.updateLater("---", "aa") - - lang := b.lang.clone() - for _, a := range meta.Alias.LanguageAlias { - if a.Replacement == "" { - a.Replacement = "und" - } - // TODO: support mapping to tags - repl := strings.SplitN(a.Replacement, "_", 2)[0] - if a.Reason == "overlong" { - if len(a.Replacement) == 2 && len(a.Type) == 3 { - lang.updateLater(a.Replacement, a.Type) - } - } else if len(a.Type) <= 3 { - switch a.Reason { - case "macrolanguage": - aliasTypeMap[a.Type] = langMacro - case "deprecated": - // handled elsewhere - continue - case "bibliographic", "legacy": - if a.Type == "no" { - continue - } - aliasTypeMap[a.Type] = langLegacy - default: - log.Fatalf("new %s alias: %s", a.Reason, a.Type) - } - langAliasMap.add(a.Type) - langAliasMap.updateLater(a.Type, repl) - } - } - // Manually add the mapping of "nb" (Norwegian) to its macro language. - // This can be removed if CLDR adopts this change. - langAliasMap.add("nb") - langAliasMap.updateLater("nb", "no") - aliasTypeMap["nb"] = langMacro - - for k, v := range b.registry { - // Also add deprecated values for 3-letter ISO codes, which CLDR omits. - if v.typ == "language" && v.deprecated != "" && v.preferred != "" { - langAliasMap.add(k) - langAliasMap.updateLater(k, v.preferred) - aliasTypeMap[k] = langDeprecated - } - } - // Fix CLDR mappings. - lang.updateLater("tl", "tgl") - lang.updateLater("sh", "hbs") - lang.updateLater("mo", "mol") - lang.updateLater("no", "nor") - lang.updateLater("tw", "twi") - lang.updateLater("nb", "nob") - lang.updateLater("ak", "aka") - - // Ensure that each 2-letter code is matched with a 3-letter code. - for _, v := range lang.s[1:] { - s, ok := lang.update[v] - if !ok { - if s, ok = lang.update[langAliasMap.update[v]]; !ok { - continue - } - lang.update[v] = s - } - if v[0] != s[0] { - altLangISO3.add(s) - altLangISO3.updateLater(s, v) - } - } - - // Complete canonialized language tags. - lang.freeze() - for i, v := range lang.s { - // We can avoid these manual entries by using the IANI registry directly. - // Seems easier to update the list manually, as changes are rare. - // The panic in this loop will trigger if we miss an entry. - add := "" - if s, ok := lang.update[v]; ok { - if s[0] == v[0] { - add = s[1:] - } else { - add = string([]byte{0, byte(altLangISO3.index(s))}) - } - } else if len(v) == 3 { - add = "\x00" - } else { - log.Panicf("no data for long form of %q", v) - } - lang.s[i] += add - } - b.writeConst("lang", tag.Index(lang.join())) - - b.writeConst("langNoIndexOffset", len(b.lang.s)) - - // space of all valid 3-letter language identifiers. 
- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) - - altLangIndex := []uint16{} - for i, s := range altLangISO3.slice() { - altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) - if i > 0 { - idx := b.lang.index(altLangISO3.update[s]) - altLangIndex = append(altLangIndex, uint16(idx)) - } - } - b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) - b.writeSlice("altLangIndex", altLangIndex) - - b.writeSortedMap("langAliasMap", &langAliasMap, b.langIndex) - types := make([]langAliasType, len(langAliasMap.s)) - for i, s := range langAliasMap.s { - types[i] = aliasTypeMap[s] - } - b.writeSlice("langAliasTypes", types) -} - -var scriptConsts = []string{ - "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", - "Zzzz", -} - -func (b *builder) writeScript() { - b.writeConsts(b.script.index, scriptConsts...) - b.writeConst("script", tag.Index(b.script.join())) - - supp := make([]uint8, len(b.lang.slice())) - for i, v := range b.lang.slice()[1:] { - if sc := b.registry[v].suppressScript; sc != "" { - supp[i+1] = uint8(b.script.index(sc)) - } - } - b.writeSlice("suppressScript", supp) - - // There is only one deprecated script in CLDR. This value is hard-coded. - // We check here if the code must be updated. - for _, a := range b.supp.Metadata.Alias.ScriptAlias { - if a.Type != "Qaai" { - log.Panicf("unexpected deprecated stript %q", a.Type) - } - } -} - -func parseM49(s string) int16 { - if len(s) == 0 { - return 0 - } - v, err := strconv.ParseUint(s, 10, 10) - failOnError(err) - return int16(v) -} - -var regionConsts = []string{ - "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", - "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. -} - -func (b *builder) writeRegion() { - b.writeConsts(b.region.index, regionConsts...) - - isoOffset := b.region.index("AA") - m49map := make([]int16, len(b.region.slice())) - fromM49map := make(map[int16]int) - altRegionISO3 := "" - altRegionIDs := []uint16{} - - b.writeConst("isoRegionOffset", isoOffset) - - // 2-letter region lookup and mapping to numeric codes. - regionISO := b.region.clone() - regionISO.s = regionISO.s[isoOffset:] - regionISO.sorted = false - - regionTypes := make([]byte, len(b.region.s)) - - // Is the region valid BCP 47? - for s, e := range b.registry { - if len(s) == 2 && s == strings.ToUpper(s) { - i := b.region.index(s) - for _, d := range e.description { - if strings.Contains(d, "Private use") { - regionTypes[i] = iso3166UserAssgined - } - } - regionTypes[i] |= bcp47Region - } - } - - // Is the region a valid ccTLD? 
- r := gen.OpenIANAFile("domains/root/db") - defer r.Close() - - buf, err := ioutil.ReadAll(r) - failOnError(err) - re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) - for _, m := range re.FindAllSubmatch(buf, -1) { - i := b.region.index(strings.ToUpper(string(m[1]))) - regionTypes[i] |= ccTLD - } - - b.writeSlice("regionTypes", regionTypes) - - iso3Set := make(map[string]int) - update := func(iso2, iso3 string) { - i := regionISO.index(iso2) - if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { - regionISO.s[i] += iso3[1:] - iso3Set[iso3] = -1 - } else { - if ok && j >= 0 { - regionISO.s[i] += string([]byte{0, byte(j)}) - } else { - iso3Set[iso3] = len(altRegionISO3) - regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) - altRegionISO3 += iso3 - altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - i := regionISO.index(tc.Type) + isoOffset - if d := m49map[i]; d != 0 { - log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) - } - m49 := parseM49(tc.Numeric) - m49map[i] = m49 - if r := fromM49map[m49]; r == 0 { - fromM49map[m49] = i - } else if r != i { - dep := b.registry[regionISO.s[r-isoOffset]].deprecated - if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { - fromM49map[m49] = i - } - } - } - for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { - if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { - from := parseM49(ta.Type) - if r := fromM49map[from]; r == 0 { - fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - if len(tc.Alpha3) == 3 { - update(tc.Type, tc.Alpha3) - } - } - // This entries are not included in territoryCodes. Mostly 3-letter variants - // of deleted codes and an entry for QU. - for _, m := range []struct{ iso2, iso3 string }{ - {"CT", "CTE"}, - {"DY", "DHY"}, - {"HV", "HVO"}, - {"JT", "JTN"}, - {"MI", "MID"}, - {"NH", "NHB"}, - {"NQ", "ATN"}, - {"PC", "PCI"}, - {"PU", "PUS"}, - {"PZ", "PCZ"}, - {"RH", "RHO"}, - {"VD", "VDR"}, - {"WK", "WAK"}, - // These three-letter codes are used for others as well. - {"FQ", "ATF"}, - } { - update(m.iso2, m.iso3) - } - for i, s := range regionISO.s { - if len(s) != 4 { - regionISO.s[i] = s + " " - } - } - b.writeConst("regionISO", tag.Index(regionISO.join())) - b.writeConst("altRegionISO3", altRegionISO3) - b.writeSlice("altRegionIDs", altRegionIDs) - - // Create list of deprecated regions. - // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only - // Transitionally-reserved mapping not included. - regionOldMap := stringSet{} - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { - regionOldMap.add(reg.Type) - regionOldMap.updateLater(reg.Type, reg.Replacement) - i, _ := regionISO.find(reg.Type) - j, _ := regionISO.find(reg.Replacement) - if k := m49map[i+isoOffset]; k == 0 { - m49map[i+isoOffset] = m49map[j+isoOffset] - } - } - } - b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { - return uint16(b.region.index(s)) - }) - // 3-digit region lookup, groupings. 
- for i := 1; i < isoOffset; i++ { - m := parseM49(b.region.s[i]) - m49map[i] = m - fromM49map[m] = i - } - b.writeSlice("m49", m49map) - - const ( - searchBits = 7 - regionBits = 9 - ) - if len(m49map) >= 1< %d", len(m49map), 1<>searchBits] = int16(len(fromM49)) - } - b.writeSlice("m49Index", m49Index) - b.writeSlice("fromM49", fromM49) -} - -const ( - // TODO: put these lists in regionTypes as user data? Could be used for - // various optimizations and refinements and could be exposed in the API. - iso3166Except = "AC CP DG EA EU FX IC SU TA UK" - iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. - // DY and RH are actually not deleted, but indeterminately reserved. - iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" -) - -const ( - iso3166UserAssgined = 1 << iota - ccTLD - bcp47Region -) - -func find(list []string, s string) int { - for i, t := range list { - if t == s { - return i - } - } - return -1 -} - -// writeVariants generates per-variant information and creates a map from variant -// name to index value. We assign index values such that sorting multiple -// variants by index value will result in the correct order. -// There are two types of variants: specialized and general. Specialized variants -// are only applicable to certain language or language-script pairs. Generalized -// variants apply to any language. Generalized variants always sort after -// specialized variants. We will therefore always assign a higher index value -// to a generalized variant than any other variant. Generalized variants are -// sorted alphabetically among themselves. -// Specialized variants may also sort after other specialized variants. Such -// variants will be ordered after any of the variants they may follow. -// We assume that if a variant x is followed by a variant y, then for any prefix -// p of x, p-x is a prefix of y. This allows us to order tags based on the -// maximum of the length of any of its prefixes. -// TODO: it is possible to define a set of Prefix values on variants such that -// a total order cannot be defined to the point that this algorithm breaks. -// In other words, we cannot guarantee the same order of variants for the -// future using the same algorithm or for non-compliant combinations of -// variants. For this reason, consider using simple alphabetic sorting -// of variants and ignore Prefix restrictions altogether. -func (b *builder) writeVariant() { - generalized := stringSet{} - specialized := stringSet{} - specializedExtend := stringSet{} - // Collate the variants by type and check assumptions. - for _, v := range b.variant.slice() { - e := b.registry[v] - if len(e.prefix) == 0 { - generalized.add(v) - continue - } - c := strings.Split(e.prefix[0], "-") - hasScriptOrRegion := false - if len(c) > 1 { - _, hasScriptOrRegion = b.script.find(c[1]) - if !hasScriptOrRegion { - _, hasScriptOrRegion = b.region.find(c[1]) - - } - } - if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { - // Variant is preceded by a language. - specialized.add(v) - continue - } - // Variant is preceded by another variant. - specializedExtend.add(v) - prefix := c[0] + "-" - if hasScriptOrRegion { - prefix += c[1] - } - for _, p := range e.prefix { - // Verify that the prefix minus the last element is a prefix of the - // predecessor element. 
- i := strings.LastIndex(p, "-") - pred := b.registry[p[i+1:]] - if find(pred.prefix, p[:i]) < 0 { - log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) - } - // The sorting used below does not work in the general case. It works - // if we assume that variants that may be followed by others only have - // prefixes of the same length. Verify this. - count := strings.Count(p[:i], "-") - for _, q := range pred.prefix { - if c := strings.Count(q, "-"); c != count { - log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) - } - } - if !strings.HasPrefix(p, prefix) { - log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) - } - } - } - - // Sort extended variants. - a := specializedExtend.s - less := func(v, w string) bool { - // Sort by the maximum number of elements. - maxCount := func(s string) (max int) { - for _, p := range b.registry[s].prefix { - if c := strings.Count(p, "-"); c > max { - max = c - } - } - return - } - if cv, cw := maxCount(v), maxCount(w); cv != cw { - return cv < cw - } - // Sort by name as tie breaker. - return v < w - } - sort.Sort(funcSorter{less, sort.StringSlice(a)}) - specializedExtend.frozen = true - - // Create index from variant name to index. - variantIndex := make(map[string]uint8) - add := func(s []string) { - for _, v := range s { - variantIndex[v] = uint8(len(variantIndex)) - } - } - add(specialized.slice()) - add(specializedExtend.s) - numSpecialized := len(variantIndex) - add(generalized.slice()) - if n := len(variantIndex); n > 255 { - log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) - } - b.writeMap("variantIndex", variantIndex) - b.writeConst("variantNumSpecialized", numSpecialized) -} - -func (b *builder) writeLanguageInfo() { -} - -// writeLikelyData writes tables that are used both for finding parent relations and for -// language matching. Each entry contains additional bits to indicate the status of the -// data to know when it cannot be used for parent relations. -func (b *builder) writeLikelyData() { - const ( - isList = 1 << iota - scriptInFrom - regionInFrom - ) - type ( // generated types - likelyScriptRegion struct { - region uint16 - script uint8 - flags uint8 - } - likelyLangScript struct { - lang uint16 - script uint8 - flags uint8 - } - likelyLangRegion struct { - lang uint16 - region uint16 - } - // likelyTag is used for getting likely tags for group regions, where - // the likely region might be a region contained in the group. 
- likelyTag struct { - lang uint16 - region uint16 - script uint8 - } - ) - var ( // generated variables - likelyRegionGroup = make([]likelyTag, len(b.groups)) - likelyLang = make([]likelyScriptRegion, len(b.lang.s)) - likelyRegion = make([]likelyLangScript, len(b.region.s)) - likelyScript = make([]likelyLangRegion, len(b.script.s)) - likelyLangList = []likelyScriptRegion{} - likelyRegionList = []likelyLangScript{} - ) - type fromTo struct { - from, to []string - } - langToOther := map[int][]fromTo{} - regionToOther := map[int][]fromTo{} - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - to := strings.Split(m.To, "_") - if len(to) != 3 { - log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) - } - if len(from) > 3 { - log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) - } - if from[0] != to[0] && from[0] != "und" { - log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) - } - if len(from) == 3 { - if from[2] != to[2] { - log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) - } - if from[0] != "und" { - log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) - } - } - if len(from) == 1 || from[0] != "und" { - id := 0 - if from[0] != "und" { - id = b.lang.index(from[0]) - } - langToOther[id] = append(langToOther[id], fromTo{from, to}) - } else if len(from) == 2 && len(from[1]) == 4 { - sid := b.script.index(from[1]) - likelyScript[sid].lang = uint16(b.langIndex(to[0])) - likelyScript[sid].region = uint16(b.region.index(to[2])) - } else { - r := b.region.index(from[len(from)-1]) - if id, ok := b.groups[r]; ok { - if from[0] != "und" { - log.Fatalf("region changed unexpectedly: %s -> %s", from, to) - } - likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) - likelyRegionGroup[id].script = uint8(b.script.index(to[1])) - likelyRegionGroup[id].region = uint16(b.region.index(to[2])) - } else { - regionToOther[r] = append(regionToOther[r], fromTo{from, to}) - } - } - } - b.writeType(likelyLangRegion{}) - b.writeSlice("likelyScript", likelyScript) - - for id := range b.lang.s { - list := langToOther[id] - if len(list) == 1 { - likelyLang[id].region = uint16(b.region.index(list[0].to[2])) - likelyLang[id].script = uint8(b.script.index(list[0].to[1])) - } else if len(list) > 1 { - likelyLang[id].flags = isList - likelyLang[id].region = uint16(len(likelyLangList)) - likelyLang[id].script = uint8(len(list)) - for _, x := range list { - flags := uint8(0) - if len(x.from) > 1 { - if x.from[1] == x.to[2] { - flags = regionInFrom - } else { - flags = scriptInFrom - } - } - likelyLangList = append(likelyLangList, likelyScriptRegion{ - region: uint16(b.region.index(x.to[2])), - script: uint8(b.script.index(x.to[1])), - flags: flags, - }) - } - } - } - // TODO: merge suppressScript data with this table. 
- b.writeType(likelyScriptRegion{}) - b.writeSlice("likelyLang", likelyLang) - b.writeSlice("likelyLangList", likelyLangList) - - for id := range b.region.s { - list := regionToOther[id] - if len(list) == 1 { - likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) - likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) - if len(list[0].from) > 2 { - likelyRegion[id].flags = scriptInFrom - } - } else if len(list) > 1 { - likelyRegion[id].flags = isList - likelyRegion[id].lang = uint16(len(likelyRegionList)) - likelyRegion[id].script = uint8(len(list)) - for i, x := range list { - if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { - log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) - } - x := likelyLangScript{ - lang: uint16(b.langIndex(x.to[0])), - script: uint8(b.script.index(x.to[1])), - } - if len(list[0].from) > 2 { - x.flags = scriptInFrom - } - likelyRegionList = append(likelyRegionList, x) - } - } - } - b.writeType(likelyLangScript{}) - b.writeSlice("likelyRegion", likelyRegion) - b.writeSlice("likelyRegionList", likelyRegionList) - - b.writeType(likelyTag{}) - b.writeSlice("likelyRegionGroup", likelyRegionGroup) -} - -type mutualIntelligibility struct { - want, have uint16 - conf uint8 - oneway bool -} - -type scriptIntelligibility struct { - lang uint16 // langID or 0 if * - want, have uint8 - conf uint8 -} - -type sortByConf []mutualIntelligibility - -func (l sortByConf) Less(a, b int) bool { - return l[a].conf > l[b].conf -} - -func (l sortByConf) Swap(a, b int) { - l[a], l[b] = l[b], l[a] -} - -func (l sortByConf) Len() int { - return len(l) -} - -// toConf converts a percentage value [0, 100] to a confidence class. -func toConf(pct uint8) uint8 { - switch { - case pct == 100: - return 3 // Exact - case pct >= 90: - return 2 // High - case pct > 50: - return 1 // Low - default: - return 0 // No - } -} - -// writeMatchData writes tables with languages and scripts for which there is -// mutual intelligibility. The data is based on CLDR's languageMatching data. -// Note that we use a different algorithm than the one defined by CLDR and that -// we slightly modify the data. For example, we convert scores to confidence levels. -// We also drop all region-related data as we use a different algorithm to -// determine region equivalence. -func (b *builder) writeMatchData() { - b.writeType(mutualIntelligibility{}) - b.writeType(scriptIntelligibility{}) - lm := b.supp.LanguageMatching.LanguageMatches - cldr.MakeSlice(&lm).SelectAnyOf("type", "written") - - matchLang := []mutualIntelligibility{} - matchScript := []scriptIntelligibility{} - // Convert the languageMatch entries in lists keyed by desired language. - for _, m := range lm[0].LanguageMatch { - // Different versions of CLDR use different separators. - desired := strings.Replace(m.Desired, "-", "_", -1) - supported := strings.Replace(m.Supported, "-", "_", -1) - d := strings.Split(desired, "_") - s := strings.Split(supported, "_") - if len(d) != len(s) || len(d) > 2 { - // Skip all entries with regions and work around CLDR bug. - continue - } - pct, _ := strconv.ParseInt(m.Percent, 10, 8) - if len(d) == 2 && d[0] == s[0] && len(d[1]) == 4 { - // language-script pair. 
- lang := uint16(0) - if d[0] != "*" { - lang = uint16(b.langIndex(d[0])) - } - matchScript = append(matchScript, scriptIntelligibility{ - lang: lang, - want: uint8(b.script.index(d[1])), - have: uint8(b.script.index(s[1])), - conf: toConf(uint8(pct)), - }) - if m.Oneway != "true" { - matchScript = append(matchScript, scriptIntelligibility{ - lang: lang, - want: uint8(b.script.index(s[1])), - have: uint8(b.script.index(d[1])), - conf: toConf(uint8(pct)), - }) - } - } else if len(d) == 1 && d[0] != "*" { - if pct == 100 { - // nb == no is already handled by macro mapping. Check there - // really is only this case. - if d[0] != "no" || s[0] != "nb" { - log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) - } - continue - } - matchLang = append(matchLang, mutualIntelligibility{ - want: uint16(b.langIndex(d[0])), - have: uint16(b.langIndex(s[0])), - conf: uint8(pct), - oneway: m.Oneway == "true", - }) - } else { - // TODO: Handle other mappings. - a := []string{"*;*", "*_*;*_*", "es_MX;es_419"} - s := strings.Join([]string{desired, supported}, ";") - if i := sort.SearchStrings(a, s); i == len(a) || a[i] != s { - log.Printf("%q not handled", s) - } - } - } - sort.Stable(sortByConf(matchLang)) - // collapse percentage into confidence classes - for i, m := range matchLang { - matchLang[i].conf = toConf(m.conf) - } - b.writeSlice("matchLang", matchLang) - b.writeSlice("matchScript", matchScript) -} - -func (b *builder) writeRegionInclusionData() { - var ( - // mm holds for each group the set of groups with a distance of 1. - mm = make(map[int][]index) - - // containment holds for each group the transitive closure of - // containment of other groups. - containment = make(map[index][]index) - ) - for _, g := range b.supp.TerritoryContainment.Group { - group := b.region.index(g.Type) - groupIdx := b.groups[group] - for _, mem := range strings.Split(g.Contains, " ") { - r := b.region.index(mem) - mm[r] = append(mm[r], groupIdx) - if g, ok := b.groups[r]; ok { - mm[group] = append(mm[group], g) - containment[groupIdx] = append(containment[groupIdx], g) - } - } - } - - regionContainment := make([]uint32, len(b.groups)) - for _, g := range b.groups { - l := containment[g] - - // Compute the transitive closure of containment. - for i := 0; i < len(l); i++ { - l = append(l, containment[l[i]]...) - } - - // Compute the bitmask. - regionContainment[g] = 1 << g - for _, v := range l { - regionContainment[g] |= 1 << v - } - // log.Printf("%d: %X", g, regionContainment[g]) - } - b.writeSlice("regionContainment", regionContainment) - - regionInclusion := make([]uint8, len(b.region.s)) - bvs := make(map[uint32]index) - // Make the first bitvector positions correspond with the groups. - for r, i := range b.groups { - bv := uint32(1 << i) - for _, g := range mm[r] { - bv |= 1 << g - } - bvs[bv] = i - regionInclusion[r] = uint8(bvs[bv]) - } - for r := 1; r < len(b.region.s); r++ { - if _, ok := b.groups[r]; !ok { - bv := uint32(0) - for _, g := range mm[r] { - bv |= 1 << g - } - if bv == 0 { - // Pick the world for unspecified regions. - bv = 1 << b.groups[b.region.index("001")] - } - if _, ok := bvs[bv]; !ok { - bvs[bv] = index(len(bvs)) - } - regionInclusion[r] = uint8(bvs[bv]) - } - } - b.writeSlice("regionInclusion", regionInclusion) - regionInclusionBits := make([]uint32, len(bvs)) - for k, v := range bvs { - regionInclusionBits[v] = uint32(k) - } - // Add bit vectors for increasingly large distances until a fixed point is reached. 
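The regionContainment computation above boils down to a per-group transitive closure stored as a bit vector. A toy illustration of that idea (hypothetical groups, not the real CLDR data): each group gets one bit, and a group's vector is itself plus everything that transitively contains it.

package main

import "fmt"

func main() {
	const (
		world         = iota // bit 001
		europe               // bit 010
		westernEurope        // bit 100
	)
	// Direct "is contained in" edges.
	containedIn := map[int][]int{europe: {world}, westernEurope: {europe}}

	closure := make([]uint32, 3)
	for g := 0; g < 3; g++ {
		bits := uint32(1) << uint(g)
		for queue := []int{g}; len(queue) > 0; queue = queue[1:] {
			for _, parent := range containedIn[queue[0]] {
				if bits&(1<<uint(parent)) == 0 {
					bits |= 1 << uint(parent)
					queue = append(queue, parent)
				}
			}
		}
		closure[g] = bits
	}
	fmt.Printf("%03b %03b %03b\n", closure[world], closure[europe], closure[westernEurope])
	// Output: 001 011 111
}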
- regionInclusionNext := []uint8{} - for i := 0; i < len(regionInclusionBits); i++ { - bits := regionInclusionBits[i] - next := bits - for i := uint(0); i < uint(len(b.groups)); i++ { - if bits&(1< 0 { + t, _ = Raw.Compose(t, e) } return t, index, c } @@ -91,7 +144,7 @@ var ErrMissingLikelyTagsData = errors.New("missing likely tags data") // addLikelySubtags sets subtags to their most likely value, given the locale. // In most cases this means setting fields for unknown values, but in some -// cases it may alter a value. It returns a ErrMissingLikelyTagsData error +// cases it may alter a value. It returns an ErrMissingLikelyTagsData error // if the given locale cannot be expanded. func (t Tag) addLikelySubtags() (Tag, error) { id, err := addTags(t) @@ -300,8 +353,9 @@ func minimizeTags(t Tag) (Tag, error) { // 1) compute the match between the two tags. // 2) if the match is better than the previous best match, replace it // with the new match. (see next section) -// b) if the current best match is above a certain threshold, return this -// match without proceeding to the next tag in "desired". [See Note 1] +// b) if the current best match is Exact and pin is true the result will be +// frozen to the language found thusfar, although better matches may +// still be found for the same language. // 3) If the best match so far is below a certain threshold, return "default". // // Ranking: @@ -350,9 +404,6 @@ func minimizeTags(t Tag) (Tag, error) { // found wins. // // Notes: -// [1] Note that even if we may not have a perfect match, if a match is above a -// certain threshold, it is considered a better match than any other match -// to a tag later in the list of preferred language tags. // [2] In practice, as matching of Exact is done in a separate phase from // matching the other levels, we reuse the Exact level to mean MaxExact in // the second phase. As a consequence, we only need the levels defined by @@ -388,16 +439,18 @@ func minimizeTags(t Tag) (Tag, error) { // matcher keeps a set of supported language tags, indexed by language. type matcher struct { - default_ *haveTag - index map[langID]*matchHeader - passSettings bool + default_ *haveTag + supported []*haveTag + index map[langID]*matchHeader + passSettings bool + preferSameScript bool } // matchHeader has the lists of tags for exact matches and matches based on // maximized and canonicalized tags for a given language. type matchHeader struct { - exact []haveTag - max []haveTag + haveTags []*haveTag + original bool } // haveTag holds a supported Tag and its maximized script and region. The maximized @@ -427,7 +480,7 @@ type haveTag struct { func makeHaveTag(tag Tag, index int) (haveTag, langID) { max := tag - if tag.lang != 0 { + if tag.lang != 0 || tag.region != 0 || tag.script != 0 { max, _ = max.canonicalize(All) max, _ = addTags(max) max.remakeString() @@ -440,8 +493,10 @@ func makeHaveTag(tag Tag, index int) (haveTag, langID) { // script to map to another and we rely on this to keep the code simple. func altScript(l langID, s scriptID) scriptID { for _, alt := range matchScript { - if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s { - return scriptID(alt.want) + // TODO: also match cases where language is not the same. + if (langID(alt.wantLang) == l || langID(alt.haveLang) == l) && + scriptID(alt.haveScript) == s { + return scriptID(alt.wantScript) } } return 0 @@ -450,29 +505,27 @@ func altScript(l langID, s scriptID) scriptID { // addIfNew adds a haveTag to the list of tags only if it is a unique tag. 
// Tags that have the same maximized values are linked by index. func (h *matchHeader) addIfNew(n haveTag, exact bool) { + h.original = h.original || exact // Don't add new exact matches. - for _, v := range h.exact { + for _, v := range h.haveTags { if v.tag.equalsRest(n.tag) { return } } - if exact { - h.exact = append(h.exact, n) - } // Allow duplicate maximized tags, but create a linked list to allow quickly // comparing the equivalents and bail out. - for i, v := range h.max { + for i, v := range h.haveTags { if v.maxScript == n.maxScript && v.maxRegion == n.maxRegion && v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() { - for h.max[i].nextMax != 0 { - i = int(h.max[i].nextMax) + for h.haveTags[i].nextMax != 0 { + i = int(h.haveTags[i].nextMax) } - h.max[i].nextMax = uint16(len(h.max)) + h.haveTags[i].nextMax = uint16(len(h.haveTags)) break } } - h.max = append(h.max, n) + h.haveTags = append(h.haveTags, &n) } // header returns the matchHeader for the given language. It creates one if @@ -486,12 +539,26 @@ func (m *matcher) header(l langID) *matchHeader { return h } +func toConf(d uint8) Confidence { + if d <= 10 { + return High + } + if d < 30 { + return Low + } + return No +} + // newMatcher builds an index for the given supported tags and returns it as // a matcher. It also expands the index by considering various equivalence classes // for a given tag. -func newMatcher(supported []Tag) *matcher { +func newMatcher(supported []Tag, options []MatchOption) *matcher { m := &matcher{ - index: make(map[langID]*matchHeader), + index: make(map[langID]*matchHeader), + preferSameScript: true, + } + for _, o := range options { + o(m) } if len(supported) == 0 { m.default_ = &haveTag{} @@ -502,25 +569,29 @@ func newMatcher(supported []Tag) *matcher { for i, tag := range supported { pair, _ := makeHaveTag(tag, i) m.header(tag.lang).addIfNew(pair, true) + m.supported = append(m.supported, &pair) } - m.default_ = &m.header(supported[0].lang).exact[0] + m.default_ = m.header(supported[0].lang).haveTags[0] + // Keep these in two different loops to support the case that two equivalent + // languages are distinguished, such as iw and he. for i, tag := range supported { pair, max := makeHaveTag(tag, i) if max != tag.lang { - m.header(max).addIfNew(pair, false) + m.header(max).addIfNew(pair, true) } } // update is used to add indexes in the map for equivalent languages. - // If force is true, the update will also apply to derived entries. To - // avoid applying a "transitive closure", use false. - update := func(want, have uint16, conf Confidence, force bool) { + // update will only add entries to original indexes, thus not computing any + // transitive relations. + update := func(want, have uint16, conf Confidence) { if hh := m.index[langID(have)]; hh != nil { - if !force && len(hh.exact) == 0 { + if !hh.original { return } hw := m.header(langID(want)) - for _, v := range hh.max { + for _, ht := range hh.haveTags { + v := *ht if conf < v.conf { v.conf = conf } @@ -528,7 +599,7 @@ func newMatcher(supported []Tag) *matcher { if v.altScript != 0 { v.altScript = altScript(langID(want), v.maxScript) } - hw.addIfNew(v, conf == Exact && len(hh.exact) > 0) + hw.addIfNew(v, conf == Exact && hh.original) } } } @@ -536,9 +607,9 @@ func newMatcher(supported []Tag) *matcher { // Add entries for languages with mutual intelligibility as defined by CLDR's // languageMatch data. 
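The matchLang entries consumed in the loop below encode CLDR's mutual-intelligibility data; the package documentation removed earlier in this diff uses Norwegian falling back to Danish as its example. A minimal sketch (not part of the diff) of how that surfaces through the public API:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	m := language.NewMatcher([]language.Tag{
		language.English, // fallback
		language.Danish,
	})
	tag, index, conf := m.Match(language.Norwegian)
	// Expected: Danish (index 1) with a non-Exact confidence rather than the
	// English fallback, because Danish and Norwegian are mutually intelligible.
	fmt.Println(tag, index, conf)
}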
for _, ml := range matchLang { - update(ml.want, ml.have, Confidence(ml.conf), false) + update(ml.want, ml.have, toConf(ml.distance)) if !ml.oneway { - update(ml.have, ml.want, Confidence(ml.conf), false) + update(ml.have, ml.want, toConf(ml.distance)) } } @@ -548,10 +619,6 @@ func newMatcher(supported []Tag) *matcher { // (their canonicalization simply substitutes a different language code, but // nothing else), the match confidence is Exact, otherwise it is High. for i, lm := range langAliasMap { - if lm.from == _sh { - continue - } - // If deprecated codes match and there is no fiddling with the script or // or region, we consider it an exact match. conf := Exact @@ -559,9 +626,9 @@ func newMatcher(supported []Tag) *matcher { if !isExactEquivalent(langID(lm.from)) { conf = High } - update(lm.to, lm.from, conf, true) + update(lm.to, lm.from, conf) } - update(lm.from, lm.to, conf, true) + update(lm.from, lm.to, conf) } return m } @@ -570,28 +637,29 @@ func newMatcher(supported []Tag) *matcher { // account the order of preference of the given tags. func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) { best := bestMatch{} - for _, w := range want { + for i, w := range want { var max Tag // Check for exact match first. h := m.index[w.lang] if w.lang != 0 { - // Base language is defined. if h == nil { continue } - for i := range h.exact { - have := &h.exact[i] - if have.tag.equalsRest(w) { - return have, w, Exact - } + // Base language is defined. + max, _ = w.canonicalize(Legacy | Deprecated | Macro) + // A region that is added through canonicalization is stronger than + // a maximized region: set it in the original (e.g. mo -> ro-MD). + if w.region != max.region { + w.region = max.region } - max, _ = w.canonicalize(Legacy | Deprecated) + // TODO: should we do the same for scripts? + // See test case: en, sr, nl ; sh ; sr max, _ = addTags(max) } else { // Base language is not defined. if h != nil { - for i := range h.exact { - have := &h.exact[i] + for i := range h.haveTags { + have := h.haveTags[i] if have.tag.equalsRest(w) { return have, w, Exact } @@ -607,16 +675,23 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) { continue } } + pin := true + for _, t := range want[i+1:] { + if w.lang == t.lang { + pin = false + break + } + } // Check for match based on maximized tag. - for i := range h.max { - have := &h.max[i] - best.update(have, w, max.script, max.region) + for i := range h.haveTags { + have := h.haveTags[i] + best.update(have, w, max.script, max.region, pin) if best.conf == Exact { for have.nextMax != 0 { - have = &h.max[have.nextMax] - best.update(have, w, max.script, max.region) + have = h.haveTags[have.nextMax] + best.update(have, w, max.script, max.region, pin) } - return best.have, best.want, High + return best.have, best.want, best.conf } } } @@ -631,42 +706,68 @@ func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) { // bestMatch accumulates the best match so far. type bestMatch struct { - have *haveTag - want Tag - conf Confidence + have *haveTag + want Tag + conf Confidence + pinnedRegion regionID + pinLanguage bool + sameRegionGroup bool // Cached results from applying tie-breaking rules. - origLang bool - origReg bool - regDist uint8 - origScript bool - parentDist uint8 // 255 if have is not an ancestor of want tag. 
+ origLang bool + origReg bool + paradigmReg bool + regGroupDist uint8 + origScript bool } // update updates the existing best match if the new pair is considered to be a -// better match. -// To determine if the given pair is a better match, it first computes the rough -// confidence level. If this surpasses the current match, it will replace it and -// update the tie-breaker rule cache. If there is a tie, it proceeds with applying -// a series of tie-breaker rules. If there is no conclusive winner after applying -// the tie-breaker rules, it leaves the current match as the preferred match. -func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) { +// better match. To determine if the given pair is a better match, it first +// computes the rough confidence level. If this surpasses the current match, it +// will replace it and update the tie-breaker rule cache. If there is a tie, it +// proceeds with applying a series of tie-breaker rules. If there is no +// conclusive winner after applying the tie-breaker rules, it leaves the current +// match as the preferred match. +// +// If pin is true and have and tag are a strong match, it will henceforth only +// consider matches for this language. This corresponds to the nothing that most +// users have a strong preference for the first defined language. A user can +// still prefer a second language over a dialect of the preferred language by +// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should +// be false. +func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID, pin bool) { // Bail if the maximum attainable confidence is below that of the current best match. c := have.conf if c < m.conf { return } - if have.maxScript != maxScript { + // Don't change the language once we already have found an exact match. + if m.pinLanguage && tag.lang != m.want.lang { + return + } + // Pin the region group if we are comparing tags for the same language. + if tag.lang == m.want.lang && m.sameRegionGroup { + _, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.lang) + if !sameGroup { + return + } + } + if c == Exact && have.maxScript == maxScript { + // If there is another language and then another entry of this language, + // don't pin anything, otherwise pin the language. + m.pinLanguage = pin + } + if have.tag.equalsRest(tag) { + } else if have.maxScript != maxScript { // There is usually very little comprehension between different scripts. - // In a few cases there may still be Low comprehension. This possibility is - // pre-computed and stored in have.altScript. + // In a few cases there may still be Low comprehension. This possibility + // is pre-computed and stored in have.altScript. if Low < m.conf || have.altScript != maxScript { return } c = Low } else if have.maxRegion != maxRegion { - // There is usually a small difference between languages across regions. - // We use the region distance (below) to disambiguate between equal matches. if High < c { + // There is usually a small difference between languages across regions. c = High } } @@ -702,28 +803,26 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion beaten = true } - // Next we prefer smaller distances between regions, as defined by regionDist. 
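The pinning behaviour documented on update() above is internal; callers still consume it through Matcher.Match. A sketch of the typical calling pattern (an assumed usage example, not code from this diff), using the returned index to stay on a tag the application actually supports:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

var supported = []language.Tag{
	language.English, // first entry doubles as the fallback
	language.MustParse("en-GB"),
	language.Dutch,
}

var matcher = language.NewMatcher(supported)

// pick resolves an Accept-Language header to one of the supported tags.
func pick(acceptLanguage string) language.Tag {
	desired, _, err := language.ParseAcceptLanguage(acceptLanguage)
	if err != nil {
		return supported[0]
	}
	_, index, _ := matcher.Match(desired...)
	return supported[index]
}

func main() {
	fmt.Println(pick("en-GB, nl;q=0.8")) // en-GB (exact supported match)
}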
- regDist := regionDist(have.maxRegion, maxRegion, tag.lang) - if !beaten && m.regDist != regDist { - if regDist > m.regDist { + regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.lang) + if !beaten && m.regGroupDist != regGroupDist { + if regGroupDist > m.regGroupDist { return } beaten = true } - // Next we prefer if the pre-maximized script was specified and identical. - origScript := have.tag.script == tag.script && tag.script != 0 - if !beaten && m.origScript != origScript { - if m.origScript { + paradigmReg := isParadigmLocale(tag.lang, have.maxRegion) + if !beaten && m.paradigmReg != paradigmReg { + if !paradigmReg { return } beaten = true } - // Finally we prefer tags which have a closer parent relationship. - parentDist := parentDistance(have.tag.region, tag) - if !beaten && m.parentDist != parentDist { - if parentDist > m.parentDist { + // Next we prefer if the pre-maximized script was specified and identical. + origScript := have.tag.script == tag.script && tag.script != 0 + if !beaten && m.origScript != origScript { + if m.origScript { return } beaten = true @@ -734,63 +833,47 @@ func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion m.have = have m.want = tag m.conf = c + m.pinnedRegion = maxRegion + m.sameRegionGroup = sameGroup m.origLang = origLang m.origReg = origReg + m.paradigmReg = paradigmReg m.origScript = origScript - m.regDist = regDist - m.parentDist = parentDist + m.regGroupDist = regGroupDist } } -// parentDistance returns the number of times Parent must be called before the -// regions match. It is assumed that it has already been checked that lang and -// script are identical. If haveRegion does not occur in the ancestor chain of -// tag, it returns 255. -func parentDistance(haveRegion regionID, tag Tag) uint8 { - p := tag.Parent() - d := uint8(1) - for haveRegion != p.region { - if p.region == 0 { - return 255 +func isParadigmLocale(lang langID, r regionID) bool { + for _, e := range paradigmLocales { + if langID(e[0]) == lang && (r == regionID(e[1]) || r == regionID(e[2])) { + return true } - p = p.Parent() - d++ } - return d + return false } -// regionDist wraps regionDistance with some exceptions to the algorithmic distance. -func regionDist(a, b regionID, lang langID) uint8 { - if lang == _en { - // Two variants of non-US English are close to each other, regardless of distance. - if a != _US && b != _US { - return 2 - } - } - return uint8(regionDistance(a, b)) -} +// regionGroupDist computes the distance between two regions based on their +// CLDR grouping. +func regionGroupDist(a, b regionID, script scriptID, lang langID) (dist uint8, same bool) { + const defaultDistance = 4 -// regionDistance computes the distance between two regions based on the -// distance in the graph of region containments as defined in CLDR. It iterates -// over increasingly inclusive sets of groups, represented as bit vectors, until -// the source bit vector has bits in common with the destination vector. -func regionDistance(a, b regionID) int { - if a == b { - return 0 - } - p, q := regionInclusion[a], regionInclusion[b] - if p < nRegionGroups { - p, q = q, p - } - set := regionInclusionBits - if q < nRegionGroups && set[p]&(1< len(dst) { + err = transform.ErrShortDst break } - // We replace illegal bytes with RuneError. Not doing so might - // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. 
- // The resulting byte sequence may subsequently contain runes - // for which t(r) is true that were passed unnoticed. - if !t(utf8.RuneError) { - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - } - nSrc++ - continue + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 } + nSrc++ + continue } - if t(r) { nSrc += size continue @@ -157,6 +185,28 @@ type mapper func(rune) rune func (mapper) Reset() {} +// Span implements transform.Spanner. +func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); n += size { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) != r { + err = transform.ErrEndOfSpan + break + } + } + return n, err +} + // Transform implements transform.Transformer. func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { var replacement rune @@ -230,24 +280,51 @@ func ReplaceIllFormed() Transformer { type replaceIllFormed struct{ transform.NopResetter } +func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // ASCII fast path. + if src[n] < utf8.RuneSelf { + n++ + continue + } + + r, size := utf8.DecodeRune(src[n:]) + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + n += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + err = transform.ErrEndOfSpan + break + } + return n, err +} + func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { for nSrc < len(src) { - r, size := utf8.DecodeRune(src[nSrc:]) - - // Look for an ASCII rune. - if r < utf8.RuneSelf { + // ASCII fast path. + if r := src[nSrc]; r < utf8.RuneSelf { if nDst == len(dst) { err = transform.ErrShortDst break } - dst[nDst] = byte(r) + dst[nDst] = r nDst++ nSrc++ continue } // Look for a valid non-ASCII rune. - if r != utf8.RuneError || size != 1 { + if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { err = transform.ErrShortDst break diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/secure/bidirule/bidirule.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/secure/bidirule/bidirule.go new file mode 100644 index 00000000..e2b70f76 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/secure/bidirule/bidirule.go @@ -0,0 +1,336 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bidirule implements the Bidi Rule defined by RFC 5893. +// +// This package is under development. The API may change without notice and +// without preserving backward compatibility. 
+package bidirule + +import ( + "errors" + "unicode/utf8" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/bidi" +) + +// This file contains an implementation of RFC 5893: Right-to-Left Scripts for +// Internationalized Domain Names for Applications (IDNA) +// +// A label is an individual component of a domain name. Labels are usually +// shown separated by dots; for example, the domain name "www.example.com" is +// composed of three labels: "www", "example", and "com". +// +// An RTL label is a label that contains at least one character of class R, AL, +// or AN. An LTR label is any label that is not an RTL label. +// +// A "Bidi domain name" is a domain name that contains at least one RTL label. +// +// The following guarantees can be made based on the above: +// +// o In a domain name consisting of only labels that satisfy the rule, +// the requirements of Section 3 are satisfied. Note that even LTR +// labels and pure ASCII labels have to be tested. +// +// o In a domain name consisting of only LDH labels (as defined in the +// Definitions document [RFC5890]) and labels that satisfy the rule, +// the requirements of Section 3 are satisfied as long as a label +// that starts with an ASCII digit does not come after a +// right-to-left label. +// +// No guarantee is given for other combinations. + +// ErrInvalid indicates a label is invalid according to the Bidi Rule. +var ErrInvalid = errors.New("bidirule: failed Bidi Rule") + +type ruleState uint8 + +const ( + ruleInitial ruleState = iota + ruleLTR + ruleLTRFinal + ruleRTL + ruleRTLFinal + ruleInvalid +) + +type ruleTransition struct { + next ruleState + mask uint16 +} + +var transitions = [...][2]ruleTransition{ + // [2.1] The first character must be a character with Bidi property L, R, or + // AL. If it has the R or AL property, it is an RTL label; if it has the L + // property, it is an LTR label. + ruleInitial: { + {ruleLTRFinal, 1 << bidi.L}, + {ruleRTLFinal, 1< 0; count-- { + p.openers.Remove(p.openers.Front()) + } + break + } + } + sort.Sort(p.pairPositions) + // if we get here, the closing bracket matched no openers + // and gets ignored + } + } +} + +// Bracket pairs within an isolating run sequence are processed as units so +// that both the opening and the closing paired bracket in a pair resolve to +// the same direction. +// +// N0. Process bracket pairs in an isolating run sequence sequentially in +// the logical order of the text positions of the opening paired brackets +// using the logic given below. Within this scope, bidirectional types EN +// and AN are treated as R. +// +// Identify the bracket pairs in the current isolating run sequence +// according to BD16. For each bracket-pair element in the list of pairs of +// text positions: +// +// a Inspect the bidirectional types of the characters enclosed within the +// bracket pair. +// +// b If any strong type (either L or R) matching the embedding direction is +// found, set the type for both brackets in the pair to match the embedding +// direction. +// +// o [ e ] o -> o e e e o +// +// o [ o e ] -> o e o e e +// +// o [ NI e ] -> o e NI e e +// +// c Otherwise, if a strong type (opposite the embedding direction) is +// found, test for adjacent strong types as follows: 1 First, check +// backwards before the opening paired bracket until the first strong type +// (L, R, or sos) is found. If that first preceding strong type is opposite +// the embedding direction, then set the type for both brackets in the pair +// to that type. 
2 Otherwise, set the type for both brackets in the pair to +// the embedding direction. +// +// o [ o ] e -> o o o o e +// +// o [ o NI ] o -> o o o NI o o +// +// e [ o ] o -> e e o e o +// +// e [ o ] e -> e e o e e +// +// e ( o [ o ] NI ) e -> e e o o o o NI e e +// +// d Otherwise, do not set the type for the current bracket pair. Note that +// if the enclosed text contains no strong types the paired brackets will +// both resolve to the same level when resolved individually using rules N1 +// and N2. +// +// e ( NI ) o -> e ( NI ) o + +// getStrongTypeN0 maps character's directional code to strong type as required +// by rule N0. +// +// TODO: have separate type for "strong" directionality. +func (p *bracketPairer) getStrongTypeN0(index int) Class { + switch p.codesIsolatedRun[index] { + // in the scope of N0, number types are treated as R + case EN, AN, AL, R: + return R + case L: + return L + default: + return ON + } +} + +// classifyPairContent reports the strong types contained inside a Bracket Pair, +// assuming the given embedding direction. +// +// It returns ON if no strong type is found. If a single strong type is found, +// it returns this this type. Otherwise it returns the embedding direction. +// +// TODO: use separate type for "strong" directionality. +func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class { + dirOpposite := ON + for i := loc.opener + 1; i < loc.closer; i++ { + dir := p.getStrongTypeN0(i) + if dir == ON { + continue + } + if dir == dirEmbed { + return dir // type matching embedding direction found + } + dirOpposite = dir + } + // return ON if no strong type found, or class opposite to dirEmbed + return dirOpposite +} + +// classBeforePair determines which strong types are present before a Bracket +// Pair. Return R or L if strong type found, otherwise ON. +func (p *bracketPairer) classBeforePair(loc bracketPair) Class { + for i := loc.opener - 1; i >= 0; i-- { + if dir := p.getStrongTypeN0(i); dir != ON { + return dir + } + } + // no strong types found, return sos + return p.sos +} + +// assignBracketType implements rule N0 for a single bracket pair. 
+func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) { + // rule "N0, a", inspect contents of pair + dirPair := p.classifyPairContent(loc, dirEmbed) + + // dirPair is now L, R, or N (no strong type found) + + // the following logical tests are performed out of order compared to + // the statement of the rules but yield the same results + if dirPair == ON { + return // case "d" - nothing to do + } + + if dirPair != dirEmbed { + // case "c": strong type found, opposite - check before (c.1) + dirPair = p.classBeforePair(loc) + if dirPair == dirEmbed || dirPair == ON { + // no strong opposite type found before - use embedding (c.2) + dirPair = dirEmbed + } + } + // else: case "b", strong type found matching embedding, + // no explicit action needed, as dirPair is already set to embedding + // direction + + // set the bracket types to the type found + p.setBracketsToType(loc, dirPair, initialTypes) +} + +func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) { + p.codesIsolatedRun[loc.opener] = dirPair + p.codesIsolatedRun[loc.closer] = dirPair + + for i := loc.opener + 1; i < loc.closer; i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } + + for i := loc.closer + 1; i < len(p.indexes); i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } +} + +// resolveBrackets implements rule N0 for a list of pairs. +func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) { + for _, loc := range p.pairPositions { + p.assignBracketType(loc, dirEmbed, initialTypes) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/core.go new file mode 100644 index 00000000..d4c1399f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/core.go @@ -0,0 +1,1058 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "log" + +// This implementation is a port based on the reference implementation found at: +// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ +// +// described in Unicode Bidirectional Algorithm (UAX #9). +// +// Input: +// There are two levels of input to the algorithm, since clients may prefer to +// supply some information from out-of-band sources rather than relying on the +// default behavior. +// +// - Bidi class array +// - Bidi class array, with externally supplied base line direction +// +// Output: +// Output is separated into several stages: +// +// - levels array over entire paragraph +// - reordering array over entire paragraph +// - levels array over line +// - reordering array over line +// +// Note that for conformance to the Unicode Bidirectional Algorithm, +// implementations are only required to generate correct reordering and +// character directionality (odd or even levels) over a line. Generating +// identical level arrays over a line is not required. Bidi explicit format +// codes (LRE, RLE, LRO, RLO, PDF) and BN can be assigned arbitrary levels and +// positions as long as the rest of the input is properly reordered. +// +// As the algorithm is defined to operate on a single paragraph at a time, this +// implementation is written to handle single paragraphs. 
Thus rule P1 is +// presumed by this implementation-- the data provided to the implementation is +// assumed to be a single paragraph, and either contains no 'B' codes, or a +// single 'B' code at the end of the input. 'B' is allowed as input to +// illustrate how the algorithm assigns it a level. +// +// Also note that rules L3 and L4 depend on the rendering engine that uses the +// result of the bidi algorithm. This implementation assumes that the rendering +// engine expects combining marks in visual order (e.g. to the left of their +// base character in RTL runs) and that it adjusts the glyphs used to render +// mirrored characters that are in RTL runs so that they render appropriately. + +// level is the embedding level of a character. Even embedding levels indicate +// left-to-right order and odd levels indicate right-to-left order. The special +// level of -1 is reserved for undefined order. +type level int8 + +const implicitLevel level = -1 + +// in returns if x is equal to any of the values in set. +func (c Class) in(set ...Class) bool { + for _, s := range set { + if c == s { + return true + } + } + return false +} + +// A paragraph contains the state of a paragraph. +type paragraph struct { + initialTypes []Class + + // Arrays of properties needed for paired bracket evaluation in N0 + pairTypes []bracketType // paired Bracket types for paragraph + pairValues []rune // rune for opening bracket or pbOpen and pbClose; 0 for pbNone + + embeddingLevel level // default: = implicitLevel; + + // at the paragraph levels + resultTypes []Class + resultLevels []level + + // Index of matching PDI for isolate initiator characters. For other + // characters, the value of matchingPDI will be set to -1. For isolate + // initiators with no matching PDI, matchingPDI will be set to the length of + // the input string. + matchingPDI []int + + // Index of matching isolate initiator for PDI characters. For other + // characters, and for PDIs with no matching isolate initiator, the value of + // matchingIsolateInitiator will be set to -1. + matchingIsolateInitiator []int +} + +// newParagraph initializes a paragraph. The user needs to supply a few arrays +// corresponding to the preprocessed text input. The types correspond to the +// Unicode BiDi classes for each rune. pairTypes indicates the bracket type for +// each rune. pairValues provides a unique bracket class identifier for each +// rune (suggested is the rune of the open bracket for opening and matching +// close brackets, after normalization). The embedding levels are optional, but +// may be supplied to encode embedding levels of styled text. +// +// TODO: return an error. +func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { + validateTypes(types) + validatePbTypes(pairTypes) + validatePbValues(pairValues, pairTypes) + validateParagraphEmbeddingLevel(levels) + + p := ¶graph{ + initialTypes: append([]Class(nil), types...), + embeddingLevel: levels, + + pairTypes: pairTypes, + pairValues: pairValues, + + resultTypes: append([]Class(nil), types...), + } + p.run() + return p +} + +func (p *paragraph) Len() int { return len(p.initialTypes) } + +// The algorithm. Does not include line-based processing (Rules L1, L2). +// These are applied later in the line-based phase of the algorithm. +func (p *paragraph) run() { + p.determineMatchingIsolates() + + // 1) determining the paragraph level + // Rule P1 is the requirement for entering this algorithm. + // Rules P2, P3. 
+ // If no externally supplied paragraph embedding level, use default. + if p.embeddingLevel == implicitLevel { + p.embeddingLevel = p.determineParagraphEmbeddingLevel(0, p.Len()) + } + + // Initialize result levels to paragraph embedding level. + p.resultLevels = make([]level, p.Len()) + setLevels(p.resultLevels, p.embeddingLevel) + + // 2) Explicit levels and directions + // Rules X1-X8. + p.determineExplicitEmbeddingLevels() + + // Rule X9. + // We do not remove the embeddings, the overrides, the PDFs, and the BNs + // from the string explicitly. But they are not copied into isolating run + // sequences when they are created, so they are removed for all + // practical purposes. + + // Rule X10. + // Run remainder of algorithm one isolating run sequence at a time + for _, seq := range p.determineIsolatingRunSequences() { + // 3) resolving weak types + // Rules W1-W7. + seq.resolveWeakTypes() + + // 4a) resolving paired brackets + // Rule N0 + resolvePairedBrackets(seq) + + // 4b) resolving neutral types + // Rules N1-N3. + seq.resolveNeutralTypes() + + // 5) resolving implicit embedding levels + // Rules I1, I2. + seq.resolveImplicitLevels() + + // Apply the computed levels and types + seq.applyLevelsAndTypes() + } + + // Assign appropriate levels to 'hide' LREs, RLEs, LROs, RLOs, PDFs, and + // BNs. This is for convenience, so the resulting level array will have + // a value for every character. + p.assignLevelsToCharactersRemovedByX9() +} + +// determineMatchingIsolates determines the matching PDI for each isolate +// initiator and vice versa. +// +// Definition BD9. +// +// At the end of this function: +// +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. +func (p *paragraph) determineMatchingIsolates() { + p.matchingPDI = make([]int, p.Len()) + p.matchingIsolateInitiator = make([]int, p.Len()) + + for i := range p.matchingIsolateInitiator { + p.matchingIsolateInitiator[i] = -1 + } + + for i := range p.matchingPDI { + p.matchingPDI[i] = -1 + + if t := p.resultTypes[i]; t.in(LRI, RLI, FSI) { + depthCounter := 1 + for j := i + 1; j < p.Len(); j++ { + if u := p.resultTypes[j]; u.in(LRI, RLI, FSI) { + depthCounter++ + } else if u == PDI { + if depthCounter--; depthCounter == 0 { + p.matchingPDI[i] = j + p.matchingIsolateInitiator[j] = i + break + } + } + } + if p.matchingPDI[i] == -1 { + p.matchingPDI[i] = p.Len() + } + } + } +} + +// determineParagraphEmbeddingLevel reports the resolved paragraph direction of +// the substring limited by the given range [start, end). +// +// Determines the paragraph level based on rules P2, P3. This is also used +// in rule X5c to find if an FSI should resolve to LRI or RLI. +func (p *paragraph) determineParagraphEmbeddingLevel(start, end int) level { + var strongType Class = unknownClass + + // Rule P2. + for i := start; i < end; i++ { + if t := p.resultTypes[i]; t.in(L, AL, R) { + strongType = t + break + } else if t.in(FSI, LRI, RLI) { + i = p.matchingPDI[i] // skip over to the matching PDI + if i > end { + log.Panic("assert (i <= end)") + } + } + } + // Rule P3. 
+ switch strongType { + case unknownClass: // none found + // default embedding level when no strong types found is 0. + return 0 + case L: + return 0 + default: // AL, R + return 1 + } +} + +const maxDepth = 125 + +// This stack will store the embedding levels and override and isolated +// statuses +type directionalStatusStack struct { + stackCounter int + embeddingLevelStack [maxDepth + 1]level + overrideStatusStack [maxDepth + 1]Class + isolateStatusStack [maxDepth + 1]bool +} + +func (s *directionalStatusStack) empty() { s.stackCounter = 0 } +func (s *directionalStatusStack) pop() { s.stackCounter-- } +func (s *directionalStatusStack) depth() int { return s.stackCounter } + +func (s *directionalStatusStack) push(level level, overrideStatus Class, isolateStatus bool) { + s.embeddingLevelStack[s.stackCounter] = level + s.overrideStatusStack[s.stackCounter] = overrideStatus + s.isolateStatusStack[s.stackCounter] = isolateStatus + s.stackCounter++ +} + +func (s *directionalStatusStack) lastEmbeddingLevel() level { + return s.embeddingLevelStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalOverrideStatus() Class { + return s.overrideStatusStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalIsolateStatus() bool { + return s.isolateStatusStack[s.stackCounter-1] +} + +// Determine explicit levels using rules X1 - X8 +func (p *paragraph) determineExplicitEmbeddingLevels() { + var stack directionalStatusStack + var overflowIsolateCount, overflowEmbeddingCount, validIsolateCount int + + // Rule X1. + stack.push(p.embeddingLevel, ON, false) + + for i, t := range p.resultTypes { + // Rules X2, X3, X4, X5, X5a, X5b, X5c + switch t { + case RLE, LRE, RLO, LRO, RLI, LRI, FSI: + isIsolate := t.in(RLI, LRI, FSI) + isRTL := t.in(RLE, RLO, RLI) + + // override if this is an FSI that resolves to RLI + if t == FSI { + isRTL = (p.determineParagraphEmbeddingLevel(i+1, p.matchingPDI[i]) == 1) + } + if isIsolate { + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + + var newLevel level + if isRTL { + // least greater odd + newLevel = (stack.lastEmbeddingLevel() + 1) | 1 + } else { + // least greater even + newLevel = (stack.lastEmbeddingLevel() + 2) &^ 1 + } + + if newLevel <= maxDepth && overflowIsolateCount == 0 && overflowEmbeddingCount == 0 { + if isIsolate { + validIsolateCount++ + } + // Push new embedding level, override status, and isolated + // status. + // No check for valid stack counter, since the level check + // suffices. + switch t { + case LRO: + stack.push(newLevel, L, isIsolate) + case RLO: + stack.push(newLevel, R, isIsolate) + default: + stack.push(newLevel, ON, isIsolate) + } + // Not really part of the spec + if !isIsolate { + p.resultLevels[i] = newLevel + } + } else { + // This is an invalid explicit formatting character, + // so apply the "Otherwise" part of rules X2-X5b. 
+ if isIsolate { + overflowIsolateCount++ + } else { // !isIsolate + if overflowIsolateCount == 0 { + overflowEmbeddingCount++ + } + } + } + + // Rule X6a + case PDI: + if overflowIsolateCount > 0 { + overflowIsolateCount-- + } else if validIsolateCount == 0 { + // do nothing + } else { + overflowEmbeddingCount = 0 + for !stack.lastDirectionalIsolateStatus() { + stack.pop() + } + stack.pop() + validIsolateCount-- + } + p.resultLevels[i] = stack.lastEmbeddingLevel() + + // Rule X7 + case PDF: + // Not really part of the spec + p.resultLevels[i] = stack.lastEmbeddingLevel() + + if overflowIsolateCount > 0 { + // do nothing + } else if overflowEmbeddingCount > 0 { + overflowEmbeddingCount-- + } else if !stack.lastDirectionalIsolateStatus() && stack.depth() >= 2 { + stack.pop() + } + + case B: // paragraph separator. + // Rule X8. + + // These values are reset for clarity, in this implementation B + // can only occur as the last code in the array. + stack.empty() + overflowIsolateCount = 0 + overflowEmbeddingCount = 0 + validIsolateCount = 0 + p.resultLevels[i] = p.embeddingLevel + + default: + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + } +} + +type isolatingRunSequence struct { + p *paragraph + + indexes []int // indexes to the original string + + types []Class // type of each character using the index + resolvedLevels []level // resolved levels after application of rules + level level + sos, eos Class +} + +func (i *isolatingRunSequence) Len() int { return len(i.indexes) } + +func maxLevel(a, b level) level { + if a > b { + return a + } + return b +} + +// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, +// either L or R, for each isolating run sequence. +func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { + length := len(indexes) + types := make([]Class, length) + for i, x := range indexes { + types[i] = p.resultTypes[x] + } + + // assign level, sos and eos + prevChar := indexes[0] - 1 + for prevChar >= 0 && isRemovedByX9(p.initialTypes[prevChar]) { + prevChar-- + } + prevLevel := p.embeddingLevel + if prevChar >= 0 { + prevLevel = p.resultLevels[prevChar] + } + + var succLevel level + lastType := types[length-1] + if lastType.in(LRI, RLI, FSI) { + succLevel = p.embeddingLevel + } else { + // the first character after the end of run sequence + limit := indexes[length-1] + 1 + for ; limit < p.Len() && isRemovedByX9(p.initialTypes[limit]); limit++ { + + } + succLevel = p.embeddingLevel + if limit < p.Len() { + succLevel = p.resultLevels[limit] + } + } + level := p.resultLevels[indexes[0]] + return &isolatingRunSequence{ + p: p, + indexes: indexes, + types: types, + level: level, + sos: typeForLevel(maxLevel(prevLevel, level)), + eos: typeForLevel(maxLevel(succLevel, level)), + } +} + +// Resolving weak types Rules W1-W7. +// +// Note that some weak types (EN, AN) remain after this processing is +// complete. +func (s *isolatingRunSequence) resolveWeakTypes() { + + // on entry, only these types remain + s.assertOnly(L, R, AL, EN, ES, ET, AN, CS, B, S, WS, ON, NSM, LRI, RLI, FSI, PDI) + + // Rule W1. + // Changes all NSMs. + preceedingCharacterType := s.sos + for i, t := range s.types { + if t == NSM { + s.types[i] = preceedingCharacterType + } else { + if t.in(LRI, RLI, FSI, PDI) { + preceedingCharacterType = ON + } + preceedingCharacterType = t + } + } + + // Rule W2. 
+ // EN does not change at the start of the run, because sos != AL. + for i, t := range s.types { + if t == EN { + for j := i - 1; j >= 0; j-- { + if t := s.types[j]; t.in(L, R, AL) { + if t == AL { + s.types[i] = AN + } + break + } + } + } + } + + // Rule W3. + for i, t := range s.types { + if t == AL { + s.types[i] = R + } + } + + // Rule W4. + // Since there must be values on both sides for this rule to have an + // effect, the scan skips the first and last value. + // + // Although the scan proceeds left to right, and changes the type + // values in a way that would appear to affect the computations + // later in the scan, there is actually no problem. A change in the + // current value can only affect the value to its immediate right, + // and only affect it if it is ES or CS. But the current value can + // only change if the value to its right is not ES or CS. Thus + // either the current value will not change, or its change will have + // no effect on the remainder of the analysis. + + for i := 1; i < s.Len()-1; i++ { + t := s.types[i] + if t == ES || t == CS { + prevSepType := s.types[i-1] + succSepType := s.types[i+1] + if prevSepType == EN && succSepType == EN { + s.types[i] = EN + } else if s.types[i] == CS && prevSepType == AN && succSepType == AN { + s.types[i] = AN + } + } + } + + // Rule W5. + for i, t := range s.types { + if t == ET { + // locate end of sequence + runStart := i + runEnd := s.findRunLimit(runStart, ET) + + // check values at ends of sequence + t := s.sos + if runStart > 0 { + t = s.types[runStart-1] + } + if t != EN { + t = s.eos + if runEnd < len(s.types) { + t = s.types[runEnd] + } + } + if t == EN { + setTypes(s.types[runStart:runEnd], EN) + } + // continue at end of sequence + i = runEnd + } + } + + // Rule W6. + for i, t := range s.types { + if t.in(ES, ET, CS) { + s.types[i] = ON + } + } + + // Rule W7. + for i, t := range s.types { + if t == EN { + // set default if we reach start of run + prevStrongType := s.sos + for j := i - 1; j >= 0; j-- { + t = s.types[j] + if t == L || t == R { // AL's have been changed to R + prevStrongType = t + break + } + } + if prevStrongType == L { + s.types[i] = L + } + } + } +} + +// 6) resolving neutral types Rules N1-N2. +func (s *isolatingRunSequence) resolveNeutralTypes() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN, B, S, WS, ON, RLI, LRI, FSI, PDI) + + for i, t := range s.types { + switch t { + case WS, ON, B, S, RLI, LRI, FSI, PDI: + // find bounds of run of neutrals + runStart := i + runEnd := s.findRunLimit(runStart, B, S, WS, ON, RLI, LRI, FSI, PDI) + + // determine effective types at ends of run + var leadType, trailType Class + + // Note that the character found can only be L, R, AN, or + // EN. + if runStart == 0 { + leadType = s.sos + } else { + leadType = s.types[runStart-1] + if leadType.in(AN, EN) { + leadType = R + } + } + if runEnd == len(s.types) { + trailType = s.eos + } else { + trailType = s.types[runEnd] + if trailType.in(AN, EN) { + trailType = R + } + } + + var resolvedType Class + if leadType == trailType { + // Rule N1. + resolvedType = leadType + } else { + // Rule N2. + // Notice the embedding level of the run is used, not + // the paragraph embedding level. 
+ resolvedType = typeForLevel(s.level) + } + + setTypes(s.types[runStart:runEnd], resolvedType) + + // skip over run of (former) neutrals + i = runEnd + } + } +} + +func setLevels(levels []level, newLevel level) { + for i := range levels { + levels[i] = newLevel + } +} + +func setTypes(types []Class, newType Class) { + for i := range types { + types[i] = newType + } +} + +// 7) resolving implicit embedding levels Rules I1, I2. +func (s *isolatingRunSequence) resolveImplicitLevels() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN) + + s.resolvedLevels = make([]level, len(s.types)) + setLevels(s.resolvedLevels, s.level) + + if (s.level & 1) == 0 { // even level + for i, t := range s.types { + // Rule I1. + if t == L { + // no change + } else if t == R { + s.resolvedLevels[i] += 1 + } else { // t == AN || t == EN + s.resolvedLevels[i] += 2 + } + } + } else { // odd level + for i, t := range s.types { + // Rule I2. + if t == R { + // no change + } else { // t == L || t == AN || t == EN + s.resolvedLevels[i] += 1 + } + } + } +} + +// Applies the levels and types resolved in rules W1-I2 to the +// resultLevels array. +func (s *isolatingRunSequence) applyLevelsAndTypes() { + for i, x := range s.indexes { + s.p.resultTypes[x] = s.types[i] + s.p.resultLevels[x] = s.resolvedLevels[i] + } +} + +// Return the limit of the run consisting only of the types in validSet +// starting at index. This checks the value at index, and will return +// index if that value is not in validSet. +func (s *isolatingRunSequence) findRunLimit(index int, validSet ...Class) int { +loop: + for ; index < len(s.types); index++ { + t := s.types[index] + for _, valid := range validSet { + if t == valid { + continue loop + } + } + return index // didn't find a match in validSet + } + return len(s.types) +} + +// Algorithm validation. Assert that all values in types are in the +// provided set. +func (s *isolatingRunSequence) assertOnly(codes ...Class) { +loop: + for i, t := range s.types { + for _, c := range codes { + if t == c { + continue loop + } + } + log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i]) + } +} + +// determineLevelRuns returns an array of level runs. Each level run is +// described as an array of indexes into the input string. +// +// Determines the level runs. Rule X9 will be applied in determining the +// runs, in the way that makes sure the characters that are supposed to be +// removed are not included in the runs. +func (p *paragraph) determineLevelRuns() [][]int { + run := []int{} + allRuns := [][]int{} + currentLevel := implicitLevel + + for i := range p.initialTypes { + if !isRemovedByX9(p.initialTypes[i]) { + if p.resultLevels[i] != currentLevel { + // we just encountered a new run; wrap up last run + if currentLevel >= 0 { // only wrap it up if there was a run + allRuns = append(allRuns, run) + run = nil + } + // Start new run + currentLevel = p.resultLevels[i] + } + run = append(run, i) + } + } + // Wrap up the final run, if any + if len(run) > 0 { + allRuns = append(allRuns, run) + } + return allRuns +} + +// Definition BD13. Determine isolating run sequences. 
+func (p *paragraph) determineIsolatingRunSequences() []*isolatingRunSequence { + levelRuns := p.determineLevelRuns() + + // Compute the run that each character belongs to + runForCharacter := make([]int, p.Len()) + for i, run := range levelRuns { + for _, index := range run { + runForCharacter[index] = i + } + } + + sequences := []*isolatingRunSequence{} + + var currentRunSequence []int + + for _, run := range levelRuns { + first := run[0] + if p.initialTypes[first] != PDI || p.matchingIsolateInitiator[first] == -1 { + currentRunSequence = nil + // int run = i; + for { + // Copy this level run into currentRunSequence + currentRunSequence = append(currentRunSequence, run...) + + last := currentRunSequence[len(currentRunSequence)-1] + lastT := p.initialTypes[last] + if lastT.in(LRI, RLI, FSI) && p.matchingPDI[last] != p.Len() { + run = levelRuns[runForCharacter[p.matchingPDI[last]]] + } else { + break + } + } + sequences = append(sequences, p.isolatingRunSequence(currentRunSequence)) + } + } + return sequences +} + +// Assign level information to characters removed by rule X9. This is for +// ease of relating the level information to the original input data. Note +// that the levels assigned to these codes are arbitrary, they're chosen so +// as to avoid breaking level runs. +func (p *paragraph) assignLevelsToCharactersRemovedByX9() { + for i, t := range p.initialTypes { + if t.in(LRE, RLE, LRO, RLO, PDF, BN) { + p.resultTypes[i] = t + p.resultLevels[i] = -1 + } + } + // now propagate forward the levels information (could have + // propagated backward, the main thing is not to introduce a level + // break where one doesn't already exist). + + if p.resultLevels[0] == -1 { + p.resultLevels[0] = p.embeddingLevel + } + for i := 1; i < len(p.initialTypes); i++ { + if p.resultLevels[i] == -1 { + p.resultLevels[i] = p.resultLevels[i-1] + } + } + // Embedding information is for informational purposes only so need not be + // adjusted. +} + +// +// Output +// + +// getLevels computes levels array breaking lines at offsets in linebreaks. +// Rule L1. +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. The last value must be the length of the text. +func (p *paragraph) getLevels(linebreaks []int) []level { + // Note that since the previous processing has removed all + // P, S, and WS values from resultTypes, the values referred to + // in these rules are the initial types, before any processing + // has been applied (including processing of overrides). + // + // This example implementation has reinserted explicit format codes + // and BN, in order that the levels array correspond to the + // initial text. Their final placement is not normative. + // These codes are treated like WS in this implementation, + // so they don't interrupt sequences of WS. + + validateLineBreaks(linebreaks, p.Len()) + + result := append([]level(nil), p.resultLevels...) + + // don't worry about linebreaks since if there is a break within + // a series of WS values preceding S, the linebreak itself + // causes the reset. + for i, t := range p.initialTypes { + if t.in(B, S) { + // Rule L1, clauses one and two. + result[i] = p.embeddingLevel + + // Rule L1, clause three. + for j := i - 1; j >= 0; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + } + } + + // Rule L1, clause four. 
+ start := 0 + for _, limit := range linebreaks { + for j := limit - 1; j >= start; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + start = limit + } + + return result +} + +// getReordering returns the reordering of lines from a visual index to a +// logical index for line breaks at the given offsets. +// +// Lines are concatenated from left to right. So for example, the fifth +// character from the left on the third line is +// +// getReordering(linebreaks)[linebreaks[1] + 4] +// +// (linebreaks[1] is the position after the last character of the second +// line, which is also the index of the first character on the third line, +// and adding four gets the fifth character from the left). +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. The last value must be the length of the text. +func (p *paragraph) getReordering(linebreaks []int) []int { + validateLineBreaks(linebreaks, p.Len()) + + return computeMultilineReordering(p.getLevels(linebreaks), linebreaks) +} + +// Return multiline reordering array for a given level array. Reordering +// does not occur across a line break. +func computeMultilineReordering(levels []level, linebreaks []int) []int { + result := make([]int, len(levels)) + + start := 0 + for _, limit := range linebreaks { + tempLevels := make([]level, limit-start) + copy(tempLevels, levels[start:]) + + for j, order := range computeReordering(tempLevels) { + result[start+j] = order + start + } + start = limit + } + return result +} + +// Return reordering array for a given level array. This reorders a single +// line. The reordering is a visual to logical map. For example, the +// leftmost char is string.charAt(order[0]). Rule L2. +func computeReordering(levels []level) []int { + result := make([]int, len(levels)) + // initialize order + for i := range result { + result[i] = i + } + + // locate highest level found on line. + // Note the rules say text, but no reordering across line bounds is + // performed, so this is sufficient. + highestLevel := level(0) + lowestOddLevel := level(maxDepth + 2) + for _, level := range levels { + if level > highestLevel { + highestLevel = level + } + if level&1 != 0 && level < lowestOddLevel { + lowestOddLevel = level + } + } + + for level := highestLevel; level >= lowestOddLevel; level-- { + for i := 0; i < len(levels); i++ { + if levels[i] >= level { + // find range of text at or above this level + start := i + limit := i + 1 + for limit < len(levels) && levels[limit] >= level { + limit++ + } + + for j, k := start, limit-1; j < k; j, k = j+1, k-1 { + result[j], result[k] = result[k], result[j] + } + // skip to end of level run + i = limit + } + } + } + + return result +} + +// isWhitespace reports whether the type is considered a whitespace type for the +// line break rules. +func isWhitespace(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, LRI, RLI, FSI, PDI, BN, WS: + return true + } + return false +} + +// isRemovedByX9 reports whether the type is one of the types removed in X9. +func isRemovedByX9(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, BN: + return true + } + return false +} + +// typeForLevel reports the strong type (L or R) corresponding to the level. 
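For readers tracing the new core.go end to end, here is a small in-package, test-style sketch (not part of the patch) that drives the pipeline above on a made-up class sequence. It uses only identifiers introduced in this diff (newParagraph, getLevels, getReordering, bpNone, implicitLevel); the exampleParagraph name and the input values are invented for illustration.

package bidi

import "fmt"

// exampleParagraph is an illustrative helper only; the class sequence is
// invented and this function is not part of the vendored change.
func exampleParagraph() {
	types := []Class{L, R, R, L}                               // a short mixed-direction run
	pairTypes := []bracketType{bpNone, bpNone, bpNone, bpNone} // no brackets involved
	pairValues := []rune{0, 0, 0, 0}

	p := newParagraph(types, pairTypes, pairValues, implicitLevel)

	linebreaks := []int{len(types)}          // treat the whole input as a single line
	fmt.Println(p.getLevels(linebreaks))     // per-character embedding levels (rule L1)
	fmt.Println(p.getReordering(linebreaks)) // visual-to-logical index map (rule L2)
}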
+func typeForLevel(level level) Class { + if (level & 0x1) == 0 { + return L + } + return R +} + +// TODO: change validation to not panic + +func validateTypes(types []Class) { + if len(types) == 0 { + log.Panic("types is null") + } + for i, t := range types[:len(types)-1] { + if t == B { + log.Panicf("B type before end of paragraph at index: %d", i) + } + } +} + +func validateParagraphEmbeddingLevel(embeddingLevel level) { + if embeddingLevel != implicitLevel && + embeddingLevel != 0 && + embeddingLevel != 1 { + log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) + } +} + +func validateLineBreaks(linebreaks []int, textLength int) { + prev := 0 + for i, next := range linebreaks { + if next <= prev { + log.Panicf("bad linebreak: %d at index: %d", next, i) + } + prev = next + } + if prev != textLength { + log.Panicf("last linebreak was %d, want %d", prev, textLength) + } +} + +func validatePbTypes(pairTypes []bracketType) { + if len(pairTypes) == 0 { + log.Panic("pairTypes is null") + } + for i, pt := range pairTypes { + switch pt { + case bpNone, bpOpen, bpClose: + default: + log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) + } + } +} + +func validatePbValues(pairValues []rune, pairTypes []bracketType) { + if pairValues == nil { + log.Panic("pairValues is null") + } + if len(pairTypes) != len(pairValues) { + log.Panic("pairTypes is different length from pairValues") + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/prop.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/prop.go new file mode 100644 index 00000000..7c9484e1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/prop.go @@ -0,0 +1,206 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "unicode/utf8" + +// Properties provides access to BiDi properties of runes. +type Properties struct { + entry uint8 + last uint8 +} + +var trie = newBidiTrie(0) + +// TODO: using this for bidirule reduces the running time by about 5%. Consider +// if this is worth exposing or if we can find a way to speed up the Class +// method. +// +// // CompactClass is like Class, but maps all of the BiDi control classes +// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control. +// func (p Properties) CompactClass() Class { +// return Class(p.entry & 0x0F) +// } + +// Class returns the Bidi class for p. +func (p Properties) Class() Class { + c := Class(p.entry & 0x0F) + if c == Control { + c = controlByteToClass[p.last&0xF] + } + return c +} + +// IsBracket reports whether the rune is a bracket. +func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 } + +// IsOpeningBracket reports whether the rune is an opening bracket. +// IsBracket must return true. +func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 } + +// TODO: find a better API and expose. 
+func (p Properties) reverseBracket(r rune) rune { + return xorMasks[p.entry>>xorMaskShift] ^ r +} + +var controlByteToClass = [16]Class{ + 0xD: LRO, // U+202D LeftToRightOverride, + 0xE: RLO, // U+202E RightToLeftOverride, + 0xA: LRE, // U+202A LeftToRightEmbedding, + 0xB: RLE, // U+202B RightToLeftEmbedding, + 0xC: PDF, // U+202C PopDirectionalFormat, + 0x6: LRI, // U+2066 LeftToRightIsolate, + 0x7: RLI, // U+2067 RightToLeftIsolate, + 0x8: FSI, // U+2068 FirstStrongIsolate, + 0x9: PDI, // U+2069 PopDirectionalIsolate, +} + +// LookupRune returns properties for r. +func LookupRune(r rune) (p Properties, size int) { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + return Lookup(buf[:n]) +} + +// TODO: these lookup methods are based on the generated trie code. The returned +// sizes have slightly different semantics from the generated code, in that it +// always returns size==1 for an illegal UTF-8 byte (instead of the length +// of the maximum invalid subsequence). Most Transformers, like unicode/norm, +// leave invalid UTF-8 untouched, in which case it has performance benefits to +// do so (without changing the semantics). Bidi requires the semantics used here +// for the bidirule implementation to be compatible with the Go semantics. +// They ultimately should perhaps be adopted by all trie implementations, for +// convenience sake. +// This unrolled code also boosts performance of the secure/bidirule package by +// about 30%. +// So, to remove this code: +// - add option to trie generator to define return type. +// - always return 1 byte size for ill-formed UTF-8 runes. + +// Lookup returns properties for the first rune in s and the width in bytes of +// its encoding. The size will be 0 if s does not hold enough bytes to complete +// the encoding. +func Lookup(s []byte) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} + +// LookupString returns properties for the first rune in s and the width in +// bytes of its encoding. The size will be 0 if s does not hold enough bytes to +// complete the encoding. 
+func LookupString(s string) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go new file mode 100644 index 00000000..2e1ff195 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -0,0 +1,1815 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.10 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16128 bytes (15.75 KiB). Checksum: 8122d83e461996f. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
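For orientation on the lookup functions added in prop.go, which the generated trie in this file backs, here is a hypothetical sketch that walks a string and prints the Bidi properties of each rune. The sample text is arbitrary; only LookupString, Properties.Class, and Properties.IsBracket from this package are assumed.

package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	s := "abc\u05D0\u05D1(1)" // Latin letters, two Hebrew letters, a bracketed digit
	for i := 0; i < len(s); {
		p, sz := bidi.LookupString(s[i:])
		if sz == 0 {
			break // not enough bytes for a full rune (cannot happen for this literal)
		}
		fmt.Printf("%q class=%v bracket=%v\n", s[i:i+sz], p.Class(), p.IsBracket())
		i += sz
	}
}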
+func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 228 blocks, 14592 entries, 14592 bytes +// The third block is the zero block. +var bidiValues = [14592]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 
0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 
0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 
0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 
0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x0001, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 
0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000d, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 
0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 
0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 
0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 
0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, 
+ 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 
0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 
0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 
0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 
0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, + 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, + 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 
0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 
0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f5e: 0x000c, 0x1f5f: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, + 0x2078: 0x0004, 0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 
0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2325: 0x000c, 0x2328: 0x000c, + 0x232d: 0x000c, + // Block 0x8d, offset 0x2340 + 0x235d: 0x0001, + 0x235e: 0x000c, 0x235f: 0x0001, 0x2360: 0x0001, 0x2361: 0x0001, 0x2362: 0x0001, 0x2363: 0x0001, + 0x2364: 0x0001, 0x2365: 0x0001, 0x2366: 0x0001, 0x2367: 0x0001, 0x2368: 0x0001, 0x2369: 0x0003, + 0x236a: 0x0001, 0x236b: 0x0001, 0x236c: 0x0001, 0x236d: 0x0001, 0x236e: 0x0001, 0x236f: 0x0001, + 0x2370: 0x0001, 0x2371: 0x0001, 0x2372: 0x0001, 0x2373: 0x0001, 0x2374: 0x0001, 0x2375: 0x0001, + 0x2376: 0x0001, 0x2377: 0x0001, 0x2378: 0x0001, 0x2379: 0x0001, 0x237a: 0x0001, 0x237b: 0x0001, + 0x237c: 0x0001, 0x237d: 0x0001, 0x237e: 0x0001, 0x237f: 0x0001, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0001, 0x2381: 0x0001, 0x2382: 0x0001, 0x2383: 0x0001, 0x2384: 0x0001, 0x2385: 0x0001, + 0x2386: 0x0001, 0x2387: 0x0001, 0x2388: 0x0001, 0x2389: 0x0001, 0x238a: 0x0001, 0x238b: 0x0001, + 0x238c: 0x0001, 0x238d: 0x0001, 0x238e: 0x0001, 0x238f: 0x0001, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000d, 0x23bf: 0x000d, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 
0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000a, 0x23ff: 0x000a, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000b, 0x2411: 0x000b, + 0x2412: 0x000b, 0x2413: 0x000b, 0x2414: 0x000b, 0x2415: 0x000b, 0x2416: 0x000b, 0x2417: 0x000b, + 0x2418: 0x000b, 0x2419: 0x000b, 0x241a: 0x000b, 0x241b: 0x000b, 0x241c: 0x000b, 0x241d: 0x000b, + 0x241e: 0x000b, 0x241f: 0x000b, 0x2420: 0x000b, 0x2421: 0x000b, 0x2422: 0x000b, 0x2423: 0x000b, + 0x2424: 0x000b, 0x2425: 0x000b, 0x2426: 0x000b, 0x2427: 0x000b, 0x2428: 0x000b, 0x2429: 0x000b, + 0x242a: 0x000b, 0x242b: 0x000b, 0x242c: 0x000b, 0x242d: 0x000b, 0x242e: 0x000b, 0x242f: 0x000b, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000a, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000c, 0x2441: 0x000c, 0x2442: 0x000c, 0x2443: 0x000c, 0x2444: 0x000c, 0x2445: 0x000c, + 0x2446: 0x000c, 0x2447: 0x000c, 0x2448: 0x000c, 0x2449: 0x000c, 0x244a: 0x000c, 0x244b: 0x000c, + 0x244c: 0x000c, 0x244d: 0x000c, 0x244e: 0x000c, 0x244f: 0x000c, 0x2450: 0x000a, 0x2451: 0x000a, + 0x2452: 0x000a, 0x2453: 0x000a, 0x2454: 0x000a, 0x2455: 0x000a, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x000a, + 0x2460: 0x000c, 0x2461: 0x000c, 0x2462: 0x000c, 0x2463: 0x000c, + 0x2464: 0x000c, 0x2465: 0x000c, 0x2466: 0x000c, 0x2467: 0x000c, 0x2468: 0x000c, 0x2469: 0x000c, + 0x246a: 0x000c, 0x246b: 0x000c, 0x246c: 0x000c, 0x246d: 0x000c, 0x246e: 0x000c, 0x246f: 0x000c, + 0x2470: 0x000a, 0x2471: 0x000a, 0x2472: 0x000a, 0x2473: 0x000a, 0x2474: 0x000a, 0x2475: 0x000a, + 0x2476: 0x000a, 0x2477: 0x000a, 0x2478: 0x000a, 0x2479: 0x000a, 0x247a: 0x000a, 0x247b: 0x000a, + 0x247c: 0x000a, 0x247d: 0x000a, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000a, 0x2481: 0x000a, 0x2482: 0x000a, 0x2483: 0x000a, 0x2484: 0x000a, 0x2485: 0x000a, + 0x2486: 0x000a, 0x2487: 0x000a, 0x2488: 0x000a, 0x2489: 0x000a, 0x248a: 0x000a, 0x248b: 0x000a, + 0x248c: 0x000a, 0x248d: 0x000a, 0x248e: 0x000a, 0x248f: 0x000a, 0x2490: 0x0006, 0x2491: 0x000a, + 0x2492: 0x0006, 0x2494: 0x000a, 0x2495: 0x0006, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x009a, 0x249a: 0x008a, 0x249b: 0x007a, 0x249c: 0x006a, 0x249d: 0x009a, + 0x249e: 0x008a, 0x249f: 0x0004, 0x24a0: 0x000a, 0x24a1: 0x000a, 0x24a2: 0x0003, 0x24a3: 0x0003, + 0x24a4: 0x000a, 0x24a5: 0x000a, 0x24a6: 0x000a, 0x24a8: 0x000a, 0x24a9: 0x0004, + 0x24aa: 0x0004, 0x24ab: 0x000a, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000d, 0x24c1: 0x000d, 0x24c2: 0x000d, 0x24c3: 0x000d, 0x24c4: 0x000d, 0x24c5: 0x000d, + 0x24c6: 0x000d, 0x24c7: 0x000d, 0x24c8: 0x000d, 0x24c9: 
0x000d, 0x24ca: 0x000d, 0x24cb: 0x000d, + 0x24cc: 0x000d, 0x24cd: 0x000d, 0x24ce: 0x000d, 0x24cf: 0x000d, 0x24d0: 0x000d, 0x24d1: 0x000d, + 0x24d2: 0x000d, 0x24d3: 0x000d, 0x24d4: 0x000d, 0x24d5: 0x000d, 0x24d6: 0x000d, 0x24d7: 0x000d, + 0x24d8: 0x000d, 0x24d9: 0x000d, 0x24da: 0x000d, 0x24db: 0x000d, 0x24dc: 0x000d, 0x24dd: 0x000d, + 0x24de: 0x000d, 0x24df: 0x000d, 0x24e0: 0x000d, 0x24e1: 0x000d, 0x24e2: 0x000d, 0x24e3: 0x000d, + 0x24e4: 0x000d, 0x24e5: 0x000d, 0x24e6: 0x000d, 0x24e7: 0x000d, 0x24e8: 0x000d, 0x24e9: 0x000d, + 0x24ea: 0x000d, 0x24eb: 0x000d, 0x24ec: 0x000d, 0x24ed: 0x000d, 0x24ee: 0x000d, 0x24ef: 0x000d, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000b, + // Block 0x94, offset 0x2500 + 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x0004, 0x2504: 0x0004, 0x2505: 0x0004, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x003a, 0x2509: 0x002a, 0x250a: 0x000a, 0x250b: 0x0003, + 0x250c: 0x0006, 0x250d: 0x0003, 0x250e: 0x0006, 0x250f: 0x0006, 0x2510: 0x0002, 0x2511: 0x0002, + 0x2512: 0x0002, 0x2513: 0x0002, 0x2514: 0x0002, 0x2515: 0x0002, 0x2516: 0x0002, 0x2517: 0x0002, + 0x2518: 0x0002, 0x2519: 0x0002, 0x251a: 0x0006, 0x251b: 0x000a, 0x251c: 0x000a, 0x251d: 0x000a, + 0x251e: 0x000a, 0x251f: 0x000a, 0x2520: 0x000a, + 0x253b: 0x005a, + 0x253c: 0x000a, 0x253d: 0x004a, 0x253e: 0x000a, 0x253f: 0x000a, + // Block 0x95, offset 0x2540 + 0x2540: 0x000a, + 0x255b: 0x005a, 0x255c: 0x000a, 0x255d: 0x004a, + 0x255e: 0x000a, 0x255f: 0x00fa, 0x2560: 0x00ea, 0x2561: 0x000a, 0x2562: 0x003a, 0x2563: 0x002a, + 0x2564: 0x000a, 0x2565: 0x000a, + // Block 0x96, offset 0x2580 + 0x25a0: 0x0004, 0x25a1: 0x0004, 0x25a2: 0x000a, 0x25a3: 0x000a, + 0x25a4: 0x000a, 0x25a5: 0x0004, 0x25a6: 0x0004, 0x25a8: 0x000a, 0x25a9: 0x000a, + 0x25aa: 0x000a, 0x25ab: 0x000a, 0x25ac: 0x000a, 0x25ad: 0x000a, 0x25ae: 0x000a, + 0x25b0: 0x000b, 0x25b1: 0x000b, 0x25b2: 0x000b, 0x25b3: 0x000b, 0x25b4: 0x000b, 0x25b5: 0x000b, + 0x25b6: 0x000b, 0x25b7: 0x000b, 0x25b8: 0x000b, 0x25b9: 0x000a, 0x25ba: 0x000a, 0x25bb: 0x000a, + 0x25bc: 0x000a, 0x25bd: 0x000a, 0x25be: 0x000b, 0x25bf: 0x000b, + // Block 0x97, offset 0x25c0 + 0x25c1: 0x000a, + // Block 0x98, offset 0x2600 + 0x2600: 0x000a, 0x2601: 0x000a, 0x2602: 0x000a, 0x2603: 0x000a, 0x2604: 0x000a, 0x2605: 0x000a, + 0x2606: 0x000a, 0x2607: 0x000a, 0x2608: 0x000a, 0x2609: 0x000a, 0x260a: 0x000a, 0x260b: 0x000a, + 0x260c: 0x000a, 0x2610: 0x000a, 0x2611: 0x000a, + 0x2612: 0x000a, 0x2613: 0x000a, 0x2614: 0x000a, 0x2615: 0x000a, 0x2616: 0x000a, 0x2617: 0x000a, + 0x2618: 0x000a, 0x2619: 0x000a, 0x261a: 0x000a, 0x261b: 0x000a, + 0x2620: 0x000a, + // Block 0x99, offset 0x2640 + 0x267d: 0x000c, + // Block 0x9a, offset 0x2680 + 0x26a0: 0x000c, 0x26a1: 0x0002, 0x26a2: 0x0002, 0x26a3: 0x0002, + 0x26a4: 0x0002, 0x26a5: 0x0002, 0x26a6: 0x0002, 0x26a7: 0x0002, 0x26a8: 0x0002, 0x26a9: 0x0002, + 0x26aa: 0x0002, 0x26ab: 0x0002, 0x26ac: 0x0002, 0x26ad: 0x0002, 0x26ae: 0x0002, 0x26af: 0x0002, + 0x26b0: 0x0002, 0x26b1: 0x0002, 0x26b2: 0x0002, 0x26b3: 0x0002, 0x26b4: 0x0002, 0x26b5: 0x0002, + 0x26b6: 0x0002, 0x26b7: 0x0002, 0x26b8: 0x0002, 0x26b9: 0x0002, 0x26ba: 0x0002, 0x26bb: 0x0002, + // Block 0x9b, offset 0x26c0 + 0x26f6: 0x000c, 0x26f7: 0x000c, 0x26f8: 0x000c, 0x26f9: 0x000c, 0x26fa: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 
0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x0001, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x000a, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x000c, 0x2782: 0x000c, 0x2783: 0x000c, 0x2784: 0x0001, 0x2785: 0x000c, + 0x2786: 0x000c, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x000c, 0x278d: 0x000c, 0x278e: 0x000c, 0x278f: 0x000c, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x000c, 0x27b9: 0x000c, 0x27ba: 0x000c, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x000c, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 
0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x000c, 0x27e6: 0x000c, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x000a, 0x283a: 0x000a, 0x283b: 0x000a, + 0x283c: 0x000a, 0x283d: 0x000a, 0x283e: 0x000a, 0x283f: 0x000a, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0005, 0x2861: 0x0005, 0x2862: 0x0005, 0x2863: 0x0005, + 0x2864: 0x0005, 0x2865: 0x0005, 0x2866: 0x0005, 0x2867: 0x0005, 0x2868: 0x0005, 0x2869: 0x0005, + 0x286a: 0x0005, 0x286b: 0x0005, 0x286c: 0x0005, 0x286d: 0x0005, 0x286e: 0x0005, 0x286f: 0x0005, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x0005, 0x287b: 0x0005, + 0x287c: 0x0005, 0x287d: 0x0005, 0x287e: 0x0005, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2881: 0x000c, + 0x28b8: 0x000c, 0x28b9: 0x000c, 0x28ba: 0x000c, 0x28bb: 0x000c, + 0x28bc: 0x000c, 0x28bd: 0x000c, 0x28be: 0x000c, 0x28bf: 0x000c, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000c, 0x28c1: 0x000c, 0x28c2: 0x000c, 0x28c3: 0x000c, 0x28c4: 0x000c, 0x28c5: 0x000c, + 0x28c6: 0x000c, + 0x28d2: 0x000a, 0x28d3: 0x000a, 0x28d4: 0x000a, 0x28d5: 0x000a, 0x28d6: 0x000a, 0x28d7: 0x000a, + 0x28d8: 0x000a, 0x28d9: 0x000a, 0x28da: 0x000a, 0x28db: 0x000a, 0x28dc: 0x000a, 0x28dd: 0x000a, + 0x28de: 0x000a, 0x28df: 0x000a, 0x28e0: 0x000a, 0x28e1: 0x000a, 0x28e2: 0x000a, 0x28e3: 0x000a, + 0x28e4: 0x000a, 0x28e5: 0x000a, + 0x28ff: 0x000c, + // Block 0xa4, offset 0x2900 + 0x2900: 0x000c, 0x2901: 0x000c, + 0x2933: 0x000c, 
0x2934: 0x000c, 0x2935: 0x000c, + 0x2936: 0x000c, 0x2939: 0x000c, 0x293a: 0x000c, + // Block 0xa5, offset 0x2940 + 0x2940: 0x000c, 0x2941: 0x000c, 0x2942: 0x000c, + 0x2967: 0x000c, 0x2968: 0x000c, 0x2969: 0x000c, + 0x296a: 0x000c, 0x296b: 0x000c, 0x296d: 0x000c, 0x296e: 0x000c, 0x296f: 0x000c, + 0x2970: 0x000c, 0x2971: 0x000c, 0x2972: 0x000c, 0x2973: 0x000c, 0x2974: 0x000c, + // Block 0xa6, offset 0x2980 + 0x29b3: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, + 0x29f6: 0x000c, 0x29f7: 0x000c, 0x29f8: 0x000c, 0x29f9: 0x000c, 0x29fa: 0x000c, 0x29fb: 0x000c, + 0x29fc: 0x000c, 0x29fd: 0x000c, 0x29fe: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a0a: 0x000c, 0x2a0b: 0x000c, + 0x2a0c: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a6f: 0x000c, + 0x2a70: 0x000c, 0x2a71: 0x000c, 0x2a74: 0x000c, + 0x2a76: 0x000c, 0x2a77: 0x000c, + 0x2a7e: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a9f: 0x000c, 0x2aa3: 0x000c, + 0x2aa4: 0x000c, 0x2aa5: 0x000c, 0x2aa6: 0x000c, 0x2aa7: 0x000c, 0x2aa8: 0x000c, 0x2aa9: 0x000c, + 0x2aaa: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x000c, 0x2ac1: 0x000c, + 0x2afc: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x000c, + 0x2b26: 0x000c, 0x2b27: 0x000c, 0x2b28: 0x000c, 0x2b29: 0x000c, + 0x2b2a: 0x000c, 0x2b2b: 0x000c, 0x2b2c: 0x000c, + 0x2b30: 0x000c, 0x2b31: 0x000c, 0x2b32: 0x000c, 0x2b33: 0x000c, 0x2b34: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b78: 0x000c, 0x2b79: 0x000c, 0x2b7a: 0x000c, 0x2b7b: 0x000c, + 0x2b7c: 0x000c, 0x2b7d: 0x000c, 0x2b7e: 0x000c, 0x2b7f: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b82: 0x000c, 0x2b83: 0x000c, 0x2b84: 0x000c, + 0x2b86: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bf3: 0x000c, 0x2bf4: 0x000c, 0x2bf5: 0x000c, + 0x2bf6: 0x000c, 0x2bf7: 0x000c, 0x2bf8: 0x000c, 0x2bfa: 0x000c, + 0x2bff: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c00: 0x000c, 0x2c02: 0x000c, 0x2c03: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c72: 0x000c, 0x2c73: 0x000c, 0x2c74: 0x000c, 0x2c75: 0x000c, + 0x2c7c: 0x000c, 0x2c7d: 0x000c, 0x2c7f: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x000c, + 0x2c9c: 0x000c, 0x2c9d: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cf3: 0x000c, 0x2cf4: 0x000c, 0x2cf5: 0x000c, + 0x2cf6: 0x000c, 0x2cf7: 0x000c, 0x2cf8: 0x000c, 0x2cf9: 0x000c, 0x2cfa: 0x000c, + 0x2cfd: 0x000c, 0x2cff: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x000c, + 0x2d20: 0x000a, 0x2d21: 0x000a, 0x2d22: 0x000a, 0x2d23: 0x000a, + 0x2d24: 0x000a, 0x2d25: 0x000a, 0x2d26: 0x000a, 0x2d27: 0x000a, 0x2d28: 0x000a, 0x2d29: 0x000a, + 0x2d2a: 0x000a, 0x2d2b: 0x000a, 0x2d2c: 0x000a, + // Block 0xb5, offset 0x2d40 + 0x2d6b: 0x000c, 0x2d6d: 0x000c, + 0x2d70: 0x000c, 0x2d71: 0x000c, 0x2d72: 0x000c, 0x2d73: 0x000c, 0x2d74: 0x000c, 0x2d75: 0x000c, + 0x2d77: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2d9d: 0x000c, + 0x2d9e: 0x000c, 0x2d9f: 0x000c, 0x2da2: 0x000c, 0x2da3: 0x000c, + 0x2da4: 0x000c, 0x2da5: 0x000c, 0x2da7: 0x000c, 0x2da8: 0x000c, 0x2da9: 0x000c, + 0x2daa: 0x000c, 0x2dab: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2dc1: 0x000c, 0x2dc2: 0x000c, 0x2dc3: 0x000c, 0x2dc4: 0x000c, 0x2dc5: 0x000c, + 0x2dc6: 0x000c, 0x2dc9: 0x000c, 0x2dca: 0x000c, + 0x2df3: 0x000c, 0x2df4: 0x000c, 0x2df5: 0x000c, + 0x2df6: 0x000c, 0x2df7: 0x000c, 0x2df8: 0x000c, 0x2dfb: 0x000c, + 0x2dfc: 0x000c, 0x2dfd: 0x000c, 0x2dfe: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e07: 0x000c, + 0x2e11: 0x000c, + 0x2e12: 0x000c, 0x2e13: 0x000c, 0x2e14: 0x000c, 0x2e15: 0x000c, 0x2e16: 0x000c, + 0x2e19: 0x000c, 0x2e1a: 0x000c, 0x2e1b: 0x000c, + // Block 0xb9, offset 0x2e40 + 
0x2e4a: 0x000c, 0x2e4b: 0x000c, + 0x2e4c: 0x000c, 0x2e4d: 0x000c, 0x2e4e: 0x000c, 0x2e4f: 0x000c, 0x2e50: 0x000c, 0x2e51: 0x000c, + 0x2e52: 0x000c, 0x2e53: 0x000c, 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, + 0x2e58: 0x000c, 0x2e59: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb8: 0x000c, 0x2eb9: 0x000c, 0x2eba: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, 0x2ed7: 0x000c, + 0x2ed8: 0x000c, 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, 0x2edc: 0x000c, 0x2edd: 0x000c, + 0x2ede: 0x000c, 0x2edf: 0x000c, 0x2ee0: 0x000c, 0x2ee1: 0x000c, 0x2ee2: 0x000c, 0x2ee3: 0x000c, + 0x2ee4: 0x000c, 0x2ee5: 0x000c, 0x2ee6: 0x000c, 0x2ee7: 0x000c, + 0x2eea: 0x000c, 0x2eeb: 0x000c, 0x2eec: 0x000c, 0x2eed: 0x000c, 0x2eee: 0x000c, 0x2eef: 0x000c, + 0x2ef0: 0x000c, 0x2ef2: 0x000c, 0x2ef3: 0x000c, 0x2ef5: 0x000c, + 0x2ef6: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f31: 0x000c, 0x2f32: 0x000c, 0x2f33: 0x000c, 0x2f34: 0x000c, 0x2f35: 0x000c, + 0x2f36: 0x000c, 0x2f3a: 0x000c, + 0x2f3c: 0x000c, 0x2f3d: 0x000c, 0x2f3f: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f40: 0x000c, 0x2f41: 0x000c, 0x2f42: 0x000c, 0x2f43: 0x000c, 0x2f44: 0x000c, 0x2f45: 0x000c, + 0x2f47: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2fb0: 0x000c, 0x2fb1: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb4: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff0: 0x000c, 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, + // Block 0xc0, offset 0x3000 + 0x300f: 0x000c, 0x3010: 0x000c, 0x3011: 0x000c, + 0x3012: 0x000c, + // Block 0xc1, offset 0x3040 + 0x305d: 0x000c, + 0x305e: 0x000c, 0x3060: 0x000b, 0x3061: 0x000b, 0x3062: 0x000b, 0x3063: 0x000b, + // Block 0xc2, offset 0x3080 + 0x30a7: 0x000c, 0x30a8: 0x000c, 0x30a9: 0x000c, + 0x30b3: 0x000b, 0x30b4: 0x000b, 0x30b5: 0x000b, + 0x30b6: 0x000b, 0x30b7: 0x000b, 0x30b8: 0x000b, 0x30b9: 0x000b, 0x30ba: 0x000b, 0x30bb: 0x000c, + 0x30bc: 0x000c, 0x30bd: 0x000c, 0x30be: 0x000c, 0x30bf: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30c0: 0x000c, 0x30c1: 0x000c, 0x30c2: 0x000c, 0x30c5: 0x000c, + 0x30c6: 0x000c, 0x30c7: 0x000c, 0x30c8: 0x000c, 0x30c9: 0x000c, 0x30ca: 0x000c, 0x30cb: 0x000c, + 0x30ea: 0x000c, 0x30eb: 0x000c, 0x30ec: 0x000c, 0x30ed: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3100: 0x000a, 0x3101: 0x000a, 0x3102: 0x000c, 0x3103: 0x000c, 0x3104: 0x000c, 0x3105: 0x000a, + // Block 0xc5, offset 0x3140 + 0x3140: 0x000a, 0x3141: 0x000a, 0x3142: 0x000a, 0x3143: 0x000a, 0x3144: 0x000a, 0x3145: 0x000a, + 0x3146: 0x000a, 0x3147: 0x000a, 0x3148: 0x000a, 0x3149: 0x000a, 0x314a: 0x000a, 0x314b: 0x000a, + 0x314c: 0x000a, 0x314d: 0x000a, 0x314e: 0x000a, 0x314f: 0x000a, 0x3150: 0x000a, 0x3151: 0x000a, + 0x3152: 0x000a, 0x3153: 0x000a, 0x3154: 0x000a, 0x3155: 0x000a, 0x3156: 0x000a, + // Block 0xc6, offset 0x3180 + 0x319b: 0x000a, + // Block 0xc7, offset 0x31c0 + 0x31d5: 0x000a, + // Block 0xc8, offset 0x3200 + 0x320f: 0x000a, + // Block 0xc9, offset 0x3240 + 0x3249: 0x000a, + // Block 0xca, offset 0x3280 + 0x3283: 0x000a, + 0x328e: 0x0002, 0x328f: 0x0002, 0x3290: 0x0002, 0x3291: 0x0002, + 0x3292: 0x0002, 0x3293: 0x0002, 0x3294: 0x0002, 0x3295: 0x0002, 0x3296: 0x0002, 0x3297: 0x0002, + 0x3298: 0x0002, 0x3299: 0x0002, 0x329a: 0x0002, 0x329b: 0x0002, 0x329c: 0x0002, 0x329d: 0x0002, + 0x329e: 0x0002, 0x329f: 0x0002, 0x32a0: 0x0002, 0x32a1: 
0x0002, 0x32a2: 0x0002, 0x32a3: 0x0002, + 0x32a4: 0x0002, 0x32a5: 0x0002, 0x32a6: 0x0002, 0x32a7: 0x0002, 0x32a8: 0x0002, 0x32a9: 0x0002, + 0x32aa: 0x0002, 0x32ab: 0x0002, 0x32ac: 0x0002, 0x32ad: 0x0002, 0x32ae: 0x0002, 0x32af: 0x0002, + 0x32b0: 0x0002, 0x32b1: 0x0002, 0x32b2: 0x0002, 0x32b3: 0x0002, 0x32b4: 0x0002, 0x32b5: 0x0002, + 0x32b6: 0x0002, 0x32b7: 0x0002, 0x32b8: 0x0002, 0x32b9: 0x0002, 0x32ba: 0x0002, 0x32bb: 0x0002, + 0x32bc: 0x0002, 0x32bd: 0x0002, 0x32be: 0x0002, 0x32bf: 0x0002, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x000c, 0x32c1: 0x000c, 0x32c2: 0x000c, 0x32c3: 0x000c, 0x32c4: 0x000c, 0x32c5: 0x000c, + 0x32c6: 0x000c, 0x32c7: 0x000c, 0x32c8: 0x000c, 0x32c9: 0x000c, 0x32ca: 0x000c, 0x32cb: 0x000c, + 0x32cc: 0x000c, 0x32cd: 0x000c, 0x32ce: 0x000c, 0x32cf: 0x000c, 0x32d0: 0x000c, 0x32d1: 0x000c, + 0x32d2: 0x000c, 0x32d3: 0x000c, 0x32d4: 0x000c, 0x32d5: 0x000c, 0x32d6: 0x000c, 0x32d7: 0x000c, + 0x32d8: 0x000c, 0x32d9: 0x000c, 0x32da: 0x000c, 0x32db: 0x000c, 0x32dc: 0x000c, 0x32dd: 0x000c, + 0x32de: 0x000c, 0x32df: 0x000c, 0x32e0: 0x000c, 0x32e1: 0x000c, 0x32e2: 0x000c, 0x32e3: 0x000c, + 0x32e4: 0x000c, 0x32e5: 0x000c, 0x32e6: 0x000c, 0x32e7: 0x000c, 0x32e8: 0x000c, 0x32e9: 0x000c, + 0x32ea: 0x000c, 0x32eb: 0x000c, 0x32ec: 0x000c, 0x32ed: 0x000c, 0x32ee: 0x000c, 0x32ef: 0x000c, + 0x32f0: 0x000c, 0x32f1: 0x000c, 0x32f2: 0x000c, 0x32f3: 0x000c, 0x32f4: 0x000c, 0x32f5: 0x000c, + 0x32f6: 0x000c, 0x32fb: 0x000c, + 0x32fc: 0x000c, 0x32fd: 0x000c, 0x32fe: 0x000c, 0x32ff: 0x000c, + // Block 0xcc, offset 0x3300 + 0x3300: 0x000c, 0x3301: 0x000c, 0x3302: 0x000c, 0x3303: 0x000c, 0x3304: 0x000c, 0x3305: 0x000c, + 0x3306: 0x000c, 0x3307: 0x000c, 0x3308: 0x000c, 0x3309: 0x000c, 0x330a: 0x000c, 0x330b: 0x000c, + 0x330c: 0x000c, 0x330d: 0x000c, 0x330e: 0x000c, 0x330f: 0x000c, 0x3310: 0x000c, 0x3311: 0x000c, + 0x3312: 0x000c, 0x3313: 0x000c, 0x3314: 0x000c, 0x3315: 0x000c, 0x3316: 0x000c, 0x3317: 0x000c, + 0x3318: 0x000c, 0x3319: 0x000c, 0x331a: 0x000c, 0x331b: 0x000c, 0x331c: 0x000c, 0x331d: 0x000c, + 0x331e: 0x000c, 0x331f: 0x000c, 0x3320: 0x000c, 0x3321: 0x000c, 0x3322: 0x000c, 0x3323: 0x000c, + 0x3324: 0x000c, 0x3325: 0x000c, 0x3326: 0x000c, 0x3327: 0x000c, 0x3328: 0x000c, 0x3329: 0x000c, + 0x332a: 0x000c, 0x332b: 0x000c, 0x332c: 0x000c, + 0x3335: 0x000c, + // Block 0xcd, offset 0x3340 + 0x3344: 0x000c, + 0x335b: 0x000c, 0x335c: 0x000c, 0x335d: 0x000c, + 0x335e: 0x000c, 0x335f: 0x000c, 0x3361: 0x000c, 0x3362: 0x000c, 0x3363: 0x000c, + 0x3364: 0x000c, 0x3365: 0x000c, 0x3366: 0x000c, 0x3367: 0x000c, 0x3368: 0x000c, 0x3369: 0x000c, + 0x336a: 0x000c, 0x336b: 0x000c, 0x336c: 0x000c, 0x336d: 0x000c, 0x336e: 0x000c, 0x336f: 0x000c, + // Block 0xce, offset 0x3380 + 0x3380: 0x000c, 0x3381: 0x000c, 0x3382: 0x000c, 0x3383: 0x000c, 0x3384: 0x000c, 0x3385: 0x000c, + 0x3386: 0x000c, 0x3388: 0x000c, 0x3389: 0x000c, 0x338a: 0x000c, 0x338b: 0x000c, + 0x338c: 0x000c, 0x338d: 0x000c, 0x338e: 0x000c, 0x338f: 0x000c, 0x3390: 0x000c, 0x3391: 0x000c, + 0x3392: 0x000c, 0x3393: 0x000c, 0x3394: 0x000c, 0x3395: 0x000c, 0x3396: 0x000c, 0x3397: 0x000c, + 0x3398: 0x000c, 0x339b: 0x000c, 0x339c: 0x000c, 0x339d: 0x000c, + 0x339e: 0x000c, 0x339f: 0x000c, 0x33a0: 0x000c, 0x33a1: 0x000c, 0x33a3: 0x000c, + 0x33a4: 0x000c, 0x33a6: 0x000c, 0x33a7: 0x000c, 0x33a8: 0x000c, 0x33a9: 0x000c, + 0x33aa: 0x000c, + // Block 0xcf, offset 0x33c0 + 0x33c0: 0x0001, 0x33c1: 0x0001, 0x33c2: 0x0001, 0x33c3: 0x0001, 0x33c4: 0x0001, 0x33c5: 0x0001, + 0x33c6: 0x0001, 0x33c7: 0x0001, 0x33c8: 0x0001, 0x33c9: 0x0001, 0x33ca: 0x0001, 0x33cb: 0x0001, 
+ 0x33cc: 0x0001, 0x33cd: 0x0001, 0x33ce: 0x0001, 0x33cf: 0x0001, 0x33d0: 0x000c, 0x33d1: 0x000c, + 0x33d2: 0x000c, 0x33d3: 0x000c, 0x33d4: 0x000c, 0x33d5: 0x000c, 0x33d6: 0x000c, 0x33d7: 0x0001, + 0x33d8: 0x0001, 0x33d9: 0x0001, 0x33da: 0x0001, 0x33db: 0x0001, 0x33dc: 0x0001, 0x33dd: 0x0001, + 0x33de: 0x0001, 0x33df: 0x0001, 0x33e0: 0x0001, 0x33e1: 0x0001, 0x33e2: 0x0001, 0x33e3: 0x0001, + 0x33e4: 0x0001, 0x33e5: 0x0001, 0x33e6: 0x0001, 0x33e7: 0x0001, 0x33e8: 0x0001, 0x33e9: 0x0001, + 0x33ea: 0x0001, 0x33eb: 0x0001, 0x33ec: 0x0001, 0x33ed: 0x0001, 0x33ee: 0x0001, 0x33ef: 0x0001, + 0x33f0: 0x0001, 0x33f1: 0x0001, 0x33f2: 0x0001, 0x33f3: 0x0001, 0x33f4: 0x0001, 0x33f5: 0x0001, + 0x33f6: 0x0001, 0x33f7: 0x0001, 0x33f8: 0x0001, 0x33f9: 0x0001, 0x33fa: 0x0001, 0x33fb: 0x0001, + 0x33fc: 0x0001, 0x33fd: 0x0001, 0x33fe: 0x0001, 0x33ff: 0x0001, + // Block 0xd0, offset 0x3400 + 0x3400: 0x0001, 0x3401: 0x0001, 0x3402: 0x0001, 0x3403: 0x0001, 0x3404: 0x000c, 0x3405: 0x000c, + 0x3406: 0x000c, 0x3407: 0x000c, 0x3408: 0x000c, 0x3409: 0x000c, 0x340a: 0x000c, 0x340b: 0x0001, + 0x340c: 0x0001, 0x340d: 0x0001, 0x340e: 0x0001, 0x340f: 0x0001, 0x3410: 0x0001, 0x3411: 0x0001, + 0x3412: 0x0001, 0x3413: 0x0001, 0x3414: 0x0001, 0x3415: 0x0001, 0x3416: 0x0001, 0x3417: 0x0001, + 0x3418: 0x0001, 0x3419: 0x0001, 0x341a: 0x0001, 0x341b: 0x0001, 0x341c: 0x0001, 0x341d: 0x0001, + 0x341e: 0x0001, 0x341f: 0x0001, 0x3420: 0x0001, 0x3421: 0x0001, 0x3422: 0x0001, 0x3423: 0x0001, + 0x3424: 0x0001, 0x3425: 0x0001, 0x3426: 0x0001, 0x3427: 0x0001, 0x3428: 0x0001, 0x3429: 0x0001, + 0x342a: 0x0001, 0x342b: 0x0001, 0x342c: 0x0001, 0x342d: 0x0001, 0x342e: 0x0001, 0x342f: 0x0001, + 0x3430: 0x0001, 0x3431: 0x0001, 0x3432: 0x0001, 0x3433: 0x0001, 0x3434: 0x0001, 0x3435: 0x0001, + 0x3436: 0x0001, 0x3437: 0x0001, 0x3438: 0x0001, 0x3439: 0x0001, 0x343a: 0x0001, 0x343b: 0x0001, + 0x343c: 0x0001, 0x343d: 0x0001, 0x343e: 0x0001, 0x343f: 0x0001, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000d, 0x3441: 0x000d, 0x3442: 0x000d, 0x3443: 0x000d, 0x3444: 0x000d, 0x3445: 0x000d, + 0x3446: 0x000d, 0x3447: 0x000d, 0x3448: 0x000d, 0x3449: 0x000d, 0x344a: 0x000d, 0x344b: 0x000d, + 0x344c: 0x000d, 0x344d: 0x000d, 0x344e: 0x000d, 0x344f: 0x000d, 0x3450: 0x000d, 0x3451: 0x000d, + 0x3452: 0x000d, 0x3453: 0x000d, 0x3454: 0x000d, 0x3455: 0x000d, 0x3456: 0x000d, 0x3457: 0x000d, + 0x3458: 0x000d, 0x3459: 0x000d, 0x345a: 0x000d, 0x345b: 0x000d, 0x345c: 0x000d, 0x345d: 0x000d, + 0x345e: 0x000d, 0x345f: 0x000d, 0x3460: 0x000d, 0x3461: 0x000d, 0x3462: 0x000d, 0x3463: 0x000d, + 0x3464: 0x000d, 0x3465: 0x000d, 0x3466: 0x000d, 0x3467: 0x000d, 0x3468: 0x000d, 0x3469: 0x000d, + 0x346a: 0x000d, 0x346b: 0x000d, 0x346c: 0x000d, 0x346d: 0x000d, 0x346e: 0x000d, 0x346f: 0x000d, + 0x3470: 0x000a, 0x3471: 0x000a, 0x3472: 0x000d, 0x3473: 0x000d, 0x3474: 0x000d, 0x3475: 0x000d, + 0x3476: 0x000d, 0x3477: 0x000d, 0x3478: 0x000d, 0x3479: 0x000d, 0x347a: 0x000d, 0x347b: 0x000d, + 0x347c: 0x000d, 0x347d: 0x000d, 0x347e: 0x000d, 0x347f: 0x000d, + // Block 0xd2, offset 0x3480 + 0x3480: 0x000a, 0x3481: 0x000a, 0x3482: 0x000a, 0x3483: 0x000a, 0x3484: 0x000a, 0x3485: 0x000a, + 0x3486: 0x000a, 0x3487: 0x000a, 0x3488: 0x000a, 0x3489: 0x000a, 0x348a: 0x000a, 0x348b: 0x000a, + 0x348c: 0x000a, 0x348d: 0x000a, 0x348e: 0x000a, 0x348f: 0x000a, 0x3490: 0x000a, 0x3491: 0x000a, + 0x3492: 0x000a, 0x3493: 0x000a, 0x3494: 0x000a, 0x3495: 0x000a, 0x3496: 0x000a, 0x3497: 0x000a, + 0x3498: 0x000a, 0x3499: 0x000a, 0x349a: 0x000a, 0x349b: 0x000a, 0x349c: 0x000a, 0x349d: 0x000a, + 0x349e: 0x000a, 0x349f: 
0x000a, 0x34a0: 0x000a, 0x34a1: 0x000a, 0x34a2: 0x000a, 0x34a3: 0x000a, + 0x34a4: 0x000a, 0x34a5: 0x000a, 0x34a6: 0x000a, 0x34a7: 0x000a, 0x34a8: 0x000a, 0x34a9: 0x000a, + 0x34aa: 0x000a, 0x34ab: 0x000a, + 0x34b0: 0x000a, 0x34b1: 0x000a, 0x34b2: 0x000a, 0x34b3: 0x000a, 0x34b4: 0x000a, 0x34b5: 0x000a, + 0x34b6: 0x000a, 0x34b7: 0x000a, 0x34b8: 0x000a, 0x34b9: 0x000a, 0x34ba: 0x000a, 0x34bb: 0x000a, + 0x34bc: 0x000a, 0x34bd: 0x000a, 0x34be: 0x000a, 0x34bf: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, 0x34d3: 0x000a, + 0x34e0: 0x000a, 0x34e1: 0x000a, 0x34e2: 0x000a, 0x34e3: 0x000a, + 0x34e4: 0x000a, 0x34e5: 0x000a, 0x34e6: 0x000a, 0x34e7: 0x000a, 0x34e8: 0x000a, 0x34e9: 0x000a, + 0x34ea: 0x000a, 0x34eb: 0x000a, 0x34ec: 0x000a, 0x34ed: 0x000a, 0x34ee: 0x000a, + 0x34f1: 0x000a, 0x34f2: 0x000a, 0x34f3: 0x000a, 0x34f4: 0x000a, 0x34f5: 0x000a, + 0x34f6: 0x000a, 0x34f7: 0x000a, 0x34f8: 0x000a, 0x34f9: 0x000a, 0x34fa: 0x000a, 0x34fb: 0x000a, + 0x34fc: 0x000a, 0x34fd: 0x000a, 0x34fe: 0x000a, 0x34ff: 0x000a, + // Block 0xd4, offset 0x3500 + 0x3501: 0x000a, 0x3502: 0x000a, 0x3503: 0x000a, 0x3504: 0x000a, 0x3505: 0x000a, + 0x3506: 0x000a, 0x3507: 0x000a, 0x3508: 0x000a, 0x3509: 0x000a, 0x350a: 0x000a, 0x350b: 0x000a, + 0x350c: 0x000a, 0x350d: 0x000a, 0x350e: 0x000a, 0x350f: 0x000a, 0x3511: 0x000a, + 0x3512: 0x000a, 0x3513: 0x000a, 0x3514: 0x000a, 0x3515: 0x000a, 0x3516: 0x000a, 0x3517: 0x000a, + 0x3518: 0x000a, 0x3519: 0x000a, 0x351a: 0x000a, 0x351b: 0x000a, 0x351c: 0x000a, 0x351d: 0x000a, + 0x351e: 0x000a, 0x351f: 0x000a, 0x3520: 0x000a, 0x3521: 0x000a, 0x3522: 0x000a, 0x3523: 0x000a, + 0x3524: 0x000a, 0x3525: 0x000a, 0x3526: 0x000a, 0x3527: 0x000a, 0x3528: 0x000a, 0x3529: 0x000a, + 0x352a: 0x000a, 0x352b: 0x000a, 0x352c: 0x000a, 0x352d: 0x000a, 0x352e: 0x000a, 0x352f: 0x000a, + 0x3530: 0x000a, 0x3531: 0x000a, 0x3532: 0x000a, 0x3533: 0x000a, 0x3534: 0x000a, 0x3535: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3540: 0x0002, 0x3541: 0x0002, 0x3542: 0x0002, 0x3543: 0x0002, 0x3544: 0x0002, 0x3545: 0x0002, + 0x3546: 0x0002, 0x3547: 0x0002, 0x3548: 0x0002, 0x3549: 0x0002, 0x354a: 0x0002, 0x354b: 0x000a, + 0x354c: 0x000a, + // Block 0xd6, offset 0x3580 + 0x35aa: 0x000a, 0x35ab: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000a, 0x3601: 0x000a, 0x3602: 0x000a, 0x3603: 0x000a, 0x3604: 0x000a, 0x3605: 0x000a, + 0x3606: 0x000a, 0x3607: 0x000a, 0x3608: 0x000a, 0x3609: 0x000a, 0x360a: 0x000a, 0x360b: 0x000a, + 0x360c: 0x000a, 0x360d: 0x000a, 0x360e: 0x000a, 0x360f: 0x000a, 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, 0x3614: 0x000a, + 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3628: 0x000a, 0x3629: 0x000a, + 0x362a: 0x000a, 0x362b: 0x000a, 0x362c: 0x000a, + 0x3630: 0x000a, 0x3631: 0x000a, 0x3632: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000a, 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 
0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x364c: 0x000a, 0x364d: 0x000a, 0x364e: 0x000a, 0x364f: 0x000a, 0x3650: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000a, 0x3683: 0x000a, 0x3684: 0x000a, 0x3685: 0x000a, + 0x3686: 0x000a, 0x3687: 0x000a, 0x3688: 0x000a, 0x3689: 0x000a, 0x368a: 0x000a, 0x368b: 0x000a, + 0x3690: 0x000a, 0x3691: 0x000a, + 0x3692: 0x000a, 0x3693: 0x000a, 0x3694: 0x000a, 0x3695: 0x000a, 0x3696: 0x000a, 0x3697: 0x000a, + 0x3698: 0x000a, 0x3699: 0x000a, 0x369a: 0x000a, 0x369b: 0x000a, 0x369c: 0x000a, 0x369d: 0x000a, + 0x369e: 0x000a, 0x369f: 0x000a, 0x36a0: 0x000a, 0x36a1: 0x000a, 0x36a2: 0x000a, 0x36a3: 0x000a, + 0x36a4: 0x000a, 0x36a5: 0x000a, 0x36a6: 0x000a, 0x36a7: 0x000a, 0x36a8: 0x000a, 0x36a9: 0x000a, + 0x36aa: 0x000a, 0x36ab: 0x000a, 0x36ac: 0x000a, 0x36ad: 0x000a, 0x36ae: 0x000a, 0x36af: 0x000a, + 0x36b0: 0x000a, 0x36b1: 0x000a, 0x36b2: 0x000a, 0x36b3: 0x000a, 0x36b4: 0x000a, 0x36b5: 0x000a, + 0x36b6: 0x000a, 0x36b7: 0x000a, 0x36b8: 0x000a, 0x36b9: 0x000a, 0x36ba: 0x000a, 0x36bb: 0x000a, + 0x36bc: 0x000a, 0x36bd: 0x000a, 0x36be: 0x000a, 0x36bf: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36c0: 0x000a, 0x36c1: 0x000a, 0x36c2: 0x000a, 0x36c3: 0x000a, 0x36c4: 0x000a, 0x36c5: 0x000a, + 0x36c6: 0x000a, 0x36c7: 0x000a, + 0x36d0: 0x000a, 0x36d1: 0x000a, + 0x36d2: 0x000a, 0x36d3: 0x000a, 0x36d4: 0x000a, 0x36d5: 0x000a, 0x36d6: 0x000a, 0x36d7: 0x000a, + 0x36d8: 0x000a, 0x36d9: 0x000a, + 0x36e0: 0x000a, 0x36e1: 0x000a, 0x36e2: 0x000a, 0x36e3: 0x000a, + 0x36e4: 0x000a, 0x36e5: 0x000a, 0x36e6: 0x000a, 0x36e7: 0x000a, 0x36e8: 0x000a, 0x36e9: 0x000a, + 0x36ea: 0x000a, 0x36eb: 0x000a, 0x36ec: 0x000a, 0x36ed: 0x000a, 0x36ee: 0x000a, 0x36ef: 0x000a, + 0x36f0: 0x000a, 0x36f1: 0x000a, 0x36f2: 0x000a, 0x36f3: 0x000a, 0x36f4: 0x000a, 0x36f5: 0x000a, + 0x36f6: 0x000a, 0x36f7: 0x000a, 0x36f8: 0x000a, 0x36f9: 0x000a, 0x36fa: 0x000a, 0x36fb: 0x000a, + 0x36fc: 0x000a, 0x36fd: 0x000a, 0x36fe: 0x000a, 0x36ff: 0x000a, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000a, 0x3701: 0x000a, 0x3702: 0x000a, 0x3703: 0x000a, 0x3704: 0x000a, 0x3705: 0x000a, + 0x3706: 0x000a, 0x3707: 0x000a, + 0x3710: 0x000a, 0x3711: 0x000a, + 0x3712: 0x000a, 0x3713: 0x000a, 0x3714: 0x000a, 0x3715: 0x000a, 0x3716: 0x000a, 0x3717: 0x000a, + 0x3718: 0x000a, 0x3719: 0x000a, 0x371a: 0x000a, 0x371b: 0x000a, 0x371c: 0x000a, 0x371d: 0x000a, + 0x371e: 0x000a, 0x371f: 0x000a, 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, 0x3726: 0x000a, 0x3727: 0x000a, 0x3728: 0x000a, 0x3729: 0x000a, + 0x372a: 0x000a, 0x372b: 0x000a, 0x372c: 0x000a, 0x372d: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, 0x3754: 0x000a, 0x3755: 0x000a, 0x3756: 0x000a, 0x3757: 0x000a, + 0x3758: 0x000a, 0x3759: 0x000a, 0x375a: 0x000a, 0x375b: 0x000a, 0x375c: 0x000a, 0x375d: 0x000a, + 0x375e: 0x000a, 0x375f: 0x000a, 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, 0x376d: 0x000a, 0x376e: 0x000a, 0x376f: 0x000a, + 0x3770: 0x000a, 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 
0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, 0x377a: 0x000a, 0x377b: 0x000a, + 0x377c: 0x000a, 0x377d: 0x000a, 0x377e: 0x000a, + // Block 0xde, offset 0x3780 + 0x3780: 0x000a, 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x3790: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, 0x3799: 0x000a, 0x379a: 0x000a, 0x379b: 0x000a, 0x379c: 0x000a, 0x379d: 0x000a, + 0x379e: 0x000a, 0x379f: 0x000a, 0x37a0: 0x000a, 0x37a1: 0x000a, 0x37a2: 0x000a, 0x37a3: 0x000a, + 0x37a4: 0x000a, 0x37a5: 0x000a, 0x37a6: 0x000a, 0x37a7: 0x000a, 0x37a8: 0x000a, 0x37a9: 0x000a, + 0x37aa: 0x000a, 0x37ab: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x000a, 0x37c1: 0x000a, 0x37c2: 0x000a, 0x37c3: 0x000a, 0x37c4: 0x000a, 0x37c5: 0x000a, + 0x37c6: 0x000a, 0x37c7: 0x000a, 0x37c8: 0x000a, 0x37c9: 0x000a, 0x37ca: 0x000a, 0x37cb: 0x000a, + 0x37cc: 0x000a, 0x37cd: 0x000a, 0x37ce: 0x000a, 0x37cf: 0x000a, 0x37d0: 0x000a, 0x37d1: 0x000a, + 0x37d2: 0x000a, 0x37d3: 0x000a, 0x37d4: 0x000a, 0x37d5: 0x000a, 0x37d6: 0x000a, 0x37d7: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000a, + 0x3810: 0x000a, 0x3811: 0x000a, + 0x3812: 0x000a, 0x3813: 0x000a, 0x3814: 0x000a, 0x3815: 0x000a, 0x3816: 0x000a, 0x3817: 0x000a, + 0x3818: 0x000a, 0x3819: 0x000a, 0x381a: 0x000a, 0x381b: 0x000a, 0x381c: 0x000a, 0x381d: 0x000a, + 0x381e: 0x000a, 0x381f: 0x000a, 0x3820: 0x000a, 0x3821: 0x000a, 0x3822: 0x000a, 0x3823: 0x000a, + 0x3824: 0x000a, 0x3825: 0x000a, 0x3826: 0x000a, + // Block 0xe1, offset 0x3840 + 0x387e: 0x000b, 0x387f: 0x000b, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000b, 0x3881: 0x000b, 0x3882: 0x000b, 0x3883: 0x000b, 0x3884: 0x000b, 0x3885: 0x000b, + 0x3886: 0x000b, 0x3887: 0x000b, 0x3888: 0x000b, 0x3889: 0x000b, 0x388a: 0x000b, 0x388b: 0x000b, + 0x388c: 0x000b, 0x388d: 0x000b, 0x388e: 0x000b, 0x388f: 0x000b, 0x3890: 0x000b, 0x3891: 0x000b, + 0x3892: 0x000b, 0x3893: 0x000b, 0x3894: 0x000b, 0x3895: 0x000b, 0x3896: 0x000b, 0x3897: 0x000b, + 0x3898: 0x000b, 0x3899: 0x000b, 0x389a: 0x000b, 0x389b: 0x000b, 0x389c: 0x000b, 0x389d: 0x000b, + 0x389e: 0x000b, 0x389f: 0x000b, 0x38a0: 0x000b, 0x38a1: 0x000b, 0x38a2: 0x000b, 0x38a3: 0x000b, + 0x38a4: 0x000b, 0x38a5: 0x000b, 0x38a6: 0x000b, 0x38a7: 0x000b, 0x38a8: 0x000b, 0x38a9: 0x000b, + 0x38aa: 0x000b, 0x38ab: 0x000b, 0x38ac: 0x000b, 0x38ad: 0x000b, 0x38ae: 0x000b, 0x38af: 0x000b, + 0x38b0: 0x000b, 0x38b1: 0x000b, 0x38b2: 0x000b, 0x38b3: 0x000b, 0x38b4: 0x000b, 0x38b5: 0x000b, + 0x38b6: 0x000b, 0x38b7: 0x000b, 0x38b8: 0x000b, 0x38b9: 0x000b, 0x38ba: 0x000b, 0x38bb: 0x000b, + 0x38bc: 0x000b, 0x38bd: 0x000b, 0x38be: 0x000b, 0x38bf: 0x000b, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000c, 0x38c1: 0x000c, 0x38c2: 0x000c, 0x38c3: 0x000c, 0x38c4: 0x000c, 0x38c5: 0x000c, + 0x38c6: 0x000c, 0x38c7: 0x000c, 0x38c8: 0x000c, 0x38c9: 0x000c, 0x38ca: 0x000c, 0x38cb: 0x000c, + 0x38cc: 0x000c, 0x38cd: 0x000c, 0x38ce: 0x000c, 0x38cf: 0x000c, 0x38d0: 0x000c, 0x38d1: 0x000c, + 0x38d2: 0x000c, 0x38d3: 0x000c, 0x38d4: 0x000c, 0x38d5: 0x000c, 0x38d6: 0x000c, 0x38d7: 0x000c, + 0x38d8: 0x000c, 0x38d9: 0x000c, 0x38da: 0x000c, 0x38db: 0x000c, 0x38dc: 0x000c, 0x38dd: 0x000c, + 0x38de: 0x000c, 0x38df: 0x000c, 0x38e0: 0x000c, 0x38e1: 0x000c, 0x38e2: 0x000c, 0x38e3: 0x000c, + 0x38e4: 0x000c, 0x38e5: 0x000c, 0x38e6: 0x000c, 0x38e7: 0x000c, 0x38e8: 
0x000c, 0x38e9: 0x000c, + 0x38ea: 0x000c, 0x38eb: 0x000c, 0x38ec: 0x000c, 0x38ed: 0x000c, 0x38ee: 0x000c, 0x38ef: 0x000c, + 0x38f0: 0x000b, 0x38f1: 0x000b, 0x38f2: 0x000b, 0x38f3: 0x000b, 0x38f4: 0x000b, 0x38f5: 0x000b, + 0x38f6: 0x000b, 0x38f7: 0x000b, 0x38f8: 0x000b, 0x38f9: 0x000b, 0x38fa: 0x000b, 0x38fb: 0x000b, + 0x38fc: 0x000b, 0x38fd: 0x000b, 0x38fe: 0x000b, 0x38ff: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. +var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x64, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26f: 0x8a, + // Block 0xa, offset 0x280 + 0x2ac: 0x8b, 0x2ad: 0x8c, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8d, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8e, + 0x2b8: 0x8f, 0x2b9: 0x90, 0x2ba: 0x0e, 0x2bb: 0x91, 0x2bc: 0x92, 0x2bd: 0x93, 0x2bf: 0x94, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x95, 0x2c5: 0x54, 0x2c6: 0x96, 0x2c7: 0x97, + 0x2cb: 0x98, 0x2cd: 0x99, + 0x2e0: 0x9a, 0x2e1: 0x9a, 0x2e2: 0x9a, 0x2e3: 
0x9a, 0x2e4: 0x9b, 0x2e5: 0x9a, 0x2e6: 0x9a, 0x2e7: 0x9a, + 0x2e8: 0x9c, 0x2e9: 0x9a, 0x2ea: 0x9a, 0x2eb: 0x9d, 0x2ec: 0x9e, 0x2ed: 0x9a, 0x2ee: 0x9a, 0x2ef: 0x9a, + 0x2f0: 0x9a, 0x2f1: 0x9a, 0x2f2: 0x9a, 0x2f3: 0x9a, 0x2f4: 0x9a, 0x2f5: 0x9a, 0x2f6: 0x9a, 0x2f7: 0x9a, + 0x2f8: 0x9a, 0x2f9: 0x9f, 0x2fa: 0x9a, 0x2fb: 0x9a, 0x2fc: 0x9a, 0x2fd: 0x9a, 0x2fe: 0x9a, 0x2ff: 0x9a, + // Block 0xc, offset 0x300 + 0x300: 0xa0, 0x301: 0xa1, 0x302: 0xa2, 0x304: 0xa3, 0x305: 0xa4, 0x306: 0xa5, 0x307: 0xa6, + 0x308: 0xa7, 0x30b: 0xa8, 0x30c: 0xa9, 0x30d: 0xaa, + 0x310: 0xab, 0x311: 0xac, 0x312: 0xad, 0x313: 0xae, 0x316: 0xaf, 0x317: 0xb0, + 0x318: 0xb1, 0x319: 0xb2, 0x31a: 0xb3, 0x31c: 0xb4, + 0x328: 0xb5, 0x329: 0xb6, 0x32a: 0xb7, + 0x330: 0xb8, 0x332: 0xb9, 0x334: 0xba, 0x335: 0xbb, + // Block 0xd, offset 0x340 + 0x36b: 0xbc, 0x36c: 0xbd, + 0x37e: 0xbe, + // Block 0xe, offset 0x380 + 0x3b2: 0xbf, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc0, 0x3c6: 0xc1, + 0x3c8: 0x54, 0x3c9: 0xc2, 0x3cc: 0x54, 0x3cd: 0xc3, + 0x3db: 0xc4, 0x3dc: 0xc5, 0x3dd: 0xc6, 0x3de: 0xc7, 0x3df: 0xc8, + 0x3e8: 0xc9, 0x3e9: 0xca, 0x3ea: 0xcb, + // Block 0x10, offset 0x400 + 0x400: 0xcc, + 0x420: 0x9a, 0x421: 0x9a, 0x422: 0x9a, 0x423: 0xcd, 0x424: 0x9a, 0x425: 0xce, 0x426: 0x9a, 0x427: 0x9a, + 0x428: 0x9a, 0x429: 0x9a, 0x42a: 0x9a, 0x42b: 0x9a, 0x42c: 0x9a, 0x42d: 0x9a, 0x42e: 0x9a, 0x42f: 0x9a, + 0x430: 0x9a, 0x431: 0x9a, 0x432: 0x9a, 0x433: 0x9a, 0x434: 0x9a, 0x435: 0x9a, 0x436: 0x9a, 0x437: 0x9a, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xcf, 0x43c: 0x9a, 0x43d: 0x9a, 0x43e: 0x9a, 0x43f: 0x9a, + // Block 0x11, offset 0x440 + 0x440: 0xd0, 0x441: 0x54, 0x442: 0xd1, 0x443: 0xd2, 0x444: 0xd3, 0x445: 0xd4, + 0x449: 0xd5, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xd6, 0x45c: 0x54, 0x45d: 0x6b, 0x45e: 0x54, 0x45f: 0xd7, + 0x460: 0xd8, 0x461: 0xd9, 0x462: 0xda, 0x464: 0xdb, 0x465: 0xdc, 0x466: 0xdd, 0x467: 0xde, + 0x47f: 0xdf, + // Block 0x12, offset 0x480 + 0x4bf: 0xdf, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xe0, 0x541: 0xe0, 0x542: 0xe0, 0x543: 0xe0, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xe1, + 0x548: 0xe0, 0x549: 0xe0, 0x54a: 0xe0, 0x54b: 0xe0, 0x54c: 0xe0, 0x54d: 0xe0, 0x54e: 0xe0, 0x54f: 0xe0, + 0x550: 0xe0, 0x551: 0xe0, 0x552: 0xe0, 0x553: 0xe0, 0x554: 0xe0, 0x555: 0xe0, 0x556: 0xe0, 0x557: 0xe0, + 0x558: 0xe0, 0x559: 0xe0, 0x55a: 0xe0, 0x55b: 0xe0, 0x55c: 0xe0, 0x55d: 0xe0, 0x55e: 0xe0, 0x55f: 0xe0, + 0x560: 0xe0, 0x561: 0xe0, 0x562: 0xe0, 0x563: 0xe0, 0x564: 0xe0, 0x565: 0xe0, 0x566: 0xe0, 0x567: 0xe0, + 0x568: 0xe0, 0x569: 0xe0, 0x56a: 0xe0, 0x56b: 0xe0, 0x56c: 0xe0, 0x56d: 0xe0, 0x56e: 0xe0, 0x56f: 0xe0, + 0x570: 0xe0, 0x571: 0xe0, 0x572: 0xe0, 0x573: 0xe0, 0x574: 0xe0, 0x575: 0xe0, 0x576: 0xe0, 0x577: 0xe0, + 0x578: 0xe0, 0x579: 0xe0, 0x57a: 0xe0, 0x57b: 0xe0, 0x57c: 0xe0, 0x57d: 0xe0, 0x57e: 0xe0, 0x57f: 0xe0, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16184 bytes (15KiB); checksum: F50EF68C diff --git 
a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go new file mode 100644 index 00000000..0ca0193e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -0,0 +1,1781 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build !go1.10 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 15744 bytes (15.38 KiB). Checksum: b4c3b70954803b86. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 222 blocks, 14208 entries, 14208 bytes +// The third block is the zero block. 
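// An illustrative sketch, not part of the vendored x/text diff: the unexported
// generated helpers above (newBidiTrie, lookupString) are exercised from inside
// package bidi roughly as below. classesOf is a hypothetical name used only for
// illustration; the raw trie value v maps to a bidi property elsewhere in the package.
func classesOf(s string) []uint8 {
	t := newBidiTrie(0)
	var classes []uint8
	for len(s) > 0 {
		v, sz := t.lookupString(s)
		if sz == 0 {
			break // s ends in an incomplete UTF-8 encoding
		}
		classes = append(classes, v) // trie value for the first rune of s
		s = s[sz:]                   // advance by the width of that encoding
	}
	return classes
}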
+var bidiValues = [14208]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x0001, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x0001, 0x5e1: 0x0001, 0x5e2: 0x0001, 0x5e3: 0x0001, + 0x5e4: 0x0001, 0x5e5: 0x0001, 0x5e6: 0x0001, 0x5e7: 0x0001, 0x5e8: 0x0001, 0x5e9: 0x0001, + 0x5ea: 0x0001, 0x5eb: 0x0001, 0x5ec: 0x0001, 0x5ed: 0x0001, 0x5ee: 0x0001, 0x5ef: 0x0001, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000d, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, 
+ // Block 0x28, offset 0xa00 + 0xa01: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 
0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 
0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 
0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 
0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 
0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, 0x16e7: 0x000a, 0x16e8: 0x000a, 0x16e9: 0x000a, + 0x16ea: 0x000a, 0x16eb: 0x000a, 0x16ec: 0x000a, 0x16ed: 0x000a, 0x16ee: 0x000a, 0x16ef: 0x000a, + 0x16f0: 0x000a, 0x16f1: 0x000a, 0x16f2: 0x000a, 0x16f3: 0x000a, 0x16f4: 0x000a, 0x16f5: 0x000a, + 0x16f6: 0x000a, 0x16f7: 0x000a, 0x16f8: 0x000a, 0x16f9: 0x000a, 0x16fa: 0x000a, 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1715: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, + 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, 0x1767: 0x000a, 0x1768: 0x000a, 0x1769: 0x000a, + 0x176a: 0x000a, 0x176b: 0x000a, 0x176c: 0x000a, 0x176d: 0x000a, 0x176e: 0x000a, 0x176f: 0x000a, + 0x1770: 0x000a, 0x1771: 0x000a, 0x1772: 0x000a, 0x1773: 0x000a, 0x1774: 0x000a, 0x1775: 0x000a, + 0x1776: 0x000a, 0x1777: 0x000a, 0x1778: 0x000a, 0x1779: 0x000a, 0x177a: 0x000a, 0x177b: 0x000a, + 0x177c: 0x000a, 0x177d: 0x000a, 0x177e: 0x000a, 0x177f: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x0002, 0x1789: 0x0002, 0x178a: 0x0002, 0x178b: 0x0002, + 0x178c: 0x0002, 0x178d: 0x0002, 0x178e: 0x0002, 0x178f: 0x0002, 0x1790: 0x0002, 0x1791: 0x0002, + 0x1792: 0x0002, 0x1793: 0x0002, 
0x1794: 0x0002, 0x1795: 0x0002, 0x1796: 0x0002, 0x1797: 0x0002, + 0x1798: 0x0002, 0x1799: 0x0002, 0x179a: 0x0002, 0x179b: 0x0002, + // Block 0x5f, offset 0x17c0 + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ec: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x000a, 0x1829: 0x000a, + 0x182a: 0x000a, 0x182b: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x003a, 0x1871: 0x002a, 0x1872: 0x003a, 0x1873: 0x002a, 0x1874: 0x003a, 0x1875: 0x002a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x003a, 0x18a7: 0x002a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 
0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x007a, 0x18c4: 0x006a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x00ba, 0x18c8: 0x00aa, 0x18c9: 0x009a, 0x18ca: 0x008a, 0x18cb: 0x007a, + 0x18cc: 0x006a, 0x18cd: 0x00da, 0x18ce: 0x002a, 0x18cf: 0x003a, 0x18d0: 0x00ca, 0x18d1: 0x009a, + 0x18d2: 0x008a, 0x18d3: 0x007a, 0x18d4: 0x006a, 0x18d5: 0x009a, 0x18d6: 0x008a, 0x18d7: 0x00ba, + 0x18d8: 0x00aa, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x003a, 0x1919: 0x002a, 0x191a: 0x003a, 0x191b: 0x002a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x003a, 0x193d: 0x002a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 
0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, + 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, + 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, + 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, + 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, + 0x1a2a: 0x000a, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, + 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, + 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, + // Block 0x69, offset 0x1a40 + 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, + 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, + 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, + 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, + 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, + 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, + 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 
0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + 0x1b7c: 0x000a, 0x1b7d: 0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, + 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, + 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, + 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, + 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, + 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, + 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, + 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, + 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, + 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, + 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, + 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, + 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, + 0x1c30: 0x000a, + 0x1c36: 0x000a, 0x1c37: 0x000a, + 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, + 0x1c60: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1cbb: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, + 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 0x1cca: 0x000a, 0x1ccb: 0x000a, + 0x1ccc: 0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, 
+ 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d1d: 0x000a, + 0x1d1e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d50: 0x000a, 0x1d51: 0x000a, + 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, + 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, + 0x1d5e: 0x000a, 0x1d5f: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, + 0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, + 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e5e: 0x000a, 0x1e5f: 0x000a, + 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e90: 0x000a, 0x1e91: 0x000a, + 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, + 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, + 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, + 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, + 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, + 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, + 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, + 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, + 0x1ec6: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f6f: 0x000c, + 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, + 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, + 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, + // Block 0x7e, offset 0x1f80 + 0x1f9e: 0x000c, 0x1f9f: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1ff0: 0x000c, 0x1ff1: 0x000c, + // Block 0x80, offset 0x2000 + 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, + 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, + 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, + 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, + 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, + 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, + // Block 0x81, offset 0x2040 + 0x2048: 0x000a, + // Block 0x82, offset 0x2080 + 0x2082: 0x000c, + 0x2086: 0x000c, 0x208b: 0x000c, + 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, + 0x20aa: 0x000a, 0x20ab: 0x000a, + 0x20b8: 0x0004, 0x20b9: 0x0004, + // Block 0x83, offset 0x20c0 + 0x20f4: 0x000a, 
0x20f5: 0x000a, + 0x20f6: 0x000a, 0x20f7: 0x000a, + // Block 0x84, offset 0x2100 + 0x2104: 0x000c, 0x2105: 0x000c, + 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 0x2123: 0x000c, + 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, + 0x2130: 0x000c, 0x2131: 0x000c, + // Block 0x85, offset 0x2140 + 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, + 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, + // Block 0x86, offset 0x2180 + 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, + 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x000c, 0x21c1: 0x000c, 0x21c2: 0x000c, + 0x21f3: 0x000c, + 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, + 0x21fc: 0x000c, + // Block 0x88, offset 0x2200 + 0x2225: 0x000c, + // Block 0x89, offset 0x2240 + 0x2269: 0x000c, + 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, + 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, + 0x2276: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2283: 0x000c, + 0x228c: 0x000c, + 0x22bc: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, + 0x22f7: 0x000c, 0x22f8: 0x000c, + 0x22fe: 0x000c, 0x22ff: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2301: 0x000c, + 0x232c: 0x000c, 0x232d: 0x000c, + 0x2336: 0x000c, + // Block 0x8d, offset 0x2340 + 0x2365: 0x000c, 0x2368: 0x000c, + 0x236d: 0x000c, + // Block 0x8e, offset 0x2380 + 0x239d: 0x0001, + 0x239e: 0x000c, 0x239f: 0x0001, 0x23a0: 0x0001, 0x23a1: 0x0001, 0x23a2: 0x0001, 0x23a3: 0x0001, + 0x23a4: 0x0001, 0x23a5: 0x0001, 0x23a6: 0x0001, 0x23a7: 0x0001, 0x23a8: 0x0001, 0x23a9: 0x0003, + 0x23aa: 0x0001, 0x23ab: 0x0001, 0x23ac: 0x0001, 0x23ad: 0x0001, 0x23ae: 0x0001, 0x23af: 0x0001, + 0x23b0: 0x0001, 0x23b1: 0x0001, 0x23b2: 0x0001, 0x23b3: 0x0001, 0x23b4: 0x0001, 0x23b5: 0x0001, + 0x23b6: 0x0001, 0x23b7: 0x0001, 0x23b8: 0x0001, 0x23b9: 0x0001, 0x23ba: 0x0001, 0x23bb: 0x0001, + 0x23bc: 0x0001, 0x23bd: 0x0001, 0x23be: 0x0001, 0x23bf: 0x0001, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0001, 0x23c1: 0x0001, 0x23c2: 0x0001, 0x23c3: 0x0001, 0x23c4: 0x0001, 0x23c5: 0x0001, + 0x23c6: 0x0001, 0x23c7: 0x0001, 0x23c8: 0x0001, 0x23c9: 0x0001, 0x23ca: 0x0001, 0x23cb: 0x0001, + 0x23cc: 0x0001, 0x23cd: 0x0001, 0x23ce: 0x0001, 0x23cf: 0x0001, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000d, 0x23ff: 0x000d, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 
0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000a, 0x243f: 0x000a, + // Block 0x91, offset 0x2440 + 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 0x2445: 0x000d, + 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, + 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000b, 0x2451: 0x000b, + 0x2452: 0x000b, 0x2453: 0x000b, 0x2454: 0x000b, 0x2455: 0x000b, 0x2456: 0x000b, 0x2457: 0x000b, + 0x2458: 0x000b, 0x2459: 0x000b, 0x245a: 0x000b, 0x245b: 0x000b, 0x245c: 0x000b, 0x245d: 0x000b, + 0x245e: 0x000b, 0x245f: 0x000b, 0x2460: 0x000b, 0x2461: 0x000b, 0x2462: 0x000b, 0x2463: 0x000b, + 0x2464: 0x000b, 0x2465: 0x000b, 0x2466: 0x000b, 0x2467: 0x000b, 0x2468: 0x000b, 0x2469: 0x000b, + 0x246a: 0x000b, 0x246b: 0x000b, 0x246c: 0x000b, 0x246d: 0x000b, 0x246e: 0x000b, 0x246f: 0x000b, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000a, 0x247e: 0x000d, 0x247f: 0x000d, + // Block 0x92, offset 0x2480 + 0x2480: 0x000c, 0x2481: 0x000c, 0x2482: 0x000c, 0x2483: 0x000c, 0x2484: 0x000c, 0x2485: 0x000c, + 0x2486: 0x000c, 0x2487: 0x000c, 0x2488: 0x000c, 0x2489: 0x000c, 0x248a: 0x000c, 0x248b: 0x000c, + 0x248c: 0x000c, 0x248d: 0x000c, 0x248e: 0x000c, 0x248f: 0x000c, 0x2490: 0x000a, 0x2491: 0x000a, + 0x2492: 0x000a, 0x2493: 0x000a, 0x2494: 0x000a, 0x2495: 0x000a, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x000a, + 0x24a0: 0x000c, 0x24a1: 0x000c, 0x24a2: 0x000c, 0x24a3: 0x000c, + 0x24a4: 0x000c, 0x24a5: 0x000c, 0x24a6: 0x000c, 0x24a7: 0x000c, 0x24a8: 0x000c, 0x24a9: 0x000c, + 0x24aa: 0x000c, 0x24ab: 0x000c, 0x24ac: 0x000c, 0x24ad: 0x000c, 0x24ae: 0x000c, 0x24af: 0x000c, + 0x24b0: 0x000a, 0x24b1: 0x000a, 0x24b2: 0x000a, 0x24b3: 0x000a, 0x24b4: 0x000a, 0x24b5: 0x000a, + 0x24b6: 0x000a, 0x24b7: 0x000a, 0x24b8: 0x000a, 0x24b9: 0x000a, 0x24ba: 0x000a, 0x24bb: 0x000a, + 0x24bc: 0x000a, 0x24bd: 0x000a, 0x24be: 0x000a, 0x24bf: 0x000a, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000a, 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x000a, 0x24c4: 0x000a, 0x24c5: 0x000a, + 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x000a, 0x24c9: 0x000a, 0x24ca: 0x000a, 0x24cb: 0x000a, + 0x24cc: 0x000a, 0x24cd: 0x000a, 0x24ce: 0x000a, 0x24cf: 0x000a, 0x24d0: 0x0006, 0x24d1: 0x000a, + 0x24d2: 0x0006, 0x24d4: 0x000a, 0x24d5: 0x0006, 0x24d6: 0x000a, 0x24d7: 0x000a, + 0x24d8: 0x000a, 0x24d9: 0x009a, 0x24da: 0x008a, 0x24db: 0x007a, 0x24dc: 0x006a, 0x24dd: 0x009a, + 0x24de: 0x008a, 0x24df: 0x0004, 0x24e0: 0x000a, 0x24e1: 0x000a, 0x24e2: 0x0003, 0x24e3: 0x0003, + 0x24e4: 0x000a, 0x24e5: 0x000a, 
0x24e6: 0x000a, 0x24e8: 0x000a, 0x24e9: 0x0004, + 0x24ea: 0x0004, 0x24eb: 0x000a, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000d, + // Block 0x94, offset 0x2500 + 0x2500: 0x000d, 0x2501: 0x000d, 0x2502: 0x000d, 0x2503: 0x000d, 0x2504: 0x000d, 0x2505: 0x000d, + 0x2506: 0x000d, 0x2507: 0x000d, 0x2508: 0x000d, 0x2509: 0x000d, 0x250a: 0x000d, 0x250b: 0x000d, + 0x250c: 0x000d, 0x250d: 0x000d, 0x250e: 0x000d, 0x250f: 0x000d, 0x2510: 0x000d, 0x2511: 0x000d, + 0x2512: 0x000d, 0x2513: 0x000d, 0x2514: 0x000d, 0x2515: 0x000d, 0x2516: 0x000d, 0x2517: 0x000d, + 0x2518: 0x000d, 0x2519: 0x000d, 0x251a: 0x000d, 0x251b: 0x000d, 0x251c: 0x000d, 0x251d: 0x000d, + 0x251e: 0x000d, 0x251f: 0x000d, 0x2520: 0x000d, 0x2521: 0x000d, 0x2522: 0x000d, 0x2523: 0x000d, + 0x2524: 0x000d, 0x2525: 0x000d, 0x2526: 0x000d, 0x2527: 0x000d, 0x2528: 0x000d, 0x2529: 0x000d, + 0x252a: 0x000d, 0x252b: 0x000d, 0x252c: 0x000d, 0x252d: 0x000d, 0x252e: 0x000d, 0x252f: 0x000d, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000b, + // Block 0x95, offset 0x2540 + 0x2541: 0x000a, 0x2542: 0x000a, 0x2543: 0x0004, 0x2544: 0x0004, 0x2545: 0x0004, + 0x2546: 0x000a, 0x2547: 0x000a, 0x2548: 0x003a, 0x2549: 0x002a, 0x254a: 0x000a, 0x254b: 0x0003, + 0x254c: 0x0006, 0x254d: 0x0003, 0x254e: 0x0006, 0x254f: 0x0006, 0x2550: 0x0002, 0x2551: 0x0002, + 0x2552: 0x0002, 0x2553: 0x0002, 0x2554: 0x0002, 0x2555: 0x0002, 0x2556: 0x0002, 0x2557: 0x0002, + 0x2558: 0x0002, 0x2559: 0x0002, 0x255a: 0x0006, 0x255b: 0x000a, 0x255c: 0x000a, 0x255d: 0x000a, + 0x255e: 0x000a, 0x255f: 0x000a, 0x2560: 0x000a, + 0x257b: 0x005a, + 0x257c: 0x000a, 0x257d: 0x004a, 0x257e: 0x000a, 0x257f: 0x000a, + // Block 0x96, offset 0x2580 + 0x2580: 0x000a, + 0x259b: 0x005a, 0x259c: 0x000a, 0x259d: 0x004a, + 0x259e: 0x000a, 0x259f: 0x00fa, 0x25a0: 0x00ea, 0x25a1: 0x000a, 0x25a2: 0x003a, 0x25a3: 0x002a, + 0x25a4: 0x000a, 0x25a5: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25e0: 0x0004, 0x25e1: 0x0004, 0x25e2: 0x000a, 0x25e3: 0x000a, + 0x25e4: 0x000a, 0x25e5: 0x0004, 0x25e6: 0x0004, 0x25e8: 0x000a, 0x25e9: 0x000a, + 0x25ea: 0x000a, 0x25eb: 0x000a, 0x25ec: 0x000a, 0x25ed: 0x000a, 0x25ee: 0x000a, + 0x25f0: 0x000b, 0x25f1: 0x000b, 0x25f2: 0x000b, 0x25f3: 0x000b, 0x25f4: 0x000b, 0x25f5: 0x000b, + 0x25f6: 0x000b, 0x25f7: 0x000b, 0x25f8: 0x000b, 0x25f9: 0x000a, 0x25fa: 0x000a, 0x25fb: 0x000a, + 0x25fc: 0x000a, 0x25fd: 0x000a, 0x25fe: 0x000b, 0x25ff: 0x000b, + // Block 0x98, offset 0x2600 + 0x2601: 0x000a, + // Block 0x99, offset 0x2640 + 0x2640: 0x000a, 0x2641: 0x000a, 0x2642: 0x000a, 0x2643: 0x000a, 0x2644: 0x000a, 0x2645: 0x000a, + 0x2646: 0x000a, 0x2647: 0x000a, 0x2648: 0x000a, 0x2649: 0x000a, 0x264a: 0x000a, 0x264b: 0x000a, + 0x264c: 0x000a, 0x2650: 0x000a, 0x2651: 0x000a, + 0x2652: 0x000a, 0x2653: 0x000a, 0x2654: 0x000a, 0x2655: 0x000a, 0x2656: 0x000a, 0x2657: 0x000a, + 0x2658: 0x000a, 0x2659: 0x000a, 0x265a: 0x000a, 0x265b: 0x000a, + 0x2660: 0x000a, + // Block 0x9a, offset 0x2680 + 0x26bd: 0x000c, + // Block 0x9b, offset 0x26c0 + 0x26e0: 0x000c, 0x26e1: 0x0002, 0x26e2: 0x0002, 0x26e3: 0x0002, + 0x26e4: 0x0002, 0x26e5: 0x0002, 0x26e6: 0x0002, 0x26e7: 0x0002, 0x26e8: 0x0002, 
0x26e9: 0x0002, + 0x26ea: 0x0002, 0x26eb: 0x0002, 0x26ec: 0x0002, 0x26ed: 0x0002, 0x26ee: 0x0002, 0x26ef: 0x0002, + 0x26f0: 0x0002, 0x26f1: 0x0002, 0x26f2: 0x0002, 0x26f3: 0x0002, 0x26f4: 0x0002, 0x26f5: 0x0002, + 0x26f6: 0x0002, 0x26f7: 0x0002, 0x26f8: 0x0002, 0x26f9: 0x0002, 0x26fa: 0x0002, 0x26fb: 0x0002, + // Block 0x9c, offset 0x2700 + 0x2736: 0x000c, 0x2737: 0x000c, 0x2738: 0x000c, 0x2739: 0x000c, 0x273a: 0x000c, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x000a, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x000c, 0x27c2: 0x000c, 0x27c3: 0x000c, 0x27c4: 0x0001, 0x27c5: 0x000c, + 0x27c6: 0x000c, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x000c, 0x27cd: 0x000c, 0x27ce: 0x000c, 0x27cf: 0x000c, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x000c, 0x27f9: 
0x000c, 0x27fa: 0x000c, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x000c, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x0001, 0x283a: 0x0001, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x0001, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x0001, 0x2866: 0x0001, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x000a, 0x287a: 0x000a, 0x287b: 0x000a, + 0x287c: 0x000a, 0x287d: 0x000a, 0x287e: 0x000a, 0x287f: 0x000a, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, + 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, + 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, + 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, + 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, + 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, + // Block 0xa3, offset 0x28c0 + 0x28c1: 0x000c, + 0x28f8: 0x000c, 0x28f9: 0x000c, 0x28fa: 0x000c, 0x28fb: 0x000c, + 0x28fc: 0x000c, 0x28fd: 0x000c, 0x28fe: 0x000c, 0x28ff: 0x000c, + // Block 0xa4, offset 0x2900 + 
0x2900: 0x000c, 0x2901: 0x000c, 0x2902: 0x000c, 0x2903: 0x000c, 0x2904: 0x000c, 0x2905: 0x000c, + 0x2906: 0x000c, + 0x2912: 0x000a, 0x2913: 0x000a, 0x2914: 0x000a, 0x2915: 0x000a, 0x2916: 0x000a, 0x2917: 0x000a, + 0x2918: 0x000a, 0x2919: 0x000a, 0x291a: 0x000a, 0x291b: 0x000a, 0x291c: 0x000a, 0x291d: 0x000a, + 0x291e: 0x000a, 0x291f: 0x000a, 0x2920: 0x000a, 0x2921: 0x000a, 0x2922: 0x000a, 0x2923: 0x000a, + 0x2924: 0x000a, 0x2925: 0x000a, + 0x293f: 0x000c, + // Block 0xa5, offset 0x2940 + 0x2940: 0x000c, 0x2941: 0x000c, + 0x2973: 0x000c, 0x2974: 0x000c, 0x2975: 0x000c, + 0x2976: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, + 0x29a7: 0x000c, 0x29a8: 0x000c, 0x29a9: 0x000c, + 0x29aa: 0x000c, 0x29ab: 0x000c, 0x29ad: 0x000c, 0x29ae: 0x000c, 0x29af: 0x000c, + 0x29b0: 0x000c, 0x29b1: 0x000c, 0x29b2: 0x000c, 0x29b3: 0x000c, 0x29b4: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29f3: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000c, 0x2a01: 0x000c, + 0x2a36: 0x000c, 0x2a37: 0x000c, 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 0x000c, 0x2a3b: 0x000c, + 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a4a: 0x000c, 0x2a4b: 0x000c, + 0x2a4c: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2aaf: 0x000c, + 0x2ab0: 0x000c, 0x2ab1: 0x000c, 0x2ab4: 0x000c, + 0x2ab6: 0x000c, 0x2ab7: 0x000c, + 0x2abe: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2adf: 0x000c, 0x2ae3: 0x000c, + 0x2ae4: 0x000c, 0x2ae5: 0x000c, 0x2ae6: 0x000c, 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, + 0x2aea: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x000c, 0x2b01: 0x000c, + 0x2b3c: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, + 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c, + 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c, + 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c, + 0x2bc6: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c, + 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c, + 0x2c3f: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x000c, + 0x2cdc: 0x000c, 0x2cdd: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, + 0x2d3d: 0x000c, 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, + 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a, + 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a, + 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a, + // Block 0xb6, offset 0x2d80 + 0x2dab: 0x000c, 0x2dad: 0x000c, + 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2db7: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2ddd: 0x000c, + 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c, + 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c, + 0x2dea: 0x000c, 0x2deb: 0x000c, + // Block 0xb8, 
offset 0x2e00 + 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, 0x2e3b: 0x000c, + 0x2e3c: 0x000c, 0x2e3d: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e52: 0x000c, 0x2e53: 0x000c, 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c, + 0x2e58: 0x000c, 0x2e59: 0x000c, 0x2e5a: 0x000c, 0x2e5b: 0x000c, 0x2e5c: 0x000c, 0x2e5d: 0x000c, + 0x2e5e: 0x000c, 0x2e5f: 0x000c, 0x2e60: 0x000c, 0x2e61: 0x000c, 0x2e62: 0x000c, 0x2e63: 0x000c, + 0x2e64: 0x000c, 0x2e65: 0x000c, 0x2e66: 0x000c, 0x2e67: 0x000c, + 0x2e6a: 0x000c, 0x2e6b: 0x000c, 0x2e6c: 0x000c, 0x2e6d: 0x000c, 0x2e6e: 0x000c, 0x2e6f: 0x000c, + 0x2e70: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e75: 0x000c, + 0x2e76: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ef0: 0x000c, 0x2ef1: 0x000c, 0x2ef2: 0x000c, 0x2ef3: 0x000c, 0x2ef4: 0x000c, 0x2ef5: 0x000c, + 0x2ef6: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f5d: 0x000c, + 0x2f5e: 0x000c, 0x2f60: 0x000b, 0x2f61: 0x000b, 0x2f62: 0x000b, 0x2f63: 0x000b, + // Block 0xbe, offset 0x2f80 + 0x2fa7: 0x000c, 0x2fa8: 0x000c, 0x2fa9: 0x000c, + 0x2fb3: 0x000b, 0x2fb4: 0x000b, 0x2fb5: 0x000b, + 0x2fb6: 0x000b, 0x2fb7: 0x000b, 0x2fb8: 0x000b, 0x2fb9: 0x000b, 0x2fba: 0x000b, 0x2fbb: 0x000c, + 0x2fbc: 0x000c, 0x2fbd: 0x000c, 0x2fbe: 0x000c, 0x2fbf: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2fc0: 0x000c, 0x2fc1: 0x000c, 0x2fc2: 0x000c, 0x2fc5: 0x000c, + 0x2fc6: 0x000c, 0x2fc7: 0x000c, 0x2fc8: 0x000c, 0x2fc9: 0x000c, 0x2fca: 0x000c, 0x2fcb: 0x000c, + 0x2fea: 0x000c, 0x2feb: 0x000c, 0x2fec: 0x000c, 0x2fed: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000a, 0x3001: 0x000a, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000a, + // Block 0xc1, offset 0x3040 + 0x3040: 0x000a, 0x3041: 0x000a, 0x3042: 0x000a, 0x3043: 0x000a, 0x3044: 0x000a, 0x3045: 0x000a, + 0x3046: 0x000a, 0x3047: 0x000a, 0x3048: 0x000a, 0x3049: 0x000a, 0x304a: 0x000a, 0x304b: 0x000a, + 0x304c: 0x000a, 0x304d: 0x000a, 0x304e: 0x000a, 0x304f: 0x000a, 0x3050: 0x000a, 0x3051: 0x000a, + 0x3052: 0x000a, 0x3053: 0x000a, 0x3054: 0x000a, 0x3055: 0x000a, 0x3056: 0x000a, + // Block 0xc2, offset 0x3080 + 0x309b: 0x000a, + // Block 0xc3, offset 0x30c0 + 0x30d5: 0x000a, + // Block 0xc4, offset 0x3100 + 0x310f: 0x000a, + // Block 0xc5, offset 0x3140 + 0x3149: 0x000a, + // Block 0xc6, offset 0x3180 + 0x3183: 0x000a, + 0x318e: 0x0002, 0x318f: 0x0002, 0x3190: 0x0002, 0x3191: 0x0002, + 0x3192: 0x0002, 0x3193: 0x0002, 0x3194: 0x0002, 0x3195: 0x0002, 0x3196: 0x0002, 0x3197: 0x0002, + 0x3198: 0x0002, 0x3199: 0x0002, 0x319a: 0x0002, 0x319b: 0x0002, 0x319c: 0x0002, 0x319d: 0x0002, + 0x319e: 0x0002, 0x319f: 0x0002, 0x31a0: 0x0002, 0x31a1: 0x0002, 0x31a2: 0x0002, 0x31a3: 0x0002, + 0x31a4: 0x0002, 0x31a5: 0x0002, 0x31a6: 0x0002, 0x31a7: 0x0002, 0x31a8: 0x0002, 0x31a9: 0x0002, + 0x31aa: 0x0002, 0x31ab: 0x0002, 0x31ac: 0x0002, 0x31ad: 0x0002, 0x31ae: 0x0002, 0x31af: 0x0002, + 0x31b0: 0x0002, 0x31b1: 0x0002, 0x31b2: 0x0002, 0x31b3: 0x0002, 0x31b4: 0x0002, 0x31b5: 0x0002, + 0x31b6: 0x0002, 0x31b7: 0x0002, 0x31b8: 0x0002, 0x31b9: 0x0002, 0x31ba: 0x0002, 0x31bb: 0x0002, + 0x31bc: 0x0002, 0x31bd: 0x0002, 0x31be: 0x0002, 0x31bf: 0x0002, + // Block 0xc7, offset 0x31c0 + 0x31c0: 0x000c, 0x31c1: 0x000c, 0x31c2: 0x000c, 0x31c3: 0x000c, 0x31c4: 
0x000c, 0x31c5: 0x000c, + 0x31c6: 0x000c, 0x31c7: 0x000c, 0x31c8: 0x000c, 0x31c9: 0x000c, 0x31ca: 0x000c, 0x31cb: 0x000c, + 0x31cc: 0x000c, 0x31cd: 0x000c, 0x31ce: 0x000c, 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c, + 0x31d2: 0x000c, 0x31d3: 0x000c, 0x31d4: 0x000c, 0x31d5: 0x000c, 0x31d6: 0x000c, 0x31d7: 0x000c, + 0x31d8: 0x000c, 0x31d9: 0x000c, 0x31da: 0x000c, 0x31db: 0x000c, 0x31dc: 0x000c, 0x31dd: 0x000c, + 0x31de: 0x000c, 0x31df: 0x000c, 0x31e0: 0x000c, 0x31e1: 0x000c, 0x31e2: 0x000c, 0x31e3: 0x000c, + 0x31e4: 0x000c, 0x31e5: 0x000c, 0x31e6: 0x000c, 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c, + 0x31ea: 0x000c, 0x31eb: 0x000c, 0x31ec: 0x000c, 0x31ed: 0x000c, 0x31ee: 0x000c, 0x31ef: 0x000c, + 0x31f0: 0x000c, 0x31f1: 0x000c, 0x31f2: 0x000c, 0x31f3: 0x000c, 0x31f4: 0x000c, 0x31f5: 0x000c, + 0x31f6: 0x000c, 0x31fb: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3203: 0x000c, 0x3204: 0x000c, 0x3205: 0x000c, + 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, + 0x320c: 0x000c, 0x320d: 0x000c, 0x320e: 0x000c, 0x320f: 0x000c, 0x3210: 0x000c, 0x3211: 0x000c, + 0x3212: 0x000c, 0x3213: 0x000c, 0x3214: 0x000c, 0x3215: 0x000c, 0x3216: 0x000c, 0x3217: 0x000c, + 0x3218: 0x000c, 0x3219: 0x000c, 0x321a: 0x000c, 0x321b: 0x000c, 0x321c: 0x000c, 0x321d: 0x000c, + 0x321e: 0x000c, 0x321f: 0x000c, 0x3220: 0x000c, 0x3221: 0x000c, 0x3222: 0x000c, 0x3223: 0x000c, + 0x3224: 0x000c, 0x3225: 0x000c, 0x3226: 0x000c, 0x3227: 0x000c, 0x3228: 0x000c, 0x3229: 0x000c, + 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, + 0x3235: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3244: 0x000c, + 0x325b: 0x000c, 0x325c: 0x000c, 0x325d: 0x000c, + 0x325e: 0x000c, 0x325f: 0x000c, 0x3261: 0x000c, 0x3262: 0x000c, 0x3263: 0x000c, + 0x3264: 0x000c, 0x3265: 0x000c, 0x3266: 0x000c, 0x3267: 0x000c, 0x3268: 0x000c, 0x3269: 0x000c, + 0x326a: 0x000c, 0x326b: 0x000c, 0x326c: 0x000c, 0x326d: 0x000c, 0x326e: 0x000c, 0x326f: 0x000c, + // Block 0xca, offset 0x3280 + 0x3280: 0x000c, 0x3281: 0x000c, 0x3282: 0x000c, 0x3283: 0x000c, 0x3284: 0x000c, 0x3285: 0x000c, + 0x3286: 0x000c, 0x3288: 0x000c, 0x3289: 0x000c, 0x328a: 0x000c, 0x328b: 0x000c, + 0x328c: 0x000c, 0x328d: 0x000c, 0x328e: 0x000c, 0x328f: 0x000c, 0x3290: 0x000c, 0x3291: 0x000c, + 0x3292: 0x000c, 0x3293: 0x000c, 0x3294: 0x000c, 0x3295: 0x000c, 0x3296: 0x000c, 0x3297: 0x000c, + 0x3298: 0x000c, 0x329b: 0x000c, 0x329c: 0x000c, 0x329d: 0x000c, + 0x329e: 0x000c, 0x329f: 0x000c, 0x32a0: 0x000c, 0x32a1: 0x000c, 0x32a3: 0x000c, + 0x32a4: 0x000c, 0x32a6: 0x000c, 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c, + 0x32aa: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x0001, 0x32c1: 0x0001, 0x32c2: 0x0001, 0x32c3: 0x0001, 0x32c4: 0x0001, 0x32c5: 0x0001, + 0x32c6: 0x0001, 0x32c7: 0x0001, 0x32c8: 0x0001, 0x32c9: 0x0001, 0x32ca: 0x0001, 0x32cb: 0x0001, + 0x32cc: 0x0001, 0x32cd: 0x0001, 0x32ce: 0x0001, 0x32cf: 0x0001, 0x32d0: 0x000c, 0x32d1: 0x000c, + 0x32d2: 0x000c, 0x32d3: 0x000c, 0x32d4: 0x000c, 0x32d5: 0x000c, 0x32d6: 0x000c, 0x32d7: 0x0001, + 0x32d8: 0x0001, 0x32d9: 0x0001, 0x32da: 0x0001, 0x32db: 0x0001, 0x32dc: 0x0001, 0x32dd: 0x0001, + 0x32de: 0x0001, 0x32df: 0x0001, 0x32e0: 0x0001, 0x32e1: 0x0001, 0x32e2: 0x0001, 0x32e3: 0x0001, + 0x32e4: 0x0001, 0x32e5: 0x0001, 0x32e6: 0x0001, 0x32e7: 0x0001, 0x32e8: 0x0001, 0x32e9: 0x0001, + 0x32ea: 0x0001, 0x32eb: 0x0001, 0x32ec: 0x0001, 0x32ed: 0x0001, 0x32ee: 0x0001, 0x32ef: 0x0001, + 0x32f0: 
0x0001, 0x32f1: 0x0001, 0x32f2: 0x0001, 0x32f3: 0x0001, 0x32f4: 0x0001, 0x32f5: 0x0001, + 0x32f6: 0x0001, 0x32f7: 0x0001, 0x32f8: 0x0001, 0x32f9: 0x0001, 0x32fa: 0x0001, 0x32fb: 0x0001, + 0x32fc: 0x0001, 0x32fd: 0x0001, 0x32fe: 0x0001, 0x32ff: 0x0001, + // Block 0xcc, offset 0x3300 + 0x3300: 0x0001, 0x3301: 0x0001, 0x3302: 0x0001, 0x3303: 0x0001, 0x3304: 0x000c, 0x3305: 0x000c, + 0x3306: 0x000c, 0x3307: 0x000c, 0x3308: 0x000c, 0x3309: 0x000c, 0x330a: 0x000c, 0x330b: 0x0001, + 0x330c: 0x0001, 0x330d: 0x0001, 0x330e: 0x0001, 0x330f: 0x0001, 0x3310: 0x0001, 0x3311: 0x0001, + 0x3312: 0x0001, 0x3313: 0x0001, 0x3314: 0x0001, 0x3315: 0x0001, 0x3316: 0x0001, 0x3317: 0x0001, + 0x3318: 0x0001, 0x3319: 0x0001, 0x331a: 0x0001, 0x331b: 0x0001, 0x331c: 0x0001, 0x331d: 0x0001, + 0x331e: 0x0001, 0x331f: 0x0001, 0x3320: 0x0001, 0x3321: 0x0001, 0x3322: 0x0001, 0x3323: 0x0001, + 0x3324: 0x0001, 0x3325: 0x0001, 0x3326: 0x0001, 0x3327: 0x0001, 0x3328: 0x0001, 0x3329: 0x0001, + 0x332a: 0x0001, 0x332b: 0x0001, 0x332c: 0x0001, 0x332d: 0x0001, 0x332e: 0x0001, 0x332f: 0x0001, + 0x3330: 0x0001, 0x3331: 0x0001, 0x3332: 0x0001, 0x3333: 0x0001, 0x3334: 0x0001, 0x3335: 0x0001, + 0x3336: 0x0001, 0x3337: 0x0001, 0x3338: 0x0001, 0x3339: 0x0001, 0x333a: 0x0001, 0x333b: 0x0001, + 0x333c: 0x0001, 0x333d: 0x0001, 0x333e: 0x0001, 0x333f: 0x0001, + // Block 0xcd, offset 0x3340 + 0x3340: 0x000d, 0x3341: 0x000d, 0x3342: 0x000d, 0x3343: 0x000d, 0x3344: 0x000d, 0x3345: 0x000d, + 0x3346: 0x000d, 0x3347: 0x000d, 0x3348: 0x000d, 0x3349: 0x000d, 0x334a: 0x000d, 0x334b: 0x000d, + 0x334c: 0x000d, 0x334d: 0x000d, 0x334e: 0x000d, 0x334f: 0x000d, 0x3350: 0x000d, 0x3351: 0x000d, + 0x3352: 0x000d, 0x3353: 0x000d, 0x3354: 0x000d, 0x3355: 0x000d, 0x3356: 0x000d, 0x3357: 0x000d, + 0x3358: 0x000d, 0x3359: 0x000d, 0x335a: 0x000d, 0x335b: 0x000d, 0x335c: 0x000d, 0x335d: 0x000d, + 0x335e: 0x000d, 0x335f: 0x000d, 0x3360: 0x000d, 0x3361: 0x000d, 0x3362: 0x000d, 0x3363: 0x000d, + 0x3364: 0x000d, 0x3365: 0x000d, 0x3366: 0x000d, 0x3367: 0x000d, 0x3368: 0x000d, 0x3369: 0x000d, + 0x336a: 0x000d, 0x336b: 0x000d, 0x336c: 0x000d, 0x336d: 0x000d, 0x336e: 0x000d, 0x336f: 0x000d, + 0x3370: 0x000a, 0x3371: 0x000a, 0x3372: 0x000d, 0x3373: 0x000d, 0x3374: 0x000d, 0x3375: 0x000d, + 0x3376: 0x000d, 0x3377: 0x000d, 0x3378: 0x000d, 0x3379: 0x000d, 0x337a: 0x000d, 0x337b: 0x000d, + 0x337c: 0x000d, 0x337d: 0x000d, 0x337e: 0x000d, 0x337f: 0x000d, + // Block 0xce, offset 0x3380 + 0x3380: 0x000a, 0x3381: 0x000a, 0x3382: 0x000a, 0x3383: 0x000a, 0x3384: 0x000a, 0x3385: 0x000a, + 0x3386: 0x000a, 0x3387: 0x000a, 0x3388: 0x000a, 0x3389: 0x000a, 0x338a: 0x000a, 0x338b: 0x000a, + 0x338c: 0x000a, 0x338d: 0x000a, 0x338e: 0x000a, 0x338f: 0x000a, 0x3390: 0x000a, 0x3391: 0x000a, + 0x3392: 0x000a, 0x3393: 0x000a, 0x3394: 0x000a, 0x3395: 0x000a, 0x3396: 0x000a, 0x3397: 0x000a, + 0x3398: 0x000a, 0x3399: 0x000a, 0x339a: 0x000a, 0x339b: 0x000a, 0x339c: 0x000a, 0x339d: 0x000a, + 0x339e: 0x000a, 0x339f: 0x000a, 0x33a0: 0x000a, 0x33a1: 0x000a, 0x33a2: 0x000a, 0x33a3: 0x000a, + 0x33a4: 0x000a, 0x33a5: 0x000a, 0x33a6: 0x000a, 0x33a7: 0x000a, 0x33a8: 0x000a, 0x33a9: 0x000a, + 0x33aa: 0x000a, 0x33ab: 0x000a, + 0x33b0: 0x000a, 0x33b1: 0x000a, 0x33b2: 0x000a, 0x33b3: 0x000a, 0x33b4: 0x000a, 0x33b5: 0x000a, + 0x33b6: 0x000a, 0x33b7: 0x000a, 0x33b8: 0x000a, 0x33b9: 0x000a, 0x33ba: 0x000a, 0x33bb: 0x000a, + 0x33bc: 0x000a, 0x33bd: 0x000a, 0x33be: 0x000a, 0x33bf: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33c0: 0x000a, 0x33c1: 0x000a, 0x33c2: 0x000a, 0x33c3: 0x000a, 0x33c4: 0x000a, 0x33c5: 0x000a, + 
0x33c6: 0x000a, 0x33c7: 0x000a, 0x33c8: 0x000a, 0x33c9: 0x000a, 0x33ca: 0x000a, 0x33cb: 0x000a, + 0x33cc: 0x000a, 0x33cd: 0x000a, 0x33ce: 0x000a, 0x33cf: 0x000a, 0x33d0: 0x000a, 0x33d1: 0x000a, + 0x33d2: 0x000a, 0x33d3: 0x000a, + 0x33e0: 0x000a, 0x33e1: 0x000a, 0x33e2: 0x000a, 0x33e3: 0x000a, + 0x33e4: 0x000a, 0x33e5: 0x000a, 0x33e6: 0x000a, 0x33e7: 0x000a, 0x33e8: 0x000a, 0x33e9: 0x000a, + 0x33ea: 0x000a, 0x33eb: 0x000a, 0x33ec: 0x000a, 0x33ed: 0x000a, 0x33ee: 0x000a, + 0x33f1: 0x000a, 0x33f2: 0x000a, 0x33f3: 0x000a, 0x33f4: 0x000a, 0x33f5: 0x000a, + 0x33f6: 0x000a, 0x33f7: 0x000a, 0x33f8: 0x000a, 0x33f9: 0x000a, 0x33fa: 0x000a, 0x33fb: 0x000a, + 0x33fc: 0x000a, 0x33fd: 0x000a, 0x33fe: 0x000a, 0x33ff: 0x000a, + // Block 0xd0, offset 0x3400 + 0x3401: 0x000a, 0x3402: 0x000a, 0x3403: 0x000a, 0x3404: 0x000a, 0x3405: 0x000a, + 0x3406: 0x000a, 0x3407: 0x000a, 0x3408: 0x000a, 0x3409: 0x000a, 0x340a: 0x000a, 0x340b: 0x000a, + 0x340c: 0x000a, 0x340d: 0x000a, 0x340e: 0x000a, 0x340f: 0x000a, 0x3411: 0x000a, + 0x3412: 0x000a, 0x3413: 0x000a, 0x3414: 0x000a, 0x3415: 0x000a, 0x3416: 0x000a, 0x3417: 0x000a, + 0x3418: 0x000a, 0x3419: 0x000a, 0x341a: 0x000a, 0x341b: 0x000a, 0x341c: 0x000a, 0x341d: 0x000a, + 0x341e: 0x000a, 0x341f: 0x000a, 0x3420: 0x000a, 0x3421: 0x000a, 0x3422: 0x000a, 0x3423: 0x000a, + 0x3424: 0x000a, 0x3425: 0x000a, 0x3426: 0x000a, 0x3427: 0x000a, 0x3428: 0x000a, 0x3429: 0x000a, + 0x342a: 0x000a, 0x342b: 0x000a, 0x342c: 0x000a, 0x342d: 0x000a, 0x342e: 0x000a, 0x342f: 0x000a, + 0x3430: 0x000a, 0x3431: 0x000a, 0x3432: 0x000a, 0x3433: 0x000a, 0x3434: 0x000a, 0x3435: 0x000a, + // Block 0xd1, offset 0x3440 + 0x3440: 0x0002, 0x3441: 0x0002, 0x3442: 0x0002, 0x3443: 0x0002, 0x3444: 0x0002, 0x3445: 0x0002, + 0x3446: 0x0002, 0x3447: 0x0002, 0x3448: 0x0002, 0x3449: 0x0002, 0x344a: 0x0002, 0x344b: 0x000a, + 0x344c: 0x000a, + // Block 0xd2, offset 0x3480 + 0x34aa: 0x000a, 0x34ab: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, + 0x34e0: 0x000a, 0x34e1: 0x000a, 0x34e2: 0x000a, 0x34e3: 0x000a, + 0x34e4: 0x000a, 0x34e5: 0x000a, 0x34e6: 0x000a, 0x34e7: 0x000a, 0x34e8: 0x000a, 0x34e9: 0x000a, + 0x34ea: 0x000a, 0x34eb: 0x000a, 0x34ec: 0x000a, + 0x34f0: 0x000a, 0x34f1: 0x000a, 0x34f2: 0x000a, 0x34f3: 0x000a, 0x34f4: 0x000a, 0x34f5: 0x000a, + 0x34f6: 0x000a, + // Block 0xd4, offset 0x3500 + 0x3500: 0x000a, 0x3501: 0x000a, 0x3502: 0x000a, 0x3503: 0x000a, 0x3504: 0x000a, 0x3505: 0x000a, + 0x3506: 0x000a, 0x3507: 0x000a, 0x3508: 0x000a, 0x3509: 0x000a, 0x350a: 0x000a, 0x350b: 0x000a, + 0x350c: 0x000a, 0x350d: 0x000a, 0x350e: 0x000a, 0x350f: 0x000a, 0x3510: 0x000a, 0x3511: 0x000a, + 0x3512: 0x000a, 0x3513: 0x000a, 0x3514: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3540: 0x000a, 0x3541: 0x000a, 0x3542: 0x000a, 0x3543: 0x000a, 0x3544: 0x000a, 0x3545: 0x000a, + 0x3546: 0x000a, 0x3547: 0x000a, 0x3548: 0x000a, 0x3549: 0x000a, 0x354a: 0x000a, 0x354b: 0x000a, + 0x3550: 0x000a, 0x3551: 0x000a, + 0x3552: 0x000a, 0x3553: 0x000a, 0x3554: 0x000a, 0x3555: 0x000a, 0x3556: 0x000a, 0x3557: 0x000a, + 0x3558: 0x000a, 0x3559: 0x000a, 0x355a: 0x000a, 0x355b: 0x000a, 0x355c: 0x000a, 0x355d: 0x000a, + 0x355e: 0x000a, 0x355f: 0x000a, 0x3560: 0x000a, 0x3561: 0x000a, 0x3562: 0x000a, 0x3563: 0x000a, + 0x3564: 0x000a, 0x3565: 0x000a, 
0x3566: 0x000a, 0x3567: 0x000a, 0x3568: 0x000a, 0x3569: 0x000a, + 0x356a: 0x000a, 0x356b: 0x000a, 0x356c: 0x000a, 0x356d: 0x000a, 0x356e: 0x000a, 0x356f: 0x000a, + 0x3570: 0x000a, 0x3571: 0x000a, 0x3572: 0x000a, 0x3573: 0x000a, 0x3574: 0x000a, 0x3575: 0x000a, + 0x3576: 0x000a, 0x3577: 0x000a, 0x3578: 0x000a, 0x3579: 0x000a, 0x357a: 0x000a, 0x357b: 0x000a, + 0x357c: 0x000a, 0x357d: 0x000a, 0x357e: 0x000a, 0x357f: 0x000a, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000a, 0x3581: 0x000a, 0x3582: 0x000a, 0x3583: 0x000a, 0x3584: 0x000a, 0x3585: 0x000a, + 0x3586: 0x000a, 0x3587: 0x000a, + 0x3590: 0x000a, 0x3591: 0x000a, + 0x3592: 0x000a, 0x3593: 0x000a, 0x3594: 0x000a, 0x3595: 0x000a, 0x3596: 0x000a, 0x3597: 0x000a, + 0x3598: 0x000a, 0x3599: 0x000a, + 0x35a0: 0x000a, 0x35a1: 0x000a, 0x35a2: 0x000a, 0x35a3: 0x000a, + 0x35a4: 0x000a, 0x35a5: 0x000a, 0x35a6: 0x000a, 0x35a7: 0x000a, 0x35a8: 0x000a, 0x35a9: 0x000a, + 0x35aa: 0x000a, 0x35ab: 0x000a, 0x35ac: 0x000a, 0x35ad: 0x000a, 0x35ae: 0x000a, 0x35af: 0x000a, + 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000a, 0x35b3: 0x000a, 0x35b4: 0x000a, 0x35b5: 0x000a, + 0x35b6: 0x000a, 0x35b7: 0x000a, 0x35b8: 0x000a, 0x35b9: 0x000a, 0x35ba: 0x000a, 0x35bb: 0x000a, + 0x35bc: 0x000a, 0x35bd: 0x000a, 0x35be: 0x000a, 0x35bf: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, + 0x35c6: 0x000a, 0x35c7: 0x000a, + 0x35d0: 0x000a, 0x35d1: 0x000a, + 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, + 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, + 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, + 0x35ea: 0x000a, 0x35eb: 0x000a, 0x35ec: 0x000a, 0x35ed: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, 0x3614: 0x000a, 0x3615: 0x000a, 0x3616: 0x000a, 0x3617: 0x000a, + 0x3618: 0x000a, 0x3619: 0x000a, 0x361a: 0x000a, 0x361b: 0x000a, 0x361c: 0x000a, 0x361d: 0x000a, + 0x361e: 0x000a, 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, + 0x3630: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, + 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000a, 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x3650: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, + 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, + 0x365e: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000a, 0x3683: 0x000a, 0x3684: 0x000a, 0x3685: 0x000a, + 0x3686: 0x000a, 0x3687: 0x000a, 0x3688: 0x000a, 0x3689: 0x000a, 0x368a: 0x000a, 0x368b: 0x000a, + 0x368c: 0x000a, 0x368d: 0x000a, 0x368e: 0x000a, 0x368f: 0x000a, 0x3690: 0x000a, 0x3691: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36fe: 0x000b, 0x36ff: 0x000b, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000b, 0x3701: 0x000b, 0x3702: 0x000b, 0x3703: 0x000b, 0x3704: 0x000b, 0x3705: 0x000b, + 
0x3706: 0x000b, 0x3707: 0x000b, 0x3708: 0x000b, 0x3709: 0x000b, 0x370a: 0x000b, 0x370b: 0x000b, + 0x370c: 0x000b, 0x370d: 0x000b, 0x370e: 0x000b, 0x370f: 0x000b, 0x3710: 0x000b, 0x3711: 0x000b, + 0x3712: 0x000b, 0x3713: 0x000b, 0x3714: 0x000b, 0x3715: 0x000b, 0x3716: 0x000b, 0x3717: 0x000b, + 0x3718: 0x000b, 0x3719: 0x000b, 0x371a: 0x000b, 0x371b: 0x000b, 0x371c: 0x000b, 0x371d: 0x000b, + 0x371e: 0x000b, 0x371f: 0x000b, 0x3720: 0x000b, 0x3721: 0x000b, 0x3722: 0x000b, 0x3723: 0x000b, + 0x3724: 0x000b, 0x3725: 0x000b, 0x3726: 0x000b, 0x3727: 0x000b, 0x3728: 0x000b, 0x3729: 0x000b, + 0x372a: 0x000b, 0x372b: 0x000b, 0x372c: 0x000b, 0x372d: 0x000b, 0x372e: 0x000b, 0x372f: 0x000b, + 0x3730: 0x000b, 0x3731: 0x000b, 0x3732: 0x000b, 0x3733: 0x000b, 0x3734: 0x000b, 0x3735: 0x000b, + 0x3736: 0x000b, 0x3737: 0x000b, 0x3738: 0x000b, 0x3739: 0x000b, 0x373a: 0x000b, 0x373b: 0x000b, + 0x373c: 0x000b, 0x373d: 0x000b, 0x373e: 0x000b, 0x373f: 0x000b, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000c, 0x3741: 0x000c, 0x3742: 0x000c, 0x3743: 0x000c, 0x3744: 0x000c, 0x3745: 0x000c, + 0x3746: 0x000c, 0x3747: 0x000c, 0x3748: 0x000c, 0x3749: 0x000c, 0x374a: 0x000c, 0x374b: 0x000c, + 0x374c: 0x000c, 0x374d: 0x000c, 0x374e: 0x000c, 0x374f: 0x000c, 0x3750: 0x000c, 0x3751: 0x000c, + 0x3752: 0x000c, 0x3753: 0x000c, 0x3754: 0x000c, 0x3755: 0x000c, 0x3756: 0x000c, 0x3757: 0x000c, + 0x3758: 0x000c, 0x3759: 0x000c, 0x375a: 0x000c, 0x375b: 0x000c, 0x375c: 0x000c, 0x375d: 0x000c, + 0x375e: 0x000c, 0x375f: 0x000c, 0x3760: 0x000c, 0x3761: 0x000c, 0x3762: 0x000c, 0x3763: 0x000c, + 0x3764: 0x000c, 0x3765: 0x000c, 0x3766: 0x000c, 0x3767: 0x000c, 0x3768: 0x000c, 0x3769: 0x000c, + 0x376a: 0x000c, 0x376b: 0x000c, 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, + 0x3770: 0x000b, 0x3771: 0x000b, 0x3772: 0x000b, 0x3773: 0x000b, 0x3774: 0x000b, 0x3775: 0x000b, + 0x3776: 0x000b, 0x3777: 0x000b, 0x3778: 0x000b, 0x3779: 0x000b, 0x377a: 0x000b, 0x377b: 0x000b, + 0x377c: 0x000b, 0x377d: 0x000b, 0x377e: 0x000b, 0x377f: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x59, + 0x190: 0x5a, 0x191: 0x5b, 0x192: 0x5c, 0x193: 0x5d, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5e, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5f, 0x19e: 0x54, 0x19f: 0x60, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x61, 0x1a7: 0x62, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x63, 0x1ae: 0x64, 0x1af: 0x65, + 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, + 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6d, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, + 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x78, 0x253: 0x79, + 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, + 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26f: 0x8b, + // Block 0xa, offset 0x280 + 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8e, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8f, + 0x2b8: 0x90, 0x2b9: 0x91, 0x2ba: 0x0e, 0x2bb: 0x92, 0x2bc: 0x93, 0x2bd: 0x94, 0x2bf: 0x95, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x96, 0x2c5: 0x54, 0x2c6: 0x97, 0x2c7: 0x98, + 0x2cb: 0x99, 0x2cd: 0x9a, + 0x2e0: 0x9b, 0x2e1: 0x9b, 0x2e2: 0x9b, 0x2e3: 0x9b, 0x2e4: 0x9c, 0x2e5: 0x9b, 0x2e6: 0x9b, 0x2e7: 0x9b, + 0x2e8: 0x9d, 0x2e9: 0x9b, 0x2ea: 0x9b, 0x2eb: 0x9e, 0x2ec: 0x9f, 0x2ed: 0x9b, 0x2ee: 0x9b, 0x2ef: 0x9b, + 0x2f0: 0x9b, 0x2f1: 0x9b, 0x2f2: 0x9b, 0x2f3: 0x9b, 0x2f4: 0x9b, 0x2f5: 0x9b, 0x2f6: 0x9b, 0x2f7: 0x9b, + 0x2f8: 0x9b, 0x2f9: 0xa0, 0x2fa: 0x9b, 0x2fb: 0x9b, 0x2fc: 0x9b, 0x2fd: 0x9b, 0x2fe: 0x9b, 0x2ff: 0x9b, + // Block 0xc, offset 0x300 + 0x300: 0xa1, 0x301: 0xa2, 0x302: 0xa3, 0x304: 0xa4, 0x305: 0xa5, 
0x306: 0xa6, 0x307: 0xa7, + 0x308: 0xa8, 0x30b: 0xa9, 0x30c: 0xaa, 0x30d: 0xab, + 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1, + 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5, + 0x330: 0xb6, 0x332: 0xb7, + // Block 0xd, offset 0x340 + 0x36b: 0xb8, 0x36c: 0xb9, + 0x37e: 0xba, + // Block 0xe, offset 0x380 + 0x3b2: 0xbb, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xbc, 0x3c6: 0xbd, + 0x3c8: 0x54, 0x3c9: 0xbe, 0x3cc: 0x54, 0x3cd: 0xbf, + 0x3db: 0xc0, 0x3dc: 0xc1, 0x3dd: 0xc2, 0x3de: 0xc3, 0x3df: 0xc4, + 0x3e8: 0xc5, 0x3e9: 0xc6, 0x3ea: 0xc7, + // Block 0x10, offset 0x400 + 0x400: 0xc8, + 0x420: 0x9b, 0x421: 0x9b, 0x422: 0x9b, 0x423: 0xc9, 0x424: 0x9b, 0x425: 0xca, 0x426: 0x9b, 0x427: 0x9b, + 0x428: 0x9b, 0x429: 0x9b, 0x42a: 0x9b, 0x42b: 0x9b, 0x42c: 0x9b, 0x42d: 0x9b, 0x42e: 0x9b, 0x42f: 0x9b, + 0x430: 0x9b, 0x431: 0x9b, 0x432: 0x9b, 0x433: 0x9b, 0x434: 0x9b, 0x435: 0x9b, 0x436: 0x9b, 0x437: 0x9b, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xcb, 0x43c: 0x9b, 0x43d: 0x9b, 0x43e: 0x9b, 0x43f: 0x9b, + // Block 0x11, offset 0x440 + 0x440: 0xcc, 0x441: 0x54, 0x442: 0xcd, 0x443: 0xce, 0x444: 0xcf, 0x445: 0xd0, + 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xd1, 0x45c: 0x54, 0x45d: 0x6c, 0x45e: 0x54, 0x45f: 0xd2, + 0x460: 0xd3, 0x461: 0xd4, 0x462: 0xd5, 0x464: 0xd6, 0x465: 0xd7, 0x466: 0xd8, 0x467: 0x36, + 0x47f: 0xd9, + // Block 0x12, offset 0x480 + 0x4bf: 0xd9, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xda, 0x541: 0xda, 0x542: 0xda, 0x543: 0xda, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xdb, + 0x548: 0xda, 0x549: 0xda, 0x54a: 0xda, 0x54b: 0xda, 0x54c: 0xda, 0x54d: 0xda, 0x54e: 0xda, 0x54f: 0xda, + 0x550: 0xda, 0x551: 0xda, 0x552: 0xda, 0x553: 0xda, 0x554: 0xda, 0x555: 0xda, 0x556: 0xda, 0x557: 0xda, + 0x558: 0xda, 0x559: 0xda, 0x55a: 0xda, 0x55b: 0xda, 0x55c: 0xda, 0x55d: 0xda, 0x55e: 0xda, 0x55f: 0xda, + 0x560: 0xda, 0x561: 0xda, 0x562: 0xda, 0x563: 0xda, 0x564: 0xda, 0x565: 0xda, 0x566: 0xda, 0x567: 0xda, + 0x568: 0xda, 0x569: 0xda, 0x56a: 0xda, 0x56b: 0xda, 0x56c: 0xda, 0x56d: 0xda, 0x56e: 0xda, 0x56f: 0xda, + 0x570: 0xda, 0x571: 0xda, 0x572: 0xda, 0x573: 0xda, 0x574: 0xda, 0x575: 0xda, 0x576: 0xda, 0x577: 0xda, + 0x578: 0xda, 0x579: 0xda, 0x57a: 0xda, 0x57b: 0xda, 0x57c: 0xda, 0x57d: 0xda, 0x57e: 0xda, 0x57f: 0xda, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 15800 bytes (15KiB); checksum: F50EF68C diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/trieval.go new file mode 100644 index 00000000..4c459c4b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -0,0 +1,60 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package bidi + +// Class is the Unicode BiDi class. Each rune has a single class. 
+type Class uint + +const ( + L Class = iota // LeftToRight + R // RightToLeft + EN // EuropeanNumber + ES // EuropeanSeparator + ET // EuropeanTerminator + AN // ArabicNumber + CS // CommonSeparator + B // ParagraphSeparator + S // SegmentSeparator + WS // WhiteSpace + ON // OtherNeutral + BN // BoundaryNeutral + NSM // NonspacingMark + AL // ArabicLetter + Control // Control LRO - PDI + + numClass + + LRO // LeftToRightOverride + RLO // RightToLeftOverride + LRE // LeftToRightEmbedding + RLE // RightToLeftEmbedding + PDF // PopDirectionalFormat + LRI // LeftToRightIsolate + RLI // RightToLeftIsolate + FSI // FirstStrongIsolate + PDI // PopDirectionalIsolate + + unknownClass = ^Class(0) +) + +var controlToClass = map[rune]Class{ + 0x202D: LRO, // LeftToRightOverride, + 0x202E: RLO, // RightToLeftOverride, + 0x202A: LRE, // LeftToRightEmbedding, + 0x202B: RLE, // RightToLeftEmbedding, + 0x202C: PDF, // PopDirectionalFormat, + 0x2066: LRI, // LeftToRightIsolate, + 0x2067: RLI, // RightToLeftIsolate, + 0x2068: FSI, // FirstStrongIsolate, + 0x2069: PDI, // PopDirectionalIsolate, +} + +// A trie entry has the following bits: +// 7..5 XOR mask for brackets +// 4 1: Bracket open, 0: Bracket close +// 3..0 Class type + +const ( + openMask = 0x10 + xorMaskShift = 5 +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/composition.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/composition.go new file mode 100644 index 00000000..bab4c5de --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/composition.go @@ -0,0 +1,508 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import "unicode/utf8" + +const ( + maxNonStarters = 30 + // The maximum number of characters needed for a buffer is + // maxNonStarters + 1 for the starter + 1 for the GCJ + maxBufferSize = maxNonStarters + 2 + maxNFCExpansion = 3 // NFC(0x1D160) + maxNFKCExpansion = 18 // NFKC(0xFDFA) + + maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128 +) + +// ssState is used for reporting the segment state after inserting a rune. +// It is returned by streamSafe.next. +type ssState int + +const ( + // Indicates a rune was successfully added to the segment. + ssSuccess ssState = iota + // Indicates a rune starts a new segment and should not be added. + ssStarter + // Indicates a rune caused a segment overflow and a CGJ should be inserted. + ssOverflow +) + +// streamSafe implements the policy of when a CGJ should be inserted. +type streamSafe uint8 + +// first inserts the first rune of a segment. It is a faster version of next if +// it is known p represents the first rune in a segment. +func (ss *streamSafe) first(p Properties) { + *ss = streamSafe(p.nTrailingNonStarters()) +} + +// insert returns a ssState value to indicate whether a rune represented by p +// can be inserted. +func (ss *streamSafe) next(p Properties) ssState { + if *ss > maxNonStarters { + panic("streamSafe was not reset") + } + n := p.nLeadingNonStarters() + if *ss += streamSafe(n); *ss > maxNonStarters { + *ss = 0 + return ssOverflow + } + // The Stream-Safe Text Processing prescribes that the counting can stop + // as soon as a starter is encountered. However, there are some starters, + // like Jamo V and T, that can combine with other runes, leaving their + // successive non-starters appended to the previous, possibly causing an + // overflow. 
We will therefore consider any rune with a non-zero nLead to + // be a non-starter. Note that it always hold that if nLead > 0 then + // nLead == nTrail. + if n == 0 { + *ss = streamSafe(p.nTrailingNonStarters()) + return ssStarter + } + return ssSuccess +} + +// backwards is used for checking for overflow and segment starts +// when traversing a string backwards. Users do not need to call first +// for the first rune. The state of the streamSafe retains the count of +// the non-starters loaded. +func (ss *streamSafe) backwards(p Properties) ssState { + if *ss > maxNonStarters { + panic("streamSafe was not reset") + } + c := *ss + streamSafe(p.nTrailingNonStarters()) + if c > maxNonStarters { + return ssOverflow + } + *ss = c + if p.nLeadingNonStarters() == 0 { + return ssStarter + } + return ssSuccess +} + +func (ss streamSafe) isMax() bool { + return ss == maxNonStarters +} + +// GraphemeJoiner is inserted after maxNonStarters non-starter runes. +const GraphemeJoiner = "\u034F" + +// reorderBuffer is used to normalize a single segment. Characters inserted with +// insert are decomposed and reordered based on CCC. The compose method can +// be used to recombine characters. Note that the byte buffer does not hold +// the UTF-8 characters in order. Only the rune array is maintained in sorted +// order. flush writes the resulting segment to a byte array. +type reorderBuffer struct { + rune [maxBufferSize]Properties // Per character info. + byte [maxByteBufferSize]byte // UTF-8 buffer. Referenced by runeInfo.pos. + nbyte uint8 // Number or bytes. + ss streamSafe // For limiting length of non-starter sequence. + nrune int // Number of runeInfos. + f formInfo + + src input + nsrc int + tmpBytes input + + out []byte + flushF func(*reorderBuffer) bool +} + +func (rb *reorderBuffer) init(f Form, src []byte) { + rb.f = *formTable[f] + rb.src.setBytes(src) + rb.nsrc = len(src) + rb.ss = 0 +} + +func (rb *reorderBuffer) initString(f Form, src string) { + rb.f = *formTable[f] + rb.src.setString(src) + rb.nsrc = len(src) + rb.ss = 0 +} + +func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) { + rb.out = out + rb.flushF = f +} + +// reset discards all characters from the buffer. +func (rb *reorderBuffer) reset() { + rb.nrune = 0 + rb.nbyte = 0 +} + +func (rb *reorderBuffer) doFlush() bool { + if rb.f.composing { + rb.compose() + } + res := rb.flushF(rb) + rb.reset() + return res +} + +// appendFlush appends the normalized segment to rb.out. +func appendFlush(rb *reorderBuffer) bool { + for i := 0; i < rb.nrune; i++ { + start := rb.rune[i].pos + end := start + rb.rune[i].size + rb.out = append(rb.out, rb.byte[start:end]...) + } + return true +} + +// flush appends the normalized segment to out and resets rb. +func (rb *reorderBuffer) flush(out []byte) []byte { + for i := 0; i < rb.nrune; i++ { + start := rb.rune[i].pos + end := start + rb.rune[i].size + out = append(out, rb.byte[start:end]...) + } + rb.reset() + return out +} + +// flushCopy copies the normalized segment to buf and resets rb. +// It returns the number of bytes written to buf. +func (rb *reorderBuffer) flushCopy(buf []byte) int { + p := 0 + for i := 0; i < rb.nrune; i++ { + runep := rb.rune[i] + p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size]) + } + rb.reset() + return p +} + +// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class. +// It returns false if the buffer is not large enough to hold the rune. +// It is used internally by insert and insertString only. 
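The streamSafe type and the GraphemeJoiner constant above implement the Stream-Safe Text Format: once a segment has accumulated maxNonStarters (30) non-starters, a CGJ (U+034F) is forced into the output so the reorder buffer stays bounded. A minimal sketch of how this can be observed through the package's exported API; norm.NFC.String and the strings helpers are real, but the exact placement of the inserted CGJ is an assumption based on the comments above, not something verified here.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// A starter followed by far more than maxNonStarters (30) combining acutes.
	s := "e" + strings.Repeat("\u0301", 40)

	// Assuming the stream-safe policy described above applies to the exported
	// forms, the normalized output should contain at least one CGJ (U+034F)
	// breaking up the long run of non-starters.
	out := norm.NFC.String(s)
	fmt.Println(strings.Contains(out, "\u034f"))
}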
+func (rb *reorderBuffer) insertOrdered(info Properties) { + n := rb.nrune + b := rb.rune[:] + cc := info.ccc + if cc > 0 { + // Find insertion position + move elements to make room. + for ; n > 0; n-- { + if b[n-1].ccc <= cc { + break + } + b[n] = b[n-1] + } + } + rb.nrune += 1 + pos := uint8(rb.nbyte) + rb.nbyte += utf8.UTFMax + info.pos = pos + b[n] = info +} + +// insertErr is an error code returned by insert. Using this type instead +// of error improves performance up to 20% for many of the benchmarks. +type insertErr int + +const ( + iSuccess insertErr = -iota + iShortDst + iShortSrc +) + +// insertFlush inserts the given rune in the buffer ordered by CCC. +// If a decomposition with multiple segments are encountered, they leading +// ones are flushed. +// It returns a non-zero error code if the rune was not inserted. +func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr { + if rune := src.hangul(i); rune != 0 { + rb.decomposeHangul(rune) + return iSuccess + } + if info.hasDecomposition() { + return rb.insertDecomposed(info.Decomposition()) + } + rb.insertSingle(src, i, info) + return iSuccess +} + +// insertUnsafe inserts the given rune in the buffer ordered by CCC. +// It is assumed there is sufficient space to hold the runes. It is the +// responsibility of the caller to ensure this. This can be done by checking +// the state returned by the streamSafe type. +func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) { + if rune := src.hangul(i); rune != 0 { + rb.decomposeHangul(rune) + } + if info.hasDecomposition() { + // TODO: inline. + rb.insertDecomposed(info.Decomposition()) + } else { + rb.insertSingle(src, i, info) + } +} + +// insertDecomposed inserts an entry in to the reorderBuffer for each rune +// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes. +// It flushes the buffer on each new segment start. +func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr { + rb.tmpBytes.setBytes(dcomp) + // As the streamSafe accounting already handles the counting for modifiers, + // we don't have to call next. However, we do need to keep the accounting + // intact when flushing the buffer. + for i := 0; i < len(dcomp); { + info := rb.f.info(rb.tmpBytes, i) + if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() { + return iShortDst + } + i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)]) + rb.insertOrdered(info) + } + return iSuccess +} + +// insertSingle inserts an entry in the reorderBuffer for the rune at +// position i. info is the runeInfo for the rune at position i. +func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) { + src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size)) + rb.insertOrdered(info) +} + +// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb. +func (rb *reorderBuffer) insertCGJ() { + rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))}) +} + +// appendRune inserts a rune at the end of the buffer. It is used for Hangul. +func (rb *reorderBuffer) appendRune(r rune) { + bn := rb.nbyte + sz := utf8.EncodeRune(rb.byte[bn:], rune(r)) + rb.nbyte += utf8.UTFMax + rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)} + rb.nrune++ +} + +// assignRune sets a rune at position pos. It is used for Hangul and recomposition. 
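insertOrdered above performs the canonical-reordering step: combining marks within a segment are kept sorted by their Canonical Combining Class (CCC). A small sketch of the observable effect through the exported API; the CCC values quoted in the comments are standard Unicode data assumed here, not values read from these tables.

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// U+0301 COMBINING ACUTE ACCENT carries CCC 230 and U+0323 COMBINING DOT
	// BELOW carries CCC 220, so canonical reordering should move the dot
	// below in front of the acute.
	s := "e\u0301\u0323"
	fmt.Printf("%+q\n", norm.NFD.String(s)) // expected: "e\u0323\u0301"
}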
+func (rb *reorderBuffer) assignRune(pos int, r rune) { + bn := rb.rune[pos].pos + sz := utf8.EncodeRune(rb.byte[bn:], rune(r)) + rb.rune[pos] = Properties{pos: bn, size: uint8(sz)} +} + +// runeAt returns the rune at position n. It is used for Hangul and recomposition. +func (rb *reorderBuffer) runeAt(n int) rune { + inf := rb.rune[n] + r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size]) + return r +} + +// bytesAt returns the UTF-8 encoding of the rune at position n. +// It is used for Hangul and recomposition. +func (rb *reorderBuffer) bytesAt(n int) []byte { + inf := rb.rune[n] + return rb.byte[inf.pos : int(inf.pos)+int(inf.size)] +} + +// For Hangul we combine algorithmically, instead of using tables. +const ( + hangulBase = 0xAC00 // UTF-8(hangulBase) -> EA B0 80 + hangulBase0 = 0xEA + hangulBase1 = 0xB0 + hangulBase2 = 0x80 + + hangulEnd = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4 + hangulEnd0 = 0xED + hangulEnd1 = 0x9E + hangulEnd2 = 0xA4 + + jamoLBase = 0x1100 // UTF-8(jamoLBase) -> E1 84 00 + jamoLBase0 = 0xE1 + jamoLBase1 = 0x84 + jamoLEnd = 0x1113 + jamoVBase = 0x1161 + jamoVEnd = 0x1176 + jamoTBase = 0x11A7 + jamoTEnd = 0x11C3 + + jamoTCount = 28 + jamoVCount = 21 + jamoVTCount = 21 * 28 + jamoLVTCount = 19 * 21 * 28 +) + +const hangulUTF8Size = 3 + +func isHangul(b []byte) bool { + if len(b) < hangulUTF8Size { + return false + } + b0 := b[0] + if b0 < hangulBase0 { + return false + } + b1 := b[1] + switch { + case b0 == hangulBase0: + return b1 >= hangulBase1 + case b0 < hangulEnd0: + return true + case b0 > hangulEnd0: + return false + case b1 < hangulEnd1: + return true + } + return b1 == hangulEnd1 && b[2] < hangulEnd2 +} + +func isHangulString(b string) bool { + if len(b) < hangulUTF8Size { + return false + } + b0 := b[0] + if b0 < hangulBase0 { + return false + } + b1 := b[1] + switch { + case b0 == hangulBase0: + return b1 >= hangulBase1 + case b0 < hangulEnd0: + return true + case b0 > hangulEnd0: + return false + case b1 < hangulEnd1: + return true + } + return b1 == hangulEnd1 && b[2] < hangulEnd2 +} + +// Caller must ensure len(b) >= 2. +func isJamoVT(b []byte) bool { + // True if (rune & 0xff00) == jamoLBase + return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1 +} + +func isHangulWithoutJamoT(b []byte) bool { + c, _ := utf8.DecodeRune(b) + c -= hangulBase + return c < jamoLVTCount && c%jamoTCount == 0 +} + +// decomposeHangul writes the decomposed Hangul to buf and returns the number +// of bytes written. len(buf) should be at least 9. +func decomposeHangul(buf []byte, r rune) int { + const JamoUTF8Len = 3 + r -= hangulBase + x := r % jamoTCount + r /= jamoTCount + utf8.EncodeRune(buf, jamoLBase+r/jamoVCount) + utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount) + if x != 0 { + utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x) + return 3 * JamoUTF8Len + } + return 2 * JamoUTF8Len +} + +// decomposeHangul algorithmically decomposes a Hangul rune into +// its Jamo components. +// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul. +func (rb *reorderBuffer) decomposeHangul(r rune) { + r -= hangulBase + x := r % jamoTCount + r /= jamoTCount + rb.appendRune(jamoLBase + r/jamoVCount) + rb.appendRune(jamoVBase + r%jamoVCount) + if x != 0 { + rb.appendRune(jamoTBase + x) + } +} + +// combineHangul algorithmically combines Jamo character components into Hangul. +// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul. 
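decomposeHangul above needs no lookup tables: an LVT syllable is split with integer arithmetic over the jamo constants defined earlier in this file. A worked sketch of that arithmetic for U+AC01 (각); the constants are repeated with the same values purely so the snippet is self-contained.

package main

import "fmt"

// Values mirror the Hangul/jamo constants declared in the vendored file above.
const (
	hangulBase = 0xAC00
	jamoLBase  = 0x1100
	jamoVBase  = 0x1161
	jamoTBase  = 0x11A7
	jamoTCount = 28
	jamoVCount = 21
)

func main() {
	// U+AC01 (각) decomposes into a leading consonant, a vowel and a
	// trailing consonant.
	r := rune(0xAC01) - hangulBase // 1
	t := r % jamoTCount            // 1: trailing-consonant index
	r /= jamoTCount                // 0
	l := jamoLBase + r/jamoVCount  // U+1100
	v := jamoVBase + r%jamoVCount  // U+1161
	fmt.Printf("%U %U", l, v)
	if t != 0 {
		fmt.Printf(" %U", jamoTBase+t) // U+11A8
	}
	fmt.Println()
}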
+func (rb *reorderBuffer) combineHangul(s, i, k int) { + b := rb.rune[:] + bn := rb.nrune + for ; i < bn; i++ { + cccB := b[k-1].ccc + cccC := b[i].ccc + if cccB == 0 { + s = k - 1 + } + if s != k-1 && cccB >= cccC { + // b[i] is blocked by greater-equal cccX below it + b[k] = b[i] + k++ + } else { + l := rb.runeAt(s) // also used to compare to hangulBase + v := rb.runeAt(i) // also used to compare to jamoT + switch { + case jamoLBase <= l && l < jamoLEnd && + jamoVBase <= v && v < jamoVEnd: + // 11xx plus 116x to LV + rb.assignRune(s, hangulBase+ + (l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount) + case hangulBase <= l && l < hangulEnd && + jamoTBase < v && v < jamoTEnd && + ((l-hangulBase)%jamoTCount) == 0: + // ACxx plus 11Ax to LVT + rb.assignRune(s, l+v-jamoTBase) + default: + b[k] = b[i] + k++ + } + } + } + rb.nrune = k +} + +// compose recombines the runes in the buffer. +// It should only be used to recompose a single segment, as it will not +// handle alternations between Hangul and non-Hangul characters correctly. +func (rb *reorderBuffer) compose() { + // UAX #15, section X5 , including Corrigendum #5 + // "In any character sequence beginning with starter S, a character C is + // blocked from S if and only if there is some character B between S + // and C, and either B is a starter or it has the same or higher + // combining class as C." + bn := rb.nrune + if bn == 0 { + return + } + k := 1 + b := rb.rune[:] + for s, i := 0, 1; i < bn; i++ { + if isJamoVT(rb.bytesAt(i)) { + // Redo from start in Hangul mode. Necessary to support + // U+320E..U+321E in NFKC mode. + rb.combineHangul(s, i, k) + return + } + ii := b[i] + // We can only use combineForward as a filter if we later + // get the info for the combined character. This is more + // expensive than using the filter. Using combinesBackward() + // is safe. + if ii.combinesBackward() { + cccB := b[k-1].ccc + cccC := ii.ccc + blocked := false // b[i] blocked by starter or greater or equal CCC? + if cccB == 0 { + s = k - 1 + } else { + blocked = s != k-1 && cccB >= cccC + } + if !blocked { + combined := combine(rb.runeAt(s), rb.runeAt(i)) + if combined != 0 { + rb.assignRune(s, combined) + continue + } + } + } + b[k] = b[i] + k++ + } + rb.nrune = k +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/forminfo.go new file mode 100644 index 00000000..e67e7655 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -0,0 +1,259 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +// This file contains Form-specific logic and wrappers for data in tables.go. + +// Rune info is stored in a separate trie per composing form. A composing form +// and its corresponding decomposing form share the same trie. Each trie maps +// a rune to a uint16. The values take two forms. For v >= 0x8000: +// bits +// 15: 1 (inverse of NFD_QC bit of qcInfo) +// 13..7: qcInfo (see below). isYesD is always true (no decompostion). +// 6..0: ccc (compressed CCC value). +// For v < 0x8000, the respective rune has a decomposition and v is an index +// into a byte array of UTF-8 decomposition sequences and additional info and +// has the form: +//
+// <header> <decomp_byte>* [<tccc> [<lccc>]]
+// The header contains the number of bytes in the decomposition (excluding this
+// length byte). The two most significant bits of this length byte correspond
+// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
+// The byte sequence is followed by a trailing and leading CCC if the values
+// for these are not zero. The value of v determines which ccc are appended
+// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
+// the sequence is followed by a trailing ccc, and for v >= firstLeadingCC
+// there is an additional leading ccc. The value of tccc itself is the
+// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
+// are the number of trailing non-starters.
+
+const (
+ qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
+ headerLenMask = 0x3F // extract the length value from the header byte
+ headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
+)
+
+// Properties provides access to normalization properties of a rune.
+type Properties struct {
+ pos uint8 // start position in reorderBuffer; used in composition.go
+ size uint8 // length of UTF-8 encoding of this rune
+ ccc uint8 // leading canonical combining class (ccc if not decomposition)
+ tccc uint8 // trailing canonical combining class (ccc if not decomposition)
+ nLead uint8 // number of leading non-starters.
+ flags qcInfo // quick check flags
+ index uint16
+}
+
+// functions dispatchable per form
+type lookupFunc func(b input, i int) Properties
+
+// formInfo holds Form-specific functions and tables.
+type formInfo struct {
+ form Form
+ composing, compatibility bool // form type
+ info lookupFunc
+ nextMain iterFunc
+}
+
+var formTable = []*formInfo{{
+ form: NFC,
+ composing: true,
+ compatibility: false,
+ info: lookupInfoNFC,
+ nextMain: nextComposed,
+}, {
+ form: NFD,
+ composing: false,
+ compatibility: false,
+ info: lookupInfoNFC,
+ nextMain: nextDecomposed,
+}, {
+ form: NFKC,
+ composing: true,
+ compatibility: true,
+ info: lookupInfoNFKC,
+ nextMain: nextComposed,
+}, {
+ form: NFKD,
+ composing: false,
+ compatibility: true,
+ info: lookupInfoNFKC,
+ nextMain: nextDecomposed,
+}}
+
+// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
+// unexpected behavior for the user. For example, in NFD, there is a boundary
+// after 'a'. However, 'a' might combine with modifiers, so from the application's
+// perspective it is not a good boundary. We will therefore always use the
+// boundaries for the combining variants.
+
+// BoundaryBefore returns true if this rune starts a new segment and
+// cannot combine with any rune on the left.
+func (p Properties) BoundaryBefore() bool {
+ if p.ccc == 0 && !p.combinesBackward() {
+ return true
+ }
+ // We assume that the CCC of the first character in a decomposition
+ // is always non-zero if different from info.ccc and that we can return
+ // false at this point. This is verified by maketables.
+ return false
+}
+
+// BoundaryAfter returns true if runes cannot combine with or otherwise
+// interact with this or previous runes.
+func (p Properties) BoundaryAfter() bool {
+ // TODO: loosen these conditions.
+ return p.isInert()
+}
+
+// We pack quick check data in 4 bits:
+// 5: Combines forward (0 == false, 1 == true)
+// 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
+// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
+// 1..0: Number of trailing non-starters. 
+// +// When all 4 bits are zero, the character is inert, meaning it is never +// influenced by normalization. +type qcInfo uint8 + +func (p Properties) isYesC() bool { return p.flags&0x10 == 0 } +func (p Properties) isYesD() bool { return p.flags&0x4 == 0 } + +func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 } +func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe +func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD + +func (p Properties) isInert() bool { + return p.flags&qcInfoMask == 0 && p.ccc == 0 +} + +func (p Properties) multiSegment() bool { + return p.index >= firstMulti && p.index < endMulti +} + +func (p Properties) nLeadingNonStarters() uint8 { + return p.nLead +} + +func (p Properties) nTrailingNonStarters() uint8 { + return uint8(p.flags & 0x03) +} + +// Decomposition returns the decomposition for the underlying rune +// or nil if there is none. +func (p Properties) Decomposition() []byte { + // TODO: create the decomposition for Hangul? + if p.index == 0 { + return nil + } + i := p.index + n := decomps[i] & headerLenMask + i++ + return decomps[i : i+uint16(n)] +} + +// Size returns the length of UTF-8 encoding of the rune. +func (p Properties) Size() int { + return int(p.size) +} + +// CCC returns the canonical combining class of the underlying rune. +func (p Properties) CCC() uint8 { + if p.index >= firstCCCZeroExcept { + return 0 + } + return ccc[p.ccc] +} + +// LeadCCC returns the CCC of the first rune in the decomposition. +// If there is no decomposition, LeadCCC equals CCC. +func (p Properties) LeadCCC() uint8 { + return ccc[p.ccc] +} + +// TrailCCC returns the CCC of the last rune in the decomposition. +// If there is no decomposition, TrailCCC equals CCC. +func (p Properties) TrailCCC() uint8 { + return ccc[p.tccc] +} + +// Recomposition +// We use 32-bit keys instead of 64-bit for the two codepoint keys. +// This clips off the bits of three entries, but we know this will not +// result in a collision. In the unlikely event that changes to +// UnicodeData.txt introduce collisions, the compiler will catch it. +// Note that the recomposition map for NFC and NFKC are identical. + +// combine returns the combined rune or 0 if it doesn't exist. +func combine(a, b rune) rune { + key := uint32(uint16(a))<<16 + uint32(uint16(b)) + return recompMap[key] +} + +func lookupInfoNFC(b input, i int) Properties { + v, sz := b.charinfoNFC(i) + return compInfo(v, sz) +} + +func lookupInfoNFKC(b input, i int) Properties { + v, sz := b.charinfoNFKC(i) + return compInfo(v, sz) +} + +// Properties returns properties for the first rune in s. +func (f Form) Properties(s []byte) Properties { + if f == NFC || f == NFD { + return compInfo(nfcData.lookup(s)) + } + return compInfo(nfkcData.lookup(s)) +} + +// PropertiesString returns properties for the first rune in s. +func (f Form) PropertiesString(s string) Properties { + if f == NFC || f == NFD { + return compInfo(nfcData.lookupString(s)) + } + return compInfo(nfkcData.lookupString(s)) +} + +// compInfo converts the information contained in v and sz +// to a Properties. See the comment at the top of the file +// for more information on the format. 
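The exported accessors above (Size, CCC, LeadCCC, TrailCCC, Decomposition) are the public view of this packed data. A minimal sketch of querying them through a Form; the sample runes are mine, and the import path is the one the package declares for itself:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// U+00E9 (é) occupies two bytes, has combining class 0 and carries a
	// canonical decomposition into 'e' followed by U+0301.
	p := norm.NFD.PropertiesString("\u00e9")
	fmt.Printf("%d %d %q\n", p.Size(), p.CCC(), p.Decomposition())

	// The combining acute accent itself is a non-starter (CCC 230), so it
	// can never begin a new segment.
	q := norm.NFD.PropertiesString("\u0301")
	fmt.Println(q.CCC(), q.BoundaryBefore()) // 230 false
}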
+func compInfo(v uint16, sz int) Properties { + if v == 0 { + return Properties{size: uint8(sz)} + } else if v >= 0x8000 { + p := Properties{ + size: uint8(sz), + ccc: uint8(v), + tccc: uint8(v), + flags: qcInfo(v >> 8), + } + if p.ccc > 0 || p.combinesBackward() { + p.nLead = uint8(p.flags & 0x3) + } + return p + } + // has decomposition + h := decomps[v] + f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4 + p := Properties{size: uint8(sz), flags: f, index: v} + if v >= firstCCC { + v += uint16(h&headerLenMask) + 1 + c := decomps[v] + p.tccc = c >> 2 + p.flags |= qcInfo(c & 0x3) + if v >= firstLeadingCCC { + p.nLead = c & 0x3 + if v >= firstStarterWithNLead { + // We were tricked. Remove the decomposition. + p.flags &= 0x03 + p.index = 0 + return p + } + p.ccc = decomps[v+1] + } + } + return p +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/input.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/input.go new file mode 100644 index 00000000..479e35bc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/input.go @@ -0,0 +1,109 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import "unicode/utf8" + +type input struct { + str string + bytes []byte +} + +func inputBytes(str []byte) input { + return input{bytes: str} +} + +func inputString(str string) input { + return input{str: str} +} + +func (in *input) setBytes(str []byte) { + in.str = "" + in.bytes = str +} + +func (in *input) setString(str string) { + in.str = str + in.bytes = nil +} + +func (in *input) _byte(p int) byte { + if in.bytes == nil { + return in.str[p] + } + return in.bytes[p] +} + +func (in *input) skipASCII(p, max int) int { + if in.bytes == nil { + for ; p < max && in.str[p] < utf8.RuneSelf; p++ { + } + } else { + for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ { + } + } + return p +} + +func (in *input) skipContinuationBytes(p int) int { + if in.bytes == nil { + for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ { + } + } else { + for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ { + } + } + return p +} + +func (in *input) appendSlice(buf []byte, b, e int) []byte { + if in.bytes != nil { + return append(buf, in.bytes[b:e]...) 
+ } + for i := b; i < e; i++ { + buf = append(buf, in.str[i]) + } + return buf +} + +func (in *input) copySlice(buf []byte, b, e int) int { + if in.bytes == nil { + return copy(buf, in.str[b:e]) + } + return copy(buf, in.bytes[b:e]) +} + +func (in *input) charinfoNFC(p int) (uint16, int) { + if in.bytes == nil { + return nfcData.lookupString(in.str[p:]) + } + return nfcData.lookup(in.bytes[p:]) +} + +func (in *input) charinfoNFKC(p int) (uint16, int) { + if in.bytes == nil { + return nfkcData.lookupString(in.str[p:]) + } + return nfkcData.lookup(in.bytes[p:]) +} + +func (in *input) hangul(p int) (r rune) { + var size int + if in.bytes == nil { + if !isHangulString(in.str[p:]) { + return 0 + } + r, size = utf8.DecodeRuneInString(in.str[p:]) + } else { + if !isHangul(in.bytes[p:]) { + return 0 + } + r, size = utf8.DecodeRune(in.bytes[p:]) + } + if size != hangulUTF8Size { + return 0 + } + return r +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/iter.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/iter.go new file mode 100644 index 00000000..ce17f96c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/iter.go @@ -0,0 +1,457 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import ( + "fmt" + "unicode/utf8" +) + +// MaxSegmentSize is the maximum size of a byte buffer needed to consider any +// sequence of starter and non-starter runes for the purpose of normalization. +const MaxSegmentSize = maxByteBufferSize + +// An Iter iterates over a string or byte slice, while normalizing it +// to a given Form. +type Iter struct { + rb reorderBuffer + buf [maxByteBufferSize]byte + info Properties // first character saved from previous iteration + next iterFunc // implementation of next depends on form + asciiF iterFunc + + p int // current position in input source + multiSeg []byte // remainder of multi-segment decomposition +} + +type iterFunc func(*Iter) []byte + +// Init initializes i to iterate over src after normalizing it to Form f. +func (i *Iter) Init(f Form, src []byte) { + i.p = 0 + if len(src) == 0 { + i.setDone() + i.rb.nsrc = 0 + return + } + i.multiSeg = nil + i.rb.init(f, src) + i.next = i.rb.f.nextMain + i.asciiF = nextASCIIBytes + i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) +} + +// InitString initializes i to iterate over src after normalizing it to Form f. +func (i *Iter) InitString(f Form, src string) { + i.p = 0 + if len(src) == 0 { + i.setDone() + i.rb.nsrc = 0 + return + } + i.multiSeg = nil + i.rb.initString(f, src) + i.next = i.rb.f.nextMain + i.asciiF = nextASCIIString + i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) +} + +// Seek sets the segment to be returned by the next call to Next to start +// at position p. It is the responsibility of the caller to set p to the +// start of a segment. 
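The Iter type above yields one normalization segment per call to Next, which is useful when converting a whole string at once would allocate too much. A small sketch of the usual InitString/Done/Next loop, with a sample input of my own:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// Next returns one normalized segment per call: a starter plus the
	// modifiers that attach to it.
	var it norm.Iter
	it.InitString(norm.NFC, "e\u0301e\u0300") // "é" and "è" in decomposed form
	for !it.Done() {
		fmt.Printf("%q\n", it.Next()) // "é", then "è"
	}
}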
+func (i *Iter) Seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case 0: + abs = offset + case 1: + abs = int64(i.p) + offset + case 2: + abs = int64(i.rb.nsrc) + offset + default: + return 0, fmt.Errorf("norm: invalid whence") + } + if abs < 0 { + return 0, fmt.Errorf("norm: negative position") + } + if int(abs) >= i.rb.nsrc { + i.setDone() + return int64(i.p), nil + } + i.p = int(abs) + i.multiSeg = nil + i.next = i.rb.f.nextMain + i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) + return abs, nil +} + +// returnSlice returns a slice of the underlying input type as a byte slice. +// If the underlying is of type []byte, it will simply return a slice. +// If the underlying is of type string, it will copy the slice to the buffer +// and return that. +func (i *Iter) returnSlice(a, b int) []byte { + if i.rb.src.bytes == nil { + return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])] + } + return i.rb.src.bytes[a:b] +} + +// Pos returns the byte position at which the next call to Next will commence processing. +func (i *Iter) Pos() int { + return i.p +} + +func (i *Iter) setDone() { + i.next = nextDone + i.p = i.rb.nsrc +} + +// Done returns true if there is no more input to process. +func (i *Iter) Done() bool { + return i.p >= i.rb.nsrc +} + +// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input. +// For any input a and b for which f(a) == f(b), subsequent calls +// to Next will return the same segments. +// Modifying runes are grouped together with the preceding starter, if such a starter exists. +// Although not guaranteed, n will typically be the smallest possible n. +func (i *Iter) Next() []byte { + return i.next(i) +} + +func nextASCIIBytes(i *Iter) []byte { + p := i.p + 1 + if p >= i.rb.nsrc { + i.setDone() + return i.rb.src.bytes[i.p:p] + } + if i.rb.src.bytes[p] < utf8.RuneSelf { + p0 := i.p + i.p = p + return i.rb.src.bytes[p0:p] + } + i.info = i.rb.f.info(i.rb.src, i.p) + i.next = i.rb.f.nextMain + return i.next(i) +} + +func nextASCIIString(i *Iter) []byte { + p := i.p + 1 + if p >= i.rb.nsrc { + i.buf[0] = i.rb.src.str[i.p] + i.setDone() + return i.buf[:1] + } + if i.rb.src.str[p] < utf8.RuneSelf { + i.buf[0] = i.rb.src.str[i.p] + i.p = p + return i.buf[:1] + } + i.info = i.rb.f.info(i.rb.src, i.p) + i.next = i.rb.f.nextMain + return i.next(i) +} + +func nextHangul(i *Iter) []byte { + p := i.p + next := p + hangulUTF8Size + if next >= i.rb.nsrc { + i.setDone() + } else if i.rb.src.hangul(next) == 0 { + i.rb.ss.next(i.info) + i.info = i.rb.f.info(i.rb.src, i.p) + i.next = i.rb.f.nextMain + return i.next(i) + } + i.p = next + return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))] +} + +func nextDone(i *Iter) []byte { + return nil +} + +// nextMulti is used for iterating over multi-segment decompositions +// for decomposing normal forms. +func nextMulti(i *Iter) []byte { + j := 0 + d := i.multiSeg + // skip first rune + for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ { + } + for j < len(d) { + info := i.rb.f.info(input{bytes: d}, j) + if info.BoundaryBefore() { + i.multiSeg = d[j:] + return d[:j] + } + j += int(info.size) + } + // treat last segment as normal decomposition + i.next = i.rb.f.nextMain + return i.next(i) +} + +// nextMultiNorm is used for iterating over multi-segment decompositions +// for composing normal forms. 
+func nextMultiNorm(i *Iter) []byte { + j := 0 + d := i.multiSeg + for j < len(d) { + info := i.rb.f.info(input{bytes: d}, j) + if info.BoundaryBefore() { + i.rb.compose() + seg := i.buf[:i.rb.flushCopy(i.buf[:])] + i.rb.insertUnsafe(input{bytes: d}, j, info) + i.multiSeg = d[j+int(info.size):] + return seg + } + i.rb.insertUnsafe(input{bytes: d}, j, info) + j += int(info.size) + } + i.multiSeg = nil + i.next = nextComposed + return doNormComposed(i) +} + +// nextDecomposed is the implementation of Next for forms NFD and NFKD. +func nextDecomposed(i *Iter) (next []byte) { + outp := 0 + inCopyStart, outCopyStart := i.p, 0 + for { + if sz := int(i.info.size); sz <= 1 { + i.rb.ss = 0 + p := i.p + i.p++ // ASCII or illegal byte. Either way, advance by 1. + if i.p >= i.rb.nsrc { + i.setDone() + return i.returnSlice(p, i.p) + } else if i.rb.src._byte(i.p) < utf8.RuneSelf { + i.next = i.asciiF + return i.returnSlice(p, i.p) + } + outp++ + } else if d := i.info.Decomposition(); d != nil { + // Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero. + // Case 1: there is a leftover to copy. In this case the decomposition + // must begin with a modifier and should always be appended. + // Case 2: no leftover. Simply return d if followed by a ccc == 0 value. + p := outp + len(d) + if outp > 0 { + i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) + // TODO: this condition should not be possible, but we leave it + // in for defensive purposes. + if p > len(i.buf) { + return i.buf[:outp] + } + } else if i.info.multiSegment() { + // outp must be 0 as multi-segment decompositions always + // start a new segment. + if i.multiSeg == nil { + i.multiSeg = d + i.next = nextMulti + return nextMulti(i) + } + // We are in the last segment. Treat as normal decomposition. + d = i.multiSeg + i.multiSeg = nil + p = len(d) + } + prevCC := i.info.tccc + if i.p += sz; i.p >= i.rb.nsrc { + i.setDone() + i.info = Properties{} // Force BoundaryBefore to succeed. + } else { + i.info = i.rb.f.info(i.rb.src, i.p) + } + switch i.rb.ss.next(i.info) { + case ssOverflow: + i.next = nextCGJDecompose + fallthrough + case ssStarter: + if outp > 0 { + copy(i.buf[outp:], d) + return i.buf[:p] + } + return d + } + copy(i.buf[outp:], d) + outp = p + inCopyStart, outCopyStart = i.p, outp + if i.info.ccc < prevCC { + goto doNorm + } + continue + } else if r := i.rb.src.hangul(i.p); r != 0 { + outp = decomposeHangul(i.buf[:], r) + i.p += hangulUTF8Size + inCopyStart, outCopyStart = i.p, outp + if i.p >= i.rb.nsrc { + i.setDone() + break + } else if i.rb.src.hangul(i.p) != 0 { + i.next = nextHangul + return i.buf[:outp] + } + } else { + p := outp + sz + if p > len(i.buf) { + break + } + outp = p + i.p += sz + } + if i.p >= i.rb.nsrc { + i.setDone() + break + } + prevCC := i.info.tccc + i.info = i.rb.f.info(i.rb.src, i.p) + if v := i.rb.ss.next(i.info); v == ssStarter { + break + } else if v == ssOverflow { + i.next = nextCGJDecompose + break + } + if i.info.ccc < prevCC { + goto doNorm + } + } + if outCopyStart == 0 { + return i.returnSlice(inCopyStart, i.p) + } else if inCopyStart < i.p { + i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) + } + return i.buf[:outp] +doNorm: + // Insert what we have decomposed so far in the reorderBuffer. + // As we will only reorder, there will always be enough room. 
+ i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) + i.rb.insertDecomposed(i.buf[0:outp]) + return doNormDecomposed(i) +} + +func doNormDecomposed(i *Iter) []byte { + for { + i.rb.insertUnsafe(i.rb.src, i.p, i.info) + if i.p += int(i.info.size); i.p >= i.rb.nsrc { + i.setDone() + break + } + i.info = i.rb.f.info(i.rb.src, i.p) + if i.info.ccc == 0 { + break + } + if s := i.rb.ss.next(i.info); s == ssOverflow { + i.next = nextCGJDecompose + break + } + } + // new segment or too many combining characters: exit normalization + return i.buf[:i.rb.flushCopy(i.buf[:])] +} + +func nextCGJDecompose(i *Iter) []byte { + i.rb.ss = 0 + i.rb.insertCGJ() + i.next = nextDecomposed + i.rb.ss.first(i.info) + buf := doNormDecomposed(i) + return buf +} + +// nextComposed is the implementation of Next for forms NFC and NFKC. +func nextComposed(i *Iter) []byte { + outp, startp := 0, i.p + var prevCC uint8 + for { + if !i.info.isYesC() { + goto doNorm + } + prevCC = i.info.tccc + sz := int(i.info.size) + if sz == 0 { + sz = 1 // illegal rune: copy byte-by-byte + } + p := outp + sz + if p > len(i.buf) { + break + } + outp = p + i.p += sz + if i.p >= i.rb.nsrc { + i.setDone() + break + } else if i.rb.src._byte(i.p) < utf8.RuneSelf { + i.rb.ss = 0 + i.next = i.asciiF + break + } + i.info = i.rb.f.info(i.rb.src, i.p) + if v := i.rb.ss.next(i.info); v == ssStarter { + break + } else if v == ssOverflow { + i.next = nextCGJCompose + break + } + if i.info.ccc < prevCC { + goto doNorm + } + } + return i.returnSlice(startp, i.p) +doNorm: + // reset to start position + i.p = startp + i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) + if i.info.multiSegment() { + d := i.info.Decomposition() + info := i.rb.f.info(input{bytes: d}, 0) + i.rb.insertUnsafe(input{bytes: d}, 0, info) + i.multiSeg = d[int(info.size):] + i.next = nextMultiNorm + return nextMultiNorm(i) + } + i.rb.ss.first(i.info) + i.rb.insertUnsafe(i.rb.src, i.p, i.info) + return doNormComposed(i) +} + +func doNormComposed(i *Iter) []byte { + // First rune should already be inserted. + for { + if i.p += int(i.info.size); i.p >= i.rb.nsrc { + i.setDone() + break + } + i.info = i.rb.f.info(i.rb.src, i.p) + if s := i.rb.ss.next(i.info); s == ssStarter { + break + } else if s == ssOverflow { + i.next = nextCGJCompose + break + } + i.rb.insertUnsafe(i.rb.src, i.p, i.info) + } + i.rb.compose() + seg := i.buf[:i.rb.flushCopy(i.buf[:])] + return seg +} + +func nextCGJCompose(i *Iter) []byte { + i.rb.ss = 0 // instead of first + i.rb.insertCGJ() + i.next = nextComposed + // Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter, + // even if they are not. This is particularly dubious for U+FF9E and UFF9A. + // If we ever change that, insert a check here. + i.rb.ss.first(i.info) + i.rb.insertUnsafe(i.rb.src, i.p, i.info) + return doNormComposed(i) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/normalize.go new file mode 100644 index 00000000..e28ac641 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -0,0 +1,609 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Note: the file data_test.go that is generated should not be checked in. 
+//go:generate go run maketables.go triegen.go +//go:generate go test -tags test + +// Package norm contains types and functions for normalizing Unicode strings. +package norm // import "golang.org/x/text/unicode/norm" + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// A Form denotes a canonical representation of Unicode code points. +// The Unicode-defined normalization and equivalence forms are: +// +// NFC Unicode Normalization Form C +// NFD Unicode Normalization Form D +// NFKC Unicode Normalization Form KC +// NFKD Unicode Normalization Form KD +// +// For a Form f, this documentation uses the notation f(x) to mean +// the bytes or string x converted to the given form. +// A position n in x is called a boundary if conversion to the form can +// proceed independently on both sides: +// f(x) == append(f(x[0:n]), f(x[n:])...) +// +// References: http://unicode.org/reports/tr15/ and +// http://unicode.org/notes/tn5/. +type Form int + +const ( + NFC Form = iota + NFD + NFKC + NFKD +) + +// Bytes returns f(b). May return b if f(b) = b. +func (f Form) Bytes(b []byte) []byte { + src := inputBytes(b) + ft := formTable[f] + n, ok := ft.quickSpan(src, 0, len(b), true) + if ok { + return b + } + out := make([]byte, n, len(b)) + copy(out, b[0:n]) + rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush} + return doAppendInner(&rb, n) +} + +// String returns f(s). +func (f Form) String(s string) string { + src := inputString(s) + ft := formTable[f] + n, ok := ft.quickSpan(src, 0, len(s), true) + if ok { + return s + } + out := make([]byte, n, len(s)) + copy(out, s[0:n]) + rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush} + return string(doAppendInner(&rb, n)) +} + +// IsNormal returns true if b == f(b). +func (f Form) IsNormal(b []byte) bool { + src := inputBytes(b) + ft := formTable[f] + bp, ok := ft.quickSpan(src, 0, len(b), true) + if ok { + return true + } + rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)} + rb.setFlusher(nil, cmpNormalBytes) + for bp < len(b) { + rb.out = b[bp:] + if bp = decomposeSegment(&rb, bp, true); bp < 0 { + return false + } + bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true) + } + return true +} + +func cmpNormalBytes(rb *reorderBuffer) bool { + b := rb.out + for i := 0; i < rb.nrune; i++ { + info := rb.rune[i] + if int(info.size) > len(b) { + return false + } + p := info.pos + pe := p + info.size + for ; p < pe; p++ { + if b[0] != rb.byte[p] { + return false + } + b = b[1:] + } + } + return true +} + +// IsNormalString returns true if s == f(s). +func (f Form) IsNormalString(s string) bool { + src := inputString(s) + ft := formTable[f] + bp, ok := ft.quickSpan(src, 0, len(s), true) + if ok { + return true + } + rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)} + rb.setFlusher(nil, func(rb *reorderBuffer) bool { + for i := 0; i < rb.nrune; i++ { + info := rb.rune[i] + if bp+int(info.size) > len(s) { + return false + } + p := info.pos + pe := p + info.size + for ; p < pe; p++ { + if s[bp] != rb.byte[p] { + return false + } + bp++ + } + } + return true + }) + for bp < len(s) { + if bp = decomposeSegment(&rb, bp, true); bp < 0 { + return false + } + bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true) + } + return true +} + +// patchTail fixes a case where a rune may be incorrectly normalized +// if it is followed by illegal continuation bytes. It returns the +// patched buffer and whether the decomposition is still in progress. 
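For most callers the Form methods above (Bytes, String, IsNormal, IsNormalString) are the whole API. A minimal round-trip sketch with an assumed sample string:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	s := "e\u0301" // 'e' followed by U+0301 COMBINING ACUTE ACCENT

	fmt.Println(norm.NFC.IsNormalString(s)) // false: the pair composes
	c := norm.NFC.String(s)
	fmt.Println(c == "\u00e9")           // true: a single precomposed code point
	fmt.Println(norm.NFD.String(c) == s) // true: NFD undoes the composition
}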
+func patchTail(rb *reorderBuffer) bool { + info, p := lastRuneStart(&rb.f, rb.out) + if p == -1 || info.size == 0 { + return true + } + end := p + int(info.size) + extra := len(rb.out) - end + if extra > 0 { + // Potentially allocating memory. However, this only + // happens with ill-formed UTF-8. + x := make([]byte, 0) + x = append(x, rb.out[len(rb.out)-extra:]...) + rb.out = rb.out[:end] + decomposeToLastBoundary(rb) + rb.doFlush() + rb.out = append(rb.out, x...) + return false + } + buf := rb.out[p:] + rb.out = rb.out[:p] + decomposeToLastBoundary(rb) + if s := rb.ss.next(info); s == ssStarter { + rb.doFlush() + rb.ss.first(info) + } else if s == ssOverflow { + rb.doFlush() + rb.insertCGJ() + rb.ss = 0 + } + rb.insertUnsafe(inputBytes(buf), 0, info) + return true +} + +func appendQuick(rb *reorderBuffer, i int) int { + if rb.nsrc == i { + return i + } + end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true) + rb.out = rb.src.appendSlice(rb.out, i, end) + return end +} + +// Append returns f(append(out, b...)). +// The buffer out must be nil, empty, or equal to f(out). +func (f Form) Append(out []byte, src ...byte) []byte { + return f.doAppend(out, inputBytes(src), len(src)) +} + +func (f Form) doAppend(out []byte, src input, n int) []byte { + if n == 0 { + return out + } + ft := formTable[f] + // Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer. + if len(out) == 0 { + p, _ := ft.quickSpan(src, 0, n, true) + out = src.appendSlice(out, 0, p) + if p == n { + return out + } + rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush} + return doAppendInner(&rb, p) + } + rb := reorderBuffer{f: *ft, src: src, nsrc: n} + return doAppend(&rb, out, 0) +} + +func doAppend(rb *reorderBuffer, out []byte, p int) []byte { + rb.setFlusher(out, appendFlush) + src, n := rb.src, rb.nsrc + doMerge := len(out) > 0 + if q := src.skipContinuationBytes(p); q > p { + // Move leading non-starters to destination. + rb.out = src.appendSlice(rb.out, p, q) + p = q + doMerge = patchTail(rb) + } + fd := &rb.f + if doMerge { + var info Properties + if p < n { + info = fd.info(src, p) + if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 { + if p == 0 { + decomposeToLastBoundary(rb) + } + p = decomposeSegment(rb, p, true) + } + } + if info.size == 0 { + rb.doFlush() + // Append incomplete UTF-8 encoding. + return src.appendSlice(rb.out, p, n) + } + if rb.nrune > 0 { + return doAppendInner(rb, p) + } + } + p = appendQuick(rb, p) + return doAppendInner(rb, p) +} + +func doAppendInner(rb *reorderBuffer, p int) []byte { + for n := rb.nsrc; p < n; { + p = decomposeSegment(rb, p, true) + p = appendQuick(rb, p) + } + return rb.out +} + +// AppendString returns f(append(out, []byte(s))). +// The buffer out must be nil, empty, or equal to f(out). +func (f Form) AppendString(out []byte, src string) []byte { + return f.doAppend(out, inputString(src), len(src)) +} + +// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]). +// It is not guaranteed to return the largest such n. +func (f Form) QuickSpan(b []byte) int { + n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true) + return n +} + +// Span implements transform.SpanningTransformer. It returns a boundary n such +// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n. 
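Append, AppendString and the span functions above are the incremental entry points: the out buffer must already be in the target form, and a quick span lets callers skip prefixes that are already normalized. A sketch; the inputs and the expected values in the comments are my own reading of the code, not something it asserts:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// Build a normalized buffer incrementally; a combining mark appended
	// later still composes with the 'e' already in the buffer.
	var out []byte
	out = norm.NFC.AppendString(out, "e")
	out = norm.NFC.AppendString(out, "\u0301")
	fmt.Printf("%q\n", out) // "é"

	// QuickSpanString reports a prefix that is already NFC; here it stops
	// before the 'c', which would compose with the following U+0301.
	fmt.Println(norm.NFC.QuickSpanString("abc\u0301def"))
}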
+func (f Form) Span(b []byte, atEOF bool) (n int, err error) { + n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF) + if n < len(b) { + if !ok { + err = transform.ErrEndOfSpan + } else { + err = transform.ErrShortSrc + } + } + return n, err +} + +// SpanString returns a boundary n such that s[0:n] == f(s[0:n]). +// It is not guaranteed to return the largest such n. +func (f Form) SpanString(s string, atEOF bool) (n int, err error) { + n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF) + if n < len(s) { + if !ok { + err = transform.ErrEndOfSpan + } else { + err = transform.ErrShortSrc + } + } + return n, err +} + +// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and +// whether any non-normalized parts were found. If atEOF is false, n will +// not point past the last segment if this segment might be become +// non-normalized by appending other runes. +func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) { + var lastCC uint8 + ss := streamSafe(0) + lastSegStart := i + for n = end; i < n; { + if j := src.skipASCII(i, n); i != j { + i = j + lastSegStart = i - 1 + lastCC = 0 + ss = 0 + continue + } + info := f.info(src, i) + if info.size == 0 { + if atEOF { + // include incomplete runes + return n, true + } + return lastSegStart, true + } + // This block needs to be before the next, because it is possible to + // have an overflow for runes that are starters (e.g. with U+FF9E). + switch ss.next(info) { + case ssStarter: + lastSegStart = i + case ssOverflow: + return lastSegStart, false + case ssSuccess: + if lastCC > info.ccc { + return lastSegStart, false + } + } + if f.composing { + if !info.isYesC() { + break + } + } else { + if !info.isYesD() { + break + } + } + lastCC = info.ccc + i += int(info.size) + } + if i == n { + if !atEOF { + n = lastSegStart + } + return n, true + } + return lastSegStart, false +} + +// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]). +// It is not guaranteed to return the largest such n. +func (f Form) QuickSpanString(s string) int { + n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true) + return n +} + +// FirstBoundary returns the position i of the first boundary in b +// or -1 if b contains no boundary. +func (f Form) FirstBoundary(b []byte) int { + return f.firstBoundary(inputBytes(b), len(b)) +} + +func (f Form) firstBoundary(src input, nsrc int) int { + i := src.skipContinuationBytes(0) + if i >= nsrc { + return -1 + } + fd := formTable[f] + ss := streamSafe(0) + // We should call ss.first here, but we can't as the first rune is + // skipped already. This means FirstBoundary can't really determine + // CGJ insertion points correctly. Luckily it doesn't have to. + for { + info := fd.info(src, i) + if info.size == 0 { + return -1 + } + if s := ss.next(info); s != ssSuccess { + return i + } + i += int(info.size) + if i >= nsrc { + if !info.BoundaryAfter() && !ss.isMax() { + return -1 + } + return nsrc + } + } +} + +// FirstBoundaryInString returns the position i of the first boundary in s +// or -1 if s contains no boundary. +func (f Form) FirstBoundaryInString(s string) int { + return f.firstBoundary(inputString(s), len(s)) +} + +// NextBoundary reports the index of the boundary between the first and next +// segment in b or -1 if atEOF is false and there are not enough bytes to +// determine this boundary. 
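The boundary functions above report byte offsets at which text can be cut without splitting a segment; the atEOF flag controls whether a trailing, possibly still-growing segment counts. A short sketch with assumed inputs:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	b := []byte("e\u0301e\u0300") // two segments of 3 bytes each: e+acute, e+grave

	// Byte offset of the boundary between the first and second segment.
	fmt.Println(norm.NFC.NextBoundary(b, true)) // 3

	// With atEOF=false the trailing segment is still open: more combining
	// marks could follow, so no boundary is reported yet.
	fmt.Println(norm.NFC.NextBoundary([]byte("e\u0301"), false)) // -1
}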
+func (f Form) NextBoundary(b []byte, atEOF bool) int { + return f.nextBoundary(inputBytes(b), len(b), atEOF) +} + +// NextBoundaryInString reports the index of the boundary between the first and +// next segment in b or -1 if atEOF is false and there are not enough bytes to +// determine this boundary. +func (f Form) NextBoundaryInString(s string, atEOF bool) int { + return f.nextBoundary(inputString(s), len(s), atEOF) +} + +func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int { + if nsrc == 0 { + if atEOF { + return 0 + } + return -1 + } + fd := formTable[f] + info := fd.info(src, 0) + if info.size == 0 { + if atEOF { + return 1 + } + return -1 + } + ss := streamSafe(0) + ss.first(info) + + for i := int(info.size); i < nsrc; i += int(info.size) { + info = fd.info(src, i) + if info.size == 0 { + if atEOF { + return i + } + return -1 + } + // TODO: Using streamSafe to determine the boundary isn't the same as + // using BoundaryBefore. Determine which should be used. + if s := ss.next(info); s != ssSuccess { + return i + } + } + if !atEOF && !info.BoundaryAfter() && !ss.isMax() { + return -1 + } + return nsrc +} + +// LastBoundary returns the position i of the last boundary in b +// or -1 if b contains no boundary. +func (f Form) LastBoundary(b []byte) int { + return lastBoundary(formTable[f], b) +} + +func lastBoundary(fd *formInfo, b []byte) int { + i := len(b) + info, p := lastRuneStart(fd, b) + if p == -1 { + return -1 + } + if info.size == 0 { // ends with incomplete rune + if p == 0 { // starts with incomplete rune + return -1 + } + i = p + info, p = lastRuneStart(fd, b[:i]) + if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter + return i + } + } + if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8 + return i + } + if info.BoundaryAfter() { + return i + } + ss := streamSafe(0) + v := ss.backwards(info) + for i = p; i >= 0 && v != ssStarter; i = p { + info, p = lastRuneStart(fd, b[:i]) + if v = ss.backwards(info); v == ssOverflow { + break + } + if p+int(info.size) != i { + if p == -1 { // no boundary found + return -1 + } + return i // boundary after an illegal UTF-8 encoding + } + } + return i +} + +// decomposeSegment scans the first segment in src into rb. It inserts 0x034f +// (Grapheme Joiner) when it encounters a sequence of more than 30 non-starters +// and returns the number of bytes consumed from src or iShortDst or iShortSrc. +func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int { + // Force one character to be consumed. + info := rb.f.info(rb.src, sp) + if info.size == 0 { + return 0 + } + if s := rb.ss.next(info); s == ssStarter { + // TODO: this could be removed if we don't support merging. + if rb.nrune > 0 { + goto end + } + } else if s == ssOverflow { + rb.insertCGJ() + goto end + } + if err := rb.insertFlush(rb.src, sp, info); err != iSuccess { + return int(err) + } + for { + sp += int(info.size) + if sp >= rb.nsrc { + if !atEOF && !info.BoundaryAfter() { + return int(iShortSrc) + } + break + } + info = rb.f.info(rb.src, sp) + if info.size == 0 { + if !atEOF { + return int(iShortSrc) + } + break + } + if s := rb.ss.next(info); s == ssStarter { + break + } else if s == ssOverflow { + rb.insertCGJ() + break + } + if err := rb.insertFlush(rb.src, sp, info); err != iSuccess { + return int(err) + } + } +end: + if !rb.doFlush() { + return int(iShortDst) + } + return sp +} + +// lastRuneStart returns the runeInfo and position of the last +// rune in buf or the zero runeInfo and -1 if no rune was found. 
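decomposeSegment above enforces the stream-safe limit: once a run of more than 30 non-starters is seen it inserts U+034F (COMBINING GRAPHEME JOINER) rather than letting a segment grow without bound. That behavior should be observable through the public API, as in this sketch with an input of my own:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// A starter followed by 31 combining acute accents exceeds the
	// 30-non-starter limit, so the normalized output gains a CGJ.
	s := "e" + strings.Repeat("\u0301", 31)
	fmt.Println(strings.ContainsRune(norm.NFC.String(s), '\u034F')) // true
}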
+func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) { + p := len(buf) - 1 + for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- { + } + if p < 0 { + return Properties{}, -1 + } + return fd.info(inputBytes(buf), p), p +} + +// decomposeToLastBoundary finds an open segment at the end of the buffer +// and scans it into rb. Returns the buffer minus the last segment. +func decomposeToLastBoundary(rb *reorderBuffer) { + fd := &rb.f + info, i := lastRuneStart(fd, rb.out) + if int(info.size) != len(rb.out)-i { + // illegal trailing continuation bytes + return + } + if info.BoundaryAfter() { + return + } + var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order + padd := 0 + ss := streamSafe(0) + p := len(rb.out) + for { + add[padd] = info + v := ss.backwards(info) + if v == ssOverflow { + // Note that if we have an overflow, it the string we are appending to + // is not correctly normalized. In this case the behavior is undefined. + break + } + padd++ + p -= int(info.size) + if v == ssStarter || p < 0 { + break + } + info, i = lastRuneStart(fd, rb.out[:p]) + if int(info.size) != p-i { + break + } + } + rb.ss = ss + // Copy bytes for insertion as we may need to overwrite rb.out. + var buf [maxBufferSize * utf8.UTFMax]byte + cp := buf[:copy(buf[:], rb.out[p:])] + rb.out = rb.out[:p] + for padd--; padd >= 0; padd-- { + info = add[padd] + rb.insertUnsafe(inputBytes(cp), 0, info) + cp = cp[info.size:] + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/readwriter.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/readwriter.go new file mode 100644 index 00000000..d926ee90 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/readwriter.go @@ -0,0 +1,125 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import "io" + +type normWriter struct { + rb reorderBuffer + w io.Writer + buf []byte +} + +// Write implements the standard write interface. If the last characters are +// not at a normalization boundary, the bytes will be buffered for the next +// write. The remaining bytes will be written on close. +func (w *normWriter) Write(data []byte) (n int, err error) { + // Process data in pieces to keep w.buf size bounded. + const chunk = 4000 + + for len(data) > 0 { + // Normalize into w.buf. + m := len(data) + if m > chunk { + m = chunk + } + w.rb.src = inputBytes(data[:m]) + w.rb.nsrc = m + w.buf = doAppend(&w.rb, w.buf, 0) + data = data[m:] + n += m + + // Write out complete prefix, save remainder. + // Note that lastBoundary looks back at most 31 runes. + i := lastBoundary(&w.rb.f, w.buf) + if i == -1 { + i = 0 + } + if i > 0 { + if _, err = w.w.Write(w.buf[:i]); err != nil { + break + } + bn := copy(w.buf, w.buf[i:]) + w.buf = w.buf[:bn] + } + } + return n, err +} + +// Close forces data that remains in the buffer to be written. +func (w *normWriter) Close() error { + if len(w.buf) > 0 { + _, err := w.w.Write(w.buf) + if err != nil { + return err + } + } + return nil +} + +// Writer returns a new writer that implements Write(b) +// by writing f(b) to w. The returned writer may use an +// an internal buffer to maintain state across Write calls. +// Calling its Close method writes any buffered data to w. 
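Writer above, together with Reader just below, wraps the same machinery for streaming use: input is buffered across calls until a normalization boundary is seen, so a combining mark that arrives in a later Write still attaches to the character written before it. A sketch; ioutil.ReadAll is used only to keep it short:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// Bytes are NFC-normalized before they reach the underlying writer;
	// the combining mark written later still merges with the earlier 'e'.
	var buf bytes.Buffer
	w := norm.NFC.Writer(&buf)
	io.WriteString(w, "e")
	io.WriteString(w, "\u0301")
	w.Close()
	fmt.Printf("%q\n", buf.String()) // "é"

	// Reader normalizes in the other direction while reading.
	r := norm.NFD.Reader(strings.NewReader("\u00e9"))
	out, _ := ioutil.ReadAll(r)
	fmt.Printf("%q\n", out) // "e" followed by U+0301
}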
+func (f Form) Writer(w io.Writer) io.WriteCloser { + wr := &normWriter{rb: reorderBuffer{}, w: w} + wr.rb.init(f, nil) + return wr +} + +type normReader struct { + rb reorderBuffer + r io.Reader + inbuf []byte + outbuf []byte + bufStart int + lastBoundary int + err error +} + +// Read implements the standard read interface. +func (r *normReader) Read(p []byte) (int, error) { + for { + if r.lastBoundary-r.bufStart > 0 { + n := copy(p, r.outbuf[r.bufStart:r.lastBoundary]) + r.bufStart += n + if r.lastBoundary-r.bufStart > 0 { + return n, nil + } + return n, r.err + } + if r.err != nil { + return 0, r.err + } + outn := copy(r.outbuf, r.outbuf[r.lastBoundary:]) + r.outbuf = r.outbuf[0:outn] + r.bufStart = 0 + + n, err := r.r.Read(r.inbuf) + r.rb.src = inputBytes(r.inbuf[0:n]) + r.rb.nsrc, r.err = n, err + if n > 0 { + r.outbuf = doAppend(&r.rb, r.outbuf, 0) + } + if err == io.EOF { + r.lastBoundary = len(r.outbuf) + } else { + r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf) + if r.lastBoundary == -1 { + r.lastBoundary = 0 + } + } + } +} + +// Reader returns a new reader that implements Read +// by reading data from r and returning f(data). +func (f Form) Reader(r io.Reader) io.Reader { + const chunk = 4000 + buf := make([]byte, chunk) + rr := &normReader{rb: reorderBuffer{}, r: r, inbuf: buf} + rr.rb.init(f, buf) + return rr +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go new file mode 100644 index 00000000..44dd3978 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -0,0 +1,7653 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.10 + +package norm + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "10.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. 
+ MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2C9E + endMulti = 0x2F60 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 + maxDecomp = 0x8000 +) + +// decomps: 19105 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, + 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 
0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 
0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 
0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 
0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 
0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 
0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 
0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 
0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 
0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 
0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 
+ 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 
0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + // Bytes 1a80 - 1abf + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + // Bytes 1ac0 - 1aff + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + // Bytes 1b00 - 1b3f + 0x43, 0x28, 
0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + // Bytes 1b40 - 1b7f + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + // Bytes 1b80 - 1bbf + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + // Bytes 1bc0 - 1bff + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + // Bytes 1c00 - 1c3f + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + // Bytes 1c40 - 1c7f + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + // Bytes 1c80 - 1cbf + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + // Bytes 1cc0 - 1cff + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + // Bytes 1d00 - 1d3f + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 
0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + // Bytes 1d40 - 1d7f + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + // Bytes 1d80 - 1dbf + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + // Bytes 1dc0 - 1dff + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + // Bytes 1e00 - 1e3f + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + // Bytes 1e40 - 1e7f + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1e80 - 1ebf + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + // Bytes 1ec0 - 1eff + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + // Bytes 1f00 - 1f3f + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 
0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + // Bytes 1f40 - 1f7f + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + // Bytes 1f80 - 1fbf + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + // Bytes 1fc0 - 1fff + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + // Bytes 2000 - 203f + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + // Bytes 2040 - 207f + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + // Bytes 2100 - 213f + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, 
+ 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + // Bytes 21c0 - 21ff + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + // Bytes 2240 - 227f + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + // Bytes 2280 - 22bf + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + // Bytes 2300 - 233f + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + // Bytes 2340 - 237f + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 
0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + // Bytes 2380 - 23bf + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + // Bytes 23c0 - 23ff + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + // Bytes 2400 - 243f + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + // Bytes 2440 - 247f + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + // Bytes 2480 - 24bf + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + // Bytes 24c0 - 24ff + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2500 - 253f + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + // Bytes 2540 - 257f + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2580 - 25bf + 0x46, 0xD9, 
0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + // Bytes 25c0 - 25ff + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + // Bytes 2640 - 267f + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + // Bytes 26c0 - 26ff + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, + // Bytes 2700 - 273f + 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, + 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, + 0xE6, 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, + 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, + 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + // Bytes 2740 - 277f + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, + // Bytes 2780 - 27bf + 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 
0xE1, 0x84, 0x91, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, + 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, + // Bytes 27c0 - 27ff + 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, + 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, + 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, + 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, + 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, + // Bytes 2800 - 283f + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, + 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, + // Bytes 2840 - 287f + 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, + 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, + // Bytes 2880 - 28bf + 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, + 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, + 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, + 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, + // Bytes 28c0 - 28ff + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, + 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, + 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, + // Bytes 2940 - 297f + 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, + 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, + 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, + // Bytes 2980 - 29bf + 0xA2, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, + 
0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, + // Bytes 29c0 - 29ff + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2a40 - 2a7f + 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, + 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, + // Bytes 2b00 - 2b3f + 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, + 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, + // Bytes 2bc0 - 2bff + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 
+ 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, + 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, + // Bytes 2c80 - 2cbf + 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, + 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, + 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2cc0 - 2cff + 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, + 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2d00 - 2d3f + 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, + 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, + 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, + 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, + // Bytes 2d80 - 2dbf + 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, + 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, + 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, + 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, + 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, + 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, + 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, + 0xBA, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, + // Bytes 2dc0 - 2dff + 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, + 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, + 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, + 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, + 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0xE0, 
0xB7, 0x8A, 0x12, 0x44, 0x44, + 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC, + 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9, + // Bytes 2e00 - 2e3f + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e40 - 2e7f + 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, + 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e80 - 2ebf + 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, + 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, + 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83, + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, + // Bytes 2f00 - 2f3f + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, + 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, + 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, + 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, + 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81, + 0xC9, 0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41, + // Bytes 2f80 - 2fbf + 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9, + 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC, + 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03, + 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8, + 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42, + 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5, + 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC, + 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03, + // Bytes 2fc0 - 2fff + 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87, + 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44, + 0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5, + 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC, + 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03, + 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83, + 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45, + 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9, + // Bytes 3000 - 303f + 0x03, 0x45, 
0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC, + 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03, + 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8, + 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45, + 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9, + 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC, + 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03, + 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87, + // Bytes 3040 - 307f + 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47, + 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9, + 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC, + 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03, + 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7, + 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49, + 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9, + 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC, + // Bytes 3080 - 30bf + 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03, + 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87, + 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49, + 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9, + 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC, + 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03, + 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82, + 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B, + // Bytes 30c0 - 30ff + 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5, + 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC, + 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03, + 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7, + 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C, + 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9, + 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC, + 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03, + // Bytes 3100 - 313f + 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83, + 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E, + 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5, + 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC, + 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03, + 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81, + 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F, + 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9, + // Bytes 3140 - 317f + 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC, + 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03, + 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87, + 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52, + 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9, + 0x03, 0x52, 0xCC, 0x8F, 0xC9, 0x03, 0x52, 0xCC, + 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03, + 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82, + // Bytes 3180 - 31bf + 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53, + 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5, + 0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC, + 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03, + 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7, + 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54, + 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9, + 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC, + // Bytes 31c0 - 31ff + 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03, + 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A, + 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55, + 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9, + 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC, + 0xA3, 0xB5, 0x03, 0x55, 0xCC, 0xA4, 0xB5, 0x03, + 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD, + 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56, + // Bytes 3200 - 323f + 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5, + 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC, + 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03, + 0x57, 0xCC, 0x87, 0xC9, 0x03, 
0x57, 0xCC, 0x88, + 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58, + 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9, + 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC, + 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03, + // Bytes 3240 - 327f + 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84, + 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9, + 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC, + 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03, + 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C, + 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, + 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9, + // Bytes 3280 - 32bf + 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC, + 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03, + 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C, + 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61, + 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5, + 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC, + 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03, + 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81, + // Bytes 32c0 - 32ff + 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63, + 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9, + 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC, + 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03, + 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD, + 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65, + 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9, + 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC, + // Bytes 3300 - 333f + 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03, + 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89, + 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65, + 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9, + 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC, + 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03, + 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81, + 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67, + // Bytes 3340 - 337f + 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9, + 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, + 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03, + 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87, + 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68, + 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5, + 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC, + 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3380 - 33bf + 0x69, 0xCC, 0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81, + 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69, + 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9, + 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC, + 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03, + 0x69, 0xCC, 0x8F, 0xC9, 0x03, 0x69, 0xCC, 0x91, + 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69, + 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5, + // Bytes 33c0 - 33ff + 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC, + 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03, + 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3, + 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B, + 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9, + 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC, + 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03, + 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81, + // Bytes 3400 - 343f + 0xC9, 0x03, 0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D, + 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9, + 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC, + 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03, + 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3, + 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E, + 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5, + 
0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC, + // Bytes 3440 - 347f + 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03, + 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B, + 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F, + 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9, + 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC, + 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03, + 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C, + 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72, + // Bytes 3480 - 34bf + 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5, + 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC, + 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03, + 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7, + 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74, + 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9, + 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC, + 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03, + // Bytes 34c0 - 34ff + 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1, + 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75, + 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9, + 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC, + 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03, + 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C, + 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75, + 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5, + // Bytes 3500 - 353f + 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC, + 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03, + 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83, + 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77, + 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9, + 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC, + 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03, + 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3, + // Bytes 3540 - 357f + 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78, + 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9, + 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC, + 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03, + 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87, + 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9, + 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC, + // Bytes 3580 - 35bf + 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03, + 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C, + 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, + 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 0xA8, 0xCC, 0x80, + 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04, + 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86, + 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84, + 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04, + // Bytes 35c0 - 35ff + 0xC3, 0xA6, 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6, + 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81, + 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04, + 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92, + 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85, + // Bytes 3600 - 363f + 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xD9, 0x04, + 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F, + // Bytes 3640 - 367f + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04, 
+ 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04, + // Bytes 3680 - 36bf + 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85, + 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7, + 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82, + // Bytes 36c0 - 36ff + 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81, + 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94, + 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04, + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85, + 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86, + 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04, + 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92, + // Bytes 3700 - 373f + 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81, + 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3740 - 377f + 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84, + 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04, + 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A, + 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04, + 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, + // Bytes 3780 - 37bf + 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6, + // Bytes 37c0 - 37ff + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8, + 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04, + 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83, + 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86, + 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3800 - 383f + 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87, + 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88, + 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04, + 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4, + 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, + 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04, + 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8, + 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88, + // Bytes 3840 - 387f + 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94, + 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04, + 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92, + 0xD9, 0x94, 0xC9, 
0x04, 0xDB, 0x95, 0xD9, 0x94, + 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3880 - 38bf + 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, + 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA, + 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, + 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41, + // Bytes 38c0 - 38ff + 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7, + 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, + // Bytes 3900 - 393f + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7, + 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83, + // Bytes 3940 - 397f + 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + // Bytes 3980 - 39bf + 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, + 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53, + 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, + 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3, + 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, + // Bytes 39c0 - 39ff + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + // Bytes 3a00 - 3a3f + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, + 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA, + 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + // Bytes 3a40 - 3a7f + 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61, + 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC, + 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, + 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65, + // Bytes 3a80 - 3abf + 0xCC, 0x84, 
0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC, + 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + // Bytes 3ac0 - 3aff + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, + 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + // Bytes 3b00 - 3b3f + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72, + 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC, + 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C, + 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC, + // Bytes 3b40 - 3b7f + 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, + // Bytes 3b80 - 3bbf + 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE, + 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, + 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, + // Bytes 3bc0 - 3bff + 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c00 - 3c3f + 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3c40 - 3c7f + 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3c80 - 3cbf + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x92, 
0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, + // Bytes 3cc0 - 3cff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, + 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + // Bytes 3d00 - 3d3f + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3d40 - 3d7f + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + // Bytes 3d80 - 3dbf + 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + // Bytes 3dc0 - 3dff + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + // Bytes 3e00 - 3e3f + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3e40 - 3e7f + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + // Bytes 3e80 - 3ebf + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 
0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + // Bytes 3ec0 - 3eff + 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85, + 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11, + // Bytes 3f00 - 3f3f + 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f40 - 3f7f + 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f80 - 3fbf + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D, + // Bytes 3fc0 - 3fff + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4000 - 403f + 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4040 - 407f + 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4080 - 40bf + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 40c0 - 40ff + 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D, 
+ 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + // Bytes 4100 - 413f + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + // Bytes 4140 - 417f + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, + // Bytes 4180 - 41bf + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + // Bytes 41c0 - 41ff + 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + // Bytes 4200 - 423f + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, + // Bytes 4240 - 427f + 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, + 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, + 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2, + 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xC9, 0x43, + 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84, + 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20, + 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9, + 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC, + // Bytes 4280 - 42bf + 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43, + 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94, + 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20, + 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5, + 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD, + 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43, + 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D, + 0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20, + // Bytes 42c0 - 42ff + 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, + 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, + 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 
0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, + // Bytes 4300 - 433f + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, + // Bytes 4340 - 437f + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, + // Bytes 4380 - 43bf + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, + // Bytes 43c0 - 43ff + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, + // Bytes 4400 - 443f + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, + // Bytes 4440 - 447f + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, + 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, + // Bytes 4480 - 44bf + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, + // Bytes 44c0 - 44ff + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, + 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, + // Bytes 4500 - 453f + 0xBC, 0xD7, 
0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, + // Bytes 4600 - 463f + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, + 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 
0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, + // Bytes 4740 - 477f + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + // Bytes 4780 - 47bf + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, + // Bytes 47c0 - 47ff + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + // Bytes 4800 - 483f + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, + // Bytes 4840 - 487f + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, + // Bytes 4880 - 48bf + 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + // Bytes 48c0 - 48ff + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + // Bytes 4900 - 493f + 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 
0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + // Bytes 4940 - 497f + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + // Bytes 4980 - 49bf + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, + // Bytes 49c0 - 49ff + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, + // Bytes 4a00 - 4a3f + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, + // Bytes 4a40 - 4a7f + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, + // Bytes 4a80 - 4abf + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00, + 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. 
Total size: 10442 bytes (10.20 KiB). Checksum: 4ba400a9d8208e03. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 45: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 45 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 47 blocks, 3008 entries, 6016 bytes +// The third block is the zero block. +var nfcValues = [3008]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, + // Block 0x5, offset 0x140 + 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 
0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 
0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a1, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b, + 0x2c6: 0xa000, 0x2c7: 0x3709, + 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3733, 0x302: 0x37b7, + 0x310: 0x370f, 0x311: 0x3793, + 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd, + 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf, + 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed, + 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805, + 0x338: 0x3787, 0x339: 0x380b, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xa000, + 0x3c6: 0x2d26, 0x3c7: 0xa000, 0x3c8: 0x2d2e, 0x3c9: 0xa000, 0x3ca: 0x2d36, 0x3cb: 0xa000, + 0x3cc: 0x2d3e, 0x3cd: 0xa000, 0x3ce: 0x2d46, 0x3d1: 0xa000, + 0x3d2: 0x2d4e, + 0x3f4: 0x8102, 0x3f5: 0x9900, + 0x3fa: 0xa000, 0x3fb: 0x2d56, + 0x3fc: 0xa000, 0x3fd: 0x2d5e, 0x3fe: 0xa000, 0x3ff: 0xa000, + // Block 0x10, offset 0x400 + 0x400: 0x8132, 0x401: 0x8132, 0x402: 0x812d, 0x403: 0x8132, 0x404: 0x8132, 0x405: 0x8132, + 0x406: 0x8132, 0x407: 0x8132, 0x408: 0x8132, 0x409: 0x8132, 0x40a: 0x812d, 0x40b: 0x8132, + 0x40c: 0x8132, 0x40d: 0x8135, 0x40e: 0x812a, 0x40f: 0x812d, 0x410: 0x8129, 0x411: 0x8132, 
+ 0x412: 0x8132, 0x413: 0x8132, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x422: 0x8132, 0x423: 0x8132, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x8132, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x8132, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x8132, 0x42e: 0x8132, 0x42f: 0x8132, + 0x430: 0x8132, 0x431: 0x8132, 0x432: 0x8132, 0x433: 0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x8133, 0x437: 0x8131, 0x438: 0x8131, 0x439: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8134, 0x43d: 0x812d, 0x43e: 0x8132, 0x43f: 0x812d, + // Block 0x11, offset 0x440 + 0x440: 0x2f97, 0x441: 0x32a3, 0x442: 0x2fa1, 0x443: 0x32ad, 0x444: 0x2fa6, 0x445: 0x32b2, + 0x446: 0x2fab, 0x447: 0x32b7, 0x448: 0x38cc, 0x449: 0x3a5b, 0x44a: 0x2fc4, 0x44b: 0x32d0, + 0x44c: 0x2fce, 0x44d: 0x32da, 0x44e: 0x2fdd, 0x44f: 0x32e9, 0x450: 0x2fd3, 0x451: 0x32df, + 0x452: 0x2fd8, 0x453: 0x32e4, 0x454: 0x38ef, 0x455: 0x3a7e, 0x456: 0x38f6, 0x457: 0x3a85, + 0x458: 0x3019, 0x459: 0x3325, 0x45a: 0x301e, 0x45b: 0x332a, 0x45c: 0x3904, 0x45d: 0x3a93, + 0x45e: 0x3023, 0x45f: 0x332f, 0x460: 0x3032, 0x461: 0x333e, 0x462: 0x3050, 0x463: 0x335c, + 0x464: 0x305f, 0x465: 0x336b, 0x466: 0x3055, 0x467: 0x3361, 0x468: 0x3064, 0x469: 0x3370, + 0x46a: 0x3069, 0x46b: 0x3375, 0x46c: 0x30af, 0x46d: 0x33bb, 0x46e: 0x390b, 0x46f: 0x3a9a, + 0x470: 0x30b9, 0x471: 0x33ca, 0x472: 0x30c3, 0x473: 0x33d4, 0x474: 0x30cd, 0x475: 0x33de, + 0x476: 0x46c4, 0x477: 0x4755, 0x478: 0x3912, 0x479: 0x3aa1, 0x47a: 0x30e6, 0x47b: 0x33f7, + 0x47c: 0x30e1, 0x47d: 0x33f2, 0x47e: 0x30eb, 0x47f: 0x33fc, + // Block 0x12, offset 0x480 + 0x480: 0x30f0, 0x481: 0x3401, 0x482: 0x30f5, 0x483: 0x3406, 0x484: 0x3109, 0x485: 0x341a, + 0x486: 0x3113, 0x487: 0x3424, 0x488: 0x3122, 0x489: 0x3433, 0x48a: 0x311d, 0x48b: 0x342e, + 0x48c: 0x3935, 0x48d: 0x3ac4, 0x48e: 0x3943, 0x48f: 0x3ad2, 0x490: 0x394a, 0x491: 0x3ad9, + 0x492: 0x3951, 0x493: 0x3ae0, 0x494: 0x314f, 0x495: 0x3460, 0x496: 0x3154, 0x497: 0x3465, + 0x498: 0x315e, 0x499: 0x346f, 0x49a: 0x46f1, 0x49b: 0x4782, 0x49c: 0x3997, 0x49d: 0x3b26, + 0x49e: 0x3177, 0x49f: 0x3488, 0x4a0: 0x3181, 0x4a1: 0x3492, 0x4a2: 0x4700, 0x4a3: 0x4791, + 0x4a4: 0x399e, 0x4a5: 0x3b2d, 0x4a6: 0x39a5, 0x4a7: 0x3b34, 0x4a8: 0x39ac, 0x4a9: 0x3b3b, + 0x4aa: 0x3190, 0x4ab: 0x34a1, 0x4ac: 0x319a, 0x4ad: 0x34b0, 0x4ae: 0x31ae, 0x4af: 0x34c4, + 0x4b0: 0x31a9, 0x4b1: 0x34bf, 0x4b2: 0x31ea, 0x4b3: 0x3500, 0x4b4: 0x31f9, 0x4b5: 0x350f, + 0x4b6: 0x31f4, 0x4b7: 0x350a, 0x4b8: 0x39b3, 0x4b9: 0x3b42, 0x4ba: 0x39ba, 0x4bb: 0x3b49, + 0x4bc: 0x31fe, 0x4bd: 0x3514, 0x4be: 0x3203, 0x4bf: 0x3519, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3208, 0x4c1: 0x351e, 0x4c2: 0x320d, 0x4c3: 0x3523, 0x4c4: 0x321c, 0x4c5: 0x3532, + 0x4c6: 0x3217, 0x4c7: 0x352d, 0x4c8: 0x3221, 0x4c9: 0x353c, 0x4ca: 0x3226, 0x4cb: 0x3541, + 0x4cc: 0x322b, 0x4cd: 0x3546, 0x4ce: 0x3249, 0x4cf: 0x3564, 0x4d0: 0x3262, 0x4d1: 0x3582, + 0x4d2: 0x3271, 0x4d3: 0x3591, 0x4d4: 0x3276, 0x4d5: 0x3596, 0x4d6: 0x337a, 0x4d7: 0x34a6, + 0x4d8: 0x3537, 0x4d9: 0x3573, 0x4db: 0x35d1, + 0x4e0: 0x46a1, 0x4e1: 0x4732, 0x4e2: 0x2f83, 0x4e3: 0x328f, + 0x4e4: 0x3878, 0x4e5: 0x3a07, 0x4e6: 0x3871, 0x4e7: 0x3a00, 0x4e8: 0x3886, 0x4e9: 0x3a15, + 0x4ea: 0x387f, 0x4eb: 0x3a0e, 0x4ec: 0x38be, 0x4ed: 0x3a4d, 0x4ee: 0x3894, 0x4ef: 0x3a23, + 0x4f0: 0x388d, 0x4f1: 0x3a1c, 0x4f2: 0x38a2, 0x4f3: 0x3a31, 0x4f4: 0x389b, 0x4f5: 0x3a2a, + 0x4f6: 0x38c5, 0x4f7: 0x3a54, 0x4f8: 0x46b5, 0x4f9: 
0x4746, 0x4fa: 0x3000, 0x4fb: 0x330c, + 0x4fc: 0x2fec, 0x4fd: 0x32f8, 0x4fe: 0x38da, 0x4ff: 0x3a69, + // Block 0x14, offset 0x500 + 0x500: 0x38d3, 0x501: 0x3a62, 0x502: 0x38e8, 0x503: 0x3a77, 0x504: 0x38e1, 0x505: 0x3a70, + 0x506: 0x38fd, 0x507: 0x3a8c, 0x508: 0x3091, 0x509: 0x339d, 0x50a: 0x30a5, 0x50b: 0x33b1, + 0x50c: 0x46e7, 0x50d: 0x4778, 0x50e: 0x3136, 0x50f: 0x3447, 0x510: 0x3920, 0x511: 0x3aaf, + 0x512: 0x3919, 0x513: 0x3aa8, 0x514: 0x392e, 0x515: 0x3abd, 0x516: 0x3927, 0x517: 0x3ab6, + 0x518: 0x3989, 0x519: 0x3b18, 0x51a: 0x396d, 0x51b: 0x3afc, 0x51c: 0x3966, 0x51d: 0x3af5, + 0x51e: 0x397b, 0x51f: 0x3b0a, 0x520: 0x3974, 0x521: 0x3b03, 0x522: 0x3982, 0x523: 0x3b11, + 0x524: 0x31e5, 0x525: 0x34fb, 0x526: 0x31c7, 0x527: 0x34dd, 0x528: 0x39e4, 0x529: 0x3b73, + 0x52a: 0x39dd, 0x52b: 0x3b6c, 0x52c: 0x39f2, 0x52d: 0x3b81, 0x52e: 0x39eb, 0x52f: 0x3b7a, + 0x530: 0x39f9, 0x531: 0x3b88, 0x532: 0x3230, 0x533: 0x354b, 0x534: 0x3258, 0x535: 0x3578, + 0x536: 0x3253, 0x537: 0x356e, 0x538: 0x323f, 0x539: 0x355a, + // Block 0x15, offset 0x540 + 0x540: 0x4804, 0x541: 0x480a, 0x542: 0x491e, 0x543: 0x4936, 0x544: 0x4926, 0x545: 0x493e, + 0x546: 0x492e, 0x547: 0x4946, 0x548: 0x47aa, 0x549: 0x47b0, 0x54a: 0x488e, 0x54b: 0x48a6, + 0x54c: 0x4896, 0x54d: 0x48ae, 0x54e: 0x489e, 0x54f: 0x48b6, 0x550: 0x4816, 0x551: 0x481c, + 0x552: 0x3db8, 0x553: 0x3dc8, 0x554: 0x3dc0, 0x555: 0x3dd0, + 0x558: 0x47b6, 0x559: 0x47bc, 0x55a: 0x3ce8, 0x55b: 0x3cf8, 0x55c: 0x3cf0, 0x55d: 0x3d00, + 0x560: 0x482e, 0x561: 0x4834, 0x562: 0x494e, 0x563: 0x4966, + 0x564: 0x4956, 0x565: 0x496e, 0x566: 0x495e, 0x567: 0x4976, 0x568: 0x47c2, 0x569: 0x47c8, + 0x56a: 0x48be, 0x56b: 0x48d6, 0x56c: 0x48c6, 0x56d: 0x48de, 0x56e: 0x48ce, 0x56f: 0x48e6, + 0x570: 0x4846, 0x571: 0x484c, 0x572: 0x3e18, 0x573: 0x3e30, 0x574: 0x3e20, 0x575: 0x3e38, + 0x576: 0x3e28, 0x577: 0x3e40, 0x578: 0x47ce, 0x579: 0x47d4, 0x57a: 0x3d18, 0x57b: 0x3d30, + 0x57c: 0x3d20, 0x57d: 0x3d38, 0x57e: 0x3d28, 0x57f: 0x3d40, + // Block 0x16, offset 0x580 + 0x580: 0x4852, 0x581: 0x4858, 0x582: 0x3e48, 0x583: 0x3e58, 0x584: 0x3e50, 0x585: 0x3e60, + 0x588: 0x47da, 0x589: 0x47e0, 0x58a: 0x3d48, 0x58b: 0x3d58, + 0x58c: 0x3d50, 0x58d: 0x3d60, 0x590: 0x4864, 0x591: 0x486a, + 0x592: 0x3e80, 0x593: 0x3e98, 0x594: 0x3e88, 0x595: 0x3ea0, 0x596: 0x3e90, 0x597: 0x3ea8, + 0x599: 0x47e6, 0x59b: 0x3d68, 0x59d: 0x3d70, + 0x59f: 0x3d78, 0x5a0: 0x487c, 0x5a1: 0x4882, 0x5a2: 0x497e, 0x5a3: 0x4996, + 0x5a4: 0x4986, 0x5a5: 0x499e, 0x5a6: 0x498e, 0x5a7: 0x49a6, 0x5a8: 0x47ec, 0x5a9: 0x47f2, + 0x5aa: 0x48ee, 0x5ab: 0x4906, 0x5ac: 0x48f6, 0x5ad: 0x490e, 0x5ae: 0x48fe, 0x5af: 0x4916, + 0x5b0: 0x47f8, 0x5b1: 0x431e, 0x5b2: 0x3691, 0x5b3: 0x4324, 0x5b4: 0x4822, 0x5b5: 0x432a, + 0x5b6: 0x36a3, 0x5b7: 0x4330, 0x5b8: 0x36c1, 0x5b9: 0x4336, 0x5ba: 0x36d9, 0x5bb: 0x433c, + 0x5bc: 0x4870, 0x5bd: 0x4342, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3da0, 0x5c1: 0x3da8, 0x5c2: 0x4184, 0x5c3: 0x41a2, 0x5c4: 0x418e, 0x5c5: 0x41ac, + 0x5c6: 0x4198, 0x5c7: 0x41b6, 0x5c8: 0x3cd8, 0x5c9: 0x3ce0, 0x5ca: 0x40d0, 0x5cb: 0x40ee, + 0x5cc: 0x40da, 0x5cd: 0x40f8, 0x5ce: 0x40e4, 0x5cf: 0x4102, 0x5d0: 0x3de8, 0x5d1: 0x3df0, + 0x5d2: 0x41c0, 0x5d3: 0x41de, 0x5d4: 0x41ca, 0x5d5: 0x41e8, 0x5d6: 0x41d4, 0x5d7: 0x41f2, + 0x5d8: 0x3d08, 0x5d9: 0x3d10, 0x5da: 0x410c, 0x5db: 0x412a, 0x5dc: 0x4116, 0x5dd: 0x4134, + 0x5de: 0x4120, 0x5df: 0x413e, 0x5e0: 0x3ec0, 0x5e1: 0x3ec8, 0x5e2: 0x41fc, 0x5e3: 0x421a, + 0x5e4: 0x4206, 0x5e5: 0x4224, 0x5e6: 0x4210, 0x5e7: 0x422e, 0x5e8: 0x3d80, 0x5e9: 0x3d88, + 0x5ea: 0x4148, 0x5eb: 0x4166, 0x5ec: 0x4152, 
0x5ed: 0x4170, 0x5ee: 0x415c, 0x5ef: 0x417a, + 0x5f0: 0x3685, 0x5f1: 0x367f, 0x5f2: 0x3d90, 0x5f3: 0x368b, 0x5f4: 0x3d98, + 0x5f6: 0x4810, 0x5f7: 0x3db0, 0x5f8: 0x35f5, 0x5f9: 0x35ef, 0x5fa: 0x35e3, 0x5fb: 0x42ee, + 0x5fc: 0x35fb, 0x5fd: 0x8100, 0x5fe: 0x01d3, 0x5ff: 0xa100, + // Block 0x18, offset 0x600 + 0x600: 0x8100, 0x601: 0x35a7, 0x602: 0x3dd8, 0x603: 0x369d, 0x604: 0x3de0, + 0x606: 0x483a, 0x607: 0x3df8, 0x608: 0x3601, 0x609: 0x42f4, 0x60a: 0x360d, 0x60b: 0x42fa, + 0x60c: 0x3619, 0x60d: 0x3b8f, 0x60e: 0x3b96, 0x60f: 0x3b9d, 0x610: 0x36b5, 0x611: 0x36af, + 0x612: 0x3e00, 0x613: 0x44e4, 0x616: 0x36bb, 0x617: 0x3e10, + 0x618: 0x3631, 0x619: 0x362b, 0x61a: 0x361f, 0x61b: 0x4300, 0x61d: 0x3ba4, + 0x61e: 0x3bab, 0x61f: 0x3bb2, 0x620: 0x36eb, 0x621: 0x36e5, 0x622: 0x3e68, 0x623: 0x44ec, + 0x624: 0x36cd, 0x625: 0x36d3, 0x626: 0x36f1, 0x627: 0x3e78, 0x628: 0x3661, 0x629: 0x365b, + 0x62a: 0x364f, 0x62b: 0x430c, 0x62c: 0x3649, 0x62d: 0x359b, 0x62e: 0x42e8, 0x62f: 0x0081, + 0x632: 0x3eb0, 0x633: 0x36f7, 0x634: 0x3eb8, + 0x636: 0x4888, 0x637: 0x3ed0, 0x638: 0x363d, 0x639: 0x4306, 0x63a: 0x366d, 0x63b: 0x4318, + 0x63c: 0x3679, 0x63d: 0x4256, 0x63e: 0xa100, + // Block 0x19, offset 0x640 + 0x641: 0x3c06, 0x643: 0xa000, 0x644: 0x3c0d, 0x645: 0xa000, + 0x647: 0x3c14, 0x648: 0xa000, 0x649: 0x3c1b, + 0x64d: 0xa000, + 0x660: 0x2f65, 0x661: 0xa000, 0x662: 0x3c29, + 0x664: 0xa000, 0x665: 0xa000, + 0x66d: 0x3c22, 0x66e: 0x2f60, 0x66f: 0x2f6a, + 0x670: 0x3c30, 0x671: 0x3c37, 0x672: 0xa000, 0x673: 0xa000, 0x674: 0x3c3e, 0x675: 0x3c45, + 0x676: 0xa000, 0x677: 0xa000, 0x678: 0x3c4c, 0x679: 0x3c53, 0x67a: 0xa000, 0x67b: 0xa000, + 0x67c: 0xa000, 0x67d: 0xa000, + // Block 0x1a, offset 0x680 + 0x680: 0x3c5a, 0x681: 0x3c61, 0x682: 0xa000, 0x683: 0xa000, 0x684: 0x3c76, 0x685: 0x3c7d, + 0x686: 0xa000, 0x687: 0xa000, 0x688: 0x3c84, 0x689: 0x3c8b, + 0x691: 0xa000, + 0x692: 0xa000, + 0x6a2: 0xa000, + 0x6a8: 0xa000, 0x6a9: 0xa000, + 0x6ab: 0xa000, 0x6ac: 0x3ca0, 0x6ad: 0x3ca7, 0x6ae: 0x3cae, 0x6af: 0x3cb5, + 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0xa000, 0x6b5: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c6: 0xa000, 0x6cb: 0xa000, + 0x6cc: 0x3f08, 0x6cd: 0xa000, 0x6ce: 0x3f10, 0x6cf: 0xa000, 0x6d0: 0x3f18, 0x6d1: 0xa000, + 0x6d2: 0x3f20, 0x6d3: 0xa000, 0x6d4: 0x3f28, 0x6d5: 0xa000, 0x6d6: 0x3f30, 0x6d7: 0xa000, + 0x6d8: 0x3f38, 0x6d9: 0xa000, 0x6da: 0x3f40, 0x6db: 0xa000, 0x6dc: 0x3f48, 0x6dd: 0xa000, + 0x6de: 0x3f50, 0x6df: 0xa000, 0x6e0: 0x3f58, 0x6e1: 0xa000, 0x6e2: 0x3f60, + 0x6e4: 0xa000, 0x6e5: 0x3f68, 0x6e6: 0xa000, 0x6e7: 0x3f70, 0x6e8: 0xa000, 0x6e9: 0x3f78, + 0x6ef: 0xa000, + 0x6f0: 0x3f80, 0x6f1: 0x3f88, 0x6f2: 0xa000, 0x6f3: 0x3f90, 0x6f4: 0x3f98, 0x6f5: 0xa000, + 0x6f6: 0x3fa0, 0x6f7: 0x3fa8, 0x6f8: 0xa000, 0x6f9: 0x3fb0, 0x6fa: 0x3fb8, 0x6fb: 0xa000, + 0x6fc: 0x3fc0, 0x6fd: 0x3fc8, + // Block 0x1c, offset 0x700 + 0x714: 0x3f00, + 0x719: 0x9903, 0x71a: 0x9903, 0x71b: 0x8100, 0x71c: 0x8100, 0x71d: 0xa000, + 0x71e: 0x3fd0, + 0x726: 0xa000, + 0x72b: 0xa000, 0x72c: 0x3fe0, 0x72d: 0xa000, 0x72e: 0x3fe8, 0x72f: 0xa000, + 0x730: 0x3ff0, 0x731: 0xa000, 0x732: 0x3ff8, 0x733: 0xa000, 0x734: 0x4000, 0x735: 0xa000, + 0x736: 0x4008, 0x737: 0xa000, 0x738: 0x4010, 0x739: 0xa000, 0x73a: 0x4018, 0x73b: 0xa000, + 0x73c: 0x4020, 0x73d: 0xa000, 0x73e: 0x4028, 0x73f: 0xa000, + // Block 0x1d, offset 0x740 + 0x740: 0x4030, 0x741: 0xa000, 0x742: 0x4038, 0x744: 0xa000, 0x745: 0x4040, + 0x746: 0xa000, 0x747: 0x4048, 0x748: 0xa000, 0x749: 0x4050, + 0x74f: 0xa000, 0x750: 0x4058, 0x751: 0x4060, + 0x752: 0xa000, 0x753: 0x4068, 0x754: 
0x4070, 0x755: 0xa000, 0x756: 0x4078, 0x757: 0x4080, + 0x758: 0xa000, 0x759: 0x4088, 0x75a: 0x4090, 0x75b: 0xa000, 0x75c: 0x4098, 0x75d: 0x40a0, + 0x76f: 0xa000, + 0x770: 0xa000, 0x771: 0xa000, 0x772: 0xa000, 0x774: 0x3fd8, + 0x777: 0x40a8, 0x778: 0x40b0, 0x779: 0x40b8, 0x77a: 0x40c0, + 0x77d: 0xa000, 0x77e: 0x40c8, + // Block 0x1e, offset 0x780 + 0x780: 0x1377, 0x781: 0x0cfb, 0x782: 0x13d3, 0x783: 0x139f, 0x784: 0x0e57, 0x785: 0x06eb, + 0x786: 0x08df, 0x787: 0x162b, 0x788: 0x162b, 0x789: 0x0a0b, 0x78a: 0x145f, 0x78b: 0x0943, + 0x78c: 0x0a07, 0x78d: 0x0bef, 0x78e: 0x0fcf, 0x78f: 0x115f, 0x790: 0x1297, 0x791: 0x12d3, + 0x792: 0x1307, 0x793: 0x141b, 0x794: 0x0d73, 0x795: 0x0dff, 0x796: 0x0eab, 0x797: 0x0f43, + 0x798: 0x125f, 0x799: 0x1447, 0x79a: 0x1573, 0x79b: 0x070f, 0x79c: 0x08b3, 0x79d: 0x0d87, + 0x79e: 0x0ecf, 0x79f: 0x1293, 0x7a0: 0x15c3, 0x7a1: 0x0ab3, 0x7a2: 0x0e77, 0x7a3: 0x1283, + 0x7a4: 0x1317, 0x7a5: 0x0c23, 0x7a6: 0x11bb, 0x7a7: 0x12df, 0x7a8: 0x0b1f, 0x7a9: 0x0d0f, + 0x7aa: 0x0e17, 0x7ab: 0x0f1b, 0x7ac: 0x1427, 0x7ad: 0x074f, 0x7ae: 0x07e7, 0x7af: 0x0853, + 0x7b0: 0x0c8b, 0x7b1: 0x0d7f, 0x7b2: 0x0ecb, 0x7b3: 0x0fef, 0x7b4: 0x1177, 0x7b5: 0x128b, + 0x7b6: 0x12a3, 0x7b7: 0x13c7, 0x7b8: 0x14ef, 0x7b9: 0x15a3, 0x7ba: 0x15bf, 0x7bb: 0x102b, + 0x7bc: 0x106b, 0x7bd: 0x1123, 0x7be: 0x1243, 0x7bf: 0x147b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x15cb, 0x7c1: 0x134b, 0x7c2: 0x09c7, 0x7c3: 0x0b3b, 0x7c4: 0x10db, 0x7c5: 0x119b, + 0x7c6: 0x0eff, 0x7c7: 0x1033, 0x7c8: 0x1397, 0x7c9: 0x14e7, 0x7ca: 0x09c3, 0x7cb: 0x0a8f, + 0x7cc: 0x0d77, 0x7cd: 0x0e2b, 0x7ce: 0x0e5f, 0x7cf: 0x1113, 0x7d0: 0x113b, 0x7d1: 0x14a7, + 0x7d2: 0x084f, 0x7d3: 0x11a7, 0x7d4: 0x07f3, 0x7d5: 0x07ef, 0x7d6: 0x1097, 0x7d7: 0x1127, + 0x7d8: 0x125b, 0x7d9: 0x14af, 0x7da: 0x1367, 0x7db: 0x0c27, 0x7dc: 0x0d73, 0x7dd: 0x1357, + 0x7de: 0x06f7, 0x7df: 0x0a63, 0x7e0: 0x0b93, 0x7e1: 0x0f2f, 0x7e2: 0x0faf, 0x7e3: 0x0873, + 0x7e4: 0x103b, 0x7e5: 0x075f, 0x7e6: 0x0b77, 0x7e7: 0x06d7, 0x7e8: 0x0deb, 0x7e9: 0x0ca3, + 0x7ea: 0x110f, 0x7eb: 0x08c7, 0x7ec: 0x09b3, 0x7ed: 0x0ffb, 0x7ee: 0x1263, 0x7ef: 0x133b, + 0x7f0: 0x0db7, 0x7f1: 0x13f7, 0x7f2: 0x0de3, 0x7f3: 0x0c37, 0x7f4: 0x121b, 0x7f5: 0x0c57, + 0x7f6: 0x0fab, 0x7f7: 0x072b, 0x7f8: 0x07a7, 0x7f9: 0x07eb, 0x7fa: 0x0d53, 0x7fb: 0x10fb, + 0x7fc: 0x11f3, 0x7fd: 0x1347, 0x7fe: 0x145b, 0x7ff: 0x085b, + // Block 0x20, offset 0x800 + 0x800: 0x090f, 0x801: 0x0a17, 0x802: 0x0b2f, 0x803: 0x0cbf, 0x804: 0x0e7b, 0x805: 0x103f, + 0x806: 0x1497, 0x807: 0x157b, 0x808: 0x15cf, 0x809: 0x15e7, 0x80a: 0x0837, 0x80b: 0x0cf3, + 0x80c: 0x0da3, 0x80d: 0x13eb, 0x80e: 0x0afb, 0x80f: 0x0bd7, 0x810: 0x0bf3, 0x811: 0x0c83, + 0x812: 0x0e6b, 0x813: 0x0eb7, 0x814: 0x0f67, 0x815: 0x108b, 0x816: 0x112f, 0x817: 0x1193, + 0x818: 0x13db, 0x819: 0x126b, 0x81a: 0x1403, 0x81b: 0x147f, 0x81c: 0x080f, 0x81d: 0x083b, + 0x81e: 0x0923, 0x81f: 0x0ea7, 0x820: 0x12f3, 0x821: 0x133b, 0x822: 0x0b1b, 0x823: 0x0b8b, + 0x824: 0x0c4f, 0x825: 0x0daf, 0x826: 0x10d7, 0x827: 0x0f23, 0x828: 0x073b, 0x829: 0x097f, + 0x82a: 0x0a63, 0x82b: 0x0ac7, 0x82c: 0x0b97, 0x82d: 0x0f3f, 0x82e: 0x0f5b, 0x82f: 0x116b, + 0x830: 0x118b, 0x831: 0x1463, 0x832: 0x14e3, 0x833: 0x14f3, 0x834: 0x152f, 0x835: 0x0753, + 0x836: 0x107f, 0x837: 0x144f, 0x838: 0x14cb, 0x839: 0x0baf, 0x83a: 0x0717, 0x83b: 0x0777, + 0x83c: 0x0a67, 0x83d: 0x0a87, 0x83e: 0x0caf, 0x83f: 0x0d73, + // Block 0x21, offset 0x840 + 0x840: 0x0ec3, 0x841: 0x0fcb, 0x842: 0x1277, 0x843: 0x1417, 0x844: 0x1623, 0x845: 0x0ce3, + 0x846: 0x14a3, 0x847: 0x0833, 0x848: 0x0d2f, 0x849: 0x0d3b, 0x84a: 0x0e0f, 
0x84b: 0x0e47, + 0x84c: 0x0f4b, 0x84d: 0x0fa7, 0x84e: 0x1027, 0x84f: 0x110b, 0x850: 0x153b, 0x851: 0x07af, + 0x852: 0x0c03, 0x853: 0x14b3, 0x854: 0x0767, 0x855: 0x0aab, 0x856: 0x0e2f, 0x857: 0x13df, + 0x858: 0x0b67, 0x859: 0x0bb7, 0x85a: 0x0d43, 0x85b: 0x0f2f, 0x85c: 0x14bb, 0x85d: 0x0817, + 0x85e: 0x08ff, 0x85f: 0x0a97, 0x860: 0x0cd3, 0x861: 0x0d1f, 0x862: 0x0d5f, 0x863: 0x0df3, + 0x864: 0x0f47, 0x865: 0x0fbb, 0x866: 0x1157, 0x867: 0x12f7, 0x868: 0x1303, 0x869: 0x1457, + 0x86a: 0x14d7, 0x86b: 0x0883, 0x86c: 0x0e4b, 0x86d: 0x0903, 0x86e: 0x0ec7, 0x86f: 0x0f6b, + 0x870: 0x1287, 0x871: 0x14bf, 0x872: 0x15ab, 0x873: 0x15d3, 0x874: 0x0d37, 0x875: 0x0e27, + 0x876: 0x11c3, 0x877: 0x10b7, 0x878: 0x10c3, 0x879: 0x10e7, 0x87a: 0x0f17, 0x87b: 0x0e9f, + 0x87c: 0x1363, 0x87d: 0x0733, 0x87e: 0x122b, 0x87f: 0x081b, + // Block 0x22, offset 0x880 + 0x880: 0x080b, 0x881: 0x0b0b, 0x882: 0x0c2b, 0x883: 0x10f3, 0x884: 0x0a53, 0x885: 0x0e03, + 0x886: 0x0cef, 0x887: 0x13e7, 0x888: 0x12e7, 0x889: 0x14ab, 0x88a: 0x1323, 0x88b: 0x0b27, + 0x88c: 0x0787, 0x88d: 0x095b, 0x890: 0x09af, + 0x892: 0x0cdf, 0x895: 0x07f7, 0x896: 0x0f1f, 0x897: 0x0fe3, + 0x898: 0x1047, 0x899: 0x1063, 0x89a: 0x1067, 0x89b: 0x107b, 0x89c: 0x14fb, 0x89d: 0x10eb, + 0x89e: 0x116f, 0x8a0: 0x128f, 0x8a2: 0x1353, + 0x8a5: 0x1407, 0x8a6: 0x1433, + 0x8aa: 0x154f, 0x8ab: 0x1553, 0x8ac: 0x1557, 0x8ad: 0x15bb, 0x8ae: 0x142b, 0x8af: 0x14c7, + 0x8b0: 0x0757, 0x8b1: 0x077b, 0x8b2: 0x078f, 0x8b3: 0x084b, 0x8b4: 0x0857, 0x8b5: 0x0897, + 0x8b6: 0x094b, 0x8b7: 0x0967, 0x8b8: 0x096f, 0x8b9: 0x09ab, 0x8ba: 0x09b7, 0x8bb: 0x0a93, + 0x8bc: 0x0a9b, 0x8bd: 0x0ba3, 0x8be: 0x0bcb, 0x8bf: 0x0bd3, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0beb, 0x8c1: 0x0c97, 0x8c2: 0x0cc7, 0x8c3: 0x0ce7, 0x8c4: 0x0d57, 0x8c5: 0x0e1b, + 0x8c6: 0x0e37, 0x8c7: 0x0e67, 0x8c8: 0x0ebb, 0x8c9: 0x0edb, 0x8ca: 0x0f4f, 0x8cb: 0x102f, + 0x8cc: 0x104b, 0x8cd: 0x1053, 0x8ce: 0x104f, 0x8cf: 0x1057, 0x8d0: 0x105b, 0x8d1: 0x105f, + 0x8d2: 0x1073, 0x8d3: 0x1077, 0x8d4: 0x109b, 0x8d5: 0x10af, 0x8d6: 0x10cb, 0x8d7: 0x112f, + 0x8d8: 0x1137, 0x8d9: 0x113f, 0x8da: 0x1153, 0x8db: 0x117b, 0x8dc: 0x11cb, 0x8dd: 0x11ff, + 0x8de: 0x11ff, 0x8df: 0x1267, 0x8e0: 0x130f, 0x8e1: 0x1327, 0x8e2: 0x135b, 0x8e3: 0x135f, + 0x8e4: 0x13a3, 0x8e5: 0x13a7, 0x8e6: 0x13ff, 0x8e7: 0x1407, 0x8e8: 0x14db, 0x8e9: 0x151f, + 0x8ea: 0x1537, 0x8eb: 0x0b9b, 0x8ec: 0x171e, 0x8ed: 0x11e3, + 0x8f0: 0x06df, 0x8f1: 0x07e3, 0x8f2: 0x07a3, 0x8f3: 0x074b, 0x8f4: 0x078b, 0x8f5: 0x07b7, + 0x8f6: 0x0847, 0x8f7: 0x0863, 0x8f8: 0x094b, 0x8f9: 0x0937, 0x8fa: 0x0947, 0x8fb: 0x0963, + 0x8fc: 0x09af, 0x8fd: 0x09bf, 0x8fe: 0x0a03, 0x8ff: 0x0a0f, + // Block 0x24, offset 0x900 + 0x900: 0x0a2b, 0x901: 0x0a3b, 0x902: 0x0b23, 0x903: 0x0b2b, 0x904: 0x0b5b, 0x905: 0x0b7b, + 0x906: 0x0bab, 0x907: 0x0bc3, 0x908: 0x0bb3, 0x909: 0x0bd3, 0x90a: 0x0bc7, 0x90b: 0x0beb, + 0x90c: 0x0c07, 0x90d: 0x0c5f, 0x90e: 0x0c6b, 0x90f: 0x0c73, 0x910: 0x0c9b, 0x911: 0x0cdf, + 0x912: 0x0d0f, 0x913: 0x0d13, 0x914: 0x0d27, 0x915: 0x0da7, 0x916: 0x0db7, 0x917: 0x0e0f, + 0x918: 0x0e5b, 0x919: 0x0e53, 0x91a: 0x0e67, 0x91b: 0x0e83, 0x91c: 0x0ebb, 0x91d: 0x1013, + 0x91e: 0x0edf, 0x91f: 0x0f13, 0x920: 0x0f1f, 0x921: 0x0f5f, 0x922: 0x0f7b, 0x923: 0x0f9f, + 0x924: 0x0fc3, 0x925: 0x0fc7, 0x926: 0x0fe3, 0x927: 0x0fe7, 0x928: 0x0ff7, 0x929: 0x100b, + 0x92a: 0x1007, 0x92b: 0x1037, 0x92c: 0x10b3, 0x92d: 0x10cb, 0x92e: 0x10e3, 0x92f: 0x111b, + 0x930: 0x112f, 0x931: 0x114b, 0x932: 0x117b, 0x933: 0x122f, 0x934: 0x1257, 0x935: 0x12cb, + 0x936: 0x1313, 0x937: 0x131f, 0x938: 0x1327, 0x939: 0x133f, 0x93a: 
0x1353, 0x93b: 0x1343, + 0x93c: 0x135b, 0x93d: 0x1357, 0x93e: 0x134f, 0x93f: 0x135f, + // Block 0x25, offset 0x940 + 0x940: 0x136b, 0x941: 0x13a7, 0x942: 0x13e3, 0x943: 0x1413, 0x944: 0x144b, 0x945: 0x146b, + 0x946: 0x14b7, 0x947: 0x14db, 0x948: 0x14fb, 0x949: 0x150f, 0x94a: 0x151f, 0x94b: 0x152b, + 0x94c: 0x1537, 0x94d: 0x158b, 0x94e: 0x162b, 0x94f: 0x16b5, 0x950: 0x16b0, 0x951: 0x16e2, + 0x952: 0x0607, 0x953: 0x062f, 0x954: 0x0633, 0x955: 0x1764, 0x956: 0x1791, 0x957: 0x1809, + 0x958: 0x1617, 0x959: 0x1627, + // Block 0x26, offset 0x980 + 0x980: 0x06fb, 0x981: 0x06f3, 0x982: 0x0703, 0x983: 0x1647, 0x984: 0x0747, 0x985: 0x0757, + 0x986: 0x075b, 0x987: 0x0763, 0x988: 0x076b, 0x989: 0x076f, 0x98a: 0x077b, 0x98b: 0x0773, + 0x98c: 0x05b3, 0x98d: 0x165b, 0x98e: 0x078f, 0x98f: 0x0793, 0x990: 0x0797, 0x991: 0x07b3, + 0x992: 0x164c, 0x993: 0x05b7, 0x994: 0x079f, 0x995: 0x07bf, 0x996: 0x1656, 0x997: 0x07cf, + 0x998: 0x07d7, 0x999: 0x0737, 0x99a: 0x07df, 0x99b: 0x07e3, 0x99c: 0x1831, 0x99d: 0x07ff, + 0x99e: 0x0807, 0x99f: 0x05bf, 0x9a0: 0x081f, 0x9a1: 0x0823, 0x9a2: 0x082b, 0x9a3: 0x082f, + 0x9a4: 0x05c3, 0x9a5: 0x0847, 0x9a6: 0x084b, 0x9a7: 0x0857, 0x9a8: 0x0863, 0x9a9: 0x0867, + 0x9aa: 0x086b, 0x9ab: 0x0873, 0x9ac: 0x0893, 0x9ad: 0x0897, 0x9ae: 0x089f, 0x9af: 0x08af, + 0x9b0: 0x08b7, 0x9b1: 0x08bb, 0x9b2: 0x08bb, 0x9b3: 0x08bb, 0x9b4: 0x166a, 0x9b5: 0x0e93, + 0x9b6: 0x08cf, 0x9b7: 0x08d7, 0x9b8: 0x166f, 0x9b9: 0x08e3, 0x9ba: 0x08eb, 0x9bb: 0x08f3, + 0x9bc: 0x091b, 0x9bd: 0x0907, 0x9be: 0x0913, 0x9bf: 0x0917, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x091f, 0x9c1: 0x0927, 0x9c2: 0x092b, 0x9c3: 0x0933, 0x9c4: 0x093b, 0x9c5: 0x093f, + 0x9c6: 0x093f, 0x9c7: 0x0947, 0x9c8: 0x094f, 0x9c9: 0x0953, 0x9ca: 0x095f, 0x9cb: 0x0983, + 0x9cc: 0x0967, 0x9cd: 0x0987, 0x9ce: 0x096b, 0x9cf: 0x0973, 0x9d0: 0x080b, 0x9d1: 0x09cf, + 0x9d2: 0x0997, 0x9d3: 0x099b, 0x9d4: 0x099f, 0x9d5: 0x0993, 0x9d6: 0x09a7, 0x9d7: 0x09a3, + 0x9d8: 0x09bb, 0x9d9: 0x1674, 0x9da: 0x09d7, 0x9db: 0x09db, 0x9dc: 0x09e3, 0x9dd: 0x09ef, + 0x9de: 0x09f7, 0x9df: 0x0a13, 0x9e0: 0x1679, 0x9e1: 0x167e, 0x9e2: 0x0a1f, 0x9e3: 0x0a23, + 0x9e4: 0x0a27, 0x9e5: 0x0a1b, 0x9e6: 0x0a2f, 0x9e7: 0x05c7, 0x9e8: 0x05cb, 0x9e9: 0x0a37, + 0x9ea: 0x0a3f, 0x9eb: 0x0a3f, 0x9ec: 0x1683, 0x9ed: 0x0a5b, 0x9ee: 0x0a5f, 0x9ef: 0x0a63, + 0x9f0: 0x0a6b, 0x9f1: 0x1688, 0x9f2: 0x0a73, 0x9f3: 0x0a77, 0x9f4: 0x0b4f, 0x9f5: 0x0a7f, + 0x9f6: 0x05cf, 0x9f7: 0x0a8b, 0x9f8: 0x0a9b, 0x9f9: 0x0aa7, 0x9fa: 0x0aa3, 0x9fb: 0x1692, + 0x9fc: 0x0aaf, 0x9fd: 0x1697, 0x9fe: 0x0abb, 0x9ff: 0x0ab7, + // Block 0x28, offset 0xa00 + 0xa00: 0x0abf, 0xa01: 0x0acf, 0xa02: 0x0ad3, 0xa03: 0x05d3, 0xa04: 0x0ae3, 0xa05: 0x0aeb, + 0xa06: 0x0aef, 0xa07: 0x0af3, 0xa08: 0x05d7, 0xa09: 0x169c, 0xa0a: 0x05db, 0xa0b: 0x0b0f, + 0xa0c: 0x0b13, 0xa0d: 0x0b17, 0xa0e: 0x0b1f, 0xa0f: 0x1863, 0xa10: 0x0b37, 0xa11: 0x16a6, + 0xa12: 0x16a6, 0xa13: 0x11d7, 0xa14: 0x0b47, 0xa15: 0x0b47, 0xa16: 0x05df, 0xa17: 0x16c9, + 0xa18: 0x179b, 0xa19: 0x0b57, 0xa1a: 0x0b5f, 0xa1b: 0x05e3, 0xa1c: 0x0b73, 0xa1d: 0x0b83, + 0xa1e: 0x0b87, 0xa1f: 0x0b8f, 0xa20: 0x0b9f, 0xa21: 0x05eb, 0xa22: 0x05e7, 0xa23: 0x0ba3, + 0xa24: 0x16ab, 0xa25: 0x0ba7, 0xa26: 0x0bbb, 0xa27: 0x0bbf, 0xa28: 0x0bc3, 0xa29: 0x0bbf, + 0xa2a: 0x0bcf, 0xa2b: 0x0bd3, 0xa2c: 0x0be3, 0xa2d: 0x0bdb, 0xa2e: 0x0bdf, 0xa2f: 0x0be7, + 0xa30: 0x0beb, 0xa31: 0x0bef, 0xa32: 0x0bfb, 0xa33: 0x0bff, 0xa34: 0x0c17, 0xa35: 0x0c1f, + 0xa36: 0x0c2f, 0xa37: 0x0c43, 0xa38: 0x16ba, 0xa39: 0x0c3f, 0xa3a: 0x0c33, 0xa3b: 0x0c4b, + 0xa3c: 0x0c53, 0xa3d: 0x0c67, 0xa3e: 0x16bf, 0xa3f: 0x0c6f, + 
// Block 0x29, offset 0xa40 + 0xa40: 0x0c63, 0xa41: 0x0c5b, 0xa42: 0x05ef, 0xa43: 0x0c77, 0xa44: 0x0c7f, 0xa45: 0x0c87, + 0xa46: 0x0c7b, 0xa47: 0x05f3, 0xa48: 0x0c97, 0xa49: 0x0c9f, 0xa4a: 0x16c4, 0xa4b: 0x0ccb, + 0xa4c: 0x0cff, 0xa4d: 0x0cdb, 0xa4e: 0x05ff, 0xa4f: 0x0ce7, 0xa50: 0x05fb, 0xa51: 0x05f7, + 0xa52: 0x07c3, 0xa53: 0x07c7, 0xa54: 0x0d03, 0xa55: 0x0ceb, 0xa56: 0x11ab, 0xa57: 0x0663, + 0xa58: 0x0d0f, 0xa59: 0x0d13, 0xa5a: 0x0d17, 0xa5b: 0x0d2b, 0xa5c: 0x0d23, 0xa5d: 0x16dd, + 0xa5e: 0x0603, 0xa5f: 0x0d3f, 0xa60: 0x0d33, 0xa61: 0x0d4f, 0xa62: 0x0d57, 0xa63: 0x16e7, + 0xa64: 0x0d5b, 0xa65: 0x0d47, 0xa66: 0x0d63, 0xa67: 0x0607, 0xa68: 0x0d67, 0xa69: 0x0d6b, + 0xa6a: 0x0d6f, 0xa6b: 0x0d7b, 0xa6c: 0x16ec, 0xa6d: 0x0d83, 0xa6e: 0x060b, 0xa6f: 0x0d8f, + 0xa70: 0x16f1, 0xa71: 0x0d93, 0xa72: 0x060f, 0xa73: 0x0d9f, 0xa74: 0x0dab, 0xa75: 0x0db7, + 0xa76: 0x0dbb, 0xa77: 0x16f6, 0xa78: 0x168d, 0xa79: 0x16fb, 0xa7a: 0x0ddb, 0xa7b: 0x1700, + 0xa7c: 0x0de7, 0xa7d: 0x0def, 0xa7e: 0x0ddf, 0xa7f: 0x0dfb, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0e0b, 0xa81: 0x0e1b, 0xa82: 0x0e0f, 0xa83: 0x0e13, 0xa84: 0x0e1f, 0xa85: 0x0e23, + 0xa86: 0x1705, 0xa87: 0x0e07, 0xa88: 0x0e3b, 0xa89: 0x0e3f, 0xa8a: 0x0613, 0xa8b: 0x0e53, + 0xa8c: 0x0e4f, 0xa8d: 0x170a, 0xa8e: 0x0e33, 0xa8f: 0x0e6f, 0xa90: 0x170f, 0xa91: 0x1714, + 0xa92: 0x0e73, 0xa93: 0x0e87, 0xa94: 0x0e83, 0xa95: 0x0e7f, 0xa96: 0x0617, 0xa97: 0x0e8b, + 0xa98: 0x0e9b, 0xa99: 0x0e97, 0xa9a: 0x0ea3, 0xa9b: 0x1651, 0xa9c: 0x0eb3, 0xa9d: 0x1719, + 0xa9e: 0x0ebf, 0xa9f: 0x1723, 0xaa0: 0x0ed3, 0xaa1: 0x0edf, 0xaa2: 0x0ef3, 0xaa3: 0x1728, + 0xaa4: 0x0f07, 0xaa5: 0x0f0b, 0xaa6: 0x172d, 0xaa7: 0x1732, 0xaa8: 0x0f27, 0xaa9: 0x0f37, + 0xaaa: 0x061b, 0xaab: 0x0f3b, 0xaac: 0x061f, 0xaad: 0x061f, 0xaae: 0x0f53, 0xaaf: 0x0f57, + 0xab0: 0x0f5f, 0xab1: 0x0f63, 0xab2: 0x0f6f, 0xab3: 0x0623, 0xab4: 0x0f87, 0xab5: 0x1737, + 0xab6: 0x0fa3, 0xab7: 0x173c, 0xab8: 0x0faf, 0xab9: 0x16a1, 0xaba: 0x0fbf, 0xabb: 0x1741, + 0xabc: 0x1746, 0xabd: 0x174b, 0xabe: 0x0627, 0xabf: 0x062b, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0ff7, 0xac1: 0x1755, 0xac2: 0x1750, 0xac3: 0x175a, 0xac4: 0x175f, 0xac5: 0x0fff, + 0xac6: 0x1003, 0xac7: 0x1003, 0xac8: 0x100b, 0xac9: 0x0633, 0xaca: 0x100f, 0xacb: 0x0637, + 0xacc: 0x063b, 0xacd: 0x1769, 0xace: 0x1023, 0xacf: 0x102b, 0xad0: 0x1037, 0xad1: 0x063f, + 0xad2: 0x176e, 0xad3: 0x105b, 0xad4: 0x1773, 0xad5: 0x1778, 0xad6: 0x107b, 0xad7: 0x1093, + 0xad8: 0x0643, 0xad9: 0x109b, 0xada: 0x109f, 0xadb: 0x10a3, 0xadc: 0x177d, 0xadd: 0x1782, + 0xade: 0x1782, 0xadf: 0x10bb, 0xae0: 0x0647, 0xae1: 0x1787, 0xae2: 0x10cf, 0xae3: 0x10d3, + 0xae4: 0x064b, 0xae5: 0x178c, 0xae6: 0x10ef, 0xae7: 0x064f, 0xae8: 0x10ff, 0xae9: 0x10f7, + 0xaea: 0x1107, 0xaeb: 0x1796, 0xaec: 0x111f, 0xaed: 0x0653, 0xaee: 0x112b, 0xaef: 0x1133, + 0xaf0: 0x1143, 0xaf1: 0x0657, 0xaf2: 0x17a0, 0xaf3: 0x17a5, 0xaf4: 0x065b, 0xaf5: 0x17aa, + 0xaf6: 0x115b, 0xaf7: 0x17af, 0xaf8: 0x1167, 0xaf9: 0x1173, 0xafa: 0x117b, 0xafb: 0x17b4, + 0xafc: 0x17b9, 0xafd: 0x118f, 0xafe: 0x17be, 0xaff: 0x1197, + // Block 0x2c, offset 0xb00 + 0xb00: 0x16ce, 0xb01: 0x065f, 0xb02: 0x11af, 0xb03: 0x11b3, 0xb04: 0x0667, 0xb05: 0x11b7, + 0xb06: 0x0a33, 0xb07: 0x17c3, 0xb08: 0x17c8, 0xb09: 0x16d3, 0xb0a: 0x16d8, 0xb0b: 0x11d7, + 0xb0c: 0x11db, 0xb0d: 0x13f3, 0xb0e: 0x066b, 0xb0f: 0x1207, 0xb10: 0x1203, 0xb11: 0x120b, + 0xb12: 0x083f, 0xb13: 0x120f, 0xb14: 0x1213, 0xb15: 0x1217, 0xb16: 0x121f, 0xb17: 0x17cd, + 0xb18: 0x121b, 0xb19: 0x1223, 0xb1a: 0x1237, 0xb1b: 0x123b, 0xb1c: 0x1227, 0xb1d: 0x123f, + 0xb1e: 0x1253, 0xb1f: 
0x1267, 0xb20: 0x1233, 0xb21: 0x1247, 0xb22: 0x124b, 0xb23: 0x124f, + 0xb24: 0x17d2, 0xb25: 0x17dc, 0xb26: 0x17d7, 0xb27: 0x066f, 0xb28: 0x126f, 0xb29: 0x1273, + 0xb2a: 0x127b, 0xb2b: 0x17f0, 0xb2c: 0x127f, 0xb2d: 0x17e1, 0xb2e: 0x0673, 0xb2f: 0x0677, + 0xb30: 0x17e6, 0xb31: 0x17eb, 0xb32: 0x067b, 0xb33: 0x129f, 0xb34: 0x12a3, 0xb35: 0x12a7, + 0xb36: 0x12ab, 0xb37: 0x12b7, 0xb38: 0x12b3, 0xb39: 0x12bf, 0xb3a: 0x12bb, 0xb3b: 0x12cb, + 0xb3c: 0x12c3, 0xb3d: 0x12c7, 0xb3e: 0x12cf, 0xb3f: 0x067f, + // Block 0x2d, offset 0xb40 + 0xb40: 0x12d7, 0xb41: 0x12db, 0xb42: 0x0683, 0xb43: 0x12eb, 0xb44: 0x12ef, 0xb45: 0x17f5, + 0xb46: 0x12fb, 0xb47: 0x12ff, 0xb48: 0x0687, 0xb49: 0x130b, 0xb4a: 0x05bb, 0xb4b: 0x17fa, + 0xb4c: 0x17ff, 0xb4d: 0x068b, 0xb4e: 0x068f, 0xb4f: 0x1337, 0xb50: 0x134f, 0xb51: 0x136b, + 0xb52: 0x137b, 0xb53: 0x1804, 0xb54: 0x138f, 0xb55: 0x1393, 0xb56: 0x13ab, 0xb57: 0x13b7, + 0xb58: 0x180e, 0xb59: 0x1660, 0xb5a: 0x13c3, 0xb5b: 0x13bf, 0xb5c: 0x13cb, 0xb5d: 0x1665, + 0xb5e: 0x13d7, 0xb5f: 0x13e3, 0xb60: 0x1813, 0xb61: 0x1818, 0xb62: 0x1423, 0xb63: 0x142f, + 0xb64: 0x1437, 0xb65: 0x181d, 0xb66: 0x143b, 0xb67: 0x1467, 0xb68: 0x1473, 0xb69: 0x1477, + 0xb6a: 0x146f, 0xb6b: 0x1483, 0xb6c: 0x1487, 0xb6d: 0x1822, 0xb6e: 0x1493, 0xb6f: 0x0693, + 0xb70: 0x149b, 0xb71: 0x1827, 0xb72: 0x0697, 0xb73: 0x14d3, 0xb74: 0x0ac3, 0xb75: 0x14eb, + 0xb76: 0x182c, 0xb77: 0x1836, 0xb78: 0x069b, 0xb79: 0x069f, 0xb7a: 0x1513, 0xb7b: 0x183b, + 0xb7c: 0x06a3, 0xb7d: 0x1840, 0xb7e: 0x152b, 0xb7f: 0x152b, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1533, 0xb81: 0x1845, 0xb82: 0x154b, 0xb83: 0x06a7, 0xb84: 0x155b, 0xb85: 0x1567, + 0xb86: 0x156f, 0xb87: 0x1577, 0xb88: 0x06ab, 0xb89: 0x184a, 0xb8a: 0x158b, 0xb8b: 0x15a7, + 0xb8c: 0x15b3, 0xb8d: 0x06af, 0xb8e: 0x06b3, 0xb8f: 0x15b7, 0xb90: 0x184f, 0xb91: 0x06b7, + 0xb92: 0x1854, 0xb93: 0x1859, 0xb94: 0x185e, 0xb95: 0x15db, 0xb96: 0x06bb, 0xb97: 0x15ef, + 0xb98: 0x15f7, 0xb99: 0x15fb, 0xb9a: 0x1603, 0xb9b: 0x160b, 0xb9c: 0x1613, 0xb9d: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2d, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2e, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x2f, 0xcb: 0x30, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x31, + 0xd0: 0x09, 0xd1: 0x32, 0xd2: 0x33, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x34, + 0xd8: 0x35, 0xd9: 0x0c, 0xdb: 0x36, 0xdc: 0x37, 0xdd: 0x38, 0xdf: 0x39, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3a, 0x121: 0x3b, 0x123: 0x3c, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0d, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0e, + 0x178: 0x0f, 0x179: 0x10, 0x17a: 0x11, 0x17b: 0x12, 0x17c: 0x13, 0x17d: 0x14, 0x17e: 0x15, 0x17f: 0x16, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x17, 0x18a: 0x18, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x19, 0x1c2: 0x1a, 0x1c3: 0x1b, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 
0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1c, 0x325: 0x1d, 0x326: 0x1e, 0x327: 0x1f, + 0x328: 0x20, 0x329: 0x21, 0x32a: 0x22, 0x32b: 0x23, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + // Block 0xe, offset 0x380 + 0x381: 0xa1, 0x382: 0xa2, 0x384: 0xa3, 0x385: 0x82, 0x387: 0xa4, + 0x388: 0xa5, 0x38b: 0xa6, 0x38c: 0x3f, 0x38d: 0xa7, + 0x391: 0xa8, 0x392: 0xa9, 0x393: 0xaa, 0x396: 0xab, 0x397: 0xac, + 0x398: 0x73, 0x39a: 0xad, 0x39c: 0xae, + 0x3a8: 0xaf, 0x3a9: 0xb0, 0x3aa: 0xb1, + 0x3b0: 0x73, 0x3b5: 0xb2, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb3, 0x3ec: 0xb4, + // Block 0x10, offset 0x400 + 0x432: 0xb5, + // Block 0x11, offset 0x440 + 0x445: 0xb6, 0x446: 0xb7, 0x447: 0xb8, + 0x449: 0xb9, + // Block 0x12, offset 0x480 + 0x480: 0xba, + 0x4a3: 0xbb, 0x4a5: 0xbc, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xbd, + // Block 0x14, offset 0x500 + 0x520: 0x24, 0x521: 0x25, 0x522: 0x26, 0x523: 0x27, 0x524: 0x28, 0x525: 0x29, 0x526: 0x2a, 0x527: 0x2b, + 0x528: 0x2c, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 145 entries, 290 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x62, 0x67, 0x69, 0x7a, 0x82, 0x89, 0x8c, 0x93, 0x97, 0x9b, 0x9d, 0x9f, 0xa8, 0xac, 0xb3, 0xb8, 0xbb, 0xc5, 0xc8, 0xcf, 0xd7, 0xda, 0xdc, 0xde, 0xe0, 0xe5, 0xf6, 0x102, 0x104, 0x10a, 0x10c, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x119, 0x11c, 0x11e, 0x121, 0x124, 0x128, 0x12d, 0x136, 0x138, 0x13b, 0x13d, 0x148, 0x14c, 0x15a, 0x15d, 0x163, 0x169, 0x174, 0x178, 0x17a, 0x17c, 0x17e, 0x180, 0x182, 0x188, 0x18c, 0x18e, 0x190, 0x198, 0x19c, 0x19f, 0x1a1, 0x1a3, 0x1a5, 0x1a8, 0x1aa, 0x1ac, 0x1ae, 0x1b0, 0x1b6, 0x1b9, 0x1bb, 0x1c2, 0x1c8, 0x1ce, 0x1d6, 0x1dc, 0x1e2, 0x1e8, 0x1ec, 0x1fa, 0x203, 0x206, 0x209, 0x20b, 0x20e, 0x210, 0x214, 0x219, 0x21b, 0x21d, 0x222, 0x228, 0x22a, 0x22c, 0x22e, 0x234, 0x237, 0x23a, 0x242, 0x249, 0x24c, 0x24f, 0x251, 0x259, 0x25c, 0x263, 0x266, 0x26c, 0x26e, 0x271, 0x273, 0x275, 0x277, 0x279, 0x27c, 0x27e, 0x280, 0x282, 0x28f, 0x299, 0x29b, 0x29d, 0x2a3, 0x2a5, 0x2a8} + +// nfcSparseValues: 682 entries, 2728 bytes +var nfcSparseValues = [682]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, 
lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, + {value: 0x36c7, lo: 0x8c, hi: 0x8c}, + {value: 0x36df, lo: 0x8d, hi: 0x8d}, + {value: 0x4876, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x36fd, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 
0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + // Block 0xd, offset 0x62 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x67 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x69 + {value: 0x0000, lo: 0x10}, + {value: 0x8132, lo: 0x94, hi: 0xa1}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xa9, hi: 0xa9}, + {value: 0x8132, lo: 0xaa, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xaf}, + {value: 0x8116, lo: 0xb0, hi: 0xb0}, + {value: 0x8117, lo: 0xb1, hi: 0xb1}, + {value: 0x8118, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb5}, + {value: 0x812d, lo: 0xb6, hi: 0xb6}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x812d, lo: 0xb9, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbf}, + // Block 0x10, offset 0x7a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x11, offset 0x82 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x12, offset 0x89 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x8c + {value: 0x0008, lo: 0x06}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + // Block 0x14, offset 0x93 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x97 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x16, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x17, offset 0x9d + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x18, offset 0x9f + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x19, offset 0xa8 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1a, offset 0xac + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + 
{value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1b, offset 0xb3 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xb8 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0xbb + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1e, offset 0xc5 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1f, offset 0xc8 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x20, offset 0xcf + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x21, offset 0xd7 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xda + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x23, offset 0xdc + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x24, offset 0xde + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x25, offset 0xe0 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0xe5 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x27, offset 0xf6 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x28, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 
0x29, offset 0x104 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x2a, offset 0x10a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2b, offset 0x10c + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x110 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x116 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x119 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x11c + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x11e + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x121 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x124 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x128 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x37, offset 0x12d + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x136 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x138 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x13b + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x13d + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x148 + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x14c + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 
0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3e, offset 0x15a + {value: 0x427b, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3f, offset 0x15d + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x40, offset 0x163 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x41, offset 0x169 + {value: 0x6408, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x42, offset 0x174 + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x43, offset 0x178 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x44, offset 0x17a + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x45, offset 0x17c + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x46, offset 0x17e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x47, offset 0x180 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x48, offset 0x182 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x49, offset 0x188 + {value: 0x0000, lo: 0x03}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, + // Block 0x4a, offset 0x18c + {value: 0x0000, lo: 0x01}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, + // Block 0x4b, offset 0x18e + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4c, offset 0x190 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x198 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4e, offset 0x19c + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4f, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x50, offset 0x1a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x51, offset 0x1a3 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x52, 
offset 0x1a5 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x53, offset 0x1a8 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x54, offset 0x1aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x55, offset 0x1ac + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x56, offset 0x1ae + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x57, offset 0x1b0 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x58, offset 0x1b6 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x59, offset 0x1b9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x5a, offset 0x1bb + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5b, offset 0x1c2 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5c, offset 0x1c8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5d, offset 0x1ce + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5e, offset 0x1d6 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5f, offset 0x1dc + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x60, offset 0x1e2 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x61, offset 0x1e8 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x62, offset 0x1ec + {value: 0x0006, lo: 0x0d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 
0xbb}, + {value: 0x43ae, lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, + // Block 0x63, offset 0x1fa + {value: 0x0006, lo: 0x08}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 0x43d2, lo: 0x8e, hi: 0x8e}, + // Block 0x64, offset 0x203 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x65, offset 0x206 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x66, offset 0x209 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x67, offset 0x20b + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x68, offset 0x20e + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x69, offset 0x210 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x6a, offset 0x214 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6b, offset 0x219 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6c, offset 0x21b + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6d, offset 0x21d + {value: 0x0000, lo: 0x04}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, + // Block 0x6e, offset 0x222 + {value: 0x0000, lo: 0x05}, + {value: 0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6f, offset 0x228 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x70, offset 0x22a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x71, offset 0x22c + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x72, offset 0x22e + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x234 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x74, offset 0x237 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x75, offset 0x23a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x76, offset 0x242 + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, 
hi: 0xb4}, + // Block 0x77, offset 0x249 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x78, offset 0x24c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x79, offset 0x24f + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7a, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7b, offset 0x259 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + // Block 0x7c, offset 0x25c + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x263 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7e, offset 0x266 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x26c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x80, offset 0x26e + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x81, offset 0x271 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x82, offset 0x273 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x83, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x84, offset 0x277 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x85, offset 0x279 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x86, offset 0x27c + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x87, offset 0x27e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x88, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x89, offset 0x282 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8a, offset 0x28f + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8b, offset 
0x299 + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x8c, offset 0x29b + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8d, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x8e, offset 0x2a3 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x8f, offset 0x2a5 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x90, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 17104 bytes (16.70 KiB). Checksum: d985061cf5307b35. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 91: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 91 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 93 blocks, 5952 entries, 11904 bytes +// The third block is the zero block. 
+var nfkcValues = [5952]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac, + // Block 0x5, offset 0x140 + 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2dee, 0x185: 0x2df4, + 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a, + 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a5, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425a, 0x285: 0x447b, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 
0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b, + 0x306: 0xa000, 0x307: 0x3709, + 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3733, 0x342: 0x37b7, + 0x350: 0x370f, 0x351: 0x3793, + 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd, + 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf, + 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed, + 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805, + 0x378: 0x3787, 0x379: 0x380b, + // Block 0xe, offset 0x380 + 0x387: 0x1d61, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d84, + 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000, + 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000, + 0x412: 0x2d4e, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d56, + 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x0069, 0x441: 0x006b, 0x442: 0x006f, 0x443: 0x0083, 0x444: 0x00f5, 0x445: 0x00f8, + 0x446: 0x0413, 0x447: 0x0085, 0x448: 0x0089, 
0x449: 0x008b, 0x44a: 0x0104, 0x44b: 0x0107, + 0x44c: 0x010a, 0x44d: 0x008f, 0x44f: 0x0097, 0x450: 0x009b, 0x451: 0x00e0, + 0x452: 0x009f, 0x453: 0x00fe, 0x454: 0x0417, 0x455: 0x041b, 0x456: 0x00a1, 0x457: 0x00a9, + 0x458: 0x00ab, 0x459: 0x0423, 0x45a: 0x012b, 0x45b: 0x00ad, 0x45c: 0x0427, 0x45d: 0x01be, + 0x45e: 0x01c1, 0x45f: 0x01c4, 0x460: 0x01fa, 0x461: 0x01fd, 0x462: 0x0093, 0x463: 0x00a5, + 0x464: 0x00ab, 0x465: 0x00ad, 0x466: 0x01be, 0x467: 0x01c1, 0x468: 0x01eb, 0x469: 0x01fa, + 0x46a: 0x01fd, + 0x478: 0x020c, + // Block 0x12, offset 0x480 + 0x49b: 0x00fb, 0x49c: 0x0087, 0x49d: 0x0101, + 0x49e: 0x00d4, 0x49f: 0x010a, 0x4a0: 0x008d, 0x4a1: 0x010d, 0x4a2: 0x0110, 0x4a3: 0x0116, + 0x4a4: 0x011c, 0x4a5: 0x011f, 0x4a6: 0x0122, 0x4a7: 0x042b, 0x4a8: 0x016a, 0x4a9: 0x0128, + 0x4aa: 0x042f, 0x4ab: 0x016d, 0x4ac: 0x0131, 0x4ad: 0x012e, 0x4ae: 0x0134, 0x4af: 0x0137, + 0x4b0: 0x013a, 0x4b1: 0x013d, 0x4b2: 0x0140, 0x4b3: 0x014c, 0x4b4: 0x014f, 0x4b5: 0x00ec, + 0x4b6: 0x0152, 0x4b7: 0x0155, 0x4b8: 0x041f, 0x4b9: 0x0158, 0x4ba: 0x015b, 0x4bb: 0x00b5, + 0x4bc: 0x015e, 0x4bd: 0x0161, 0x4be: 0x0164, 0x4bf: 0x01d0, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x8132, 0x4c1: 0x8132, 0x4c2: 0x812d, 0x4c3: 0x8132, 0x4c4: 0x8132, 0x4c5: 0x8132, + 0x4c6: 0x8132, 0x4c7: 0x8132, 0x4c8: 0x8132, 0x4c9: 0x8132, 0x4ca: 0x812d, 0x4cb: 0x8132, + 0x4cc: 0x8132, 0x4cd: 0x8135, 0x4ce: 0x812a, 0x4cf: 0x812d, 0x4d0: 0x8129, 0x4d1: 0x8132, + 0x4d2: 0x8132, 0x4d3: 0x8132, 0x4d4: 0x8132, 0x4d5: 0x8132, 0x4d6: 0x8132, 0x4d7: 0x8132, + 0x4d8: 0x8132, 0x4d9: 0x8132, 0x4da: 0x8132, 0x4db: 0x8132, 0x4dc: 0x8132, 0x4dd: 0x8132, + 0x4de: 0x8132, 0x4df: 0x8132, 0x4e0: 0x8132, 0x4e1: 0x8132, 0x4e2: 0x8132, 0x4e3: 0x8132, + 0x4e4: 0x8132, 0x4e5: 0x8132, 0x4e6: 0x8132, 0x4e7: 0x8132, 0x4e8: 0x8132, 0x4e9: 0x8132, + 0x4ea: 0x8132, 0x4eb: 0x8132, 0x4ec: 0x8132, 0x4ed: 0x8132, 0x4ee: 0x8132, 0x4ef: 0x8132, + 0x4f0: 0x8132, 0x4f1: 0x8132, 0x4f2: 0x8132, 0x4f3: 0x8132, 0x4f4: 0x8132, 0x4f5: 0x8132, + 0x4f6: 0x8133, 0x4f7: 0x8131, 0x4f8: 0x8131, 0x4f9: 0x812d, 0x4fb: 0x8132, + 0x4fc: 0x8134, 0x4fd: 0x812d, 0x4fe: 0x8132, 0x4ff: 0x812d, + // Block 0x14, offset 0x500 + 0x500: 0x2f97, 0x501: 0x32a3, 0x502: 0x2fa1, 0x503: 0x32ad, 0x504: 0x2fa6, 0x505: 0x32b2, + 0x506: 0x2fab, 0x507: 0x32b7, 0x508: 0x38cc, 0x509: 0x3a5b, 0x50a: 0x2fc4, 0x50b: 0x32d0, + 0x50c: 0x2fce, 0x50d: 0x32da, 0x50e: 0x2fdd, 0x50f: 0x32e9, 0x510: 0x2fd3, 0x511: 0x32df, + 0x512: 0x2fd8, 0x513: 0x32e4, 0x514: 0x38ef, 0x515: 0x3a7e, 0x516: 0x38f6, 0x517: 0x3a85, + 0x518: 0x3019, 0x519: 0x3325, 0x51a: 0x301e, 0x51b: 0x332a, 0x51c: 0x3904, 0x51d: 0x3a93, + 0x51e: 0x3023, 0x51f: 0x332f, 0x520: 0x3032, 0x521: 0x333e, 0x522: 0x3050, 0x523: 0x335c, + 0x524: 0x305f, 0x525: 0x336b, 0x526: 0x3055, 0x527: 0x3361, 0x528: 0x3064, 0x529: 0x3370, + 0x52a: 0x3069, 0x52b: 0x3375, 0x52c: 0x30af, 0x52d: 0x33bb, 0x52e: 0x390b, 0x52f: 0x3a9a, + 0x530: 0x30b9, 0x531: 0x33ca, 0x532: 0x30c3, 0x533: 0x33d4, 0x534: 0x30cd, 0x535: 0x33de, + 0x536: 0x46c4, 0x537: 0x4755, 0x538: 0x3912, 0x539: 0x3aa1, 0x53a: 0x30e6, 0x53b: 0x33f7, + 0x53c: 0x30e1, 0x53d: 0x33f2, 0x53e: 0x30eb, 0x53f: 0x33fc, + // Block 0x15, offset 0x540 + 0x540: 0x30f0, 0x541: 0x3401, 0x542: 0x30f5, 0x543: 0x3406, 0x544: 0x3109, 0x545: 0x341a, + 0x546: 0x3113, 0x547: 0x3424, 0x548: 0x3122, 0x549: 0x3433, 0x54a: 0x311d, 0x54b: 0x342e, + 0x54c: 0x3935, 0x54d: 0x3ac4, 0x54e: 0x3943, 0x54f: 0x3ad2, 0x550: 0x394a, 0x551: 0x3ad9, + 0x552: 0x3951, 0x553: 0x3ae0, 0x554: 0x314f, 0x555: 0x3460, 0x556: 0x3154, 0x557: 0x3465, + 0x558: 0x315e, 0x559: 
0x346f, 0x55a: 0x46f1, 0x55b: 0x4782, 0x55c: 0x3997, 0x55d: 0x3b26, + 0x55e: 0x3177, 0x55f: 0x3488, 0x560: 0x3181, 0x561: 0x3492, 0x562: 0x4700, 0x563: 0x4791, + 0x564: 0x399e, 0x565: 0x3b2d, 0x566: 0x39a5, 0x567: 0x3b34, 0x568: 0x39ac, 0x569: 0x3b3b, + 0x56a: 0x3190, 0x56b: 0x34a1, 0x56c: 0x319a, 0x56d: 0x34b0, 0x56e: 0x31ae, 0x56f: 0x34c4, + 0x570: 0x31a9, 0x571: 0x34bf, 0x572: 0x31ea, 0x573: 0x3500, 0x574: 0x31f9, 0x575: 0x350f, + 0x576: 0x31f4, 0x577: 0x350a, 0x578: 0x39b3, 0x579: 0x3b42, 0x57a: 0x39ba, 0x57b: 0x3b49, + 0x57c: 0x31fe, 0x57d: 0x3514, 0x57e: 0x3203, 0x57f: 0x3519, + // Block 0x16, offset 0x580 + 0x580: 0x3208, 0x581: 0x351e, 0x582: 0x320d, 0x583: 0x3523, 0x584: 0x321c, 0x585: 0x3532, + 0x586: 0x3217, 0x587: 0x352d, 0x588: 0x3221, 0x589: 0x353c, 0x58a: 0x3226, 0x58b: 0x3541, + 0x58c: 0x322b, 0x58d: 0x3546, 0x58e: 0x3249, 0x58f: 0x3564, 0x590: 0x3262, 0x591: 0x3582, + 0x592: 0x3271, 0x593: 0x3591, 0x594: 0x3276, 0x595: 0x3596, 0x596: 0x337a, 0x597: 0x34a6, + 0x598: 0x3537, 0x599: 0x3573, 0x59a: 0x1be0, 0x59b: 0x42d7, + 0x5a0: 0x46a1, 0x5a1: 0x4732, 0x5a2: 0x2f83, 0x5a3: 0x328f, + 0x5a4: 0x3878, 0x5a5: 0x3a07, 0x5a6: 0x3871, 0x5a7: 0x3a00, 0x5a8: 0x3886, 0x5a9: 0x3a15, + 0x5aa: 0x387f, 0x5ab: 0x3a0e, 0x5ac: 0x38be, 0x5ad: 0x3a4d, 0x5ae: 0x3894, 0x5af: 0x3a23, + 0x5b0: 0x388d, 0x5b1: 0x3a1c, 0x5b2: 0x38a2, 0x5b3: 0x3a31, 0x5b4: 0x389b, 0x5b5: 0x3a2a, + 0x5b6: 0x38c5, 0x5b7: 0x3a54, 0x5b8: 0x46b5, 0x5b9: 0x4746, 0x5ba: 0x3000, 0x5bb: 0x330c, + 0x5bc: 0x2fec, 0x5bd: 0x32f8, 0x5be: 0x38da, 0x5bf: 0x3a69, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x38d3, 0x5c1: 0x3a62, 0x5c2: 0x38e8, 0x5c3: 0x3a77, 0x5c4: 0x38e1, 0x5c5: 0x3a70, + 0x5c6: 0x38fd, 0x5c7: 0x3a8c, 0x5c8: 0x3091, 0x5c9: 0x339d, 0x5ca: 0x30a5, 0x5cb: 0x33b1, + 0x5cc: 0x46e7, 0x5cd: 0x4778, 0x5ce: 0x3136, 0x5cf: 0x3447, 0x5d0: 0x3920, 0x5d1: 0x3aaf, + 0x5d2: 0x3919, 0x5d3: 0x3aa8, 0x5d4: 0x392e, 0x5d5: 0x3abd, 0x5d6: 0x3927, 0x5d7: 0x3ab6, + 0x5d8: 0x3989, 0x5d9: 0x3b18, 0x5da: 0x396d, 0x5db: 0x3afc, 0x5dc: 0x3966, 0x5dd: 0x3af5, + 0x5de: 0x397b, 0x5df: 0x3b0a, 0x5e0: 0x3974, 0x5e1: 0x3b03, 0x5e2: 0x3982, 0x5e3: 0x3b11, + 0x5e4: 0x31e5, 0x5e5: 0x34fb, 0x5e6: 0x31c7, 0x5e7: 0x34dd, 0x5e8: 0x39e4, 0x5e9: 0x3b73, + 0x5ea: 0x39dd, 0x5eb: 0x3b6c, 0x5ec: 0x39f2, 0x5ed: 0x3b81, 0x5ee: 0x39eb, 0x5ef: 0x3b7a, + 0x5f0: 0x39f9, 0x5f1: 0x3b88, 0x5f2: 0x3230, 0x5f3: 0x354b, 0x5f4: 0x3258, 0x5f5: 0x3578, + 0x5f6: 0x3253, 0x5f7: 0x356e, 0x5f8: 0x323f, 0x5f9: 0x355a, + // Block 0x18, offset 0x600 + 0x600: 0x4804, 0x601: 0x480a, 0x602: 0x491e, 0x603: 0x4936, 0x604: 0x4926, 0x605: 0x493e, + 0x606: 0x492e, 0x607: 0x4946, 0x608: 0x47aa, 0x609: 0x47b0, 0x60a: 0x488e, 0x60b: 0x48a6, + 0x60c: 0x4896, 0x60d: 0x48ae, 0x60e: 0x489e, 0x60f: 0x48b6, 0x610: 0x4816, 0x611: 0x481c, + 0x612: 0x3db8, 0x613: 0x3dc8, 0x614: 0x3dc0, 0x615: 0x3dd0, + 0x618: 0x47b6, 0x619: 0x47bc, 0x61a: 0x3ce8, 0x61b: 0x3cf8, 0x61c: 0x3cf0, 0x61d: 0x3d00, + 0x620: 0x482e, 0x621: 0x4834, 0x622: 0x494e, 0x623: 0x4966, + 0x624: 0x4956, 0x625: 0x496e, 0x626: 0x495e, 0x627: 0x4976, 0x628: 0x47c2, 0x629: 0x47c8, + 0x62a: 0x48be, 0x62b: 0x48d6, 0x62c: 0x48c6, 0x62d: 0x48de, 0x62e: 0x48ce, 0x62f: 0x48e6, + 0x630: 0x4846, 0x631: 0x484c, 0x632: 0x3e18, 0x633: 0x3e30, 0x634: 0x3e20, 0x635: 0x3e38, + 0x636: 0x3e28, 0x637: 0x3e40, 0x638: 0x47ce, 0x639: 0x47d4, 0x63a: 0x3d18, 0x63b: 0x3d30, + 0x63c: 0x3d20, 0x63d: 0x3d38, 0x63e: 0x3d28, 0x63f: 0x3d40, + // Block 0x19, offset 0x640 + 0x640: 0x4852, 0x641: 0x4858, 0x642: 0x3e48, 0x643: 0x3e58, 0x644: 0x3e50, 0x645: 0x3e60, + 0x648: 0x47da, 
0x649: 0x47e0, 0x64a: 0x3d48, 0x64b: 0x3d58, + 0x64c: 0x3d50, 0x64d: 0x3d60, 0x650: 0x4864, 0x651: 0x486a, + 0x652: 0x3e80, 0x653: 0x3e98, 0x654: 0x3e88, 0x655: 0x3ea0, 0x656: 0x3e90, 0x657: 0x3ea8, + 0x659: 0x47e6, 0x65b: 0x3d68, 0x65d: 0x3d70, + 0x65f: 0x3d78, 0x660: 0x487c, 0x661: 0x4882, 0x662: 0x497e, 0x663: 0x4996, + 0x664: 0x4986, 0x665: 0x499e, 0x666: 0x498e, 0x667: 0x49a6, 0x668: 0x47ec, 0x669: 0x47f2, + 0x66a: 0x48ee, 0x66b: 0x4906, 0x66c: 0x48f6, 0x66d: 0x490e, 0x66e: 0x48fe, 0x66f: 0x4916, + 0x670: 0x47f8, 0x671: 0x431e, 0x672: 0x3691, 0x673: 0x4324, 0x674: 0x4822, 0x675: 0x432a, + 0x676: 0x36a3, 0x677: 0x4330, 0x678: 0x36c1, 0x679: 0x4336, 0x67a: 0x36d9, 0x67b: 0x433c, + 0x67c: 0x4870, 0x67d: 0x4342, + // Block 0x1a, offset 0x680 + 0x680: 0x3da0, 0x681: 0x3da8, 0x682: 0x4184, 0x683: 0x41a2, 0x684: 0x418e, 0x685: 0x41ac, + 0x686: 0x4198, 0x687: 0x41b6, 0x688: 0x3cd8, 0x689: 0x3ce0, 0x68a: 0x40d0, 0x68b: 0x40ee, + 0x68c: 0x40da, 0x68d: 0x40f8, 0x68e: 0x40e4, 0x68f: 0x4102, 0x690: 0x3de8, 0x691: 0x3df0, + 0x692: 0x41c0, 0x693: 0x41de, 0x694: 0x41ca, 0x695: 0x41e8, 0x696: 0x41d4, 0x697: 0x41f2, + 0x698: 0x3d08, 0x699: 0x3d10, 0x69a: 0x410c, 0x69b: 0x412a, 0x69c: 0x4116, 0x69d: 0x4134, + 0x69e: 0x4120, 0x69f: 0x413e, 0x6a0: 0x3ec0, 0x6a1: 0x3ec8, 0x6a2: 0x41fc, 0x6a3: 0x421a, + 0x6a4: 0x4206, 0x6a5: 0x4224, 0x6a6: 0x4210, 0x6a7: 0x422e, 0x6a8: 0x3d80, 0x6a9: 0x3d88, + 0x6aa: 0x4148, 0x6ab: 0x4166, 0x6ac: 0x4152, 0x6ad: 0x4170, 0x6ae: 0x415c, 0x6af: 0x417a, + 0x6b0: 0x3685, 0x6b1: 0x367f, 0x6b2: 0x3d90, 0x6b3: 0x368b, 0x6b4: 0x3d98, + 0x6b6: 0x4810, 0x6b7: 0x3db0, 0x6b8: 0x35f5, 0x6b9: 0x35ef, 0x6ba: 0x35e3, 0x6bb: 0x42ee, + 0x6bc: 0x35fb, 0x6bd: 0x4287, 0x6be: 0x01d3, 0x6bf: 0x4287, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x42a0, 0x6c1: 0x4482, 0x6c2: 0x3dd8, 0x6c3: 0x369d, 0x6c4: 0x3de0, + 0x6c6: 0x483a, 0x6c7: 0x3df8, 0x6c8: 0x3601, 0x6c9: 0x42f4, 0x6ca: 0x360d, 0x6cb: 0x42fa, + 0x6cc: 0x3619, 0x6cd: 0x4489, 0x6ce: 0x4490, 0x6cf: 0x4497, 0x6d0: 0x36b5, 0x6d1: 0x36af, + 0x6d2: 0x3e00, 0x6d3: 0x44e4, 0x6d6: 0x36bb, 0x6d7: 0x3e10, + 0x6d8: 0x3631, 0x6d9: 0x362b, 0x6da: 0x361f, 0x6db: 0x4300, 0x6dd: 0x449e, + 0x6de: 0x44a5, 0x6df: 0x44ac, 0x6e0: 0x36eb, 0x6e1: 0x36e5, 0x6e2: 0x3e68, 0x6e3: 0x44ec, + 0x6e4: 0x36cd, 0x6e5: 0x36d3, 0x6e6: 0x36f1, 0x6e7: 0x3e78, 0x6e8: 0x3661, 0x6e9: 0x365b, + 0x6ea: 0x364f, 0x6eb: 0x430c, 0x6ec: 0x3649, 0x6ed: 0x4474, 0x6ee: 0x447b, 0x6ef: 0x0081, + 0x6f2: 0x3eb0, 0x6f3: 0x36f7, 0x6f4: 0x3eb8, + 0x6f6: 0x4888, 0x6f7: 0x3ed0, 0x6f8: 0x363d, 0x6f9: 0x4306, 0x6fa: 0x366d, 0x6fb: 0x4318, + 0x6fc: 0x3679, 0x6fd: 0x425a, 0x6fe: 0x428c, + // Block 0x1c, offset 0x700 + 0x700: 0x1bd8, 0x701: 0x1bdc, 0x702: 0x0047, 0x703: 0x1c54, 0x705: 0x1be8, + 0x706: 0x1bec, 0x707: 0x00e9, 0x709: 0x1c58, 0x70a: 0x008f, 0x70b: 0x0051, + 0x70c: 0x0051, 0x70d: 0x0051, 0x70e: 0x0091, 0x70f: 0x00da, 0x710: 0x0053, 0x711: 0x0053, + 0x712: 0x0059, 0x713: 0x0099, 0x715: 0x005d, 0x716: 0x198d, + 0x719: 0x0061, 0x71a: 0x0063, 0x71b: 0x0065, 0x71c: 0x0065, 0x71d: 0x0065, + 0x720: 0x199f, 0x721: 0x1bc8, 0x722: 0x19a8, + 0x724: 0x0075, 0x726: 0x01b8, 0x728: 0x0075, + 0x72a: 0x0057, 0x72b: 0x42d2, 0x72c: 0x0045, 0x72d: 0x0047, 0x72f: 0x008b, + 0x730: 0x004b, 0x731: 0x004d, 0x733: 0x005b, 0x734: 0x009f, 0x735: 0x0215, + 0x736: 0x0218, 0x737: 0x021b, 0x738: 0x021e, 0x739: 0x0093, 0x73b: 0x1b98, + 0x73c: 0x01e8, 0x73d: 0x01c1, 0x73e: 0x0179, 0x73f: 0x01a0, + // Block 0x1d, offset 0x740 + 0x740: 0x0463, 0x745: 0x0049, + 0x746: 0x0089, 0x747: 0x008b, 0x748: 0x0093, 0x749: 0x0095, + 0x750: 0x222e, 
0x751: 0x223a, + 0x752: 0x22ee, 0x753: 0x2216, 0x754: 0x229a, 0x755: 0x2222, 0x756: 0x22a0, 0x757: 0x22b8, + 0x758: 0x22c4, 0x759: 0x2228, 0x75a: 0x22ca, 0x75b: 0x2234, 0x75c: 0x22be, 0x75d: 0x22d0, + 0x75e: 0x22d6, 0x75f: 0x1cbc, 0x760: 0x0053, 0x761: 0x195a, 0x762: 0x1ba4, 0x763: 0x1963, + 0x764: 0x006d, 0x765: 0x19ab, 0x766: 0x1bd0, 0x767: 0x1d48, 0x768: 0x1966, 0x769: 0x0071, + 0x76a: 0x19b7, 0x76b: 0x1bd4, 0x76c: 0x0059, 0x76d: 0x0047, 0x76e: 0x0049, 0x76f: 0x005b, + 0x770: 0x0093, 0x771: 0x19e4, 0x772: 0x1c18, 0x773: 0x19ed, 0x774: 0x00ad, 0x775: 0x1a62, + 0x776: 0x1c4c, 0x777: 0x1d5c, 0x778: 0x19f0, 0x779: 0x00b1, 0x77a: 0x1a65, 0x77b: 0x1c50, + 0x77c: 0x0099, 0x77d: 0x0087, 0x77e: 0x0089, 0x77f: 0x009b, + // Block 0x1e, offset 0x780 + 0x781: 0x3c06, 0x783: 0xa000, 0x784: 0x3c0d, 0x785: 0xa000, + 0x787: 0x3c14, 0x788: 0xa000, 0x789: 0x3c1b, + 0x78d: 0xa000, + 0x7a0: 0x2f65, 0x7a1: 0xa000, 0x7a2: 0x3c29, + 0x7a4: 0xa000, 0x7a5: 0xa000, + 0x7ad: 0x3c22, 0x7ae: 0x2f60, 0x7af: 0x2f6a, + 0x7b0: 0x3c30, 0x7b1: 0x3c37, 0x7b2: 0xa000, 0x7b3: 0xa000, 0x7b4: 0x3c3e, 0x7b5: 0x3c45, + 0x7b6: 0xa000, 0x7b7: 0xa000, 0x7b8: 0x3c4c, 0x7b9: 0x3c53, 0x7ba: 0xa000, 0x7bb: 0xa000, + 0x7bc: 0xa000, 0x7bd: 0xa000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3c5a, 0x7c1: 0x3c61, 0x7c2: 0xa000, 0x7c3: 0xa000, 0x7c4: 0x3c76, 0x7c5: 0x3c7d, + 0x7c6: 0xa000, 0x7c7: 0xa000, 0x7c8: 0x3c84, 0x7c9: 0x3c8b, + 0x7d1: 0xa000, + 0x7d2: 0xa000, + 0x7e2: 0xa000, + 0x7e8: 0xa000, 0x7e9: 0xa000, + 0x7eb: 0xa000, 0x7ec: 0x3ca0, 0x7ed: 0x3ca7, 0x7ee: 0x3cae, 0x7ef: 0x3cb5, + 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0xa000, 0x7f5: 0xa000, + // Block 0x20, offset 0x800 + 0x820: 0x0023, 0x821: 0x0025, 0x822: 0x0027, 0x823: 0x0029, + 0x824: 0x002b, 0x825: 0x002d, 0x826: 0x002f, 0x827: 0x0031, 0x828: 0x0033, 0x829: 0x1882, + 0x82a: 0x1885, 0x82b: 0x1888, 0x82c: 0x188b, 0x82d: 0x188e, 0x82e: 0x1891, 0x82f: 0x1894, + 0x830: 0x1897, 0x831: 0x189a, 0x832: 0x189d, 0x833: 0x18a6, 0x834: 0x1a68, 0x835: 0x1a6c, + 0x836: 0x1a70, 0x837: 0x1a74, 0x838: 0x1a78, 0x839: 0x1a7c, 0x83a: 0x1a80, 0x83b: 0x1a84, + 0x83c: 0x1a88, 0x83d: 0x1c80, 0x83e: 0x1c85, 0x83f: 0x1c8a, + // Block 0x21, offset 0x840 + 0x840: 0x1c8f, 0x841: 0x1c94, 0x842: 0x1c99, 0x843: 0x1c9e, 0x844: 0x1ca3, 0x845: 0x1ca8, + 0x846: 0x1cad, 0x847: 0x1cb2, 0x848: 0x187f, 0x849: 0x18a3, 0x84a: 0x18c7, 0x84b: 0x18eb, + 0x84c: 0x190f, 0x84d: 0x1918, 0x84e: 0x191e, 0x84f: 0x1924, 0x850: 0x192a, 0x851: 0x1b60, + 0x852: 0x1b64, 0x853: 0x1b68, 0x854: 0x1b6c, 0x855: 0x1b70, 0x856: 0x1b74, 0x857: 0x1b78, + 0x858: 0x1b7c, 0x859: 0x1b80, 0x85a: 0x1b84, 0x85b: 0x1b88, 0x85c: 0x1af4, 0x85d: 0x1af8, + 0x85e: 0x1afc, 0x85f: 0x1b00, 0x860: 0x1b04, 0x861: 0x1b08, 0x862: 0x1b0c, 0x863: 0x1b10, + 0x864: 0x1b14, 0x865: 0x1b18, 0x866: 0x1b1c, 0x867: 0x1b20, 0x868: 0x1b24, 0x869: 0x1b28, + 0x86a: 0x1b2c, 0x86b: 0x1b30, 0x86c: 0x1b34, 0x86d: 0x1b38, 0x86e: 0x1b3c, 0x86f: 0x1b40, + 0x870: 0x1b44, 0x871: 0x1b48, 0x872: 0x1b4c, 0x873: 0x1b50, 0x874: 0x1b54, 0x875: 0x1b58, + 0x876: 0x0043, 0x877: 0x0045, 0x878: 0x0047, 0x879: 0x0049, 0x87a: 0x004b, 0x87b: 0x004d, + 0x87c: 0x004f, 0x87d: 0x0051, 0x87e: 0x0053, 0x87f: 0x0055, + // Block 0x22, offset 0x880 + 0x880: 0x06bf, 0x881: 0x06e3, 0x882: 0x06ef, 0x883: 0x06ff, 0x884: 0x0707, 0x885: 0x0713, + 0x886: 0x071b, 0x887: 0x0723, 0x888: 0x072f, 0x889: 0x0783, 0x88a: 0x079b, 0x88b: 0x07ab, + 0x88c: 0x07bb, 0x88d: 0x07cb, 0x88e: 0x07db, 0x88f: 0x07fb, 0x890: 0x07ff, 0x891: 0x0803, + 0x892: 0x0837, 0x893: 0x085f, 0x894: 0x086f, 0x895: 0x0877, 0x896: 0x087b, 0x897: 
0x0887, + 0x898: 0x08a3, 0x899: 0x08a7, 0x89a: 0x08bf, 0x89b: 0x08c3, 0x89c: 0x08cb, 0x89d: 0x08db, + 0x89e: 0x0977, 0x89f: 0x098b, 0x8a0: 0x09cb, 0x8a1: 0x09df, 0x8a2: 0x09e7, 0x8a3: 0x09eb, + 0x8a4: 0x09fb, 0x8a5: 0x0a17, 0x8a6: 0x0a43, 0x8a7: 0x0a4f, 0x8a8: 0x0a6f, 0x8a9: 0x0a7b, + 0x8aa: 0x0a7f, 0x8ab: 0x0a83, 0x8ac: 0x0a9b, 0x8ad: 0x0a9f, 0x8ae: 0x0acb, 0x8af: 0x0ad7, + 0x8b0: 0x0adf, 0x8b1: 0x0ae7, 0x8b2: 0x0af7, 0x8b3: 0x0aff, 0x8b4: 0x0b07, 0x8b5: 0x0b33, + 0x8b6: 0x0b37, 0x8b7: 0x0b3f, 0x8b8: 0x0b43, 0x8b9: 0x0b4b, 0x8ba: 0x0b53, 0x8bb: 0x0b63, + 0x8bc: 0x0b7f, 0x8bd: 0x0bf7, 0x8be: 0x0c0b, 0x8bf: 0x0c0f, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0c8f, 0x8c1: 0x0c93, 0x8c2: 0x0ca7, 0x8c3: 0x0cab, 0x8c4: 0x0cb3, 0x8c5: 0x0cbb, + 0x8c6: 0x0cc3, 0x8c7: 0x0ccf, 0x8c8: 0x0cf7, 0x8c9: 0x0d07, 0x8ca: 0x0d1b, 0x8cb: 0x0d8b, + 0x8cc: 0x0d97, 0x8cd: 0x0da7, 0x8ce: 0x0db3, 0x8cf: 0x0dbf, 0x8d0: 0x0dc7, 0x8d1: 0x0dcb, + 0x8d2: 0x0dcf, 0x8d3: 0x0dd3, 0x8d4: 0x0dd7, 0x8d5: 0x0e8f, 0x8d6: 0x0ed7, 0x8d7: 0x0ee3, + 0x8d8: 0x0ee7, 0x8d9: 0x0eeb, 0x8da: 0x0eef, 0x8db: 0x0ef7, 0x8dc: 0x0efb, 0x8dd: 0x0f0f, + 0x8de: 0x0f2b, 0x8df: 0x0f33, 0x8e0: 0x0f73, 0x8e1: 0x0f77, 0x8e2: 0x0f7f, 0x8e3: 0x0f83, + 0x8e4: 0x0f8b, 0x8e5: 0x0f8f, 0x8e6: 0x0fb3, 0x8e7: 0x0fb7, 0x8e8: 0x0fd3, 0x8e9: 0x0fd7, + 0x8ea: 0x0fdb, 0x8eb: 0x0fdf, 0x8ec: 0x0ff3, 0x8ed: 0x1017, 0x8ee: 0x101b, 0x8ef: 0x101f, + 0x8f0: 0x1043, 0x8f1: 0x1083, 0x8f2: 0x1087, 0x8f3: 0x10a7, 0x8f4: 0x10b7, 0x8f5: 0x10bf, + 0x8f6: 0x10df, 0x8f7: 0x1103, 0x8f8: 0x1147, 0x8f9: 0x114f, 0x8fa: 0x1163, 0x8fb: 0x116f, + 0x8fc: 0x1177, 0x8fd: 0x117f, 0x8fe: 0x1183, 0x8ff: 0x1187, + // Block 0x24, offset 0x900 + 0x900: 0x119f, 0x901: 0x11a3, 0x902: 0x11bf, 0x903: 0x11c7, 0x904: 0x11cf, 0x905: 0x11d3, + 0x906: 0x11df, 0x907: 0x11e7, 0x908: 0x11eb, 0x909: 0x11ef, 0x90a: 0x11f7, 0x90b: 0x11fb, + 0x90c: 0x129b, 0x90d: 0x12af, 0x90e: 0x12e3, 0x90f: 0x12e7, 0x910: 0x12ef, 0x911: 0x131b, + 0x912: 0x1323, 0x913: 0x132b, 0x914: 0x1333, 0x915: 0x136f, 0x916: 0x1373, 0x917: 0x137b, + 0x918: 0x137f, 0x919: 0x1383, 0x91a: 0x13af, 0x91b: 0x13b3, 0x91c: 0x13bb, 0x91d: 0x13cf, + 0x91e: 0x13d3, 0x91f: 0x13ef, 0x920: 0x13f7, 0x921: 0x13fb, 0x922: 0x141f, 0x923: 0x143f, + 0x924: 0x1453, 0x925: 0x1457, 0x926: 0x145f, 0x927: 0x148b, 0x928: 0x148f, 0x929: 0x149f, + 0x92a: 0x14c3, 0x92b: 0x14cf, 0x92c: 0x14df, 0x92d: 0x14f7, 0x92e: 0x14ff, 0x92f: 0x1503, + 0x930: 0x1507, 0x931: 0x150b, 0x932: 0x1517, 0x933: 0x151b, 0x934: 0x1523, 0x935: 0x153f, + 0x936: 0x1543, 0x937: 0x1547, 0x938: 0x155f, 0x939: 0x1563, 0x93a: 0x156b, 0x93b: 0x157f, + 0x93c: 0x1583, 0x93d: 0x1587, 0x93e: 0x158f, 0x93f: 0x1593, + // Block 0x25, offset 0x940 + 0x946: 0xa000, 0x94b: 0xa000, + 0x94c: 0x3f08, 0x94d: 0xa000, 0x94e: 0x3f10, 0x94f: 0xa000, 0x950: 0x3f18, 0x951: 0xa000, + 0x952: 0x3f20, 0x953: 0xa000, 0x954: 0x3f28, 0x955: 0xa000, 0x956: 0x3f30, 0x957: 0xa000, + 0x958: 0x3f38, 0x959: 0xa000, 0x95a: 0x3f40, 0x95b: 0xa000, 0x95c: 0x3f48, 0x95d: 0xa000, + 0x95e: 0x3f50, 0x95f: 0xa000, 0x960: 0x3f58, 0x961: 0xa000, 0x962: 0x3f60, + 0x964: 0xa000, 0x965: 0x3f68, 0x966: 0xa000, 0x967: 0x3f70, 0x968: 0xa000, 0x969: 0x3f78, + 0x96f: 0xa000, + 0x970: 0x3f80, 0x971: 0x3f88, 0x972: 0xa000, 0x973: 0x3f90, 0x974: 0x3f98, 0x975: 0xa000, + 0x976: 0x3fa0, 0x977: 0x3fa8, 0x978: 0xa000, 0x979: 0x3fb0, 0x97a: 0x3fb8, 0x97b: 0xa000, + 0x97c: 0x3fc0, 0x97d: 0x3fc8, + // Block 0x26, offset 0x980 + 0x994: 0x3f00, + 0x999: 0x9903, 0x99a: 0x9903, 0x99b: 0x42dc, 0x99c: 0x42e2, 0x99d: 0xa000, + 0x99e: 0x3fd0, 0x99f: 0x26b4, + 0x9a6: 
0xa000, + 0x9ab: 0xa000, 0x9ac: 0x3fe0, 0x9ad: 0xa000, 0x9ae: 0x3fe8, 0x9af: 0xa000, + 0x9b0: 0x3ff0, 0x9b1: 0xa000, 0x9b2: 0x3ff8, 0x9b3: 0xa000, 0x9b4: 0x4000, 0x9b5: 0xa000, + 0x9b6: 0x4008, 0x9b7: 0xa000, 0x9b8: 0x4010, 0x9b9: 0xa000, 0x9ba: 0x4018, 0x9bb: 0xa000, + 0x9bc: 0x4020, 0x9bd: 0xa000, 0x9be: 0x4028, 0x9bf: 0xa000, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x4030, 0x9c1: 0xa000, 0x9c2: 0x4038, 0x9c4: 0xa000, 0x9c5: 0x4040, + 0x9c6: 0xa000, 0x9c7: 0x4048, 0x9c8: 0xa000, 0x9c9: 0x4050, + 0x9cf: 0xa000, 0x9d0: 0x4058, 0x9d1: 0x4060, + 0x9d2: 0xa000, 0x9d3: 0x4068, 0x9d4: 0x4070, 0x9d5: 0xa000, 0x9d6: 0x4078, 0x9d7: 0x4080, + 0x9d8: 0xa000, 0x9d9: 0x4088, 0x9da: 0x4090, 0x9db: 0xa000, 0x9dc: 0x4098, 0x9dd: 0x40a0, + 0x9ef: 0xa000, + 0x9f0: 0xa000, 0x9f1: 0xa000, 0x9f2: 0xa000, 0x9f4: 0x3fd8, + 0x9f7: 0x40a8, 0x9f8: 0x40b0, 0x9f9: 0x40b8, 0x9fa: 0x40c0, + 0x9fd: 0xa000, 0x9fe: 0x40c8, 0x9ff: 0x26c9, + // Block 0x28, offset 0xa00 + 0xa00: 0x0367, 0xa01: 0x032b, 0xa02: 0x032f, 0xa03: 0x0333, 0xa04: 0x037b, 0xa05: 0x0337, + 0xa06: 0x033b, 0xa07: 0x033f, 0xa08: 0x0343, 0xa09: 0x0347, 0xa0a: 0x034b, 0xa0b: 0x034f, + 0xa0c: 0x0353, 0xa0d: 0x0357, 0xa0e: 0x035b, 0xa0f: 0x49bd, 0xa10: 0x49c3, 0xa11: 0x49c9, + 0xa12: 0x49cf, 0xa13: 0x49d5, 0xa14: 0x49db, 0xa15: 0x49e1, 0xa16: 0x49e7, 0xa17: 0x49ed, + 0xa18: 0x49f3, 0xa19: 0x49f9, 0xa1a: 0x49ff, 0xa1b: 0x4a05, 0xa1c: 0x4a0b, 0xa1d: 0x4a11, + 0xa1e: 0x4a17, 0xa1f: 0x4a1d, 0xa20: 0x4a23, 0xa21: 0x4a29, 0xa22: 0x4a2f, 0xa23: 0x4a35, + 0xa24: 0x03c3, 0xa25: 0x035f, 0xa26: 0x0363, 0xa27: 0x03e7, 0xa28: 0x03eb, 0xa29: 0x03ef, + 0xa2a: 0x03f3, 0xa2b: 0x03f7, 0xa2c: 0x03fb, 0xa2d: 0x03ff, 0xa2e: 0x036b, 0xa2f: 0x0403, + 0xa30: 0x0407, 0xa31: 0x036f, 0xa32: 0x0373, 0xa33: 0x0377, 0xa34: 0x037f, 0xa35: 0x0383, + 0xa36: 0x0387, 0xa37: 0x038b, 0xa38: 0x038f, 0xa39: 0x0393, 0xa3a: 0x0397, 0xa3b: 0x039b, + 0xa3c: 0x039f, 0xa3d: 0x03a3, 0xa3e: 0x03a7, 0xa3f: 0x03ab, + // Block 0x29, offset 0xa40 + 0xa40: 0x03af, 0xa41: 0x03b3, 0xa42: 0x040b, 0xa43: 0x040f, 0xa44: 0x03b7, 0xa45: 0x03bb, + 0xa46: 0x03bf, 0xa47: 0x03c7, 0xa48: 0x03cb, 0xa49: 0x03cf, 0xa4a: 0x03d3, 0xa4b: 0x03d7, + 0xa4c: 0x03db, 0xa4d: 0x03df, 0xa4e: 0x03e3, + 0xa52: 0x06bf, 0xa53: 0x071b, 0xa54: 0x06cb, 0xa55: 0x097b, 0xa56: 0x06cf, 0xa57: 0x06e7, + 0xa58: 0x06d3, 0xa59: 0x0f93, 0xa5a: 0x0707, 0xa5b: 0x06db, 0xa5c: 0x06c3, 0xa5d: 0x09ff, + 0xa5e: 0x098f, 0xa5f: 0x072f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x2054, 0xa81: 0x205a, 0xa82: 0x2060, 0xa83: 0x2066, 0xa84: 0x206c, 0xa85: 0x2072, + 0xa86: 0x2078, 0xa87: 0x207e, 0xa88: 0x2084, 0xa89: 0x208a, 0xa8a: 0x2090, 0xa8b: 0x2096, + 0xa8c: 0x209c, 0xa8d: 0x20a2, 0xa8e: 0x2726, 0xa8f: 0x272f, 0xa90: 0x2738, 0xa91: 0x2741, + 0xa92: 0x274a, 0xa93: 0x2753, 0xa94: 0x275c, 0xa95: 0x2765, 0xa96: 0x276e, 0xa97: 0x2780, + 0xa98: 0x2789, 0xa99: 0x2792, 0xa9a: 0x279b, 0xa9b: 0x27a4, 0xa9c: 0x2777, 0xa9d: 0x2bac, + 0xa9e: 0x2aed, 0xaa0: 0x20a8, 0xaa1: 0x20c0, 0xaa2: 0x20b4, 0xaa3: 0x2108, + 0xaa4: 0x20c6, 0xaa5: 0x20e4, 0xaa6: 0x20ae, 0xaa7: 0x20de, 0xaa8: 0x20ba, 0xaa9: 0x20f0, + 0xaaa: 0x2120, 0xaab: 0x213e, 0xaac: 0x2138, 0xaad: 0x212c, 0xaae: 0x217a, 0xaaf: 0x210e, + 0xab0: 0x211a, 0xab1: 0x2132, 0xab2: 0x2126, 0xab3: 0x2150, 0xab4: 0x20fc, 0xab5: 0x2144, + 0xab6: 0x216e, 0xab7: 0x2156, 0xab8: 0x20ea, 0xab9: 0x20cc, 0xaba: 0x2102, 0xabb: 0x2114, + 0xabc: 0x214a, 0xabd: 0x20d2, 0xabe: 0x2174, 0xabf: 0x20f6, + // Block 0x2b, offset 0xac0 + 0xac0: 0x215c, 0xac1: 0x20d8, 0xac2: 0x2162, 0xac3: 0x2168, 0xac4: 0x092f, 0xac5: 0x0b03, + 0xac6: 0x0ca7, 0xac7: 
0x10c7, + 0xad0: 0x1bc4, 0xad1: 0x18a9, + 0xad2: 0x18ac, 0xad3: 0x18af, 0xad4: 0x18b2, 0xad5: 0x18b5, 0xad6: 0x18b8, 0xad7: 0x18bb, + 0xad8: 0x18be, 0xad9: 0x18c1, 0xada: 0x18ca, 0xadb: 0x18cd, 0xadc: 0x18d0, 0xadd: 0x18d3, + 0xade: 0x18d6, 0xadf: 0x18d9, 0xae0: 0x0313, 0xae1: 0x031b, 0xae2: 0x031f, 0xae3: 0x0327, + 0xae4: 0x032b, 0xae5: 0x032f, 0xae6: 0x0337, 0xae7: 0x033f, 0xae8: 0x0343, 0xae9: 0x034b, + 0xaea: 0x034f, 0xaeb: 0x0353, 0xaec: 0x0357, 0xaed: 0x035b, 0xaee: 0x2e18, 0xaef: 0x2e20, + 0xaf0: 0x2e28, 0xaf1: 0x2e30, 0xaf2: 0x2e38, 0xaf3: 0x2e40, 0xaf4: 0x2e48, 0xaf5: 0x2e50, + 0xaf6: 0x2e60, 0xaf7: 0x2e68, 0xaf8: 0x2e70, 0xaf9: 0x2e78, 0xafa: 0x2e80, 0xafb: 0x2e88, + 0xafc: 0x2ed3, 0xafd: 0x2e9b, 0xafe: 0x2e58, + // Block 0x2c, offset 0xb00 + 0xb00: 0x06bf, 0xb01: 0x071b, 0xb02: 0x06cb, 0xb03: 0x097b, 0xb04: 0x071f, 0xb05: 0x07af, + 0xb06: 0x06c7, 0xb07: 0x07ab, 0xb08: 0x070b, 0xb09: 0x0887, 0xb0a: 0x0d07, 0xb0b: 0x0e8f, + 0xb0c: 0x0dd7, 0xb0d: 0x0d1b, 0xb0e: 0x145f, 0xb0f: 0x098b, 0xb10: 0x0ccf, 0xb11: 0x0d4b, + 0xb12: 0x0d0b, 0xb13: 0x104b, 0xb14: 0x08fb, 0xb15: 0x0f03, 0xb16: 0x1387, 0xb17: 0x105f, + 0xb18: 0x0843, 0xb19: 0x108f, 0xb1a: 0x0f9b, 0xb1b: 0x0a17, 0xb1c: 0x140f, 0xb1d: 0x077f, + 0xb1e: 0x08ab, 0xb1f: 0x0df7, 0xb20: 0x1527, 0xb21: 0x0743, 0xb22: 0x07d3, 0xb23: 0x0d9b, + 0xb24: 0x06cf, 0xb25: 0x06e7, 0xb26: 0x06d3, 0xb27: 0x0adb, 0xb28: 0x08ef, 0xb29: 0x087f, + 0xb2a: 0x0a57, 0xb2b: 0x0a4b, 0xb2c: 0x0feb, 0xb2d: 0x073f, 0xb2e: 0x139b, 0xb2f: 0x089b, + 0xb30: 0x09f3, 0xb31: 0x18dc, 0xb32: 0x18df, 0xb33: 0x18e2, 0xb34: 0x18e5, 0xb35: 0x18ee, + 0xb36: 0x18f1, 0xb37: 0x18f4, 0xb38: 0x18f7, 0xb39: 0x18fa, 0xb3a: 0x18fd, 0xb3b: 0x1900, + 0xb3c: 0x1903, 0xb3d: 0x1906, 0xb3e: 0x1909, 0xb3f: 0x1912, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1cc6, 0xb41: 0x1cd5, 0xb42: 0x1ce4, 0xb43: 0x1cf3, 0xb44: 0x1d02, 0xb45: 0x1d11, + 0xb46: 0x1d20, 0xb47: 0x1d2f, 0xb48: 0x1d3e, 0xb49: 0x218c, 0xb4a: 0x219e, 0xb4b: 0x21b0, + 0xb4c: 0x1954, 0xb4d: 0x1c04, 0xb4e: 0x19d2, 0xb4f: 0x1ba8, 0xb50: 0x04cb, 0xb51: 0x04d3, + 0xb52: 0x04db, 0xb53: 0x04e3, 0xb54: 0x04eb, 0xb55: 0x04ef, 0xb56: 0x04f3, 0xb57: 0x04f7, + 0xb58: 0x04fb, 0xb59: 0x04ff, 0xb5a: 0x0503, 0xb5b: 0x0507, 0xb5c: 0x050b, 0xb5d: 0x050f, + 0xb5e: 0x0513, 0xb5f: 0x0517, 0xb60: 0x051b, 0xb61: 0x0523, 0xb62: 0x0527, 0xb63: 0x052b, + 0xb64: 0x052f, 0xb65: 0x0533, 0xb66: 0x0537, 0xb67: 0x053b, 0xb68: 0x053f, 0xb69: 0x0543, + 0xb6a: 0x0547, 0xb6b: 0x054b, 0xb6c: 0x054f, 0xb6d: 0x0553, 0xb6e: 0x0557, 0xb6f: 0x055b, + 0xb70: 0x055f, 0xb71: 0x0563, 0xb72: 0x0567, 0xb73: 0x056f, 0xb74: 0x0577, 0xb75: 0x057f, + 0xb76: 0x0583, 0xb77: 0x0587, 0xb78: 0x058b, 0xb79: 0x058f, 0xb7a: 0x0593, 0xb7b: 0x0597, + 0xb7c: 0x059b, 0xb7d: 0x059f, 0xb7e: 0x05a3, + // Block 0x2e, offset 0xb80 + 0xb80: 0x2b0c, 0xb81: 0x29a8, 0xb82: 0x2b1c, 0xb83: 0x2880, 0xb84: 0x2ee4, 0xb85: 0x288a, + 0xb86: 0x2894, 0xb87: 0x2f28, 0xb88: 0x29b5, 0xb89: 0x289e, 0xb8a: 0x28a8, 0xb8b: 0x28b2, + 0xb8c: 0x29dc, 0xb8d: 0x29e9, 0xb8e: 0x29c2, 0xb8f: 0x29cf, 0xb90: 0x2ea9, 0xb91: 0x29f6, + 0xb92: 0x2a03, 0xb93: 0x2bbe, 0xb94: 0x26bb, 0xb95: 0x2bd1, 0xb96: 0x2be4, 0xb97: 0x2b2c, + 0xb98: 0x2a10, 0xb99: 0x2bf7, 0xb9a: 0x2c0a, 0xb9b: 0x2a1d, 0xb9c: 0x28bc, 0xb9d: 0x28c6, + 0xb9e: 0x2eb7, 0xb9f: 0x2a2a, 0xba0: 0x2b3c, 0xba1: 0x2ef5, 0xba2: 0x28d0, 0xba3: 0x28da, + 0xba4: 0x2a37, 0xba5: 0x28e4, 0xba6: 0x28ee, 0xba7: 0x26d0, 0xba8: 0x26d7, 0xba9: 0x28f8, + 0xbaa: 0x2902, 0xbab: 0x2c1d, 0xbac: 0x2a44, 0xbad: 0x2b4c, 0xbae: 0x2c30, 0xbaf: 0x2a51, + 0xbb0: 0x2916, 0xbb1: 0x290c, 0xbb2: 0x2f3c, 
0xbb3: 0x2a5e, 0xbb4: 0x2c43, 0xbb5: 0x2920, + 0xbb6: 0x2b5c, 0xbb7: 0x292a, 0xbb8: 0x2a78, 0xbb9: 0x2934, 0xbba: 0x2a85, 0xbbb: 0x2f06, + 0xbbc: 0x2a6b, 0xbbd: 0x2b6c, 0xbbe: 0x2a92, 0xbbf: 0x26de, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2f17, 0xbc1: 0x293e, 0xbc2: 0x2948, 0xbc3: 0x2a9f, 0xbc4: 0x2952, 0xbc5: 0x295c, + 0xbc6: 0x2966, 0xbc7: 0x2b7c, 0xbc8: 0x2aac, 0xbc9: 0x26e5, 0xbca: 0x2c56, 0xbcb: 0x2e90, + 0xbcc: 0x2b8c, 0xbcd: 0x2ab9, 0xbce: 0x2ec5, 0xbcf: 0x2970, 0xbd0: 0x297a, 0xbd1: 0x2ac6, + 0xbd2: 0x26ec, 0xbd3: 0x2ad3, 0xbd4: 0x2b9c, 0xbd5: 0x26f3, 0xbd6: 0x2c69, 0xbd7: 0x2984, + 0xbd8: 0x1cb7, 0xbd9: 0x1ccb, 0xbda: 0x1cda, 0xbdb: 0x1ce9, 0xbdc: 0x1cf8, 0xbdd: 0x1d07, + 0xbde: 0x1d16, 0xbdf: 0x1d25, 0xbe0: 0x1d34, 0xbe1: 0x1d43, 0xbe2: 0x2192, 0xbe3: 0x21a4, + 0xbe4: 0x21b6, 0xbe5: 0x21c2, 0xbe6: 0x21ce, 0xbe7: 0x21da, 0xbe8: 0x21e6, 0xbe9: 0x21f2, + 0xbea: 0x21fe, 0xbeb: 0x220a, 0xbec: 0x2246, 0xbed: 0x2252, 0xbee: 0x225e, 0xbef: 0x226a, + 0xbf0: 0x2276, 0xbf1: 0x1c14, 0xbf2: 0x19c6, 0xbf3: 0x1936, 0xbf4: 0x1be4, 0xbf5: 0x1a47, + 0xbf6: 0x1a56, 0xbf7: 0x19cc, 0xbf8: 0x1bfc, 0xbf9: 0x1c00, 0xbfa: 0x1960, 0xbfb: 0x2701, + 0xbfc: 0x270f, 0xbfd: 0x26fa, 0xbfe: 0x2708, 0xbff: 0x2ae0, + // Block 0x30, offset 0xc00 + 0xc00: 0x1a4a, 0xc01: 0x1a32, 0xc02: 0x1c60, 0xc03: 0x1a1a, 0xc04: 0x19f3, 0xc05: 0x1969, + 0xc06: 0x1978, 0xc07: 0x1948, 0xc08: 0x1bf0, 0xc09: 0x1d52, 0xc0a: 0x1a4d, 0xc0b: 0x1a35, + 0xc0c: 0x1c64, 0xc0d: 0x1c70, 0xc0e: 0x1a26, 0xc0f: 0x19fc, 0xc10: 0x1957, 0xc11: 0x1c1c, + 0xc12: 0x1bb0, 0xc13: 0x1b9c, 0xc14: 0x1bcc, 0xc15: 0x1c74, 0xc16: 0x1a29, 0xc17: 0x19c9, + 0xc18: 0x19ff, 0xc19: 0x19de, 0xc1a: 0x1a41, 0xc1b: 0x1c78, 0xc1c: 0x1a2c, 0xc1d: 0x19c0, + 0xc1e: 0x1a02, 0xc1f: 0x1c3c, 0xc20: 0x1bf4, 0xc21: 0x1a14, 0xc22: 0x1c24, 0xc23: 0x1c40, + 0xc24: 0x1bf8, 0xc25: 0x1a17, 0xc26: 0x1c28, 0xc27: 0x22e8, 0xc28: 0x22fc, 0xc29: 0x1996, + 0xc2a: 0x1c20, 0xc2b: 0x1bb4, 0xc2c: 0x1ba0, 0xc2d: 0x1c48, 0xc2e: 0x2716, 0xc2f: 0x27ad, + 0xc30: 0x1a59, 0xc31: 0x1a44, 0xc32: 0x1c7c, 0xc33: 0x1a2f, 0xc34: 0x1a50, 0xc35: 0x1a38, + 0xc36: 0x1c68, 0xc37: 0x1a1d, 0xc38: 0x19f6, 0xc39: 0x1981, 0xc3a: 0x1a53, 0xc3b: 0x1a3b, + 0xc3c: 0x1c6c, 0xc3d: 0x1a20, 0xc3e: 0x19f9, 0xc3f: 0x1984, + // Block 0x31, offset 0xc40 + 0xc40: 0x1c2c, 0xc41: 0x1bb8, 0xc42: 0x1d4d, 0xc43: 0x1939, 0xc44: 0x19ba, 0xc45: 0x19bd, + 0xc46: 0x22f5, 0xc47: 0x1b94, 0xc48: 0x19c3, 0xc49: 0x194b, 0xc4a: 0x19e1, 0xc4b: 0x194e, + 0xc4c: 0x19ea, 0xc4d: 0x196c, 0xc4e: 0x196f, 0xc4f: 0x1a05, 0xc50: 0x1a0b, 0xc51: 0x1a0e, + 0xc52: 0x1c30, 0xc53: 0x1a11, 0xc54: 0x1a23, 0xc55: 0x1c38, 0xc56: 0x1c44, 0xc57: 0x1990, + 0xc58: 0x1d57, 0xc59: 0x1bbc, 0xc5a: 0x1993, 0xc5b: 0x1a5c, 0xc5c: 0x19a5, 0xc5d: 0x19b4, + 0xc5e: 0x22e2, 0xc5f: 0x22dc, 0xc60: 0x1cc1, 0xc61: 0x1cd0, 0xc62: 0x1cdf, 0xc63: 0x1cee, + 0xc64: 0x1cfd, 0xc65: 0x1d0c, 0xc66: 0x1d1b, 0xc67: 0x1d2a, 0xc68: 0x1d39, 0xc69: 0x2186, + 0xc6a: 0x2198, 0xc6b: 0x21aa, 0xc6c: 0x21bc, 0xc6d: 0x21c8, 0xc6e: 0x21d4, 0xc6f: 0x21e0, + 0xc70: 0x21ec, 0xc71: 0x21f8, 0xc72: 0x2204, 0xc73: 0x2240, 0xc74: 0x224c, 0xc75: 0x2258, + 0xc76: 0x2264, 0xc77: 0x2270, 0xc78: 0x227c, 0xc79: 0x2282, 0xc7a: 0x2288, 0xc7b: 0x228e, + 0xc7c: 0x2294, 0xc7d: 0x22a6, 0xc7e: 0x22ac, 0xc7f: 0x1c10, + // Block 0x32, offset 0xc80 + 0xc80: 0x1377, 0xc81: 0x0cfb, 0xc82: 0x13d3, 0xc83: 0x139f, 0xc84: 0x0e57, 0xc85: 0x06eb, + 0xc86: 0x08df, 0xc87: 0x162b, 0xc88: 0x162b, 0xc89: 0x0a0b, 0xc8a: 0x145f, 0xc8b: 0x0943, + 0xc8c: 0x0a07, 0xc8d: 0x0bef, 0xc8e: 0x0fcf, 0xc8f: 0x115f, 0xc90: 0x1297, 0xc91: 0x12d3, + 0xc92: 
0x1307, 0xc93: 0x141b, 0xc94: 0x0d73, 0xc95: 0x0dff, 0xc96: 0x0eab, 0xc97: 0x0f43, + 0xc98: 0x125f, 0xc99: 0x1447, 0xc9a: 0x1573, 0xc9b: 0x070f, 0xc9c: 0x08b3, 0xc9d: 0x0d87, + 0xc9e: 0x0ecf, 0xc9f: 0x1293, 0xca0: 0x15c3, 0xca1: 0x0ab3, 0xca2: 0x0e77, 0xca3: 0x1283, + 0xca4: 0x1317, 0xca5: 0x0c23, 0xca6: 0x11bb, 0xca7: 0x12df, 0xca8: 0x0b1f, 0xca9: 0x0d0f, + 0xcaa: 0x0e17, 0xcab: 0x0f1b, 0xcac: 0x1427, 0xcad: 0x074f, 0xcae: 0x07e7, 0xcaf: 0x0853, + 0xcb0: 0x0c8b, 0xcb1: 0x0d7f, 0xcb2: 0x0ecb, 0xcb3: 0x0fef, 0xcb4: 0x1177, 0xcb5: 0x128b, + 0xcb6: 0x12a3, 0xcb7: 0x13c7, 0xcb8: 0x14ef, 0xcb9: 0x15a3, 0xcba: 0x15bf, 0xcbb: 0x102b, + 0xcbc: 0x106b, 0xcbd: 0x1123, 0xcbe: 0x1243, 0xcbf: 0x147b, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x15cb, 0xcc1: 0x134b, 0xcc2: 0x09c7, 0xcc3: 0x0b3b, 0xcc4: 0x10db, 0xcc5: 0x119b, + 0xcc6: 0x0eff, 0xcc7: 0x1033, 0xcc8: 0x1397, 0xcc9: 0x14e7, 0xcca: 0x09c3, 0xccb: 0x0a8f, + 0xccc: 0x0d77, 0xccd: 0x0e2b, 0xcce: 0x0e5f, 0xccf: 0x1113, 0xcd0: 0x113b, 0xcd1: 0x14a7, + 0xcd2: 0x084f, 0xcd3: 0x11a7, 0xcd4: 0x07f3, 0xcd5: 0x07ef, 0xcd6: 0x1097, 0xcd7: 0x1127, + 0xcd8: 0x125b, 0xcd9: 0x14af, 0xcda: 0x1367, 0xcdb: 0x0c27, 0xcdc: 0x0d73, 0xcdd: 0x1357, + 0xcde: 0x06f7, 0xcdf: 0x0a63, 0xce0: 0x0b93, 0xce1: 0x0f2f, 0xce2: 0x0faf, 0xce3: 0x0873, + 0xce4: 0x103b, 0xce5: 0x075f, 0xce6: 0x0b77, 0xce7: 0x06d7, 0xce8: 0x0deb, 0xce9: 0x0ca3, + 0xcea: 0x110f, 0xceb: 0x08c7, 0xcec: 0x09b3, 0xced: 0x0ffb, 0xcee: 0x1263, 0xcef: 0x133b, + 0xcf0: 0x0db7, 0xcf1: 0x13f7, 0xcf2: 0x0de3, 0xcf3: 0x0c37, 0xcf4: 0x121b, 0xcf5: 0x0c57, + 0xcf6: 0x0fab, 0xcf7: 0x072b, 0xcf8: 0x07a7, 0xcf9: 0x07eb, 0xcfa: 0x0d53, 0xcfb: 0x10fb, + 0xcfc: 0x11f3, 0xcfd: 0x1347, 0xcfe: 0x145b, 0xcff: 0x085b, + // Block 0x34, offset 0xd00 + 0xd00: 0x090f, 0xd01: 0x0a17, 0xd02: 0x0b2f, 0xd03: 0x0cbf, 0xd04: 0x0e7b, 0xd05: 0x103f, + 0xd06: 0x1497, 0xd07: 0x157b, 0xd08: 0x15cf, 0xd09: 0x15e7, 0xd0a: 0x0837, 0xd0b: 0x0cf3, + 0xd0c: 0x0da3, 0xd0d: 0x13eb, 0xd0e: 0x0afb, 0xd0f: 0x0bd7, 0xd10: 0x0bf3, 0xd11: 0x0c83, + 0xd12: 0x0e6b, 0xd13: 0x0eb7, 0xd14: 0x0f67, 0xd15: 0x108b, 0xd16: 0x112f, 0xd17: 0x1193, + 0xd18: 0x13db, 0xd19: 0x126b, 0xd1a: 0x1403, 0xd1b: 0x147f, 0xd1c: 0x080f, 0xd1d: 0x083b, + 0xd1e: 0x0923, 0xd1f: 0x0ea7, 0xd20: 0x12f3, 0xd21: 0x133b, 0xd22: 0x0b1b, 0xd23: 0x0b8b, + 0xd24: 0x0c4f, 0xd25: 0x0daf, 0xd26: 0x10d7, 0xd27: 0x0f23, 0xd28: 0x073b, 0xd29: 0x097f, + 0xd2a: 0x0a63, 0xd2b: 0x0ac7, 0xd2c: 0x0b97, 0xd2d: 0x0f3f, 0xd2e: 0x0f5b, 0xd2f: 0x116b, + 0xd30: 0x118b, 0xd31: 0x1463, 0xd32: 0x14e3, 0xd33: 0x14f3, 0xd34: 0x152f, 0xd35: 0x0753, + 0xd36: 0x107f, 0xd37: 0x144f, 0xd38: 0x14cb, 0xd39: 0x0baf, 0xd3a: 0x0717, 0xd3b: 0x0777, + 0xd3c: 0x0a67, 0xd3d: 0x0a87, 0xd3e: 0x0caf, 0xd3f: 0x0d73, + // Block 0x35, offset 0xd40 + 0xd40: 0x0ec3, 0xd41: 0x0fcb, 0xd42: 0x1277, 0xd43: 0x1417, 0xd44: 0x1623, 0xd45: 0x0ce3, + 0xd46: 0x14a3, 0xd47: 0x0833, 0xd48: 0x0d2f, 0xd49: 0x0d3b, 0xd4a: 0x0e0f, 0xd4b: 0x0e47, + 0xd4c: 0x0f4b, 0xd4d: 0x0fa7, 0xd4e: 0x1027, 0xd4f: 0x110b, 0xd50: 0x153b, 0xd51: 0x07af, + 0xd52: 0x0c03, 0xd53: 0x14b3, 0xd54: 0x0767, 0xd55: 0x0aab, 0xd56: 0x0e2f, 0xd57: 0x13df, + 0xd58: 0x0b67, 0xd59: 0x0bb7, 0xd5a: 0x0d43, 0xd5b: 0x0f2f, 0xd5c: 0x14bb, 0xd5d: 0x0817, + 0xd5e: 0x08ff, 0xd5f: 0x0a97, 0xd60: 0x0cd3, 0xd61: 0x0d1f, 0xd62: 0x0d5f, 0xd63: 0x0df3, + 0xd64: 0x0f47, 0xd65: 0x0fbb, 0xd66: 0x1157, 0xd67: 0x12f7, 0xd68: 0x1303, 0xd69: 0x1457, + 0xd6a: 0x14d7, 0xd6b: 0x0883, 0xd6c: 0x0e4b, 0xd6d: 0x0903, 0xd6e: 0x0ec7, 0xd6f: 0x0f6b, + 0xd70: 0x1287, 0xd71: 0x14bf, 0xd72: 0x15ab, 0xd73: 0x15d3, 
0xd74: 0x0d37, 0xd75: 0x0e27, + 0xd76: 0x11c3, 0xd77: 0x10b7, 0xd78: 0x10c3, 0xd79: 0x10e7, 0xd7a: 0x0f17, 0xd7b: 0x0e9f, + 0xd7c: 0x1363, 0xd7d: 0x0733, 0xd7e: 0x122b, 0xd7f: 0x081b, + // Block 0x36, offset 0xd80 + 0xd80: 0x080b, 0xd81: 0x0b0b, 0xd82: 0x0c2b, 0xd83: 0x10f3, 0xd84: 0x0a53, 0xd85: 0x0e03, + 0xd86: 0x0cef, 0xd87: 0x13e7, 0xd88: 0x12e7, 0xd89: 0x14ab, 0xd8a: 0x1323, 0xd8b: 0x0b27, + 0xd8c: 0x0787, 0xd8d: 0x095b, 0xd90: 0x09af, + 0xd92: 0x0cdf, 0xd95: 0x07f7, 0xd96: 0x0f1f, 0xd97: 0x0fe3, + 0xd98: 0x1047, 0xd99: 0x1063, 0xd9a: 0x1067, 0xd9b: 0x107b, 0xd9c: 0x14fb, 0xd9d: 0x10eb, + 0xd9e: 0x116f, 0xda0: 0x128f, 0xda2: 0x1353, + 0xda5: 0x1407, 0xda6: 0x1433, + 0xdaa: 0x154f, 0xdab: 0x1553, 0xdac: 0x1557, 0xdad: 0x15bb, 0xdae: 0x142b, 0xdaf: 0x14c7, + 0xdb0: 0x0757, 0xdb1: 0x077b, 0xdb2: 0x078f, 0xdb3: 0x084b, 0xdb4: 0x0857, 0xdb5: 0x0897, + 0xdb6: 0x094b, 0xdb7: 0x0967, 0xdb8: 0x096f, 0xdb9: 0x09ab, 0xdba: 0x09b7, 0xdbb: 0x0a93, + 0xdbc: 0x0a9b, 0xdbd: 0x0ba3, 0xdbe: 0x0bcb, 0xdbf: 0x0bd3, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0beb, 0xdc1: 0x0c97, 0xdc2: 0x0cc7, 0xdc3: 0x0ce7, 0xdc4: 0x0d57, 0xdc5: 0x0e1b, + 0xdc6: 0x0e37, 0xdc7: 0x0e67, 0xdc8: 0x0ebb, 0xdc9: 0x0edb, 0xdca: 0x0f4f, 0xdcb: 0x102f, + 0xdcc: 0x104b, 0xdcd: 0x1053, 0xdce: 0x104f, 0xdcf: 0x1057, 0xdd0: 0x105b, 0xdd1: 0x105f, + 0xdd2: 0x1073, 0xdd3: 0x1077, 0xdd4: 0x109b, 0xdd5: 0x10af, 0xdd6: 0x10cb, 0xdd7: 0x112f, + 0xdd8: 0x1137, 0xdd9: 0x113f, 0xdda: 0x1153, 0xddb: 0x117b, 0xddc: 0x11cb, 0xddd: 0x11ff, + 0xdde: 0x11ff, 0xddf: 0x1267, 0xde0: 0x130f, 0xde1: 0x1327, 0xde2: 0x135b, 0xde3: 0x135f, + 0xde4: 0x13a3, 0xde5: 0x13a7, 0xde6: 0x13ff, 0xde7: 0x1407, 0xde8: 0x14db, 0xde9: 0x151f, + 0xdea: 0x1537, 0xdeb: 0x0b9b, 0xdec: 0x171e, 0xded: 0x11e3, + 0xdf0: 0x06df, 0xdf1: 0x07e3, 0xdf2: 0x07a3, 0xdf3: 0x074b, 0xdf4: 0x078b, 0xdf5: 0x07b7, + 0xdf6: 0x0847, 0xdf7: 0x0863, 0xdf8: 0x094b, 0xdf9: 0x0937, 0xdfa: 0x0947, 0xdfb: 0x0963, + 0xdfc: 0x09af, 0xdfd: 0x09bf, 0xdfe: 0x0a03, 0xdff: 0x0a0f, + // Block 0x38, offset 0xe00 + 0xe00: 0x0a2b, 0xe01: 0x0a3b, 0xe02: 0x0b23, 0xe03: 0x0b2b, 0xe04: 0x0b5b, 0xe05: 0x0b7b, + 0xe06: 0x0bab, 0xe07: 0x0bc3, 0xe08: 0x0bb3, 0xe09: 0x0bd3, 0xe0a: 0x0bc7, 0xe0b: 0x0beb, + 0xe0c: 0x0c07, 0xe0d: 0x0c5f, 0xe0e: 0x0c6b, 0xe0f: 0x0c73, 0xe10: 0x0c9b, 0xe11: 0x0cdf, + 0xe12: 0x0d0f, 0xe13: 0x0d13, 0xe14: 0x0d27, 0xe15: 0x0da7, 0xe16: 0x0db7, 0xe17: 0x0e0f, + 0xe18: 0x0e5b, 0xe19: 0x0e53, 0xe1a: 0x0e67, 0xe1b: 0x0e83, 0xe1c: 0x0ebb, 0xe1d: 0x1013, + 0xe1e: 0x0edf, 0xe1f: 0x0f13, 0xe20: 0x0f1f, 0xe21: 0x0f5f, 0xe22: 0x0f7b, 0xe23: 0x0f9f, + 0xe24: 0x0fc3, 0xe25: 0x0fc7, 0xe26: 0x0fe3, 0xe27: 0x0fe7, 0xe28: 0x0ff7, 0xe29: 0x100b, + 0xe2a: 0x1007, 0xe2b: 0x1037, 0xe2c: 0x10b3, 0xe2d: 0x10cb, 0xe2e: 0x10e3, 0xe2f: 0x111b, + 0xe30: 0x112f, 0xe31: 0x114b, 0xe32: 0x117b, 0xe33: 0x122f, 0xe34: 0x1257, 0xe35: 0x12cb, + 0xe36: 0x1313, 0xe37: 0x131f, 0xe38: 0x1327, 0xe39: 0x133f, 0xe3a: 0x1353, 0xe3b: 0x1343, + 0xe3c: 0x135b, 0xe3d: 0x1357, 0xe3e: 0x134f, 0xe3f: 0x135f, + // Block 0x39, offset 0xe40 + 0xe40: 0x136b, 0xe41: 0x13a7, 0xe42: 0x13e3, 0xe43: 0x1413, 0xe44: 0x144b, 0xe45: 0x146b, + 0xe46: 0x14b7, 0xe47: 0x14db, 0xe48: 0x14fb, 0xe49: 0x150f, 0xe4a: 0x151f, 0xe4b: 0x152b, + 0xe4c: 0x1537, 0xe4d: 0x158b, 0xe4e: 0x162b, 0xe4f: 0x16b5, 0xe50: 0x16b0, 0xe51: 0x16e2, + 0xe52: 0x0607, 0xe53: 0x062f, 0xe54: 0x0633, 0xe55: 0x1764, 0xe56: 0x1791, 0xe57: 0x1809, + 0xe58: 0x1617, 0xe59: 0x1627, + // Block 0x3a, offset 0xe80 + 0xe80: 0x19d5, 0xe81: 0x19d8, 0xe82: 0x19db, 0xe83: 0x1c08, 0xe84: 0x1c0c, 0xe85: 
0x1a5f, + 0xe86: 0x1a5f, + 0xe93: 0x1d75, 0xe94: 0x1d66, 0xe95: 0x1d6b, 0xe96: 0x1d7a, 0xe97: 0x1d70, + 0xe9d: 0x4390, + 0xe9e: 0x8115, 0xe9f: 0x4402, 0xea0: 0x022d, 0xea1: 0x0215, 0xea2: 0x021e, 0xea3: 0x0221, + 0xea4: 0x0224, 0xea5: 0x0227, 0xea6: 0x022a, 0xea7: 0x0230, 0xea8: 0x0233, 0xea9: 0x0017, + 0xeaa: 0x43f0, 0xeab: 0x43f6, 0xeac: 0x44f4, 0xead: 0x44fc, 0xeae: 0x4348, 0xeaf: 0x434e, + 0xeb0: 0x4354, 0xeb1: 0x435a, 0xeb2: 0x4366, 0xeb3: 0x436c, 0xeb4: 0x4372, 0xeb5: 0x437e, + 0xeb6: 0x4384, 0xeb8: 0x438a, 0xeb9: 0x4396, 0xeba: 0x439c, 0xebb: 0x43a2, + 0xebc: 0x43ae, 0xebe: 0x43b4, + // Block 0x3b, offset 0xec0 + 0xec0: 0x43ba, 0xec1: 0x43c0, 0xec3: 0x43c6, 0xec4: 0x43cc, + 0xec6: 0x43d8, 0xec7: 0x43de, 0xec8: 0x43e4, 0xec9: 0x43ea, 0xeca: 0x43fc, 0xecb: 0x4378, + 0xecc: 0x4360, 0xecd: 0x43a8, 0xece: 0x43d2, 0xecf: 0x1d7f, 0xed0: 0x0299, 0xed1: 0x0299, + 0xed2: 0x02a2, 0xed3: 0x02a2, 0xed4: 0x02a2, 0xed5: 0x02a2, 0xed6: 0x02a5, 0xed7: 0x02a5, + 0xed8: 0x02a5, 0xed9: 0x02a5, 0xeda: 0x02ab, 0xedb: 0x02ab, 0xedc: 0x02ab, 0xedd: 0x02ab, + 0xede: 0x029f, 0xedf: 0x029f, 0xee0: 0x029f, 0xee1: 0x029f, 0xee2: 0x02a8, 0xee3: 0x02a8, + 0xee4: 0x02a8, 0xee5: 0x02a8, 0xee6: 0x029c, 0xee7: 0x029c, 0xee8: 0x029c, 0xee9: 0x029c, + 0xeea: 0x02cf, 0xeeb: 0x02cf, 0xeec: 0x02cf, 0xeed: 0x02cf, 0xeee: 0x02d2, 0xeef: 0x02d2, + 0xef0: 0x02d2, 0xef1: 0x02d2, 0xef2: 0x02b1, 0xef3: 0x02b1, 0xef4: 0x02b1, 0xef5: 0x02b1, + 0xef6: 0x02ae, 0xef7: 0x02ae, 0xef8: 0x02ae, 0xef9: 0x02ae, 0xefa: 0x02b4, 0xefb: 0x02b4, + 0xefc: 0x02b4, 0xefd: 0x02b4, 0xefe: 0x02b7, 0xeff: 0x02b7, + // Block 0x3c, offset 0xf00 + 0xf00: 0x02b7, 0xf01: 0x02b7, 0xf02: 0x02c0, 0xf03: 0x02c0, 0xf04: 0x02bd, 0xf05: 0x02bd, + 0xf06: 0x02c3, 0xf07: 0x02c3, 0xf08: 0x02ba, 0xf09: 0x02ba, 0xf0a: 0x02c9, 0xf0b: 0x02c9, + 0xf0c: 0x02c6, 0xf0d: 0x02c6, 0xf0e: 0x02d5, 0xf0f: 0x02d5, 0xf10: 0x02d5, 0xf11: 0x02d5, + 0xf12: 0x02db, 0xf13: 0x02db, 0xf14: 0x02db, 0xf15: 0x02db, 0xf16: 0x02e1, 0xf17: 0x02e1, + 0xf18: 0x02e1, 0xf19: 0x02e1, 0xf1a: 0x02de, 0xf1b: 0x02de, 0xf1c: 0x02de, 0xf1d: 0x02de, + 0xf1e: 0x02e4, 0xf1f: 0x02e4, 0xf20: 0x02e7, 0xf21: 0x02e7, 0xf22: 0x02e7, 0xf23: 0x02e7, + 0xf24: 0x446e, 0xf25: 0x446e, 0xf26: 0x02ed, 0xf27: 0x02ed, 0xf28: 0x02ed, 0xf29: 0x02ed, + 0xf2a: 0x02ea, 0xf2b: 0x02ea, 0xf2c: 0x02ea, 0xf2d: 0x02ea, 0xf2e: 0x0308, 0xf2f: 0x0308, + 0xf30: 0x4468, 0xf31: 0x4468, + // Block 0x3d, offset 0xf40 + 0xf53: 0x02d8, 0xf54: 0x02d8, 0xf55: 0x02d8, 0xf56: 0x02d8, 0xf57: 0x02f6, + 0xf58: 0x02f6, 0xf59: 0x02f3, 0xf5a: 0x02f3, 0xf5b: 0x02f9, 0xf5c: 0x02f9, 0xf5d: 0x204f, + 0xf5e: 0x02ff, 0xf5f: 0x02ff, 0xf60: 0x02f0, 0xf61: 0x02f0, 0xf62: 0x02fc, 0xf63: 0x02fc, + 0xf64: 0x0305, 0xf65: 0x0305, 0xf66: 0x0305, 0xf67: 0x0305, 0xf68: 0x028d, 0xf69: 0x028d, + 0xf6a: 0x25aa, 0xf6b: 0x25aa, 0xf6c: 0x261a, 0xf6d: 0x261a, 0xf6e: 0x25e9, 0xf6f: 0x25e9, + 0xf70: 0x2605, 0xf71: 0x2605, 0xf72: 0x25fe, 0xf73: 0x25fe, 0xf74: 0x260c, 0xf75: 0x260c, + 0xf76: 0x2613, 0xf77: 0x2613, 0xf78: 0x2613, 0xf79: 0x25f0, 0xf7a: 0x25f0, 0xf7b: 0x25f0, + 0xf7c: 0x0302, 0xf7d: 0x0302, 0xf7e: 0x0302, 0xf7f: 0x0302, + // Block 0x3e, offset 0xf80 + 0xf80: 0x25b1, 0xf81: 0x25b8, 0xf82: 0x25d4, 0xf83: 0x25f0, 0xf84: 0x25f7, 0xf85: 0x1d89, + 0xf86: 0x1d8e, 0xf87: 0x1d93, 0xf88: 0x1da2, 0xf89: 0x1db1, 0xf8a: 0x1db6, 0xf8b: 0x1dbb, + 0xf8c: 0x1dc0, 0xf8d: 0x1dc5, 0xf8e: 0x1dd4, 0xf8f: 0x1de3, 0xf90: 0x1de8, 0xf91: 0x1ded, + 0xf92: 0x1dfc, 0xf93: 0x1e0b, 0xf94: 0x1e10, 0xf95: 0x1e15, 0xf96: 0x1e1a, 0xf97: 0x1e29, + 0xf98: 0x1e2e, 0xf99: 0x1e3d, 0xf9a: 0x1e42, 0xf9b: 
0x1e47, 0xf9c: 0x1e56, 0xf9d: 0x1e5b, + 0xf9e: 0x1e60, 0xf9f: 0x1e6a, 0xfa0: 0x1ea6, 0xfa1: 0x1eb5, 0xfa2: 0x1ec4, 0xfa3: 0x1ec9, + 0xfa4: 0x1ece, 0xfa5: 0x1ed8, 0xfa6: 0x1ee7, 0xfa7: 0x1eec, 0xfa8: 0x1efb, 0xfa9: 0x1f00, + 0xfaa: 0x1f05, 0xfab: 0x1f14, 0xfac: 0x1f19, 0xfad: 0x1f28, 0xfae: 0x1f2d, 0xfaf: 0x1f32, + 0xfb0: 0x1f37, 0xfb1: 0x1f3c, 0xfb2: 0x1f41, 0xfb3: 0x1f46, 0xfb4: 0x1f4b, 0xfb5: 0x1f50, + 0xfb6: 0x1f55, 0xfb7: 0x1f5a, 0xfb8: 0x1f5f, 0xfb9: 0x1f64, 0xfba: 0x1f69, 0xfbb: 0x1f6e, + 0xfbc: 0x1f73, 0xfbd: 0x1f78, 0xfbe: 0x1f7d, 0xfbf: 0x1f87, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x1f8c, 0xfc1: 0x1f91, 0xfc2: 0x1f96, 0xfc3: 0x1fa0, 0xfc4: 0x1fa5, 0xfc5: 0x1faf, + 0xfc6: 0x1fb4, 0xfc7: 0x1fb9, 0xfc8: 0x1fbe, 0xfc9: 0x1fc3, 0xfca: 0x1fc8, 0xfcb: 0x1fcd, + 0xfcc: 0x1fd2, 0xfcd: 0x1fd7, 0xfce: 0x1fe6, 0xfcf: 0x1ff5, 0xfd0: 0x1ffa, 0xfd1: 0x1fff, + 0xfd2: 0x2004, 0xfd3: 0x2009, 0xfd4: 0x200e, 0xfd5: 0x2018, 0xfd6: 0x201d, 0xfd7: 0x2022, + 0xfd8: 0x2031, 0xfd9: 0x2040, 0xfda: 0x2045, 0xfdb: 0x4420, 0xfdc: 0x4426, 0xfdd: 0x445c, + 0xfde: 0x44b3, 0xfdf: 0x44ba, 0xfe0: 0x44c1, 0xfe1: 0x44c8, 0xfe2: 0x44cf, 0xfe3: 0x44d6, + 0xfe4: 0x25c6, 0xfe5: 0x25cd, 0xfe6: 0x25d4, 0xfe7: 0x25db, 0xfe8: 0x25f0, 0xfe9: 0x25f7, + 0xfea: 0x1d98, 0xfeb: 0x1d9d, 0xfec: 0x1da2, 0xfed: 0x1da7, 0xfee: 0x1db1, 0xfef: 0x1db6, + 0xff0: 0x1dca, 0xff1: 0x1dcf, 0xff2: 0x1dd4, 0xff3: 0x1dd9, 0xff4: 0x1de3, 0xff5: 0x1de8, + 0xff6: 0x1df2, 0xff7: 0x1df7, 0xff8: 0x1dfc, 0xff9: 0x1e01, 0xffa: 0x1e0b, 0xffb: 0x1e10, + 0xffc: 0x1f3c, 0xffd: 0x1f41, 0xffe: 0x1f50, 0xfff: 0x1f55, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f5a, 0x1001: 0x1f6e, 0x1002: 0x1f73, 0x1003: 0x1f78, 0x1004: 0x1f7d, 0x1005: 0x1f96, + 0x1006: 0x1fa0, 0x1007: 0x1fa5, 0x1008: 0x1faa, 0x1009: 0x1fbe, 0x100a: 0x1fdc, 0x100b: 0x1fe1, + 0x100c: 0x1fe6, 0x100d: 0x1feb, 0x100e: 0x1ff5, 0x100f: 0x1ffa, 0x1010: 0x445c, 0x1011: 0x2027, + 0x1012: 0x202c, 0x1013: 0x2031, 0x1014: 0x2036, 0x1015: 0x2040, 0x1016: 0x2045, 0x1017: 0x25b1, + 0x1018: 0x25b8, 0x1019: 0x25bf, 0x101a: 0x25d4, 0x101b: 0x25e2, 0x101c: 0x1d89, 0x101d: 0x1d8e, + 0x101e: 0x1d93, 0x101f: 0x1da2, 0x1020: 0x1dac, 0x1021: 0x1dbb, 0x1022: 0x1dc0, 0x1023: 0x1dc5, + 0x1024: 0x1dd4, 0x1025: 0x1dde, 0x1026: 0x1dfc, 0x1027: 0x1e15, 0x1028: 0x1e1a, 0x1029: 0x1e29, + 0x102a: 0x1e2e, 0x102b: 0x1e3d, 0x102c: 0x1e47, 0x102d: 0x1e56, 0x102e: 0x1e5b, 0x102f: 0x1e60, + 0x1030: 0x1e6a, 0x1031: 0x1ea6, 0x1032: 0x1eab, 0x1033: 0x1eb5, 0x1034: 0x1ec4, 0x1035: 0x1ec9, + 0x1036: 0x1ece, 0x1037: 0x1ed8, 0x1038: 0x1ee7, 0x1039: 0x1efb, 0x103a: 0x1f00, 0x103b: 0x1f05, + 0x103c: 0x1f14, 0x103d: 0x1f19, 0x103e: 0x1f28, 0x103f: 0x1f2d, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f32, 0x1041: 0x1f37, 0x1042: 0x1f46, 0x1043: 0x1f4b, 0x1044: 0x1f5f, 0x1045: 0x1f64, + 0x1046: 0x1f69, 0x1047: 0x1f6e, 0x1048: 0x1f73, 0x1049: 0x1f87, 0x104a: 0x1f8c, 0x104b: 0x1f91, + 0x104c: 0x1f96, 0x104d: 0x1f9b, 0x104e: 0x1faf, 0x104f: 0x1fb4, 0x1050: 0x1fb9, 0x1051: 0x1fbe, + 0x1052: 0x1fcd, 0x1053: 0x1fd2, 0x1054: 0x1fd7, 0x1055: 0x1fe6, 0x1056: 0x1ff0, 0x1057: 0x1fff, + 0x1058: 0x2004, 0x1059: 0x4450, 0x105a: 0x2018, 0x105b: 0x201d, 0x105c: 0x2022, 0x105d: 0x2031, + 0x105e: 0x203b, 0x105f: 0x25d4, 0x1060: 0x25e2, 0x1061: 0x1da2, 0x1062: 0x1dac, 0x1063: 0x1dd4, + 0x1064: 0x1dde, 0x1065: 0x1dfc, 0x1066: 0x1e06, 0x1067: 0x1e6a, 0x1068: 0x1e6f, 0x1069: 0x1e92, + 0x106a: 0x1e97, 0x106b: 0x1f6e, 0x106c: 0x1f73, 0x106d: 0x1f96, 0x106e: 0x1fe6, 0x106f: 0x1ff0, + 0x1070: 0x2031, 0x1071: 0x203b, 0x1072: 0x4504, 0x1073: 0x450c, 0x1074: 0x4514, 0x1075: 
0x1ef1, + 0x1076: 0x1ef6, 0x1077: 0x1f0a, 0x1078: 0x1f0f, 0x1079: 0x1f1e, 0x107a: 0x1f23, 0x107b: 0x1e74, + 0x107c: 0x1e79, 0x107d: 0x1e9c, 0x107e: 0x1ea1, 0x107f: 0x1e33, + // Block 0x42, offset 0x1080 + 0x1080: 0x1e38, 0x1081: 0x1e1f, 0x1082: 0x1e24, 0x1083: 0x1e4c, 0x1084: 0x1e51, 0x1085: 0x1eba, + 0x1086: 0x1ebf, 0x1087: 0x1edd, 0x1088: 0x1ee2, 0x1089: 0x1e7e, 0x108a: 0x1e83, 0x108b: 0x1e88, + 0x108c: 0x1e92, 0x108d: 0x1e8d, 0x108e: 0x1e65, 0x108f: 0x1eb0, 0x1090: 0x1ed3, 0x1091: 0x1ef1, + 0x1092: 0x1ef6, 0x1093: 0x1f0a, 0x1094: 0x1f0f, 0x1095: 0x1f1e, 0x1096: 0x1f23, 0x1097: 0x1e74, + 0x1098: 0x1e79, 0x1099: 0x1e9c, 0x109a: 0x1ea1, 0x109b: 0x1e33, 0x109c: 0x1e38, 0x109d: 0x1e1f, + 0x109e: 0x1e24, 0x109f: 0x1e4c, 0x10a0: 0x1e51, 0x10a1: 0x1eba, 0x10a2: 0x1ebf, 0x10a3: 0x1edd, + 0x10a4: 0x1ee2, 0x10a5: 0x1e7e, 0x10a6: 0x1e83, 0x10a7: 0x1e88, 0x10a8: 0x1e92, 0x10a9: 0x1e8d, + 0x10aa: 0x1e65, 0x10ab: 0x1eb0, 0x10ac: 0x1ed3, 0x10ad: 0x1e7e, 0x10ae: 0x1e83, 0x10af: 0x1e88, + 0x10b0: 0x1e92, 0x10b1: 0x1e6f, 0x10b2: 0x1e97, 0x10b3: 0x1eec, 0x10b4: 0x1e56, 0x10b5: 0x1e5b, + 0x10b6: 0x1e60, 0x10b7: 0x1e7e, 0x10b8: 0x1e83, 0x10b9: 0x1e88, 0x10ba: 0x1eec, 0x10bb: 0x1efb, + 0x10bc: 0x4408, 0x10bd: 0x4408, + // Block 0x43, offset 0x10c0 + 0x10d0: 0x2311, 0x10d1: 0x2326, + 0x10d2: 0x2326, 0x10d3: 0x232d, 0x10d4: 0x2334, 0x10d5: 0x2349, 0x10d6: 0x2350, 0x10d7: 0x2357, + 0x10d8: 0x237a, 0x10d9: 0x237a, 0x10da: 0x239d, 0x10db: 0x2396, 0x10dc: 0x23b2, 0x10dd: 0x23a4, + 0x10de: 0x23ab, 0x10df: 0x23ce, 0x10e0: 0x23ce, 0x10e1: 0x23c7, 0x10e2: 0x23d5, 0x10e3: 0x23d5, + 0x10e4: 0x23ff, 0x10e5: 0x23ff, 0x10e6: 0x241b, 0x10e7: 0x23e3, 0x10e8: 0x23e3, 0x10e9: 0x23dc, + 0x10ea: 0x23f1, 0x10eb: 0x23f1, 0x10ec: 0x23f8, 0x10ed: 0x23f8, 0x10ee: 0x2422, 0x10ef: 0x2430, + 0x10f0: 0x2430, 0x10f1: 0x2437, 0x10f2: 0x2437, 0x10f3: 0x243e, 0x10f4: 0x2445, 0x10f5: 0x244c, + 0x10f6: 0x2453, 0x10f7: 0x2453, 0x10f8: 0x245a, 0x10f9: 0x2468, 0x10fa: 0x2476, 0x10fb: 0x246f, + 0x10fc: 0x247d, 0x10fd: 0x247d, 0x10fe: 0x2492, 0x10ff: 0x2499, + // Block 0x44, offset 0x1100 + 0x1100: 0x24ca, 0x1101: 0x24d8, 0x1102: 0x24d1, 0x1103: 0x24b5, 0x1104: 0x24b5, 0x1105: 0x24df, + 0x1106: 0x24df, 0x1107: 0x24e6, 0x1108: 0x24e6, 0x1109: 0x2510, 0x110a: 0x2517, 0x110b: 0x251e, + 0x110c: 0x24f4, 0x110d: 0x2502, 0x110e: 0x2525, 0x110f: 0x252c, + 0x1112: 0x24fb, 0x1113: 0x2580, 0x1114: 0x2587, 0x1115: 0x255d, 0x1116: 0x2564, 0x1117: 0x2548, + 0x1118: 0x2548, 0x1119: 0x254f, 0x111a: 0x2579, 0x111b: 0x2572, 0x111c: 0x259c, 0x111d: 0x259c, + 0x111e: 0x230a, 0x111f: 0x231f, 0x1120: 0x2318, 0x1121: 0x2342, 0x1122: 0x233b, 0x1123: 0x2365, + 0x1124: 0x235e, 0x1125: 0x2388, 0x1126: 0x236c, 0x1127: 0x2381, 0x1128: 0x23b9, 0x1129: 0x2406, + 0x112a: 0x23ea, 0x112b: 0x2429, 0x112c: 0x24c3, 0x112d: 0x24ed, 0x112e: 0x2595, 0x112f: 0x258e, + 0x1130: 0x25a3, 0x1131: 0x253a, 0x1132: 0x24a0, 0x1133: 0x256b, 0x1134: 0x2492, 0x1135: 0x24ca, + 0x1136: 0x2461, 0x1137: 0x24ae, 0x1138: 0x2541, 0x1139: 0x2533, 0x113a: 0x24bc, 0x113b: 0x24a7, + 0x113c: 0x24bc, 0x113d: 0x2541, 0x113e: 0x2373, 0x113f: 0x238f, + // Block 0x45, offset 0x1140 + 0x1140: 0x2509, 0x1141: 0x2484, 0x1142: 0x2303, 0x1143: 0x24a7, 0x1144: 0x244c, 0x1145: 0x241b, + 0x1146: 0x23c0, 0x1147: 0x2556, + 0x1170: 0x2414, 0x1171: 0x248b, 0x1172: 0x27bf, 0x1173: 0x27b6, 0x1174: 0x27ec, 0x1175: 0x27da, + 0x1176: 0x27c8, 0x1177: 0x27e3, 0x1178: 0x27f5, 0x1179: 0x240d, 0x117a: 0x2c7c, 0x117b: 0x2afc, + 0x117c: 0x27d1, + // Block 0x46, offset 0x1180 + 0x1190: 0x0019, 0x1191: 0x0483, + 0x1192: 0x0487, 0x1193: 0x0035, 
0x1194: 0x0037, 0x1195: 0x0003, 0x1196: 0x003f, 0x1197: 0x04bf, + 0x1198: 0x04c3, 0x1199: 0x1b5c, + 0x11a0: 0x8132, 0x11a1: 0x8132, 0x11a2: 0x8132, 0x11a3: 0x8132, + 0x11a4: 0x8132, 0x11a5: 0x8132, 0x11a6: 0x8132, 0x11a7: 0x812d, 0x11a8: 0x812d, 0x11a9: 0x812d, + 0x11aa: 0x812d, 0x11ab: 0x812d, 0x11ac: 0x812d, 0x11ad: 0x812d, 0x11ae: 0x8132, 0x11af: 0x8132, + 0x11b0: 0x1873, 0x11b1: 0x0443, 0x11b2: 0x043f, 0x11b3: 0x007f, 0x11b4: 0x007f, 0x11b5: 0x0011, + 0x11b6: 0x0013, 0x11b7: 0x00b7, 0x11b8: 0x00bb, 0x11b9: 0x04b7, 0x11ba: 0x04bb, 0x11bb: 0x04ab, + 0x11bc: 0x04af, 0x11bd: 0x0493, 0x11be: 0x0497, 0x11bf: 0x048b, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x048f, 0x11c1: 0x049b, 0x11c2: 0x049f, 0x11c3: 0x04a3, 0x11c4: 0x04a7, + 0x11c7: 0x0077, 0x11c8: 0x007b, 0x11c9: 0x4269, 0x11ca: 0x4269, 0x11cb: 0x4269, + 0x11cc: 0x4269, 0x11cd: 0x007f, 0x11ce: 0x007f, 0x11cf: 0x007f, 0x11d0: 0x0019, 0x11d1: 0x0483, + 0x11d2: 0x001d, 0x11d4: 0x0037, 0x11d5: 0x0035, 0x11d6: 0x003f, 0x11d7: 0x0003, + 0x11d8: 0x0443, 0x11d9: 0x0011, 0x11da: 0x0013, 0x11db: 0x00b7, 0x11dc: 0x00bb, 0x11dd: 0x04b7, + 0x11de: 0x04bb, 0x11df: 0x0007, 0x11e0: 0x000d, 0x11e1: 0x0015, 0x11e2: 0x0017, 0x11e3: 0x001b, + 0x11e4: 0x0039, 0x11e5: 0x003d, 0x11e6: 0x003b, 0x11e8: 0x0079, 0x11e9: 0x0009, + 0x11ea: 0x000b, 0x11eb: 0x0041, + 0x11f0: 0x42aa, 0x11f1: 0x442c, 0x11f2: 0x42af, 0x11f4: 0x42b4, + 0x11f6: 0x42b9, 0x11f7: 0x4432, 0x11f8: 0x42be, 0x11f9: 0x4438, 0x11fa: 0x42c3, 0x11fb: 0x443e, + 0x11fc: 0x42c8, 0x11fd: 0x4444, 0x11fe: 0x42cd, 0x11ff: 0x444a, + // Block 0x48, offset 0x1200 + 0x1200: 0x0236, 0x1201: 0x440e, 0x1202: 0x440e, 0x1203: 0x4414, 0x1204: 0x4414, 0x1205: 0x4456, + 0x1206: 0x4456, 0x1207: 0x441a, 0x1208: 0x441a, 0x1209: 0x4462, 0x120a: 0x4462, 0x120b: 0x4462, + 0x120c: 0x4462, 0x120d: 0x0239, 0x120e: 0x0239, 0x120f: 0x023c, 0x1210: 0x023c, 0x1211: 0x023c, + 0x1212: 0x023c, 0x1213: 0x023f, 0x1214: 0x023f, 0x1215: 0x0242, 0x1216: 0x0242, 0x1217: 0x0242, + 0x1218: 0x0242, 0x1219: 0x0245, 0x121a: 0x0245, 0x121b: 0x0245, 0x121c: 0x0245, 0x121d: 0x0248, + 0x121e: 0x0248, 0x121f: 0x0248, 0x1220: 0x0248, 0x1221: 0x024b, 0x1222: 0x024b, 0x1223: 0x024b, + 0x1224: 0x024b, 0x1225: 0x024e, 0x1226: 0x024e, 0x1227: 0x024e, 0x1228: 0x024e, 0x1229: 0x0251, + 0x122a: 0x0251, 0x122b: 0x0254, 0x122c: 0x0254, 0x122d: 0x0257, 0x122e: 0x0257, 0x122f: 0x025a, + 0x1230: 0x025a, 0x1231: 0x025d, 0x1232: 0x025d, 0x1233: 0x025d, 0x1234: 0x025d, 0x1235: 0x0260, + 0x1236: 0x0260, 0x1237: 0x0260, 0x1238: 0x0260, 0x1239: 0x0263, 0x123a: 0x0263, 0x123b: 0x0263, + 0x123c: 0x0263, 0x123d: 0x0266, 0x123e: 0x0266, 0x123f: 0x0266, + // Block 0x49, offset 0x1240 + 0x1240: 0x0266, 0x1241: 0x0269, 0x1242: 0x0269, 0x1243: 0x0269, 0x1244: 0x0269, 0x1245: 0x026c, + 0x1246: 0x026c, 0x1247: 0x026c, 0x1248: 0x026c, 0x1249: 0x026f, 0x124a: 0x026f, 0x124b: 0x026f, + 0x124c: 0x026f, 0x124d: 0x0272, 0x124e: 0x0272, 0x124f: 0x0272, 0x1250: 0x0272, 0x1251: 0x0275, + 0x1252: 0x0275, 0x1253: 0x0275, 0x1254: 0x0275, 0x1255: 0x0278, 0x1256: 0x0278, 0x1257: 0x0278, + 0x1258: 0x0278, 0x1259: 0x027b, 0x125a: 0x027b, 0x125b: 0x027b, 0x125c: 0x027b, 0x125d: 0x027e, + 0x125e: 0x027e, 0x125f: 0x027e, 0x1260: 0x027e, 0x1261: 0x0281, 0x1262: 0x0281, 0x1263: 0x0281, + 0x1264: 0x0281, 0x1265: 0x0284, 0x1266: 0x0284, 0x1267: 0x0284, 0x1268: 0x0284, 0x1269: 0x0287, + 0x126a: 0x0287, 0x126b: 0x0287, 0x126c: 0x0287, 0x126d: 0x028a, 0x126e: 0x028a, 0x126f: 0x028d, + 0x1270: 0x028d, 0x1271: 0x0290, 0x1272: 0x0290, 0x1273: 0x0290, 0x1274: 0x0290, 0x1275: 0x2e00, + 0x1276: 0x2e00, 0x1277: 
0x2e08, 0x1278: 0x2e08, 0x1279: 0x2e10, 0x127a: 0x2e10, 0x127b: 0x1f82, + 0x127c: 0x1f82, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0081, 0x1281: 0x0083, 0x1282: 0x0085, 0x1283: 0x0087, 0x1284: 0x0089, 0x1285: 0x008b, + 0x1286: 0x008d, 0x1287: 0x008f, 0x1288: 0x0091, 0x1289: 0x0093, 0x128a: 0x0095, 0x128b: 0x0097, + 0x128c: 0x0099, 0x128d: 0x009b, 0x128e: 0x009d, 0x128f: 0x009f, 0x1290: 0x00a1, 0x1291: 0x00a3, + 0x1292: 0x00a5, 0x1293: 0x00a7, 0x1294: 0x00a9, 0x1295: 0x00ab, 0x1296: 0x00ad, 0x1297: 0x00af, + 0x1298: 0x00b1, 0x1299: 0x00b3, 0x129a: 0x00b5, 0x129b: 0x00b7, 0x129c: 0x00b9, 0x129d: 0x00bb, + 0x129e: 0x00bd, 0x129f: 0x0477, 0x12a0: 0x047b, 0x12a1: 0x0487, 0x12a2: 0x049b, 0x12a3: 0x049f, + 0x12a4: 0x0483, 0x12a5: 0x05ab, 0x12a6: 0x05a3, 0x12a7: 0x04c7, 0x12a8: 0x04cf, 0x12a9: 0x04d7, + 0x12aa: 0x04df, 0x12ab: 0x04e7, 0x12ac: 0x056b, 0x12ad: 0x0573, 0x12ae: 0x057b, 0x12af: 0x051f, + 0x12b0: 0x05af, 0x12b1: 0x04cb, 0x12b2: 0x04d3, 0x12b3: 0x04db, 0x12b4: 0x04e3, 0x12b5: 0x04eb, + 0x12b6: 0x04ef, 0x12b7: 0x04f3, 0x12b8: 0x04f7, 0x12b9: 0x04fb, 0x12ba: 0x04ff, 0x12bb: 0x0503, + 0x12bc: 0x0507, 0x12bd: 0x050b, 0x12be: 0x050f, 0x12bf: 0x0513, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0517, 0x12c1: 0x051b, 0x12c2: 0x0523, 0x12c3: 0x0527, 0x12c4: 0x052b, 0x12c5: 0x052f, + 0x12c6: 0x0533, 0x12c7: 0x0537, 0x12c8: 0x053b, 0x12c9: 0x053f, 0x12ca: 0x0543, 0x12cb: 0x0547, + 0x12cc: 0x054b, 0x12cd: 0x054f, 0x12ce: 0x0553, 0x12cf: 0x0557, 0x12d0: 0x055b, 0x12d1: 0x055f, + 0x12d2: 0x0563, 0x12d3: 0x0567, 0x12d4: 0x056f, 0x12d5: 0x0577, 0x12d6: 0x057f, 0x12d7: 0x0583, + 0x12d8: 0x0587, 0x12d9: 0x058b, 0x12da: 0x058f, 0x12db: 0x0593, 0x12dc: 0x0597, 0x12dd: 0x05a7, + 0x12de: 0x4a78, 0x12df: 0x4a7e, 0x12e0: 0x03c3, 0x12e1: 0x0313, 0x12e2: 0x0317, 0x12e3: 0x4a3b, + 0x12e4: 0x031b, 0x12e5: 0x4a41, 0x12e6: 0x4a47, 0x12e7: 0x031f, 0x12e8: 0x0323, 0x12e9: 0x0327, + 0x12ea: 0x4a4d, 0x12eb: 0x4a53, 0x12ec: 0x4a59, 0x12ed: 0x4a5f, 0x12ee: 0x4a65, 0x12ef: 0x4a6b, + 0x12f0: 0x0367, 0x12f1: 0x032b, 0x12f2: 0x032f, 0x12f3: 0x0333, 0x12f4: 0x037b, 0x12f5: 0x0337, + 0x12f6: 0x033b, 0x12f7: 0x033f, 0x12f8: 0x0343, 0x12f9: 0x0347, 0x12fa: 0x034b, 0x12fb: 0x034f, + 0x12fc: 0x0353, 0x12fd: 0x0357, 0x12fe: 0x035b, + // Block 0x4c, offset 0x1300 + 0x1302: 0x49bd, 0x1303: 0x49c3, 0x1304: 0x49c9, 0x1305: 0x49cf, + 0x1306: 0x49d5, 0x1307: 0x49db, 0x130a: 0x49e1, 0x130b: 0x49e7, + 0x130c: 0x49ed, 0x130d: 0x49f3, 0x130e: 0x49f9, 0x130f: 0x49ff, + 0x1312: 0x4a05, 0x1313: 0x4a0b, 0x1314: 0x4a11, 0x1315: 0x4a17, 0x1316: 0x4a1d, 0x1317: 0x4a23, + 0x131a: 0x4a29, 0x131b: 0x4a2f, 0x131c: 0x4a35, + 0x1320: 0x00bf, 0x1321: 0x00c2, 0x1322: 0x00cb, 0x1323: 0x4264, + 0x1324: 0x00c8, 0x1325: 0x00c5, 0x1326: 0x0447, 0x1328: 0x046b, 0x1329: 0x044b, + 0x132a: 0x044f, 0x132b: 0x0453, 0x132c: 0x0457, 0x132d: 0x046f, 0x132e: 0x0473, + // Block 0x4d, offset 0x1340 + 0x1340: 0x0063, 0x1341: 0x0065, 0x1342: 0x0067, 0x1343: 0x0069, 0x1344: 0x006b, 0x1345: 0x006d, + 0x1346: 0x006f, 0x1347: 0x0071, 0x1348: 0x0073, 0x1349: 0x0075, 0x134a: 0x0083, 0x134b: 0x0085, + 0x134c: 0x0087, 0x134d: 0x0089, 0x134e: 0x008b, 0x134f: 0x008d, 0x1350: 0x008f, 0x1351: 0x0091, + 0x1352: 0x0093, 0x1353: 0x0095, 0x1354: 0x0097, 0x1355: 0x0099, 0x1356: 0x009b, 0x1357: 0x009d, + 0x1358: 0x009f, 0x1359: 0x00a1, 0x135a: 0x00a3, 0x135b: 0x00a5, 0x135c: 0x00a7, 0x135d: 0x00a9, + 0x135e: 0x00ab, 0x135f: 0x00ad, 0x1360: 0x00af, 0x1361: 0x00b1, 0x1362: 0x00b3, 0x1363: 0x00b5, + 0x1364: 0x00dd, 0x1365: 0x00f2, 0x1368: 0x0173, 0x1369: 0x0176, + 0x136a: 0x0179, 0x136b: 0x017c, 
0x136c: 0x017f, 0x136d: 0x0182, 0x136e: 0x0185, 0x136f: 0x0188, + 0x1370: 0x018b, 0x1371: 0x018e, 0x1372: 0x0191, 0x1373: 0x0194, 0x1374: 0x0197, 0x1375: 0x019a, + 0x1376: 0x019d, 0x1377: 0x01a0, 0x1378: 0x01a3, 0x1379: 0x0188, 0x137a: 0x01a6, 0x137b: 0x01a9, + 0x137c: 0x01ac, 0x137d: 0x01af, 0x137e: 0x01b2, 0x137f: 0x01b5, + // Block 0x4e, offset 0x1380 + 0x1380: 0x01fd, 0x1381: 0x0200, 0x1382: 0x0203, 0x1383: 0x045b, 0x1384: 0x01c7, 0x1385: 0x01d0, + 0x1386: 0x01d6, 0x1387: 0x01fa, 0x1388: 0x01eb, 0x1389: 0x01e8, 0x138a: 0x0206, 0x138b: 0x0209, + 0x138e: 0x0021, 0x138f: 0x0023, 0x1390: 0x0025, 0x1391: 0x0027, + 0x1392: 0x0029, 0x1393: 0x002b, 0x1394: 0x002d, 0x1395: 0x002f, 0x1396: 0x0031, 0x1397: 0x0033, + 0x1398: 0x0021, 0x1399: 0x0023, 0x139a: 0x0025, 0x139b: 0x0027, 0x139c: 0x0029, 0x139d: 0x002b, + 0x139e: 0x002d, 0x139f: 0x002f, 0x13a0: 0x0031, 0x13a1: 0x0033, 0x13a2: 0x0021, 0x13a3: 0x0023, + 0x13a4: 0x0025, 0x13a5: 0x0027, 0x13a6: 0x0029, 0x13a7: 0x002b, 0x13a8: 0x002d, 0x13a9: 0x002f, + 0x13aa: 0x0031, 0x13ab: 0x0033, 0x13ac: 0x0021, 0x13ad: 0x0023, 0x13ae: 0x0025, 0x13af: 0x0027, + 0x13b0: 0x0029, 0x13b1: 0x002b, 0x13b2: 0x002d, 0x13b3: 0x002f, 0x13b4: 0x0031, 0x13b5: 0x0033, + 0x13b6: 0x0021, 0x13b7: 0x0023, 0x13b8: 0x0025, 0x13b9: 0x0027, 0x13ba: 0x0029, 0x13bb: 0x002b, + 0x13bc: 0x002d, 0x13bd: 0x002f, 0x13be: 0x0031, 0x13bf: 0x0033, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0239, 0x13c1: 0x023c, 0x13c2: 0x0248, 0x13c3: 0x0251, 0x13c5: 0x028a, + 0x13c6: 0x025a, 0x13c7: 0x024b, 0x13c8: 0x0269, 0x13c9: 0x0290, 0x13ca: 0x027b, 0x13cb: 0x027e, + 0x13cc: 0x0281, 0x13cd: 0x0284, 0x13ce: 0x025d, 0x13cf: 0x026f, 0x13d0: 0x0275, 0x13d1: 0x0263, + 0x13d2: 0x0278, 0x13d3: 0x0257, 0x13d4: 0x0260, 0x13d5: 0x0242, 0x13d6: 0x0245, 0x13d7: 0x024e, + 0x13d8: 0x0254, 0x13d9: 0x0266, 0x13da: 0x026c, 0x13db: 0x0272, 0x13dc: 0x0293, 0x13dd: 0x02e4, + 0x13de: 0x02cc, 0x13df: 0x0296, 0x13e1: 0x023c, 0x13e2: 0x0248, + 0x13e4: 0x0287, 0x13e7: 0x024b, 0x13e9: 0x0290, + 0x13ea: 0x027b, 0x13eb: 0x027e, 0x13ec: 0x0281, 0x13ed: 0x0284, 0x13ee: 0x025d, 0x13ef: 0x026f, + 0x13f0: 0x0275, 0x13f1: 0x0263, 0x13f2: 0x0278, 0x13f4: 0x0260, 0x13f5: 0x0242, + 0x13f6: 0x0245, 0x13f7: 0x024e, 0x13f9: 0x0266, 0x13fb: 0x0272, + // Block 0x50, offset 0x1400 + 0x1402: 0x0248, + 0x1407: 0x024b, 0x1409: 0x0290, 0x140b: 0x027e, + 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1411: 0x0263, + 0x1412: 0x0278, 0x1414: 0x0260, 0x1417: 0x024e, + 0x1419: 0x0266, 0x141b: 0x0272, 0x141d: 0x02e4, + 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248, + 0x1424: 0x0287, 0x1427: 0x024b, 0x1428: 0x0269, 0x1429: 0x0290, + 0x142a: 0x027b, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f, + 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242, + 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143a: 0x026c, 0x143b: 0x0272, + 0x143c: 0x0293, 0x143e: 0x02cc, + // Block 0x51, offset 0x1440 + 0x1440: 0x0239, 0x1441: 0x023c, 0x1442: 0x0248, 0x1443: 0x0251, 0x1444: 0x0287, 0x1445: 0x028a, + 0x1446: 0x025a, 0x1447: 0x024b, 0x1448: 0x0269, 0x1449: 0x0290, 0x144b: 0x027e, + 0x144c: 0x0281, 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1450: 0x0275, 0x1451: 0x0263, + 0x1452: 0x0278, 0x1453: 0x0257, 0x1454: 0x0260, 0x1455: 0x0242, 0x1456: 0x0245, 0x1457: 0x024e, + 0x1458: 0x0254, 0x1459: 0x0266, 0x145a: 0x026c, 0x145b: 0x0272, + 0x1461: 0x023c, 0x1462: 0x0248, 0x1463: 0x0251, + 0x1465: 0x028a, 0x1466: 0x025a, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290, + 0x146b: 0x027e, 0x146c: 0x0281, 0x146d: 0x0284, 
0x146e: 0x025d, 0x146f: 0x026f, + 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1473: 0x0257, 0x1474: 0x0260, 0x1475: 0x0242, + 0x1476: 0x0245, 0x1477: 0x024e, 0x1478: 0x0254, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272, + // Block 0x52, offset 0x1480 + 0x1480: 0x1879, 0x1481: 0x1876, 0x1482: 0x187c, 0x1483: 0x18a0, 0x1484: 0x18c4, 0x1485: 0x18e8, + 0x1486: 0x190c, 0x1487: 0x1915, 0x1488: 0x191b, 0x1489: 0x1921, 0x148a: 0x1927, + 0x1490: 0x1a8c, 0x1491: 0x1a90, + 0x1492: 0x1a94, 0x1493: 0x1a98, 0x1494: 0x1a9c, 0x1495: 0x1aa0, 0x1496: 0x1aa4, 0x1497: 0x1aa8, + 0x1498: 0x1aac, 0x1499: 0x1ab0, 0x149a: 0x1ab4, 0x149b: 0x1ab8, 0x149c: 0x1abc, 0x149d: 0x1ac0, + 0x149e: 0x1ac4, 0x149f: 0x1ac8, 0x14a0: 0x1acc, 0x14a1: 0x1ad0, 0x14a2: 0x1ad4, 0x14a3: 0x1ad8, + 0x14a4: 0x1adc, 0x14a5: 0x1ae0, 0x14a6: 0x1ae4, 0x14a7: 0x1ae8, 0x14a8: 0x1aec, 0x14a9: 0x1af0, + 0x14aa: 0x271e, 0x14ab: 0x0047, 0x14ac: 0x0065, 0x14ad: 0x193c, 0x14ae: 0x19b1, + 0x14b0: 0x0043, 0x14b1: 0x0045, 0x14b2: 0x0047, 0x14b3: 0x0049, 0x14b4: 0x004b, 0x14b5: 0x004d, + 0x14b6: 0x004f, 0x14b7: 0x0051, 0x14b8: 0x0053, 0x14b9: 0x0055, 0x14ba: 0x0057, 0x14bb: 0x0059, + 0x14bc: 0x005b, 0x14bd: 0x005d, 0x14be: 0x005f, 0x14bf: 0x0061, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x26ad, 0x14c1: 0x26c2, 0x14c2: 0x0503, + 0x14d0: 0x0c0f, 0x14d1: 0x0a47, + 0x14d2: 0x08d3, 0x14d3: 0x45c4, 0x14d4: 0x071b, 0x14d5: 0x09ef, 0x14d6: 0x132f, 0x14d7: 0x09ff, + 0x14d8: 0x0727, 0x14d9: 0x0cd7, 0x14da: 0x0eaf, 0x14db: 0x0caf, 0x14dc: 0x0827, 0x14dd: 0x0b6b, + 0x14de: 0x07bf, 0x14df: 0x0cb7, 0x14e0: 0x0813, 0x14e1: 0x1117, 0x14e2: 0x0f83, 0x14e3: 0x138b, + 0x14e4: 0x09d3, 0x14e5: 0x090b, 0x14e6: 0x0e63, 0x14e7: 0x0c1b, 0x14e8: 0x0c47, 0x14e9: 0x06bf, + 0x14ea: 0x06cb, 0x14eb: 0x140b, 0x14ec: 0x0adb, 0x14ed: 0x06e7, 0x14ee: 0x08ef, 0x14ef: 0x0c3b, + 0x14f0: 0x13b3, 0x14f1: 0x0c13, 0x14f2: 0x106f, 0x14f3: 0x10ab, 0x14f4: 0x08f7, 0x14f5: 0x0e43, + 0x14f6: 0x0d0b, 0x14f7: 0x0d07, 0x14f8: 0x0f97, 0x14f9: 0x082b, 0x14fa: 0x0957, 0x14fb: 0x1443, + // Block 0x54, offset 0x1500 + 0x1500: 0x06fb, 0x1501: 0x06f3, 0x1502: 0x0703, 0x1503: 0x1647, 0x1504: 0x0747, 0x1505: 0x0757, + 0x1506: 0x075b, 0x1507: 0x0763, 0x1508: 0x076b, 0x1509: 0x076f, 0x150a: 0x077b, 0x150b: 0x0773, + 0x150c: 0x05b3, 0x150d: 0x165b, 0x150e: 0x078f, 0x150f: 0x0793, 0x1510: 0x0797, 0x1511: 0x07b3, + 0x1512: 0x164c, 0x1513: 0x05b7, 0x1514: 0x079f, 0x1515: 0x07bf, 0x1516: 0x1656, 0x1517: 0x07cf, + 0x1518: 0x07d7, 0x1519: 0x0737, 0x151a: 0x07df, 0x151b: 0x07e3, 0x151c: 0x1831, 0x151d: 0x07ff, + 0x151e: 0x0807, 0x151f: 0x05bf, 0x1520: 0x081f, 0x1521: 0x0823, 0x1522: 0x082b, 0x1523: 0x082f, + 0x1524: 0x05c3, 0x1525: 0x0847, 0x1526: 0x084b, 0x1527: 0x0857, 0x1528: 0x0863, 0x1529: 0x0867, + 0x152a: 0x086b, 0x152b: 0x0873, 0x152c: 0x0893, 0x152d: 0x0897, 0x152e: 0x089f, 0x152f: 0x08af, + 0x1530: 0x08b7, 0x1531: 0x08bb, 0x1532: 0x08bb, 0x1533: 0x08bb, 0x1534: 0x166a, 0x1535: 0x0e93, + 0x1536: 0x08cf, 0x1537: 0x08d7, 0x1538: 0x166f, 0x1539: 0x08e3, 0x153a: 0x08eb, 0x153b: 0x08f3, + 0x153c: 0x091b, 0x153d: 0x0907, 0x153e: 0x0913, 0x153f: 0x0917, + // Block 0x55, offset 0x1540 + 0x1540: 0x091f, 0x1541: 0x0927, 0x1542: 0x092b, 0x1543: 0x0933, 0x1544: 0x093b, 0x1545: 0x093f, + 0x1546: 0x093f, 0x1547: 0x0947, 0x1548: 0x094f, 0x1549: 0x0953, 0x154a: 0x095f, 0x154b: 0x0983, + 0x154c: 0x0967, 0x154d: 0x0987, 0x154e: 0x096b, 0x154f: 0x0973, 0x1550: 0x080b, 0x1551: 0x09cf, + 0x1552: 0x0997, 0x1553: 0x099b, 0x1554: 0x099f, 0x1555: 0x0993, 0x1556: 0x09a7, 0x1557: 0x09a3, + 0x1558: 0x09bb, 0x1559: 0x1674, 0x155a: 
0x09d7, 0x155b: 0x09db, 0x155c: 0x09e3, 0x155d: 0x09ef, + 0x155e: 0x09f7, 0x155f: 0x0a13, 0x1560: 0x1679, 0x1561: 0x167e, 0x1562: 0x0a1f, 0x1563: 0x0a23, + 0x1564: 0x0a27, 0x1565: 0x0a1b, 0x1566: 0x0a2f, 0x1567: 0x05c7, 0x1568: 0x05cb, 0x1569: 0x0a37, + 0x156a: 0x0a3f, 0x156b: 0x0a3f, 0x156c: 0x1683, 0x156d: 0x0a5b, 0x156e: 0x0a5f, 0x156f: 0x0a63, + 0x1570: 0x0a6b, 0x1571: 0x1688, 0x1572: 0x0a73, 0x1573: 0x0a77, 0x1574: 0x0b4f, 0x1575: 0x0a7f, + 0x1576: 0x05cf, 0x1577: 0x0a8b, 0x1578: 0x0a9b, 0x1579: 0x0aa7, 0x157a: 0x0aa3, 0x157b: 0x1692, + 0x157c: 0x0aaf, 0x157d: 0x1697, 0x157e: 0x0abb, 0x157f: 0x0ab7, + // Block 0x56, offset 0x1580 + 0x1580: 0x0abf, 0x1581: 0x0acf, 0x1582: 0x0ad3, 0x1583: 0x05d3, 0x1584: 0x0ae3, 0x1585: 0x0aeb, + 0x1586: 0x0aef, 0x1587: 0x0af3, 0x1588: 0x05d7, 0x1589: 0x169c, 0x158a: 0x05db, 0x158b: 0x0b0f, + 0x158c: 0x0b13, 0x158d: 0x0b17, 0x158e: 0x0b1f, 0x158f: 0x1863, 0x1590: 0x0b37, 0x1591: 0x16a6, + 0x1592: 0x16a6, 0x1593: 0x11d7, 0x1594: 0x0b47, 0x1595: 0x0b47, 0x1596: 0x05df, 0x1597: 0x16c9, + 0x1598: 0x179b, 0x1599: 0x0b57, 0x159a: 0x0b5f, 0x159b: 0x05e3, 0x159c: 0x0b73, 0x159d: 0x0b83, + 0x159e: 0x0b87, 0x159f: 0x0b8f, 0x15a0: 0x0b9f, 0x15a1: 0x05eb, 0x15a2: 0x05e7, 0x15a3: 0x0ba3, + 0x15a4: 0x16ab, 0x15a5: 0x0ba7, 0x15a6: 0x0bbb, 0x15a7: 0x0bbf, 0x15a8: 0x0bc3, 0x15a9: 0x0bbf, + 0x15aa: 0x0bcf, 0x15ab: 0x0bd3, 0x15ac: 0x0be3, 0x15ad: 0x0bdb, 0x15ae: 0x0bdf, 0x15af: 0x0be7, + 0x15b0: 0x0beb, 0x15b1: 0x0bef, 0x15b2: 0x0bfb, 0x15b3: 0x0bff, 0x15b4: 0x0c17, 0x15b5: 0x0c1f, + 0x15b6: 0x0c2f, 0x15b7: 0x0c43, 0x15b8: 0x16ba, 0x15b9: 0x0c3f, 0x15ba: 0x0c33, 0x15bb: 0x0c4b, + 0x15bc: 0x0c53, 0x15bd: 0x0c67, 0x15be: 0x16bf, 0x15bf: 0x0c6f, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0c63, 0x15c1: 0x0c5b, 0x15c2: 0x05ef, 0x15c3: 0x0c77, 0x15c4: 0x0c7f, 0x15c5: 0x0c87, + 0x15c6: 0x0c7b, 0x15c7: 0x05f3, 0x15c8: 0x0c97, 0x15c9: 0x0c9f, 0x15ca: 0x16c4, 0x15cb: 0x0ccb, + 0x15cc: 0x0cff, 0x15cd: 0x0cdb, 0x15ce: 0x05ff, 0x15cf: 0x0ce7, 0x15d0: 0x05fb, 0x15d1: 0x05f7, + 0x15d2: 0x07c3, 0x15d3: 0x07c7, 0x15d4: 0x0d03, 0x15d5: 0x0ceb, 0x15d6: 0x11ab, 0x15d7: 0x0663, + 0x15d8: 0x0d0f, 0x15d9: 0x0d13, 0x15da: 0x0d17, 0x15db: 0x0d2b, 0x15dc: 0x0d23, 0x15dd: 0x16dd, + 0x15de: 0x0603, 0x15df: 0x0d3f, 0x15e0: 0x0d33, 0x15e1: 0x0d4f, 0x15e2: 0x0d57, 0x15e3: 0x16e7, + 0x15e4: 0x0d5b, 0x15e5: 0x0d47, 0x15e6: 0x0d63, 0x15e7: 0x0607, 0x15e8: 0x0d67, 0x15e9: 0x0d6b, + 0x15ea: 0x0d6f, 0x15eb: 0x0d7b, 0x15ec: 0x16ec, 0x15ed: 0x0d83, 0x15ee: 0x060b, 0x15ef: 0x0d8f, + 0x15f0: 0x16f1, 0x15f1: 0x0d93, 0x15f2: 0x060f, 0x15f3: 0x0d9f, 0x15f4: 0x0dab, 0x15f5: 0x0db7, + 0x15f6: 0x0dbb, 0x15f7: 0x16f6, 0x15f8: 0x168d, 0x15f9: 0x16fb, 0x15fa: 0x0ddb, 0x15fb: 0x1700, + 0x15fc: 0x0de7, 0x15fd: 0x0def, 0x15fe: 0x0ddf, 0x15ff: 0x0dfb, + // Block 0x58, offset 0x1600 + 0x1600: 0x0e0b, 0x1601: 0x0e1b, 0x1602: 0x0e0f, 0x1603: 0x0e13, 0x1604: 0x0e1f, 0x1605: 0x0e23, + 0x1606: 0x1705, 0x1607: 0x0e07, 0x1608: 0x0e3b, 0x1609: 0x0e3f, 0x160a: 0x0613, 0x160b: 0x0e53, + 0x160c: 0x0e4f, 0x160d: 0x170a, 0x160e: 0x0e33, 0x160f: 0x0e6f, 0x1610: 0x170f, 0x1611: 0x1714, + 0x1612: 0x0e73, 0x1613: 0x0e87, 0x1614: 0x0e83, 0x1615: 0x0e7f, 0x1616: 0x0617, 0x1617: 0x0e8b, + 0x1618: 0x0e9b, 0x1619: 0x0e97, 0x161a: 0x0ea3, 0x161b: 0x1651, 0x161c: 0x0eb3, 0x161d: 0x1719, + 0x161e: 0x0ebf, 0x161f: 0x1723, 0x1620: 0x0ed3, 0x1621: 0x0edf, 0x1622: 0x0ef3, 0x1623: 0x1728, + 0x1624: 0x0f07, 0x1625: 0x0f0b, 0x1626: 0x172d, 0x1627: 0x1732, 0x1628: 0x0f27, 0x1629: 0x0f37, + 0x162a: 0x061b, 0x162b: 0x0f3b, 0x162c: 0x061f, 0x162d: 0x061f, 
0x162e: 0x0f53, 0x162f: 0x0f57, + 0x1630: 0x0f5f, 0x1631: 0x0f63, 0x1632: 0x0f6f, 0x1633: 0x0623, 0x1634: 0x0f87, 0x1635: 0x1737, + 0x1636: 0x0fa3, 0x1637: 0x173c, 0x1638: 0x0faf, 0x1639: 0x16a1, 0x163a: 0x0fbf, 0x163b: 0x1741, + 0x163c: 0x1746, 0x163d: 0x174b, 0x163e: 0x0627, 0x163f: 0x062b, + // Block 0x59, offset 0x1640 + 0x1640: 0x0ff7, 0x1641: 0x1755, 0x1642: 0x1750, 0x1643: 0x175a, 0x1644: 0x175f, 0x1645: 0x0fff, + 0x1646: 0x1003, 0x1647: 0x1003, 0x1648: 0x100b, 0x1649: 0x0633, 0x164a: 0x100f, 0x164b: 0x0637, + 0x164c: 0x063b, 0x164d: 0x1769, 0x164e: 0x1023, 0x164f: 0x102b, 0x1650: 0x1037, 0x1651: 0x063f, + 0x1652: 0x176e, 0x1653: 0x105b, 0x1654: 0x1773, 0x1655: 0x1778, 0x1656: 0x107b, 0x1657: 0x1093, + 0x1658: 0x0643, 0x1659: 0x109b, 0x165a: 0x109f, 0x165b: 0x10a3, 0x165c: 0x177d, 0x165d: 0x1782, + 0x165e: 0x1782, 0x165f: 0x10bb, 0x1660: 0x0647, 0x1661: 0x1787, 0x1662: 0x10cf, 0x1663: 0x10d3, + 0x1664: 0x064b, 0x1665: 0x178c, 0x1666: 0x10ef, 0x1667: 0x064f, 0x1668: 0x10ff, 0x1669: 0x10f7, + 0x166a: 0x1107, 0x166b: 0x1796, 0x166c: 0x111f, 0x166d: 0x0653, 0x166e: 0x112b, 0x166f: 0x1133, + 0x1670: 0x1143, 0x1671: 0x0657, 0x1672: 0x17a0, 0x1673: 0x17a5, 0x1674: 0x065b, 0x1675: 0x17aa, + 0x1676: 0x115b, 0x1677: 0x17af, 0x1678: 0x1167, 0x1679: 0x1173, 0x167a: 0x117b, 0x167b: 0x17b4, + 0x167c: 0x17b9, 0x167d: 0x118f, 0x167e: 0x17be, 0x167f: 0x1197, + // Block 0x5a, offset 0x1680 + 0x1680: 0x16ce, 0x1681: 0x065f, 0x1682: 0x11af, 0x1683: 0x11b3, 0x1684: 0x0667, 0x1685: 0x11b7, + 0x1686: 0x0a33, 0x1687: 0x17c3, 0x1688: 0x17c8, 0x1689: 0x16d3, 0x168a: 0x16d8, 0x168b: 0x11d7, + 0x168c: 0x11db, 0x168d: 0x13f3, 0x168e: 0x066b, 0x168f: 0x1207, 0x1690: 0x1203, 0x1691: 0x120b, + 0x1692: 0x083f, 0x1693: 0x120f, 0x1694: 0x1213, 0x1695: 0x1217, 0x1696: 0x121f, 0x1697: 0x17cd, + 0x1698: 0x121b, 0x1699: 0x1223, 0x169a: 0x1237, 0x169b: 0x123b, 0x169c: 0x1227, 0x169d: 0x123f, + 0x169e: 0x1253, 0x169f: 0x1267, 0x16a0: 0x1233, 0x16a1: 0x1247, 0x16a2: 0x124b, 0x16a3: 0x124f, + 0x16a4: 0x17d2, 0x16a5: 0x17dc, 0x16a6: 0x17d7, 0x16a7: 0x066f, 0x16a8: 0x126f, 0x16a9: 0x1273, + 0x16aa: 0x127b, 0x16ab: 0x17f0, 0x16ac: 0x127f, 0x16ad: 0x17e1, 0x16ae: 0x0673, 0x16af: 0x0677, + 0x16b0: 0x17e6, 0x16b1: 0x17eb, 0x16b2: 0x067b, 0x16b3: 0x129f, 0x16b4: 0x12a3, 0x16b5: 0x12a7, + 0x16b6: 0x12ab, 0x16b7: 0x12b7, 0x16b8: 0x12b3, 0x16b9: 0x12bf, 0x16ba: 0x12bb, 0x16bb: 0x12cb, + 0x16bc: 0x12c3, 0x16bd: 0x12c7, 0x16be: 0x12cf, 0x16bf: 0x067f, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x12d7, 0x16c1: 0x12db, 0x16c2: 0x0683, 0x16c3: 0x12eb, 0x16c4: 0x12ef, 0x16c5: 0x17f5, + 0x16c6: 0x12fb, 0x16c7: 0x12ff, 0x16c8: 0x0687, 0x16c9: 0x130b, 0x16ca: 0x05bb, 0x16cb: 0x17fa, + 0x16cc: 0x17ff, 0x16cd: 0x068b, 0x16ce: 0x068f, 0x16cf: 0x1337, 0x16d0: 0x134f, 0x16d1: 0x136b, + 0x16d2: 0x137b, 0x16d3: 0x1804, 0x16d4: 0x138f, 0x16d5: 0x1393, 0x16d6: 0x13ab, 0x16d7: 0x13b7, + 0x16d8: 0x180e, 0x16d9: 0x1660, 0x16da: 0x13c3, 0x16db: 0x13bf, 0x16dc: 0x13cb, 0x16dd: 0x1665, + 0x16de: 0x13d7, 0x16df: 0x13e3, 0x16e0: 0x1813, 0x16e1: 0x1818, 0x16e2: 0x1423, 0x16e3: 0x142f, + 0x16e4: 0x1437, 0x16e5: 0x181d, 0x16e6: 0x143b, 0x16e7: 0x1467, 0x16e8: 0x1473, 0x16e9: 0x1477, + 0x16ea: 0x146f, 0x16eb: 0x1483, 0x16ec: 0x1487, 0x16ed: 0x1822, 0x16ee: 0x1493, 0x16ef: 0x0693, + 0x16f0: 0x149b, 0x16f1: 0x1827, 0x16f2: 0x0697, 0x16f3: 0x14d3, 0x16f4: 0x0ac3, 0x16f5: 0x14eb, + 0x16f6: 0x182c, 0x16f7: 0x1836, 0x16f8: 0x069b, 0x16f9: 0x069f, 0x16fa: 0x1513, 0x16fb: 0x183b, + 0x16fc: 0x06a3, 0x16fd: 0x1840, 0x16fe: 0x152b, 0x16ff: 0x152b, + // Block 0x5c, offset 
0x1700 + 0x1700: 0x1533, 0x1701: 0x1845, 0x1702: 0x154b, 0x1703: 0x06a7, 0x1704: 0x155b, 0x1705: 0x1567, + 0x1706: 0x156f, 0x1707: 0x1577, 0x1708: 0x06ab, 0x1709: 0x184a, 0x170a: 0x158b, 0x170b: 0x15a7, + 0x170c: 0x15b3, 0x170d: 0x06af, 0x170e: 0x06b3, 0x170f: 0x15b7, 0x1710: 0x184f, 0x1711: 0x06b7, + 0x1712: 0x1854, 0x1713: 0x1859, 0x1714: 0x185e, 0x1715: 0x15db, 0x1716: 0x06bb, 0x1717: 0x15ef, + 0x1718: 0x15f7, 0x1719: 0x15fb, 0x171a: 0x1603, 0x171b: 0x160b, 0x171c: 0x1613, 0x171d: 0x1868, +} + +// nfkcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5b, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5c, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5d, 0xcb: 0x5e, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x5f, 0xd2: 0x60, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x61, + 0xd8: 0x62, 0xd9: 0x0d, 0xdb: 0x63, 0xdc: 0x64, 0xdd: 0x65, 0xdf: 0x66, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x67, 0x121: 0x68, 0x123: 0x69, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0e, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94, + 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x0f, 0x176: 0x10, 0x177: 0x11, + 0x178: 0x12, 0x179: 0x13, 0x17a: 0x14, 0x17b: 0x15, 0x17c: 0x16, 0x17d: 0x17, 0x17e: 0x18, 0x17f: 0x19, + // Block 0x6, offset 0x180 + 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1a, 0x185: 0x1b, 0x186: 0x9c, 0x187: 0x9d, + 0x188: 0x9e, 0x189: 0x1c, 0x18a: 0x1d, 0x18b: 0x9f, 0x18c: 0xa0, + 0x191: 0x1e, 0x192: 0x1f, 0x193: 0xa1, + 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4, + 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8, + 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x20, 0x1bd: 0x21, 0x1be: 0x22, 0x1bf: 0xab, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xac, 0x1c1: 0x23, 0x1c2: 0x24, 0x1c3: 0x25, 0x1c4: 0xad, 0x1c5: 0x26, 0x1c6: 0x27, + 0x1c8: 0x28, 0x1c9: 0x29, 0x1ca: 0x2a, 0x1cb: 0x2b, 0x1cc: 0x2c, 0x1cd: 0x2d, 0x1ce: 0x2e, 0x1cf: 0x2f, + // Block 0x8, offset 0x200 + 0x219: 0xae, 0x21a: 0xaf, 0x21b: 0xb0, 0x21d: 0xb1, 0x21f: 0xb2, + 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8, + 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc, + 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd, + 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe, + // Block 0x9, offset 0x240 + 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf, + 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0, + 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1, + 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 
0xc0, 0x25e: 0xc1, 0x25f: 0xc2, + 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3, + 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd, + 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe, + 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf, + // Block 0xa, offset 0x280 + 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0, + 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1, + 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2, + 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3, + 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd, + 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe, + 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf, + 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1, + 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2, + 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3, + 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4, + // Block 0xc, offset 0x300 + 0x324: 0x30, 0x325: 0x31, 0x326: 0x32, 0x327: 0x33, + 0x328: 0x34, 0x329: 0x35, 0x32a: 0x36, 0x32b: 0x37, 0x32c: 0x38, 0x32d: 0x39, 0x32e: 0x3a, 0x32f: 0x3b, + 0x330: 0x3c, 0x331: 0x3d, 0x332: 0x3e, 0x333: 0x3f, 0x334: 0x40, 0x335: 0x41, 0x336: 0x42, 0x337: 0x43, + 0x338: 0x44, 0x339: 0x45, 0x33a: 0x46, 0x33b: 0x47, 0x33c: 0xc5, 0x33d: 0x48, 0x33e: 0x49, 0x33f: 0x4a, + // Block 0xd, offset 0x340 + 0x347: 0xc6, + 0x34b: 0xc7, 0x34d: 0xc8, + 0x368: 0xc9, 0x36b: 0xca, + // Block 0xe, offset 0x380 + 0x381: 0xcb, 0x382: 0xcc, 0x384: 0xcd, 0x385: 0xb7, 0x387: 0xce, + 0x388: 0xcf, 0x38b: 0xd0, 0x38c: 0x6c, 0x38d: 0xd1, + 0x391: 0xd2, 0x392: 0xd3, 0x393: 0xd4, 0x396: 0xd5, 0x397: 0xd6, + 0x398: 0xd7, 0x39a: 0xd8, 0x39c: 0xd9, + 0x3a8: 0xda, 0x3a9: 0xdb, 0x3aa: 0xdc, + 0x3b0: 0xd7, 0x3b5: 0xdd, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xde, 0x3ec: 0xdf, + // Block 0x10, offset 0x400 + 0x432: 0xe0, + // Block 0x11, offset 0x440 + 0x445: 0xe1, 0x446: 0xe2, 0x447: 0xe3, + 0x449: 0xe4, + 0x450: 0xe5, 0x451: 0xe6, 0x452: 0xe7, 0x453: 0xe8, 0x454: 0xe9, 0x455: 0xea, 0x456: 0xeb, 0x457: 0xec, + 0x458: 0xed, 0x459: 0xee, 0x45a: 0x4b, 0x45b: 0xef, 0x45c: 0xf0, 0x45d: 0xf1, 0x45e: 0xf2, 0x45f: 0x4c, + // Block 0x12, offset 0x480 + 0x480: 0xf3, + 0x4a3: 0xf4, 0x4a5: 0xf5, + 0x4b8: 0x4d, 0x4b9: 0x4e, 0x4ba: 0x4f, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x50, 0x4c5: 0xf6, 0x4c6: 0xf7, + 0x4c8: 0x51, 0x4c9: 0xf8, + // Block 0x14, offset 0x500 + 0x520: 0x52, 0x521: 0x53, 0x522: 0x54, 0x523: 0x55, 0x524: 0x56, 0x525: 0x57, 0x526: 0x58, 0x527: 0x59, + 0x528: 0x5a, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 158 entries, 316 bytes 
+var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x6f, 0x74, 0x76, 0x87, 0x8f, 0x96, 0x99, 0xa0, 0xa4, 0xa8, 0xaa, 0xac, 0xb5, 0xb9, 0xc0, 0xc5, 0xc8, 0xd2, 0xd5, 0xdc, 0xe4, 0xe8, 0xea, 0xed, 0xf1, 0xf7, 0x108, 0x114, 0x116, 0x11c, 0x11e, 0x120, 0x122, 0x124, 0x126, 0x128, 0x12a, 0x12d, 0x130, 0x132, 0x135, 0x138, 0x13c, 0x141, 0x14a, 0x14c, 0x14f, 0x151, 0x15c, 0x167, 0x175, 0x183, 0x193, 0x1a1, 0x1a8, 0x1ae, 0x1bd, 0x1c1, 0x1c3, 0x1c7, 0x1c9, 0x1cc, 0x1ce, 0x1d1, 0x1d3, 0x1d6, 0x1d8, 0x1da, 0x1dc, 0x1e8, 0x1f2, 0x1fc, 0x1ff, 0x203, 0x205, 0x207, 0x209, 0x20b, 0x20e, 0x210, 0x212, 0x214, 0x216, 0x21c, 0x21f, 0x223, 0x225, 0x22c, 0x232, 0x238, 0x240, 0x246, 0x24c, 0x252, 0x256, 0x258, 0x25a, 0x25c, 0x25e, 0x264, 0x267, 0x26a, 0x272, 0x279, 0x27c, 0x27f, 0x281, 0x289, 0x28c, 0x293, 0x296, 0x29c, 0x29e, 0x2a0, 0x2a3, 0x2a5, 0x2a7, 0x2a9, 0x2ab, 0x2ae, 0x2b0, 0x2b2, 0x2b4, 0x2c1, 0x2cb, 0x2cd, 0x2cf, 0x2d3, 0x2d8, 0x2e4, 0x2e9, 0x2f2, 0x2f8, 0x2fd, 0x301, 0x306, 0x30a, 0x31a, 0x328, 0x336, 0x344, 0x34a, 0x34c, 0x34f, 0x359, 0x35b} + +// nfkcSparseValues: 869 entries, 3476 bytes +var nfkcSparseValues = [869]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x4278, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x4264, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x425a, lo: 0xb4, hi: 0xb4}, + {value: 0x01dc, lo: 0xb5, hi: 0xb5}, + {value: 0x4291, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x221c, lo: 0xbc, hi: 0xbc}, + {value: 0x2210, lo: 0xbd, hi: 0xbd}, + {value: 0x22b2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x426e, lo: 0x98, hi: 0x98}, + {value: 0x4273, lo: 0x99, hi: 0x9a}, + {value: 0x4296, lo: 0x9b, hi: 0x9b}, + {value: 0x425f, lo: 0x9c, hi: 0x9c}, + {value: 0x4282, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x0167, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, 
lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + // Block 0xc, offset 0x6f + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x74 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x76 + {value: 0x0000, lo: 0x10}, + {value: 0x8132, lo: 0x94, hi: 0xa1}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xa9, hi: 0xa9}, + {value: 0x8132, lo: 0xaa, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xaf}, + {value: 0x8116, lo: 0xb0, hi: 0xb0}, + {value: 0x8117, lo: 0xb1, hi: 0xb1}, + {value: 0x8118, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb5}, + {value: 0x812d, lo: 0xb6, hi: 0xb6}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x812d, lo: 0xb9, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbf}, + // Block 0xf, offset 0x87 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x8f + {value: 0x0008, lo: 
0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x96 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x99 + {value: 0x0008, lo: 0x06}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + // Block 0x13, offset 0xa0 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0xa4 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0xa8 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0xaa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0xac + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0xb5 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xc0 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xc5 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xc8 + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xd2 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xd5 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xdc + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 
0x9e, hi: 0x9f}, + // Block 0x20, offset 0xe4 + {value: 0x0000, lo: 0x03}, + {value: 0x2621, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xe8 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xea + {value: 0x0000, lo: 0x02}, + {value: 0x2636, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x23, offset 0xed + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x2628, lo: 0x9c, hi: 0x9c}, + {value: 0x262f, lo: 0x9d, hi: 0x9d}, + // Block 0x24, offset 0xf1 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xf7 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x45f4, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x45ff, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0x108 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0x116 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0x11c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0x11e + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2b, offset 0x120 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x122 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x124 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x126 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x128 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x12a + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x12d + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x130 + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x132 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 
0xbb, hi: 0xbb}, + // Block 0x34, offset 0x135 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x138 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x13c + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x37, offset 0x141 + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x14a + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x151 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x15c + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x167 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429b, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2691, lo: 0xb3, hi: 0xb3}, + {value: 0x27fe, lo: 0xb4, hi: 0xb4}, + {value: 0x2698, lo: 0xb6, hi: 0xb6}, + {value: 0x2808, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x4269, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, offset 0x175 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x298e, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x3f, offset 0x183 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 
0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1999, lo: 0xa8, hi: 0xa8}, + // Block 0x40, offset 0x193 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x41, offset 0x1a1 + {value: 0x0007, lo: 0x06}, + {value: 0x2180, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x42, offset 0x1a8 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x43, offset 0x1ae + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0x269f, lo: 0xac, hi: 0xad}, + {value: 0x26a6, lo: 0xaf, hi: 0xaf}, + {value: 0x281c, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x44, offset 0x1bd + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x45, offset 0x1c1 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x46, offset 0x1c3 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x47, offset 0x1c7 + {value: 0x0000, lo: 0x01}, + {value: 0x299b, lo: 0x8c, hi: 0x8c}, + // Block 0x48, offset 0x1c9 + {value: 0x0263, lo: 0x02}, + {value: 0x1b8c, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x49, offset 0x1cc + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x4a, offset 0x1ce + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4b, offset 0x1d1 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4c, offset 0x1d3 + {value: 0x0000, lo: 0x02}, + {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4d, offset 0x1d6 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4e, offset 0x1d8 + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x4f, offset 0x1da + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x50, 
offset 0x1dc + {value: 0x0004, lo: 0x0b}, + {value: 0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x51, offset 0x1e8 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x52, offset 0x1f2 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3b, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a41, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a4d, lo: 0xba, hi: 0xbf}, + // Block 0x53, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x54, offset 0x1ff + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x55, offset 0x203 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x56, offset 0x205 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x57, offset 0x207 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x58, offset 0x209 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x59, offset 0x20b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x5a, offset 0x20e + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5b, offset 0x210 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5c, offset 0x212 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5d, offset 0x214 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5e, offset 0x216 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x5f, offset 0x21c + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x60, offset 0x21f + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x61, offset 0x223 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x62, offset 0x225 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x63, offset 0x22c + {value: 0x0000, lo: 0x05}, + {value: 
0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x64, offset 0x232 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x65, offset 0x238 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x66, offset 0x240 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x67, offset 0x246 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x68, offset 0x24c + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x69, offset 0x252 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6a, offset 0x256 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6b, offset 0x258 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6c, offset 0x25a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6d, offset 0x25c + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6e, offset 0x25e + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x6f, offset 0x264 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x70, offset 0x267 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x71, offset 0x26a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x72, offset 0x272 + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x73, offset 0x279 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x74, offset 0x27c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x75, offset 
0x27f + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x76, offset 0x281 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x77, offset 0x289 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + // Block 0x78, offset 0x28c + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x79, offset 0x293 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7a, offset 0x296 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x29c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7c, offset 0x29e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x2a0 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x7e, offset 0x2a3 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x7f, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x80, offset 0x2a7 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x81, offset 0x2a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x82, offset 0x2ab + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x83, offset 0x2ae + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x84, offset 0x2b0 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x85, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x86, offset 0x2b4 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x87, offset 0x2c1 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x88, offset 0x2cb + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x89, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8a, offset 0x2cf + 
{value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x8b, offset 0x2d3 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x8c, offset 0x2d8 + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x8d, offset 0x2e4 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x8e, offset 0x2e9 + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x8f, offset 0x2f2 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x90, offset 0x2f8 + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x91, offset 0x2fd + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x92, offset 0x301 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x93, offset 0x306 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x94, offset 0x30a + {value: 0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x31a + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 
0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x96, offset 0x328 + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x336 + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x98, offset 0x344 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x99, offset 0x34a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x9a, offset 0x34c + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x9b, offset 0x34f + {value: 0x0002, lo: 0x09}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1981, lo: 0x8b, hi: 0x8b}, + {value: 0x199c, lo: 0x8c, hi: 0x8c}, + {value: 0x19a2, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc0, lo: 0x8e, hi: 0x8e}, + {value: 0x19ae, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + // Block 0x9c, offset 0x359 + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0x9d, offset 0x35b + {value: 0x0028, lo: 0x09}, + {value: 0x2862, lo: 0x80, hi: 0x80}, + {value: 0x2826, lo: 0x81, hi: 0x81}, + {value: 0x2830, lo: 0x82, hi: 0x82}, + {value: 0x2844, lo: 0x83, hi: 0x84}, + {value: 0x284e, lo: 0x85, hi: 0x86}, + {value: 0x283a, lo: 0x87, hi: 0x87}, + {value: 0x2858, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap = map[uint32]rune{ + 0x00410300: 0x00C0, + 0x00410301: 0x00C1, + 0x00410302: 0x00C2, + 0x00410303: 0x00C3, + 0x00410308: 0x00C4, + 0x0041030A: 0x00C5, + 0x00430327: 0x00C7, + 0x00450300: 0x00C8, + 0x00450301: 0x00C9, + 0x00450302: 0x00CA, + 0x00450308: 0x00CB, + 0x00490300: 0x00CC, + 0x00490301: 0x00CD, + 0x00490302: 0x00CE, + 0x00490308: 0x00CF, + 0x004E0303: 0x00D1, + 0x004F0300: 0x00D2, + 0x004F0301: 0x00D3, + 0x004F0302: 0x00D4, + 0x004F0303: 0x00D5, + 0x004F0308: 0x00D6, + 0x00550300: 0x00D9, + 0x00550301: 0x00DA, + 0x00550302: 0x00DB, + 0x00550308: 0x00DC, + 0x00590301: 0x00DD, + 0x00610300: 0x00E0, + 0x00610301: 0x00E1, + 0x00610302: 0x00E2, + 0x00610303: 0x00E3, + 0x00610308: 0x00E4, + 0x0061030A: 0x00E5, + 0x00630327: 0x00E7, + 0x00650300: 0x00E8, + 0x00650301: 0x00E9, + 0x00650302: 0x00EA, + 0x00650308: 
0x00EB, + 0x00690300: 0x00EC, + 0x00690301: 0x00ED, + 0x00690302: 0x00EE, + 0x00690308: 0x00EF, + 0x006E0303: 0x00F1, + 0x006F0300: 0x00F2, + 0x006F0301: 0x00F3, + 0x006F0302: 0x00F4, + 0x006F0303: 0x00F5, + 0x006F0308: 0x00F6, + 0x00750300: 0x00F9, + 0x00750301: 0x00FA, + 0x00750302: 0x00FB, + 0x00750308: 0x00FC, + 0x00790301: 0x00FD, + 0x00790308: 0x00FF, + 0x00410304: 0x0100, + 0x00610304: 0x0101, + 0x00410306: 0x0102, + 0x00610306: 0x0103, + 0x00410328: 0x0104, + 0x00610328: 0x0105, + 0x00430301: 0x0106, + 0x00630301: 0x0107, + 0x00430302: 0x0108, + 0x00630302: 0x0109, + 0x00430307: 0x010A, + 0x00630307: 0x010B, + 0x0043030C: 0x010C, + 0x0063030C: 0x010D, + 0x0044030C: 0x010E, + 0x0064030C: 0x010F, + 0x00450304: 0x0112, + 0x00650304: 0x0113, + 0x00450306: 0x0114, + 0x00650306: 0x0115, + 0x00450307: 0x0116, + 0x00650307: 0x0117, + 0x00450328: 0x0118, + 0x00650328: 0x0119, + 0x0045030C: 0x011A, + 0x0065030C: 0x011B, + 0x00470302: 0x011C, + 0x00670302: 0x011D, + 0x00470306: 0x011E, + 0x00670306: 0x011F, + 0x00470307: 0x0120, + 0x00670307: 0x0121, + 0x00470327: 0x0122, + 0x00670327: 0x0123, + 0x00480302: 0x0124, + 0x00680302: 0x0125, + 0x00490303: 0x0128, + 0x00690303: 0x0129, + 0x00490304: 0x012A, + 0x00690304: 0x012B, + 0x00490306: 0x012C, + 0x00690306: 0x012D, + 0x00490328: 0x012E, + 0x00690328: 0x012F, + 0x00490307: 0x0130, + 0x004A0302: 0x0134, + 0x006A0302: 0x0135, + 0x004B0327: 0x0136, + 0x006B0327: 0x0137, + 0x004C0301: 0x0139, + 0x006C0301: 0x013A, + 0x004C0327: 0x013B, + 0x006C0327: 0x013C, + 0x004C030C: 0x013D, + 0x006C030C: 0x013E, + 0x004E0301: 0x0143, + 0x006E0301: 0x0144, + 0x004E0327: 0x0145, + 0x006E0327: 0x0146, + 0x004E030C: 0x0147, + 0x006E030C: 0x0148, + 0x004F0304: 0x014C, + 0x006F0304: 0x014D, + 0x004F0306: 0x014E, + 0x006F0306: 0x014F, + 0x004F030B: 0x0150, + 0x006F030B: 0x0151, + 0x00520301: 0x0154, + 0x00720301: 0x0155, + 0x00520327: 0x0156, + 0x00720327: 0x0157, + 0x0052030C: 0x0158, + 0x0072030C: 0x0159, + 0x00530301: 0x015A, + 0x00730301: 0x015B, + 0x00530302: 0x015C, + 0x00730302: 0x015D, + 0x00530327: 0x015E, + 0x00730327: 0x015F, + 0x0053030C: 0x0160, + 0x0073030C: 0x0161, + 0x00540327: 0x0162, + 0x00740327: 0x0163, + 0x0054030C: 0x0164, + 0x0074030C: 0x0165, + 0x00550303: 0x0168, + 0x00750303: 0x0169, + 0x00550304: 0x016A, + 0x00750304: 0x016B, + 0x00550306: 0x016C, + 0x00750306: 0x016D, + 0x0055030A: 0x016E, + 0x0075030A: 0x016F, + 0x0055030B: 0x0170, + 0x0075030B: 0x0171, + 0x00550328: 0x0172, + 0x00750328: 0x0173, + 0x00570302: 0x0174, + 0x00770302: 0x0175, + 0x00590302: 0x0176, + 0x00790302: 0x0177, + 0x00590308: 0x0178, + 0x005A0301: 0x0179, + 0x007A0301: 0x017A, + 0x005A0307: 0x017B, + 0x007A0307: 0x017C, + 0x005A030C: 0x017D, + 0x007A030C: 0x017E, + 0x004F031B: 0x01A0, + 0x006F031B: 0x01A1, + 0x0055031B: 0x01AF, + 0x0075031B: 0x01B0, + 0x0041030C: 0x01CD, + 0x0061030C: 0x01CE, + 0x0049030C: 0x01CF, + 0x0069030C: 0x01D0, + 0x004F030C: 0x01D1, + 0x006F030C: 0x01D2, + 0x0055030C: 0x01D3, + 0x0075030C: 0x01D4, + 0x00DC0304: 0x01D5, + 0x00FC0304: 0x01D6, + 0x00DC0301: 0x01D7, + 0x00FC0301: 0x01D8, + 0x00DC030C: 0x01D9, + 0x00FC030C: 0x01DA, + 0x00DC0300: 0x01DB, + 0x00FC0300: 0x01DC, + 0x00C40304: 0x01DE, + 0x00E40304: 0x01DF, + 0x02260304: 0x01E0, + 0x02270304: 0x01E1, + 0x00C60304: 0x01E2, + 0x00E60304: 0x01E3, + 0x0047030C: 0x01E6, + 0x0067030C: 0x01E7, + 0x004B030C: 0x01E8, + 0x006B030C: 0x01E9, + 0x004F0328: 0x01EA, + 0x006F0328: 0x01EB, + 0x01EA0304: 0x01EC, + 0x01EB0304: 0x01ED, + 0x01B7030C: 0x01EE, + 0x0292030C: 0x01EF, + 0x006A030C: 0x01F0, + 
0x00470301: 0x01F4, + 0x00670301: 0x01F5, + 0x004E0300: 0x01F8, + 0x006E0300: 0x01F9, + 0x00C50301: 0x01FA, + 0x00E50301: 0x01FB, + 0x00C60301: 0x01FC, + 0x00E60301: 0x01FD, + 0x00D80301: 0x01FE, + 0x00F80301: 0x01FF, + 0x0041030F: 0x0200, + 0x0061030F: 0x0201, + 0x00410311: 0x0202, + 0x00610311: 0x0203, + 0x0045030F: 0x0204, + 0x0065030F: 0x0205, + 0x00450311: 0x0206, + 0x00650311: 0x0207, + 0x0049030F: 0x0208, + 0x0069030F: 0x0209, + 0x00490311: 0x020A, + 0x00690311: 0x020B, + 0x004F030F: 0x020C, + 0x006F030F: 0x020D, + 0x004F0311: 0x020E, + 0x006F0311: 0x020F, + 0x0052030F: 0x0210, + 0x0072030F: 0x0211, + 0x00520311: 0x0212, + 0x00720311: 0x0213, + 0x0055030F: 0x0214, + 0x0075030F: 0x0215, + 0x00550311: 0x0216, + 0x00750311: 0x0217, + 0x00530326: 0x0218, + 0x00730326: 0x0219, + 0x00540326: 0x021A, + 0x00740326: 0x021B, + 0x0048030C: 0x021E, + 0x0068030C: 0x021F, + 0x00410307: 0x0226, + 0x00610307: 0x0227, + 0x00450327: 0x0228, + 0x00650327: 0x0229, + 0x00D60304: 0x022A, + 0x00F60304: 0x022B, + 0x00D50304: 0x022C, + 0x00F50304: 0x022D, + 0x004F0307: 0x022E, + 0x006F0307: 0x022F, + 0x022E0304: 0x0230, + 0x022F0304: 0x0231, + 0x00590304: 0x0232, + 0x00790304: 0x0233, + 0x00A80301: 0x0385, + 0x03910301: 0x0386, + 0x03950301: 0x0388, + 0x03970301: 0x0389, + 0x03990301: 0x038A, + 0x039F0301: 0x038C, + 0x03A50301: 0x038E, + 0x03A90301: 0x038F, + 0x03CA0301: 0x0390, + 0x03990308: 0x03AA, + 0x03A50308: 0x03AB, + 0x03B10301: 0x03AC, + 0x03B50301: 0x03AD, + 0x03B70301: 0x03AE, + 0x03B90301: 0x03AF, + 0x03CB0301: 0x03B0, + 0x03B90308: 0x03CA, + 0x03C50308: 0x03CB, + 0x03BF0301: 0x03CC, + 0x03C50301: 0x03CD, + 0x03C90301: 0x03CE, + 0x03D20301: 0x03D3, + 0x03D20308: 0x03D4, + 0x04150300: 0x0400, + 0x04150308: 0x0401, + 0x04130301: 0x0403, + 0x04060308: 0x0407, + 0x041A0301: 0x040C, + 0x04180300: 0x040D, + 0x04230306: 0x040E, + 0x04180306: 0x0419, + 0x04380306: 0x0439, + 0x04350300: 0x0450, + 0x04350308: 0x0451, + 0x04330301: 0x0453, + 0x04560308: 0x0457, + 0x043A0301: 0x045C, + 0x04380300: 0x045D, + 0x04430306: 0x045E, + 0x0474030F: 0x0476, + 0x0475030F: 0x0477, + 0x04160306: 0x04C1, + 0x04360306: 0x04C2, + 0x04100306: 0x04D0, + 0x04300306: 0x04D1, + 0x04100308: 0x04D2, + 0x04300308: 0x04D3, + 0x04150306: 0x04D6, + 0x04350306: 0x04D7, + 0x04D80308: 0x04DA, + 0x04D90308: 0x04DB, + 0x04160308: 0x04DC, + 0x04360308: 0x04DD, + 0x04170308: 0x04DE, + 0x04370308: 0x04DF, + 0x04180304: 0x04E2, + 0x04380304: 0x04E3, + 0x04180308: 0x04E4, + 0x04380308: 0x04E5, + 0x041E0308: 0x04E6, + 0x043E0308: 0x04E7, + 0x04E80308: 0x04EA, + 0x04E90308: 0x04EB, + 0x042D0308: 0x04EC, + 0x044D0308: 0x04ED, + 0x04230304: 0x04EE, + 0x04430304: 0x04EF, + 0x04230308: 0x04F0, + 0x04430308: 0x04F1, + 0x0423030B: 0x04F2, + 0x0443030B: 0x04F3, + 0x04270308: 0x04F4, + 0x04470308: 0x04F5, + 0x042B0308: 0x04F8, + 0x044B0308: 0x04F9, + 0x06270653: 0x0622, + 0x06270654: 0x0623, + 0x06480654: 0x0624, + 0x06270655: 0x0625, + 0x064A0654: 0x0626, + 0x06D50654: 0x06C0, + 0x06C10654: 0x06C2, + 0x06D20654: 0x06D3, + 0x0928093C: 0x0929, + 0x0930093C: 0x0931, + 0x0933093C: 0x0934, + 0x09C709BE: 0x09CB, + 0x09C709D7: 0x09CC, + 0x0B470B56: 0x0B48, + 0x0B470B3E: 0x0B4B, + 0x0B470B57: 0x0B4C, + 0x0B920BD7: 0x0B94, + 0x0BC60BBE: 0x0BCA, + 0x0BC70BBE: 0x0BCB, + 0x0BC60BD7: 0x0BCC, + 0x0C460C56: 0x0C48, + 0x0CBF0CD5: 0x0CC0, + 0x0CC60CD5: 0x0CC7, + 0x0CC60CD6: 0x0CC8, + 0x0CC60CC2: 0x0CCA, + 0x0CCA0CD5: 0x0CCB, + 0x0D460D3E: 0x0D4A, + 0x0D470D3E: 0x0D4B, + 0x0D460D57: 0x0D4C, + 0x0DD90DCA: 0x0DDA, + 0x0DD90DCF: 0x0DDC, + 0x0DDC0DCA: 0x0DDD, + 0x0DD90DDF: 
0x0DDE, + 0x1025102E: 0x1026, + 0x1B051B35: 0x1B06, + 0x1B071B35: 0x1B08, + 0x1B091B35: 0x1B0A, + 0x1B0B1B35: 0x1B0C, + 0x1B0D1B35: 0x1B0E, + 0x1B111B35: 0x1B12, + 0x1B3A1B35: 0x1B3B, + 0x1B3C1B35: 0x1B3D, + 0x1B3E1B35: 0x1B40, + 0x1B3F1B35: 0x1B41, + 0x1B421B35: 0x1B43, + 0x00410325: 0x1E00, + 0x00610325: 0x1E01, + 0x00420307: 0x1E02, + 0x00620307: 0x1E03, + 0x00420323: 0x1E04, + 0x00620323: 0x1E05, + 0x00420331: 0x1E06, + 0x00620331: 0x1E07, + 0x00C70301: 0x1E08, + 0x00E70301: 0x1E09, + 0x00440307: 0x1E0A, + 0x00640307: 0x1E0B, + 0x00440323: 0x1E0C, + 0x00640323: 0x1E0D, + 0x00440331: 0x1E0E, + 0x00640331: 0x1E0F, + 0x00440327: 0x1E10, + 0x00640327: 0x1E11, + 0x0044032D: 0x1E12, + 0x0064032D: 0x1E13, + 0x01120300: 0x1E14, + 0x01130300: 0x1E15, + 0x01120301: 0x1E16, + 0x01130301: 0x1E17, + 0x0045032D: 0x1E18, + 0x0065032D: 0x1E19, + 0x00450330: 0x1E1A, + 0x00650330: 0x1E1B, + 0x02280306: 0x1E1C, + 0x02290306: 0x1E1D, + 0x00460307: 0x1E1E, + 0x00660307: 0x1E1F, + 0x00470304: 0x1E20, + 0x00670304: 0x1E21, + 0x00480307: 0x1E22, + 0x00680307: 0x1E23, + 0x00480323: 0x1E24, + 0x00680323: 0x1E25, + 0x00480308: 0x1E26, + 0x00680308: 0x1E27, + 0x00480327: 0x1E28, + 0x00680327: 0x1E29, + 0x0048032E: 0x1E2A, + 0x0068032E: 0x1E2B, + 0x00490330: 0x1E2C, + 0x00690330: 0x1E2D, + 0x00CF0301: 0x1E2E, + 0x00EF0301: 0x1E2F, + 0x004B0301: 0x1E30, + 0x006B0301: 0x1E31, + 0x004B0323: 0x1E32, + 0x006B0323: 0x1E33, + 0x004B0331: 0x1E34, + 0x006B0331: 0x1E35, + 0x004C0323: 0x1E36, + 0x006C0323: 0x1E37, + 0x1E360304: 0x1E38, + 0x1E370304: 0x1E39, + 0x004C0331: 0x1E3A, + 0x006C0331: 0x1E3B, + 0x004C032D: 0x1E3C, + 0x006C032D: 0x1E3D, + 0x004D0301: 0x1E3E, + 0x006D0301: 0x1E3F, + 0x004D0307: 0x1E40, + 0x006D0307: 0x1E41, + 0x004D0323: 0x1E42, + 0x006D0323: 0x1E43, + 0x004E0307: 0x1E44, + 0x006E0307: 0x1E45, + 0x004E0323: 0x1E46, + 0x006E0323: 0x1E47, + 0x004E0331: 0x1E48, + 0x006E0331: 0x1E49, + 0x004E032D: 0x1E4A, + 0x006E032D: 0x1E4B, + 0x00D50301: 0x1E4C, + 0x00F50301: 0x1E4D, + 0x00D50308: 0x1E4E, + 0x00F50308: 0x1E4F, + 0x014C0300: 0x1E50, + 0x014D0300: 0x1E51, + 0x014C0301: 0x1E52, + 0x014D0301: 0x1E53, + 0x00500301: 0x1E54, + 0x00700301: 0x1E55, + 0x00500307: 0x1E56, + 0x00700307: 0x1E57, + 0x00520307: 0x1E58, + 0x00720307: 0x1E59, + 0x00520323: 0x1E5A, + 0x00720323: 0x1E5B, + 0x1E5A0304: 0x1E5C, + 0x1E5B0304: 0x1E5D, + 0x00520331: 0x1E5E, + 0x00720331: 0x1E5F, + 0x00530307: 0x1E60, + 0x00730307: 0x1E61, + 0x00530323: 0x1E62, + 0x00730323: 0x1E63, + 0x015A0307: 0x1E64, + 0x015B0307: 0x1E65, + 0x01600307: 0x1E66, + 0x01610307: 0x1E67, + 0x1E620307: 0x1E68, + 0x1E630307: 0x1E69, + 0x00540307: 0x1E6A, + 0x00740307: 0x1E6B, + 0x00540323: 0x1E6C, + 0x00740323: 0x1E6D, + 0x00540331: 0x1E6E, + 0x00740331: 0x1E6F, + 0x0054032D: 0x1E70, + 0x0074032D: 0x1E71, + 0x00550324: 0x1E72, + 0x00750324: 0x1E73, + 0x00550330: 0x1E74, + 0x00750330: 0x1E75, + 0x0055032D: 0x1E76, + 0x0075032D: 0x1E77, + 0x01680301: 0x1E78, + 0x01690301: 0x1E79, + 0x016A0308: 0x1E7A, + 0x016B0308: 0x1E7B, + 0x00560303: 0x1E7C, + 0x00760303: 0x1E7D, + 0x00560323: 0x1E7E, + 0x00760323: 0x1E7F, + 0x00570300: 0x1E80, + 0x00770300: 0x1E81, + 0x00570301: 0x1E82, + 0x00770301: 0x1E83, + 0x00570308: 0x1E84, + 0x00770308: 0x1E85, + 0x00570307: 0x1E86, + 0x00770307: 0x1E87, + 0x00570323: 0x1E88, + 0x00770323: 0x1E89, + 0x00580307: 0x1E8A, + 0x00780307: 0x1E8B, + 0x00580308: 0x1E8C, + 0x00780308: 0x1E8D, + 0x00590307: 0x1E8E, + 0x00790307: 0x1E8F, + 0x005A0302: 0x1E90, + 0x007A0302: 0x1E91, + 0x005A0323: 0x1E92, + 0x007A0323: 0x1E93, + 0x005A0331: 0x1E94, + 
0x007A0331: 0x1E95, + 0x00680331: 0x1E96, + 0x00740308: 0x1E97, + 0x0077030A: 0x1E98, + 0x0079030A: 0x1E99, + 0x017F0307: 0x1E9B, + 0x00410323: 0x1EA0, + 0x00610323: 0x1EA1, + 0x00410309: 0x1EA2, + 0x00610309: 0x1EA3, + 0x00C20301: 0x1EA4, + 0x00E20301: 0x1EA5, + 0x00C20300: 0x1EA6, + 0x00E20300: 0x1EA7, + 0x00C20309: 0x1EA8, + 0x00E20309: 0x1EA9, + 0x00C20303: 0x1EAA, + 0x00E20303: 0x1EAB, + 0x1EA00302: 0x1EAC, + 0x1EA10302: 0x1EAD, + 0x01020301: 0x1EAE, + 0x01030301: 0x1EAF, + 0x01020300: 0x1EB0, + 0x01030300: 0x1EB1, + 0x01020309: 0x1EB2, + 0x01030309: 0x1EB3, + 0x01020303: 0x1EB4, + 0x01030303: 0x1EB5, + 0x1EA00306: 0x1EB6, + 0x1EA10306: 0x1EB7, + 0x00450323: 0x1EB8, + 0x00650323: 0x1EB9, + 0x00450309: 0x1EBA, + 0x00650309: 0x1EBB, + 0x00450303: 0x1EBC, + 0x00650303: 0x1EBD, + 0x00CA0301: 0x1EBE, + 0x00EA0301: 0x1EBF, + 0x00CA0300: 0x1EC0, + 0x00EA0300: 0x1EC1, + 0x00CA0309: 0x1EC2, + 0x00EA0309: 0x1EC3, + 0x00CA0303: 0x1EC4, + 0x00EA0303: 0x1EC5, + 0x1EB80302: 0x1EC6, + 0x1EB90302: 0x1EC7, + 0x00490309: 0x1EC8, + 0x00690309: 0x1EC9, + 0x00490323: 0x1ECA, + 0x00690323: 0x1ECB, + 0x004F0323: 0x1ECC, + 0x006F0323: 0x1ECD, + 0x004F0309: 0x1ECE, + 0x006F0309: 0x1ECF, + 0x00D40301: 0x1ED0, + 0x00F40301: 0x1ED1, + 0x00D40300: 0x1ED2, + 0x00F40300: 0x1ED3, + 0x00D40309: 0x1ED4, + 0x00F40309: 0x1ED5, + 0x00D40303: 0x1ED6, + 0x00F40303: 0x1ED7, + 0x1ECC0302: 0x1ED8, + 0x1ECD0302: 0x1ED9, + 0x01A00301: 0x1EDA, + 0x01A10301: 0x1EDB, + 0x01A00300: 0x1EDC, + 0x01A10300: 0x1EDD, + 0x01A00309: 0x1EDE, + 0x01A10309: 0x1EDF, + 0x01A00303: 0x1EE0, + 0x01A10303: 0x1EE1, + 0x01A00323: 0x1EE2, + 0x01A10323: 0x1EE3, + 0x00550323: 0x1EE4, + 0x00750323: 0x1EE5, + 0x00550309: 0x1EE6, + 0x00750309: 0x1EE7, + 0x01AF0301: 0x1EE8, + 0x01B00301: 0x1EE9, + 0x01AF0300: 0x1EEA, + 0x01B00300: 0x1EEB, + 0x01AF0309: 0x1EEC, + 0x01B00309: 0x1EED, + 0x01AF0303: 0x1EEE, + 0x01B00303: 0x1EEF, + 0x01AF0323: 0x1EF0, + 0x01B00323: 0x1EF1, + 0x00590300: 0x1EF2, + 0x00790300: 0x1EF3, + 0x00590323: 0x1EF4, + 0x00790323: 0x1EF5, + 0x00590309: 0x1EF6, + 0x00790309: 0x1EF7, + 0x00590303: 0x1EF8, + 0x00790303: 0x1EF9, + 0x03B10313: 0x1F00, + 0x03B10314: 0x1F01, + 0x1F000300: 0x1F02, + 0x1F010300: 0x1F03, + 0x1F000301: 0x1F04, + 0x1F010301: 0x1F05, + 0x1F000342: 0x1F06, + 0x1F010342: 0x1F07, + 0x03910313: 0x1F08, + 0x03910314: 0x1F09, + 0x1F080300: 0x1F0A, + 0x1F090300: 0x1F0B, + 0x1F080301: 0x1F0C, + 0x1F090301: 0x1F0D, + 0x1F080342: 0x1F0E, + 0x1F090342: 0x1F0F, + 0x03B50313: 0x1F10, + 0x03B50314: 0x1F11, + 0x1F100300: 0x1F12, + 0x1F110300: 0x1F13, + 0x1F100301: 0x1F14, + 0x1F110301: 0x1F15, + 0x03950313: 0x1F18, + 0x03950314: 0x1F19, + 0x1F180300: 0x1F1A, + 0x1F190300: 0x1F1B, + 0x1F180301: 0x1F1C, + 0x1F190301: 0x1F1D, + 0x03B70313: 0x1F20, + 0x03B70314: 0x1F21, + 0x1F200300: 0x1F22, + 0x1F210300: 0x1F23, + 0x1F200301: 0x1F24, + 0x1F210301: 0x1F25, + 0x1F200342: 0x1F26, + 0x1F210342: 0x1F27, + 0x03970313: 0x1F28, + 0x03970314: 0x1F29, + 0x1F280300: 0x1F2A, + 0x1F290300: 0x1F2B, + 0x1F280301: 0x1F2C, + 0x1F290301: 0x1F2D, + 0x1F280342: 0x1F2E, + 0x1F290342: 0x1F2F, + 0x03B90313: 0x1F30, + 0x03B90314: 0x1F31, + 0x1F300300: 0x1F32, + 0x1F310300: 0x1F33, + 0x1F300301: 0x1F34, + 0x1F310301: 0x1F35, + 0x1F300342: 0x1F36, + 0x1F310342: 0x1F37, + 0x03990313: 0x1F38, + 0x03990314: 0x1F39, + 0x1F380300: 0x1F3A, + 0x1F390300: 0x1F3B, + 0x1F380301: 0x1F3C, + 0x1F390301: 0x1F3D, + 0x1F380342: 0x1F3E, + 0x1F390342: 0x1F3F, + 0x03BF0313: 0x1F40, + 0x03BF0314: 0x1F41, + 0x1F400300: 0x1F42, + 0x1F410300: 0x1F43, + 0x1F400301: 0x1F44, + 0x1F410301: 
0x1F45, + 0x039F0313: 0x1F48, + 0x039F0314: 0x1F49, + 0x1F480300: 0x1F4A, + 0x1F490300: 0x1F4B, + 0x1F480301: 0x1F4C, + 0x1F490301: 0x1F4D, + 0x03C50313: 0x1F50, + 0x03C50314: 0x1F51, + 0x1F500300: 0x1F52, + 0x1F510300: 0x1F53, + 0x1F500301: 0x1F54, + 0x1F510301: 0x1F55, + 0x1F500342: 0x1F56, + 0x1F510342: 0x1F57, + 0x03A50314: 0x1F59, + 0x1F590300: 0x1F5B, + 0x1F590301: 0x1F5D, + 0x1F590342: 0x1F5F, + 0x03C90313: 0x1F60, + 0x03C90314: 0x1F61, + 0x1F600300: 0x1F62, + 0x1F610300: 0x1F63, + 0x1F600301: 0x1F64, + 0x1F610301: 0x1F65, + 0x1F600342: 0x1F66, + 0x1F610342: 0x1F67, + 0x03A90313: 0x1F68, + 0x03A90314: 0x1F69, + 0x1F680300: 0x1F6A, + 0x1F690300: 0x1F6B, + 0x1F680301: 0x1F6C, + 0x1F690301: 0x1F6D, + 0x1F680342: 0x1F6E, + 0x1F690342: 0x1F6F, + 0x03B10300: 0x1F70, + 0x03B50300: 0x1F72, + 0x03B70300: 0x1F74, + 0x03B90300: 0x1F76, + 0x03BF0300: 0x1F78, + 0x03C50300: 0x1F7A, + 0x03C90300: 0x1F7C, + 0x1F000345: 0x1F80, + 0x1F010345: 0x1F81, + 0x1F020345: 0x1F82, + 0x1F030345: 0x1F83, + 0x1F040345: 0x1F84, + 0x1F050345: 0x1F85, + 0x1F060345: 0x1F86, + 0x1F070345: 0x1F87, + 0x1F080345: 0x1F88, + 0x1F090345: 0x1F89, + 0x1F0A0345: 0x1F8A, + 0x1F0B0345: 0x1F8B, + 0x1F0C0345: 0x1F8C, + 0x1F0D0345: 0x1F8D, + 0x1F0E0345: 0x1F8E, + 0x1F0F0345: 0x1F8F, + 0x1F200345: 0x1F90, + 0x1F210345: 0x1F91, + 0x1F220345: 0x1F92, + 0x1F230345: 0x1F93, + 0x1F240345: 0x1F94, + 0x1F250345: 0x1F95, + 0x1F260345: 0x1F96, + 0x1F270345: 0x1F97, + 0x1F280345: 0x1F98, + 0x1F290345: 0x1F99, + 0x1F2A0345: 0x1F9A, + 0x1F2B0345: 0x1F9B, + 0x1F2C0345: 0x1F9C, + 0x1F2D0345: 0x1F9D, + 0x1F2E0345: 0x1F9E, + 0x1F2F0345: 0x1F9F, + 0x1F600345: 0x1FA0, + 0x1F610345: 0x1FA1, + 0x1F620345: 0x1FA2, + 0x1F630345: 0x1FA3, + 0x1F640345: 0x1FA4, + 0x1F650345: 0x1FA5, + 0x1F660345: 0x1FA6, + 0x1F670345: 0x1FA7, + 0x1F680345: 0x1FA8, + 0x1F690345: 0x1FA9, + 0x1F6A0345: 0x1FAA, + 0x1F6B0345: 0x1FAB, + 0x1F6C0345: 0x1FAC, + 0x1F6D0345: 0x1FAD, + 0x1F6E0345: 0x1FAE, + 0x1F6F0345: 0x1FAF, + 0x03B10306: 0x1FB0, + 0x03B10304: 0x1FB1, + 0x1F700345: 0x1FB2, + 0x03B10345: 0x1FB3, + 0x03AC0345: 0x1FB4, + 0x03B10342: 0x1FB6, + 0x1FB60345: 0x1FB7, + 0x03910306: 0x1FB8, + 0x03910304: 0x1FB9, + 0x03910300: 0x1FBA, + 0x03910345: 0x1FBC, + 0x00A80342: 0x1FC1, + 0x1F740345: 0x1FC2, + 0x03B70345: 0x1FC3, + 0x03AE0345: 0x1FC4, + 0x03B70342: 0x1FC6, + 0x1FC60345: 0x1FC7, + 0x03950300: 0x1FC8, + 0x03970300: 0x1FCA, + 0x03970345: 0x1FCC, + 0x1FBF0300: 0x1FCD, + 0x1FBF0301: 0x1FCE, + 0x1FBF0342: 0x1FCF, + 0x03B90306: 0x1FD0, + 0x03B90304: 0x1FD1, + 0x03CA0300: 0x1FD2, + 0x03B90342: 0x1FD6, + 0x03CA0342: 0x1FD7, + 0x03990306: 0x1FD8, + 0x03990304: 0x1FD9, + 0x03990300: 0x1FDA, + 0x1FFE0300: 0x1FDD, + 0x1FFE0301: 0x1FDE, + 0x1FFE0342: 0x1FDF, + 0x03C50306: 0x1FE0, + 0x03C50304: 0x1FE1, + 0x03CB0300: 0x1FE2, + 0x03C10313: 0x1FE4, + 0x03C10314: 0x1FE5, + 0x03C50342: 0x1FE6, + 0x03CB0342: 0x1FE7, + 0x03A50306: 0x1FE8, + 0x03A50304: 0x1FE9, + 0x03A50300: 0x1FEA, + 0x03A10314: 0x1FEC, + 0x00A80300: 0x1FED, + 0x1F7C0345: 0x1FF2, + 0x03C90345: 0x1FF3, + 0x03CE0345: 0x1FF4, + 0x03C90342: 0x1FF6, + 0x1FF60345: 0x1FF7, + 0x039F0300: 0x1FF8, + 0x03A90300: 0x1FFA, + 0x03A90345: 0x1FFC, + 0x21900338: 0x219A, + 0x21920338: 0x219B, + 0x21940338: 0x21AE, + 0x21D00338: 0x21CD, + 0x21D40338: 0x21CE, + 0x21D20338: 0x21CF, + 0x22030338: 0x2204, + 0x22080338: 0x2209, + 0x220B0338: 0x220C, + 0x22230338: 0x2224, + 0x22250338: 0x2226, + 0x223C0338: 0x2241, + 0x22430338: 0x2244, + 0x22450338: 0x2247, + 0x22480338: 0x2249, + 0x003D0338: 0x2260, + 0x22610338: 0x2262, + 0x224D0338: 0x226D, + 
0x003C0338: 0x226E, + 0x003E0338: 0x226F, + 0x22640338: 0x2270, + 0x22650338: 0x2271, + 0x22720338: 0x2274, + 0x22730338: 0x2275, + 0x22760338: 0x2278, + 0x22770338: 0x2279, + 0x227A0338: 0x2280, + 0x227B0338: 0x2281, + 0x22820338: 0x2284, + 0x22830338: 0x2285, + 0x22860338: 0x2288, + 0x22870338: 0x2289, + 0x22A20338: 0x22AC, + 0x22A80338: 0x22AD, + 0x22A90338: 0x22AE, + 0x22AB0338: 0x22AF, + 0x227C0338: 0x22E0, + 0x227D0338: 0x22E1, + 0x22910338: 0x22E2, + 0x22920338: 0x22E3, + 0x22B20338: 0x22EA, + 0x22B30338: 0x22EB, + 0x22B40338: 0x22EC, + 0x22B50338: 0x22ED, + 0x304B3099: 0x304C, + 0x304D3099: 0x304E, + 0x304F3099: 0x3050, + 0x30513099: 0x3052, + 0x30533099: 0x3054, + 0x30553099: 0x3056, + 0x30573099: 0x3058, + 0x30593099: 0x305A, + 0x305B3099: 0x305C, + 0x305D3099: 0x305E, + 0x305F3099: 0x3060, + 0x30613099: 0x3062, + 0x30643099: 0x3065, + 0x30663099: 0x3067, + 0x30683099: 0x3069, + 0x306F3099: 0x3070, + 0x306F309A: 0x3071, + 0x30723099: 0x3073, + 0x3072309A: 0x3074, + 0x30753099: 0x3076, + 0x3075309A: 0x3077, + 0x30783099: 0x3079, + 0x3078309A: 0x307A, + 0x307B3099: 0x307C, + 0x307B309A: 0x307D, + 0x30463099: 0x3094, + 0x309D3099: 0x309E, + 0x30AB3099: 0x30AC, + 0x30AD3099: 0x30AE, + 0x30AF3099: 0x30B0, + 0x30B13099: 0x30B2, + 0x30B33099: 0x30B4, + 0x30B53099: 0x30B6, + 0x30B73099: 0x30B8, + 0x30B93099: 0x30BA, + 0x30BB3099: 0x30BC, + 0x30BD3099: 0x30BE, + 0x30BF3099: 0x30C0, + 0x30C13099: 0x30C2, + 0x30C43099: 0x30C5, + 0x30C63099: 0x30C7, + 0x30C83099: 0x30C9, + 0x30CF3099: 0x30D0, + 0x30CF309A: 0x30D1, + 0x30D23099: 0x30D3, + 0x30D2309A: 0x30D4, + 0x30D53099: 0x30D6, + 0x30D5309A: 0x30D7, + 0x30D83099: 0x30D9, + 0x30D8309A: 0x30DA, + 0x30DB3099: 0x30DC, + 0x30DB309A: 0x30DD, + 0x30A63099: 0x30F4, + 0x30EF3099: 0x30F7, + 0x30F03099: 0x30F8, + 0x30F13099: 0x30F9, + 0x30F23099: 0x30FA, + 0x30FD3099: 0x30FE, + 0x109910BA: 0x1109A, + 0x109B10BA: 0x1109C, + 0x10A510BA: 0x110AB, + 0x11311127: 0x1112E, + 0x11321127: 0x1112F, + 0x1347133E: 0x1134B, + 0x13471357: 0x1134C, + 0x14B914BA: 0x114BB, + 0x14B914B0: 0x114BC, + 0x14B914BD: 0x114BE, + 0x15B815AF: 0x115BA, + 0x15B915AF: 0x115BB, +} + +// Total size of tables: 53KB (54226 bytes) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go new file mode 100644 index 00000000..a01274a8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -0,0 +1,7633 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build !go1.10 + +package norm + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "9.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. 
+ MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2C9E + endMulti = 0x2F60 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 + maxDecomp = 0x8000 +) + +// decomps: 19105 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, + 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 
0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 
0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 
0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 
0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 
0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 
0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 
0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 
0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 
0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 
0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 
+ 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 
0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + // Bytes 1a80 - 1abf + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + // Bytes 1ac0 - 1aff + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + // Bytes 1b00 - 1b3f + 0x43, 0x28, 
0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + // Bytes 1b40 - 1b7f + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + // Bytes 1b80 - 1bbf + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + // Bytes 1bc0 - 1bff + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + // Bytes 1c00 - 1c3f + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + // Bytes 1c40 - 1c7f + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + // Bytes 1c80 - 1cbf + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + // Bytes 1cc0 - 1cff + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + // Bytes 1d00 - 1d3f + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 
0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + // Bytes 1d40 - 1d7f + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + // Bytes 1d80 - 1dbf + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + // Bytes 1dc0 - 1dff + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + // Bytes 1e00 - 1e3f + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + // Bytes 1e40 - 1e7f + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1e80 - 1ebf + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + // Bytes 1ec0 - 1eff + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + // Bytes 1f00 - 1f3f + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 
0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + // Bytes 1f40 - 1f7f + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + // Bytes 1f80 - 1fbf + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + // Bytes 1fc0 - 1fff + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + // Bytes 2000 - 203f + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + // Bytes 2040 - 207f + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + // Bytes 2100 - 213f + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, 
+ 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + // Bytes 21c0 - 21ff + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + // Bytes 2240 - 227f + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + // Bytes 2280 - 22bf + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + // Bytes 2300 - 233f + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + // Bytes 2340 - 237f + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 
0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + // Bytes 2380 - 23bf + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + // Bytes 23c0 - 23ff + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + // Bytes 2400 - 243f + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + // Bytes 2440 - 247f + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + // Bytes 2480 - 24bf + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + // Bytes 24c0 - 24ff + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2500 - 253f + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + // Bytes 2540 - 257f + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2580 - 25bf + 0x46, 0xD9, 
0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + // Bytes 25c0 - 25ff + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + // Bytes 2640 - 267f + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + // Bytes 26c0 - 26ff + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, + // Bytes 2700 - 273f + 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, + 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, + 0xE6, 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, + 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, + 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + // Bytes 2740 - 277f + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, + // Bytes 2780 - 27bf + 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 
0xE1, 0x84, 0x91, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, + 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, + // Bytes 27c0 - 27ff + 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, + 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, + 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, + 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, + 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, + // Bytes 2800 - 283f + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, + 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, + // Bytes 2840 - 287f + 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, + 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, + // Bytes 2880 - 28bf + 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, + 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, + 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, + 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, + // Bytes 28c0 - 28ff + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, + 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, + 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, + // Bytes 2940 - 297f + 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, + 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, + 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, + // Bytes 2980 - 29bf + 0xA2, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, + 
0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, + // Bytes 29c0 - 29ff + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2a40 - 2a7f + 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, + 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, + // Bytes 2b00 - 2b3f + 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, + 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, + // Bytes 2bc0 - 2bff + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 
+ 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, + 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, + // Bytes 2c80 - 2cbf + 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, + 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, + 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2cc0 - 2cff + 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, + 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2d00 - 2d3f + 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, + 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, + 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, + 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, + // Bytes 2d80 - 2dbf + 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, + 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, + 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, + 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, + 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, + 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, + 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, + 0xBA, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, + // Bytes 2dc0 - 2dff + 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, + 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, + 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, + 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, + 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0xE0, 
0xB7, 0x8A, 0x12, 0x44, 0x44, + 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC, + 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9, + // Bytes 2e00 - 2e3f + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e40 - 2e7f + 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, + 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e80 - 2ebf + 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, + 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, + 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83, + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, + // Bytes 2f00 - 2f3f + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, + 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, + 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, + 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, + 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81, + 0xC9, 0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41, + // Bytes 2f80 - 2fbf + 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9, + 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC, + 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03, + 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8, + 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42, + 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5, + 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC, + 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03, + // Bytes 2fc0 - 2fff + 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87, + 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44, + 0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5, + 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC, + 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03, + 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83, + 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45, + 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9, + // Bytes 3000 - 303f + 0x03, 0x45, 
0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC, + 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03, + 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8, + 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45, + 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9, + 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC, + 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03, + 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87, + // Bytes 3040 - 307f + 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47, + 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9, + 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC, + 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03, + 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7, + 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49, + 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9, + 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC, + // Bytes 3080 - 30bf + 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03, + 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87, + 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49, + 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9, + 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC, + 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03, + 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82, + 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B, + // Bytes 30c0 - 30ff + 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5, + 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC, + 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03, + 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7, + 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C, + 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9, + 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC, + 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03, + // Bytes 3100 - 313f + 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83, + 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E, + 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5, + 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC, + 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03, + 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81, + 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F, + 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9, + // Bytes 3140 - 317f + 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC, + 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03, + 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87, + 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52, + 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9, + 0x03, 0x52, 0xCC, 0x8F, 0xC9, 0x03, 0x52, 0xCC, + 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03, + 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82, + // Bytes 3180 - 31bf + 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53, + 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5, + 0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC, + 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03, + 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7, + 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54, + 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9, + 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC, + // Bytes 31c0 - 31ff + 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03, + 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A, + 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55, + 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9, + 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC, + 0xA3, 0xB5, 0x03, 0x55, 0xCC, 0xA4, 0xB5, 0x03, + 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD, + 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56, + // Bytes 3200 - 323f + 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5, + 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC, + 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03, + 0x57, 0xCC, 0x87, 0xC9, 0x03, 
0x57, 0xCC, 0x88, + 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58, + 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9, + 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC, + 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03, + // Bytes 3240 - 327f + 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84, + 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9, + 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC, + 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03, + 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C, + 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, + 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9, + // Bytes 3280 - 32bf + 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC, + 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03, + 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C, + 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61, + 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5, + 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC, + 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03, + 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81, + // Bytes 32c0 - 32ff + 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63, + 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9, + 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC, + 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03, + 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD, + 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65, + 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9, + 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC, + // Bytes 3300 - 333f + 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03, + 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89, + 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65, + 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9, + 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC, + 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03, + 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81, + 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67, + // Bytes 3340 - 337f + 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9, + 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, + 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03, + 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87, + 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68, + 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5, + 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC, + 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3380 - 33bf + 0x69, 0xCC, 0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81, + 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69, + 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9, + 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC, + 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03, + 0x69, 0xCC, 0x8F, 0xC9, 0x03, 0x69, 0xCC, 0x91, + 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69, + 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5, + // Bytes 33c0 - 33ff + 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC, + 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03, + 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3, + 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B, + 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9, + 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC, + 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03, + 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81, + // Bytes 3400 - 343f + 0xC9, 0x03, 0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D, + 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9, + 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC, + 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03, + 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3, + 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E, + 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5, + 
0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC, + // Bytes 3440 - 347f + 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03, + 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B, + 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F, + 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9, + 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC, + 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03, + 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C, + 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72, + // Bytes 3480 - 34bf + 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5, + 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC, + 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03, + 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7, + 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74, + 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9, + 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC, + 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03, + // Bytes 34c0 - 34ff + 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1, + 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75, + 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9, + 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC, + 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03, + 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C, + 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75, + 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5, + // Bytes 3500 - 353f + 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC, + 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03, + 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83, + 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77, + 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9, + 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC, + 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03, + 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3, + // Bytes 3540 - 357f + 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78, + 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9, + 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC, + 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03, + 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87, + 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9, + 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC, + // Bytes 3580 - 35bf + 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03, + 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C, + 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, + 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 0xA8, 0xCC, 0x80, + 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04, + 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86, + 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84, + 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04, + // Bytes 35c0 - 35ff + 0xC3, 0xA6, 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6, + 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81, + 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04, + 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92, + 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85, + // Bytes 3600 - 363f + 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xD9, 0x04, + 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F, + // Bytes 3640 - 367f + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04, 
+ 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04, + // Bytes 3680 - 36bf + 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85, + 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7, + 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82, + // Bytes 36c0 - 36ff + 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81, + 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94, + 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04, + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85, + 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86, + 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04, + 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92, + // Bytes 3700 - 373f + 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81, + 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3740 - 377f + 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84, + 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04, + 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A, + 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04, + 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, + // Bytes 3780 - 37bf + 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6, + // Bytes 37c0 - 37ff + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8, + 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04, + 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83, + 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86, + 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3800 - 383f + 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87, + 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88, + 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04, + 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4, + 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, + 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04, + 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8, + 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88, + // Bytes 3840 - 387f + 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94, + 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04, + 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92, + 0xD9, 0x94, 0xC9, 
0x04, 0xDB, 0x95, 0xD9, 0x94, + 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3880 - 38bf + 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, + 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA, + 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, + 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41, + // Bytes 38c0 - 38ff + 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7, + 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, + // Bytes 3900 - 393f + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7, + 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83, + // Bytes 3940 - 397f + 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + // Bytes 3980 - 39bf + 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, + 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53, + 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, + 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3, + 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, + // Bytes 39c0 - 39ff + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + // Bytes 3a00 - 3a3f + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, + 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA, + 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + // Bytes 3a40 - 3a7f + 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61, + 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC, + 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, + 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65, + // Bytes 3a80 - 3abf + 0xCC, 0x84, 
0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC, + 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + // Bytes 3ac0 - 3aff + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, + 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + // Bytes 3b00 - 3b3f + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72, + 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC, + 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C, + 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC, + // Bytes 3b40 - 3b7f + 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, + // Bytes 3b80 - 3bbf + 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE, + 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, + 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, + // Bytes 3bc0 - 3bff + 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c00 - 3c3f + 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3c40 - 3c7f + 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3c80 - 3cbf + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x92, 
0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, + // Bytes 3cc0 - 3cff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, + 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + // Bytes 3d00 - 3d3f + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3d40 - 3d7f + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + // Bytes 3d80 - 3dbf + 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + // Bytes 3dc0 - 3dff + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + // Bytes 3e00 - 3e3f + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3e40 - 3e7f + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + // Bytes 3e80 - 3ebf + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 
0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + // Bytes 3ec0 - 3eff + 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85, + 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11, + // Bytes 3f00 - 3f3f + 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f40 - 3f7f + 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f80 - 3fbf + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D, + // Bytes 3fc0 - 3fff + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4000 - 403f + 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4040 - 407f + 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4080 - 40bf + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 40c0 - 40ff + 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D, 
+ 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + // Bytes 4100 - 413f + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + // Bytes 4140 - 417f + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, + // Bytes 4180 - 41bf + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + // Bytes 41c0 - 41ff + 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + // Bytes 4200 - 423f + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, + // Bytes 4240 - 427f + 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, + 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, + 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2, + 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xC9, 0x43, + 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84, + 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20, + 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9, + 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC, + // Bytes 4280 - 42bf + 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43, + 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94, + 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20, + 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5, + 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD, + 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43, + 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D, + 0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20, + // Bytes 42c0 - 42ff + 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, + 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, + 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 
0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, + // Bytes 4300 - 433f + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, + // Bytes 4340 - 437f + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, + // Bytes 4380 - 43bf + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, + // Bytes 43c0 - 43ff + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, + // Bytes 4400 - 443f + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, + // Bytes 4440 - 447f + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, + 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, + // Bytes 4480 - 44bf + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, + // Bytes 44c0 - 44ff + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, + 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, + // Bytes 4500 - 453f + 0xBC, 0xD7, 
0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, + // Bytes 4600 - 463f + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, + 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 
0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, + // Bytes 4740 - 477f + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + // Bytes 4780 - 47bf + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, + // Bytes 47c0 - 47ff + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + // Bytes 4800 - 483f + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, + // Bytes 4840 - 487f + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, + // Bytes 4880 - 48bf + 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + // Bytes 48c0 - 48ff + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + // Bytes 4900 - 493f + 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 
0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + // Bytes 4940 - 497f + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + // Bytes 4980 - 49bf + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, + // Bytes 49c0 - 49ff + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, + // Bytes 4a00 - 4a3f + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, + // Bytes 4a40 - 4a7f + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, + // Bytes 4a80 - 4abf + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00, + 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. 
Total size: 10332 bytes (10.09 KiB). Checksum: 51cc525b297fc970. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 44: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 44 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 46 blocks, 2944 entries, 5888 bytes +// The third block is the zero block. +var nfcValues = [2944]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, + // Block 0x5, offset 0x140 + 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 
0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 
0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a1, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b, + 0x2c6: 0xa000, 0x2c7: 0x3709, + 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3733, 0x302: 0x37b7, + 0x310: 0x370f, 0x311: 0x3793, + 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd, + 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf, + 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed, + 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805, + 0x338: 0x3787, 0x339: 0x380b, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xa000, + 0x3c6: 0x2d26, 0x3c7: 0xa000, 0x3c8: 0x2d2e, 0x3c9: 0xa000, 0x3ca: 0x2d36, 0x3cb: 0xa000, + 0x3cc: 0x2d3e, 0x3cd: 0xa000, 0x3ce: 0x2d46, 0x3d1: 0xa000, + 0x3d2: 0x2d4e, + 0x3f4: 0x8102, 0x3f5: 0x9900, + 0x3fa: 0xa000, 0x3fb: 0x2d56, + 0x3fc: 0xa000, 0x3fd: 0x2d5e, 0x3fe: 0xa000, 0x3ff: 0xa000, + // Block 0x10, offset 0x400 + 0x400: 0x2f97, 0x401: 0x32a3, 0x402: 0x2fa1, 0x403: 0x32ad, 0x404: 0x2fa6, 0x405: 0x32b2, + 0x406: 0x2fab, 0x407: 0x32b7, 0x408: 0x38cc, 0x409: 0x3a5b, 0x40a: 0x2fc4, 0x40b: 0x32d0, + 0x40c: 0x2fce, 0x40d: 0x32da, 0x40e: 0x2fdd, 0x40f: 0x32e9, 0x410: 0x2fd3, 0x411: 0x32df, 
+ 0x412: 0x2fd8, 0x413: 0x32e4, 0x414: 0x38ef, 0x415: 0x3a7e, 0x416: 0x38f6, 0x417: 0x3a85, + 0x418: 0x3019, 0x419: 0x3325, 0x41a: 0x301e, 0x41b: 0x332a, 0x41c: 0x3904, 0x41d: 0x3a93, + 0x41e: 0x3023, 0x41f: 0x332f, 0x420: 0x3032, 0x421: 0x333e, 0x422: 0x3050, 0x423: 0x335c, + 0x424: 0x305f, 0x425: 0x336b, 0x426: 0x3055, 0x427: 0x3361, 0x428: 0x3064, 0x429: 0x3370, + 0x42a: 0x3069, 0x42b: 0x3375, 0x42c: 0x30af, 0x42d: 0x33bb, 0x42e: 0x390b, 0x42f: 0x3a9a, + 0x430: 0x30b9, 0x431: 0x33ca, 0x432: 0x30c3, 0x433: 0x33d4, 0x434: 0x30cd, 0x435: 0x33de, + 0x436: 0x46c4, 0x437: 0x4755, 0x438: 0x3912, 0x439: 0x3aa1, 0x43a: 0x30e6, 0x43b: 0x33f7, + 0x43c: 0x30e1, 0x43d: 0x33f2, 0x43e: 0x30eb, 0x43f: 0x33fc, + // Block 0x11, offset 0x440 + 0x440: 0x30f0, 0x441: 0x3401, 0x442: 0x30f5, 0x443: 0x3406, 0x444: 0x3109, 0x445: 0x341a, + 0x446: 0x3113, 0x447: 0x3424, 0x448: 0x3122, 0x449: 0x3433, 0x44a: 0x311d, 0x44b: 0x342e, + 0x44c: 0x3935, 0x44d: 0x3ac4, 0x44e: 0x3943, 0x44f: 0x3ad2, 0x450: 0x394a, 0x451: 0x3ad9, + 0x452: 0x3951, 0x453: 0x3ae0, 0x454: 0x314f, 0x455: 0x3460, 0x456: 0x3154, 0x457: 0x3465, + 0x458: 0x315e, 0x459: 0x346f, 0x45a: 0x46f1, 0x45b: 0x4782, 0x45c: 0x3997, 0x45d: 0x3b26, + 0x45e: 0x3177, 0x45f: 0x3488, 0x460: 0x3181, 0x461: 0x3492, 0x462: 0x4700, 0x463: 0x4791, + 0x464: 0x399e, 0x465: 0x3b2d, 0x466: 0x39a5, 0x467: 0x3b34, 0x468: 0x39ac, 0x469: 0x3b3b, + 0x46a: 0x3190, 0x46b: 0x34a1, 0x46c: 0x319a, 0x46d: 0x34b0, 0x46e: 0x31ae, 0x46f: 0x34c4, + 0x470: 0x31a9, 0x471: 0x34bf, 0x472: 0x31ea, 0x473: 0x3500, 0x474: 0x31f9, 0x475: 0x350f, + 0x476: 0x31f4, 0x477: 0x350a, 0x478: 0x39b3, 0x479: 0x3b42, 0x47a: 0x39ba, 0x47b: 0x3b49, + 0x47c: 0x31fe, 0x47d: 0x3514, 0x47e: 0x3203, 0x47f: 0x3519, + // Block 0x12, offset 0x480 + 0x480: 0x3208, 0x481: 0x351e, 0x482: 0x320d, 0x483: 0x3523, 0x484: 0x321c, 0x485: 0x3532, + 0x486: 0x3217, 0x487: 0x352d, 0x488: 0x3221, 0x489: 0x353c, 0x48a: 0x3226, 0x48b: 0x3541, + 0x48c: 0x322b, 0x48d: 0x3546, 0x48e: 0x3249, 0x48f: 0x3564, 0x490: 0x3262, 0x491: 0x3582, + 0x492: 0x3271, 0x493: 0x3591, 0x494: 0x3276, 0x495: 0x3596, 0x496: 0x337a, 0x497: 0x34a6, + 0x498: 0x3537, 0x499: 0x3573, 0x49b: 0x35d1, + 0x4a0: 0x46a1, 0x4a1: 0x4732, 0x4a2: 0x2f83, 0x4a3: 0x328f, + 0x4a4: 0x3878, 0x4a5: 0x3a07, 0x4a6: 0x3871, 0x4a7: 0x3a00, 0x4a8: 0x3886, 0x4a9: 0x3a15, + 0x4aa: 0x387f, 0x4ab: 0x3a0e, 0x4ac: 0x38be, 0x4ad: 0x3a4d, 0x4ae: 0x3894, 0x4af: 0x3a23, + 0x4b0: 0x388d, 0x4b1: 0x3a1c, 0x4b2: 0x38a2, 0x4b3: 0x3a31, 0x4b4: 0x389b, 0x4b5: 0x3a2a, + 0x4b6: 0x38c5, 0x4b7: 0x3a54, 0x4b8: 0x46b5, 0x4b9: 0x4746, 0x4ba: 0x3000, 0x4bb: 0x330c, + 0x4bc: 0x2fec, 0x4bd: 0x32f8, 0x4be: 0x38da, 0x4bf: 0x3a69, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x38d3, 0x4c1: 0x3a62, 0x4c2: 0x38e8, 0x4c3: 0x3a77, 0x4c4: 0x38e1, 0x4c5: 0x3a70, + 0x4c6: 0x38fd, 0x4c7: 0x3a8c, 0x4c8: 0x3091, 0x4c9: 0x339d, 0x4ca: 0x30a5, 0x4cb: 0x33b1, + 0x4cc: 0x46e7, 0x4cd: 0x4778, 0x4ce: 0x3136, 0x4cf: 0x3447, 0x4d0: 0x3920, 0x4d1: 0x3aaf, + 0x4d2: 0x3919, 0x4d3: 0x3aa8, 0x4d4: 0x392e, 0x4d5: 0x3abd, 0x4d6: 0x3927, 0x4d7: 0x3ab6, + 0x4d8: 0x3989, 0x4d9: 0x3b18, 0x4da: 0x396d, 0x4db: 0x3afc, 0x4dc: 0x3966, 0x4dd: 0x3af5, + 0x4de: 0x397b, 0x4df: 0x3b0a, 0x4e0: 0x3974, 0x4e1: 0x3b03, 0x4e2: 0x3982, 0x4e3: 0x3b11, + 0x4e4: 0x31e5, 0x4e5: 0x34fb, 0x4e6: 0x31c7, 0x4e7: 0x34dd, 0x4e8: 0x39e4, 0x4e9: 0x3b73, + 0x4ea: 0x39dd, 0x4eb: 0x3b6c, 0x4ec: 0x39f2, 0x4ed: 0x3b81, 0x4ee: 0x39eb, 0x4ef: 0x3b7a, + 0x4f0: 0x39f9, 0x4f1: 0x3b88, 0x4f2: 0x3230, 0x4f3: 0x354b, 0x4f4: 0x3258, 0x4f5: 0x3578, + 0x4f6: 0x3253, 0x4f7: 0x356e, 0x4f8: 
0x323f, 0x4f9: 0x355a, + // Block 0x14, offset 0x500 + 0x500: 0x4804, 0x501: 0x480a, 0x502: 0x491e, 0x503: 0x4936, 0x504: 0x4926, 0x505: 0x493e, + 0x506: 0x492e, 0x507: 0x4946, 0x508: 0x47aa, 0x509: 0x47b0, 0x50a: 0x488e, 0x50b: 0x48a6, + 0x50c: 0x4896, 0x50d: 0x48ae, 0x50e: 0x489e, 0x50f: 0x48b6, 0x510: 0x4816, 0x511: 0x481c, + 0x512: 0x3db8, 0x513: 0x3dc8, 0x514: 0x3dc0, 0x515: 0x3dd0, + 0x518: 0x47b6, 0x519: 0x47bc, 0x51a: 0x3ce8, 0x51b: 0x3cf8, 0x51c: 0x3cf0, 0x51d: 0x3d00, + 0x520: 0x482e, 0x521: 0x4834, 0x522: 0x494e, 0x523: 0x4966, + 0x524: 0x4956, 0x525: 0x496e, 0x526: 0x495e, 0x527: 0x4976, 0x528: 0x47c2, 0x529: 0x47c8, + 0x52a: 0x48be, 0x52b: 0x48d6, 0x52c: 0x48c6, 0x52d: 0x48de, 0x52e: 0x48ce, 0x52f: 0x48e6, + 0x530: 0x4846, 0x531: 0x484c, 0x532: 0x3e18, 0x533: 0x3e30, 0x534: 0x3e20, 0x535: 0x3e38, + 0x536: 0x3e28, 0x537: 0x3e40, 0x538: 0x47ce, 0x539: 0x47d4, 0x53a: 0x3d18, 0x53b: 0x3d30, + 0x53c: 0x3d20, 0x53d: 0x3d38, 0x53e: 0x3d28, 0x53f: 0x3d40, + // Block 0x15, offset 0x540 + 0x540: 0x4852, 0x541: 0x4858, 0x542: 0x3e48, 0x543: 0x3e58, 0x544: 0x3e50, 0x545: 0x3e60, + 0x548: 0x47da, 0x549: 0x47e0, 0x54a: 0x3d48, 0x54b: 0x3d58, + 0x54c: 0x3d50, 0x54d: 0x3d60, 0x550: 0x4864, 0x551: 0x486a, + 0x552: 0x3e80, 0x553: 0x3e98, 0x554: 0x3e88, 0x555: 0x3ea0, 0x556: 0x3e90, 0x557: 0x3ea8, + 0x559: 0x47e6, 0x55b: 0x3d68, 0x55d: 0x3d70, + 0x55f: 0x3d78, 0x560: 0x487c, 0x561: 0x4882, 0x562: 0x497e, 0x563: 0x4996, + 0x564: 0x4986, 0x565: 0x499e, 0x566: 0x498e, 0x567: 0x49a6, 0x568: 0x47ec, 0x569: 0x47f2, + 0x56a: 0x48ee, 0x56b: 0x4906, 0x56c: 0x48f6, 0x56d: 0x490e, 0x56e: 0x48fe, 0x56f: 0x4916, + 0x570: 0x47f8, 0x571: 0x431e, 0x572: 0x3691, 0x573: 0x4324, 0x574: 0x4822, 0x575: 0x432a, + 0x576: 0x36a3, 0x577: 0x4330, 0x578: 0x36c1, 0x579: 0x4336, 0x57a: 0x36d9, 0x57b: 0x433c, + 0x57c: 0x4870, 0x57d: 0x4342, + // Block 0x16, offset 0x580 + 0x580: 0x3da0, 0x581: 0x3da8, 0x582: 0x4184, 0x583: 0x41a2, 0x584: 0x418e, 0x585: 0x41ac, + 0x586: 0x4198, 0x587: 0x41b6, 0x588: 0x3cd8, 0x589: 0x3ce0, 0x58a: 0x40d0, 0x58b: 0x40ee, + 0x58c: 0x40da, 0x58d: 0x40f8, 0x58e: 0x40e4, 0x58f: 0x4102, 0x590: 0x3de8, 0x591: 0x3df0, + 0x592: 0x41c0, 0x593: 0x41de, 0x594: 0x41ca, 0x595: 0x41e8, 0x596: 0x41d4, 0x597: 0x41f2, + 0x598: 0x3d08, 0x599: 0x3d10, 0x59a: 0x410c, 0x59b: 0x412a, 0x59c: 0x4116, 0x59d: 0x4134, + 0x59e: 0x4120, 0x59f: 0x413e, 0x5a0: 0x3ec0, 0x5a1: 0x3ec8, 0x5a2: 0x41fc, 0x5a3: 0x421a, + 0x5a4: 0x4206, 0x5a5: 0x4224, 0x5a6: 0x4210, 0x5a7: 0x422e, 0x5a8: 0x3d80, 0x5a9: 0x3d88, + 0x5aa: 0x4148, 0x5ab: 0x4166, 0x5ac: 0x4152, 0x5ad: 0x4170, 0x5ae: 0x415c, 0x5af: 0x417a, + 0x5b0: 0x3685, 0x5b1: 0x367f, 0x5b2: 0x3d90, 0x5b3: 0x368b, 0x5b4: 0x3d98, + 0x5b6: 0x4810, 0x5b7: 0x3db0, 0x5b8: 0x35f5, 0x5b9: 0x35ef, 0x5ba: 0x35e3, 0x5bb: 0x42ee, + 0x5bc: 0x35fb, 0x5bd: 0x8100, 0x5be: 0x01d3, 0x5bf: 0xa100, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x8100, 0x5c1: 0x35a7, 0x5c2: 0x3dd8, 0x5c3: 0x369d, 0x5c4: 0x3de0, + 0x5c6: 0x483a, 0x5c7: 0x3df8, 0x5c8: 0x3601, 0x5c9: 0x42f4, 0x5ca: 0x360d, 0x5cb: 0x42fa, + 0x5cc: 0x3619, 0x5cd: 0x3b8f, 0x5ce: 0x3b96, 0x5cf: 0x3b9d, 0x5d0: 0x36b5, 0x5d1: 0x36af, + 0x5d2: 0x3e00, 0x5d3: 0x44e4, 0x5d6: 0x36bb, 0x5d7: 0x3e10, + 0x5d8: 0x3631, 0x5d9: 0x362b, 0x5da: 0x361f, 0x5db: 0x4300, 0x5dd: 0x3ba4, + 0x5de: 0x3bab, 0x5df: 0x3bb2, 0x5e0: 0x36eb, 0x5e1: 0x36e5, 0x5e2: 0x3e68, 0x5e3: 0x44ec, + 0x5e4: 0x36cd, 0x5e5: 0x36d3, 0x5e6: 0x36f1, 0x5e7: 0x3e78, 0x5e8: 0x3661, 0x5e9: 0x365b, + 0x5ea: 0x364f, 0x5eb: 0x430c, 0x5ec: 0x3649, 0x5ed: 0x359b, 0x5ee: 0x42e8, 0x5ef: 0x0081, + 0x5f2: 
0x3eb0, 0x5f3: 0x36f7, 0x5f4: 0x3eb8, + 0x5f6: 0x4888, 0x5f7: 0x3ed0, 0x5f8: 0x363d, 0x5f9: 0x4306, 0x5fa: 0x366d, 0x5fb: 0x4318, + 0x5fc: 0x3679, 0x5fd: 0x4256, 0x5fe: 0xa100, + // Block 0x18, offset 0x600 + 0x601: 0x3c06, 0x603: 0xa000, 0x604: 0x3c0d, 0x605: 0xa000, + 0x607: 0x3c14, 0x608: 0xa000, 0x609: 0x3c1b, + 0x60d: 0xa000, + 0x620: 0x2f65, 0x621: 0xa000, 0x622: 0x3c29, + 0x624: 0xa000, 0x625: 0xa000, + 0x62d: 0x3c22, 0x62e: 0x2f60, 0x62f: 0x2f6a, + 0x630: 0x3c30, 0x631: 0x3c37, 0x632: 0xa000, 0x633: 0xa000, 0x634: 0x3c3e, 0x635: 0x3c45, + 0x636: 0xa000, 0x637: 0xa000, 0x638: 0x3c4c, 0x639: 0x3c53, 0x63a: 0xa000, 0x63b: 0xa000, + 0x63c: 0xa000, 0x63d: 0xa000, + // Block 0x19, offset 0x640 + 0x640: 0x3c5a, 0x641: 0x3c61, 0x642: 0xa000, 0x643: 0xa000, 0x644: 0x3c76, 0x645: 0x3c7d, + 0x646: 0xa000, 0x647: 0xa000, 0x648: 0x3c84, 0x649: 0x3c8b, + 0x651: 0xa000, + 0x652: 0xa000, + 0x662: 0xa000, + 0x668: 0xa000, 0x669: 0xa000, + 0x66b: 0xa000, 0x66c: 0x3ca0, 0x66d: 0x3ca7, 0x66e: 0x3cae, 0x66f: 0x3cb5, + 0x672: 0xa000, 0x673: 0xa000, 0x674: 0xa000, 0x675: 0xa000, + // Block 0x1a, offset 0x680 + 0x686: 0xa000, 0x68b: 0xa000, + 0x68c: 0x3f08, 0x68d: 0xa000, 0x68e: 0x3f10, 0x68f: 0xa000, 0x690: 0x3f18, 0x691: 0xa000, + 0x692: 0x3f20, 0x693: 0xa000, 0x694: 0x3f28, 0x695: 0xa000, 0x696: 0x3f30, 0x697: 0xa000, + 0x698: 0x3f38, 0x699: 0xa000, 0x69a: 0x3f40, 0x69b: 0xa000, 0x69c: 0x3f48, 0x69d: 0xa000, + 0x69e: 0x3f50, 0x69f: 0xa000, 0x6a0: 0x3f58, 0x6a1: 0xa000, 0x6a2: 0x3f60, + 0x6a4: 0xa000, 0x6a5: 0x3f68, 0x6a6: 0xa000, 0x6a7: 0x3f70, 0x6a8: 0xa000, 0x6a9: 0x3f78, + 0x6af: 0xa000, + 0x6b0: 0x3f80, 0x6b1: 0x3f88, 0x6b2: 0xa000, 0x6b3: 0x3f90, 0x6b4: 0x3f98, 0x6b5: 0xa000, + 0x6b6: 0x3fa0, 0x6b7: 0x3fa8, 0x6b8: 0xa000, 0x6b9: 0x3fb0, 0x6ba: 0x3fb8, 0x6bb: 0xa000, + 0x6bc: 0x3fc0, 0x6bd: 0x3fc8, + // Block 0x1b, offset 0x6c0 + 0x6d4: 0x3f00, + 0x6d9: 0x9903, 0x6da: 0x9903, 0x6db: 0x8100, 0x6dc: 0x8100, 0x6dd: 0xa000, + 0x6de: 0x3fd0, + 0x6e6: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3fe0, 0x6ed: 0xa000, 0x6ee: 0x3fe8, 0x6ef: 0xa000, + 0x6f0: 0x3ff0, 0x6f1: 0xa000, 0x6f2: 0x3ff8, 0x6f3: 0xa000, 0x6f4: 0x4000, 0x6f5: 0xa000, + 0x6f6: 0x4008, 0x6f7: 0xa000, 0x6f8: 0x4010, 0x6f9: 0xa000, 0x6fa: 0x4018, 0x6fb: 0xa000, + 0x6fc: 0x4020, 0x6fd: 0xa000, 0x6fe: 0x4028, 0x6ff: 0xa000, + // Block 0x1c, offset 0x700 + 0x700: 0x4030, 0x701: 0xa000, 0x702: 0x4038, 0x704: 0xa000, 0x705: 0x4040, + 0x706: 0xa000, 0x707: 0x4048, 0x708: 0xa000, 0x709: 0x4050, + 0x70f: 0xa000, 0x710: 0x4058, 0x711: 0x4060, + 0x712: 0xa000, 0x713: 0x4068, 0x714: 0x4070, 0x715: 0xa000, 0x716: 0x4078, 0x717: 0x4080, + 0x718: 0xa000, 0x719: 0x4088, 0x71a: 0x4090, 0x71b: 0xa000, 0x71c: 0x4098, 0x71d: 0x40a0, + 0x72f: 0xa000, + 0x730: 0xa000, 0x731: 0xa000, 0x732: 0xa000, 0x734: 0x3fd8, + 0x737: 0x40a8, 0x738: 0x40b0, 0x739: 0x40b8, 0x73a: 0x40c0, + 0x73d: 0xa000, 0x73e: 0x40c8, + // Block 0x1d, offset 0x740 + 0x740: 0x1377, 0x741: 0x0cfb, 0x742: 0x13d3, 0x743: 0x139f, 0x744: 0x0e57, 0x745: 0x06eb, + 0x746: 0x08df, 0x747: 0x162b, 0x748: 0x162b, 0x749: 0x0a0b, 0x74a: 0x145f, 0x74b: 0x0943, + 0x74c: 0x0a07, 0x74d: 0x0bef, 0x74e: 0x0fcf, 0x74f: 0x115f, 0x750: 0x1297, 0x751: 0x12d3, + 0x752: 0x1307, 0x753: 0x141b, 0x754: 0x0d73, 0x755: 0x0dff, 0x756: 0x0eab, 0x757: 0x0f43, + 0x758: 0x125f, 0x759: 0x1447, 0x75a: 0x1573, 0x75b: 0x070f, 0x75c: 0x08b3, 0x75d: 0x0d87, + 0x75e: 0x0ecf, 0x75f: 0x1293, 0x760: 0x15c3, 0x761: 0x0ab3, 0x762: 0x0e77, 0x763: 0x1283, + 0x764: 0x1317, 0x765: 0x0c23, 0x766: 0x11bb, 0x767: 0x12df, 0x768: 0x0b1f, 0x769: 0x0d0f, + 
0x76a: 0x0e17, 0x76b: 0x0f1b, 0x76c: 0x1427, 0x76d: 0x074f, 0x76e: 0x07e7, 0x76f: 0x0853, + 0x770: 0x0c8b, 0x771: 0x0d7f, 0x772: 0x0ecb, 0x773: 0x0fef, 0x774: 0x1177, 0x775: 0x128b, + 0x776: 0x12a3, 0x777: 0x13c7, 0x778: 0x14ef, 0x779: 0x15a3, 0x77a: 0x15bf, 0x77b: 0x102b, + 0x77c: 0x106b, 0x77d: 0x1123, 0x77e: 0x1243, 0x77f: 0x147b, + // Block 0x1e, offset 0x780 + 0x780: 0x15cb, 0x781: 0x134b, 0x782: 0x09c7, 0x783: 0x0b3b, 0x784: 0x10db, 0x785: 0x119b, + 0x786: 0x0eff, 0x787: 0x1033, 0x788: 0x1397, 0x789: 0x14e7, 0x78a: 0x09c3, 0x78b: 0x0a8f, + 0x78c: 0x0d77, 0x78d: 0x0e2b, 0x78e: 0x0e5f, 0x78f: 0x1113, 0x790: 0x113b, 0x791: 0x14a7, + 0x792: 0x084f, 0x793: 0x11a7, 0x794: 0x07f3, 0x795: 0x07ef, 0x796: 0x1097, 0x797: 0x1127, + 0x798: 0x125b, 0x799: 0x14af, 0x79a: 0x1367, 0x79b: 0x0c27, 0x79c: 0x0d73, 0x79d: 0x1357, + 0x79e: 0x06f7, 0x79f: 0x0a63, 0x7a0: 0x0b93, 0x7a1: 0x0f2f, 0x7a2: 0x0faf, 0x7a3: 0x0873, + 0x7a4: 0x103b, 0x7a5: 0x075f, 0x7a6: 0x0b77, 0x7a7: 0x06d7, 0x7a8: 0x0deb, 0x7a9: 0x0ca3, + 0x7aa: 0x110f, 0x7ab: 0x08c7, 0x7ac: 0x09b3, 0x7ad: 0x0ffb, 0x7ae: 0x1263, 0x7af: 0x133b, + 0x7b0: 0x0db7, 0x7b1: 0x13f7, 0x7b2: 0x0de3, 0x7b3: 0x0c37, 0x7b4: 0x121b, 0x7b5: 0x0c57, + 0x7b6: 0x0fab, 0x7b7: 0x072b, 0x7b8: 0x07a7, 0x7b9: 0x07eb, 0x7ba: 0x0d53, 0x7bb: 0x10fb, + 0x7bc: 0x11f3, 0x7bd: 0x1347, 0x7be: 0x145b, 0x7bf: 0x085b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x090f, 0x7c1: 0x0a17, 0x7c2: 0x0b2f, 0x7c3: 0x0cbf, 0x7c4: 0x0e7b, 0x7c5: 0x103f, + 0x7c6: 0x1497, 0x7c7: 0x157b, 0x7c8: 0x15cf, 0x7c9: 0x15e7, 0x7ca: 0x0837, 0x7cb: 0x0cf3, + 0x7cc: 0x0da3, 0x7cd: 0x13eb, 0x7ce: 0x0afb, 0x7cf: 0x0bd7, 0x7d0: 0x0bf3, 0x7d1: 0x0c83, + 0x7d2: 0x0e6b, 0x7d3: 0x0eb7, 0x7d4: 0x0f67, 0x7d5: 0x108b, 0x7d6: 0x112f, 0x7d7: 0x1193, + 0x7d8: 0x13db, 0x7d9: 0x126b, 0x7da: 0x1403, 0x7db: 0x147f, 0x7dc: 0x080f, 0x7dd: 0x083b, + 0x7de: 0x0923, 0x7df: 0x0ea7, 0x7e0: 0x12f3, 0x7e1: 0x133b, 0x7e2: 0x0b1b, 0x7e3: 0x0b8b, + 0x7e4: 0x0c4f, 0x7e5: 0x0daf, 0x7e6: 0x10d7, 0x7e7: 0x0f23, 0x7e8: 0x073b, 0x7e9: 0x097f, + 0x7ea: 0x0a63, 0x7eb: 0x0ac7, 0x7ec: 0x0b97, 0x7ed: 0x0f3f, 0x7ee: 0x0f5b, 0x7ef: 0x116b, + 0x7f0: 0x118b, 0x7f1: 0x1463, 0x7f2: 0x14e3, 0x7f3: 0x14f3, 0x7f4: 0x152f, 0x7f5: 0x0753, + 0x7f6: 0x107f, 0x7f7: 0x144f, 0x7f8: 0x14cb, 0x7f9: 0x0baf, 0x7fa: 0x0717, 0x7fb: 0x0777, + 0x7fc: 0x0a67, 0x7fd: 0x0a87, 0x7fe: 0x0caf, 0x7ff: 0x0d73, + // Block 0x20, offset 0x800 + 0x800: 0x0ec3, 0x801: 0x0fcb, 0x802: 0x1277, 0x803: 0x1417, 0x804: 0x1623, 0x805: 0x0ce3, + 0x806: 0x14a3, 0x807: 0x0833, 0x808: 0x0d2f, 0x809: 0x0d3b, 0x80a: 0x0e0f, 0x80b: 0x0e47, + 0x80c: 0x0f4b, 0x80d: 0x0fa7, 0x80e: 0x1027, 0x80f: 0x110b, 0x810: 0x153b, 0x811: 0x07af, + 0x812: 0x0c03, 0x813: 0x14b3, 0x814: 0x0767, 0x815: 0x0aab, 0x816: 0x0e2f, 0x817: 0x13df, + 0x818: 0x0b67, 0x819: 0x0bb7, 0x81a: 0x0d43, 0x81b: 0x0f2f, 0x81c: 0x14bb, 0x81d: 0x0817, + 0x81e: 0x08ff, 0x81f: 0x0a97, 0x820: 0x0cd3, 0x821: 0x0d1f, 0x822: 0x0d5f, 0x823: 0x0df3, + 0x824: 0x0f47, 0x825: 0x0fbb, 0x826: 0x1157, 0x827: 0x12f7, 0x828: 0x1303, 0x829: 0x1457, + 0x82a: 0x14d7, 0x82b: 0x0883, 0x82c: 0x0e4b, 0x82d: 0x0903, 0x82e: 0x0ec7, 0x82f: 0x0f6b, + 0x830: 0x1287, 0x831: 0x14bf, 0x832: 0x15ab, 0x833: 0x15d3, 0x834: 0x0d37, 0x835: 0x0e27, + 0x836: 0x11c3, 0x837: 0x10b7, 0x838: 0x10c3, 0x839: 0x10e7, 0x83a: 0x0f17, 0x83b: 0x0e9f, + 0x83c: 0x1363, 0x83d: 0x0733, 0x83e: 0x122b, 0x83f: 0x081b, + // Block 0x21, offset 0x840 + 0x840: 0x080b, 0x841: 0x0b0b, 0x842: 0x0c2b, 0x843: 0x10f3, 0x844: 0x0a53, 0x845: 0x0e03, + 0x846: 0x0cef, 0x847: 0x13e7, 0x848: 0x12e7, 0x849: 
0x14ab, 0x84a: 0x1323, 0x84b: 0x0b27, + 0x84c: 0x0787, 0x84d: 0x095b, 0x850: 0x09af, + 0x852: 0x0cdf, 0x855: 0x07f7, 0x856: 0x0f1f, 0x857: 0x0fe3, + 0x858: 0x1047, 0x859: 0x1063, 0x85a: 0x1067, 0x85b: 0x107b, 0x85c: 0x14fb, 0x85d: 0x10eb, + 0x85e: 0x116f, 0x860: 0x128f, 0x862: 0x1353, + 0x865: 0x1407, 0x866: 0x1433, + 0x86a: 0x154f, 0x86b: 0x1553, 0x86c: 0x1557, 0x86d: 0x15bb, 0x86e: 0x142b, 0x86f: 0x14c7, + 0x870: 0x0757, 0x871: 0x077b, 0x872: 0x078f, 0x873: 0x084b, 0x874: 0x0857, 0x875: 0x0897, + 0x876: 0x094b, 0x877: 0x0967, 0x878: 0x096f, 0x879: 0x09ab, 0x87a: 0x09b7, 0x87b: 0x0a93, + 0x87c: 0x0a9b, 0x87d: 0x0ba3, 0x87e: 0x0bcb, 0x87f: 0x0bd3, + // Block 0x22, offset 0x880 + 0x880: 0x0beb, 0x881: 0x0c97, 0x882: 0x0cc7, 0x883: 0x0ce7, 0x884: 0x0d57, 0x885: 0x0e1b, + 0x886: 0x0e37, 0x887: 0x0e67, 0x888: 0x0ebb, 0x889: 0x0edb, 0x88a: 0x0f4f, 0x88b: 0x102f, + 0x88c: 0x104b, 0x88d: 0x1053, 0x88e: 0x104f, 0x88f: 0x1057, 0x890: 0x105b, 0x891: 0x105f, + 0x892: 0x1073, 0x893: 0x1077, 0x894: 0x109b, 0x895: 0x10af, 0x896: 0x10cb, 0x897: 0x112f, + 0x898: 0x1137, 0x899: 0x113f, 0x89a: 0x1153, 0x89b: 0x117b, 0x89c: 0x11cb, 0x89d: 0x11ff, + 0x89e: 0x11ff, 0x89f: 0x1267, 0x8a0: 0x130f, 0x8a1: 0x1327, 0x8a2: 0x135b, 0x8a3: 0x135f, + 0x8a4: 0x13a3, 0x8a5: 0x13a7, 0x8a6: 0x13ff, 0x8a7: 0x1407, 0x8a8: 0x14db, 0x8a9: 0x151f, + 0x8aa: 0x1537, 0x8ab: 0x0b9b, 0x8ac: 0x171e, 0x8ad: 0x11e3, + 0x8b0: 0x06df, 0x8b1: 0x07e3, 0x8b2: 0x07a3, 0x8b3: 0x074b, 0x8b4: 0x078b, 0x8b5: 0x07b7, + 0x8b6: 0x0847, 0x8b7: 0x0863, 0x8b8: 0x094b, 0x8b9: 0x0937, 0x8ba: 0x0947, 0x8bb: 0x0963, + 0x8bc: 0x09af, 0x8bd: 0x09bf, 0x8be: 0x0a03, 0x8bf: 0x0a0f, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0a2b, 0x8c1: 0x0a3b, 0x8c2: 0x0b23, 0x8c3: 0x0b2b, 0x8c4: 0x0b5b, 0x8c5: 0x0b7b, + 0x8c6: 0x0bab, 0x8c7: 0x0bc3, 0x8c8: 0x0bb3, 0x8c9: 0x0bd3, 0x8ca: 0x0bc7, 0x8cb: 0x0beb, + 0x8cc: 0x0c07, 0x8cd: 0x0c5f, 0x8ce: 0x0c6b, 0x8cf: 0x0c73, 0x8d0: 0x0c9b, 0x8d1: 0x0cdf, + 0x8d2: 0x0d0f, 0x8d3: 0x0d13, 0x8d4: 0x0d27, 0x8d5: 0x0da7, 0x8d6: 0x0db7, 0x8d7: 0x0e0f, + 0x8d8: 0x0e5b, 0x8d9: 0x0e53, 0x8da: 0x0e67, 0x8db: 0x0e83, 0x8dc: 0x0ebb, 0x8dd: 0x1013, + 0x8de: 0x0edf, 0x8df: 0x0f13, 0x8e0: 0x0f1f, 0x8e1: 0x0f5f, 0x8e2: 0x0f7b, 0x8e3: 0x0f9f, + 0x8e4: 0x0fc3, 0x8e5: 0x0fc7, 0x8e6: 0x0fe3, 0x8e7: 0x0fe7, 0x8e8: 0x0ff7, 0x8e9: 0x100b, + 0x8ea: 0x1007, 0x8eb: 0x1037, 0x8ec: 0x10b3, 0x8ed: 0x10cb, 0x8ee: 0x10e3, 0x8ef: 0x111b, + 0x8f0: 0x112f, 0x8f1: 0x114b, 0x8f2: 0x117b, 0x8f3: 0x122f, 0x8f4: 0x1257, 0x8f5: 0x12cb, + 0x8f6: 0x1313, 0x8f7: 0x131f, 0x8f8: 0x1327, 0x8f9: 0x133f, 0x8fa: 0x1353, 0x8fb: 0x1343, + 0x8fc: 0x135b, 0x8fd: 0x1357, 0x8fe: 0x134f, 0x8ff: 0x135f, + // Block 0x24, offset 0x900 + 0x900: 0x136b, 0x901: 0x13a7, 0x902: 0x13e3, 0x903: 0x1413, 0x904: 0x144b, 0x905: 0x146b, + 0x906: 0x14b7, 0x907: 0x14db, 0x908: 0x14fb, 0x909: 0x150f, 0x90a: 0x151f, 0x90b: 0x152b, + 0x90c: 0x1537, 0x90d: 0x158b, 0x90e: 0x162b, 0x90f: 0x16b5, 0x910: 0x16b0, 0x911: 0x16e2, + 0x912: 0x0607, 0x913: 0x062f, 0x914: 0x0633, 0x915: 0x1764, 0x916: 0x1791, 0x917: 0x1809, + 0x918: 0x1617, 0x919: 0x1627, + // Block 0x25, offset 0x940 + 0x940: 0x06fb, 0x941: 0x06f3, 0x942: 0x0703, 0x943: 0x1647, 0x944: 0x0747, 0x945: 0x0757, + 0x946: 0x075b, 0x947: 0x0763, 0x948: 0x076b, 0x949: 0x076f, 0x94a: 0x077b, 0x94b: 0x0773, + 0x94c: 0x05b3, 0x94d: 0x165b, 0x94e: 0x078f, 0x94f: 0x0793, 0x950: 0x0797, 0x951: 0x07b3, + 0x952: 0x164c, 0x953: 0x05b7, 0x954: 0x079f, 0x955: 0x07bf, 0x956: 0x1656, 0x957: 0x07cf, + 0x958: 0x07d7, 0x959: 0x0737, 0x95a: 0x07df, 0x95b: 0x07e3, 0x95c: 0x1831, 
0x95d: 0x07ff, + 0x95e: 0x0807, 0x95f: 0x05bf, 0x960: 0x081f, 0x961: 0x0823, 0x962: 0x082b, 0x963: 0x082f, + 0x964: 0x05c3, 0x965: 0x0847, 0x966: 0x084b, 0x967: 0x0857, 0x968: 0x0863, 0x969: 0x0867, + 0x96a: 0x086b, 0x96b: 0x0873, 0x96c: 0x0893, 0x96d: 0x0897, 0x96e: 0x089f, 0x96f: 0x08af, + 0x970: 0x08b7, 0x971: 0x08bb, 0x972: 0x08bb, 0x973: 0x08bb, 0x974: 0x166a, 0x975: 0x0e93, + 0x976: 0x08cf, 0x977: 0x08d7, 0x978: 0x166f, 0x979: 0x08e3, 0x97a: 0x08eb, 0x97b: 0x08f3, + 0x97c: 0x091b, 0x97d: 0x0907, 0x97e: 0x0913, 0x97f: 0x0917, + // Block 0x26, offset 0x980 + 0x980: 0x091f, 0x981: 0x0927, 0x982: 0x092b, 0x983: 0x0933, 0x984: 0x093b, 0x985: 0x093f, + 0x986: 0x093f, 0x987: 0x0947, 0x988: 0x094f, 0x989: 0x0953, 0x98a: 0x095f, 0x98b: 0x0983, + 0x98c: 0x0967, 0x98d: 0x0987, 0x98e: 0x096b, 0x98f: 0x0973, 0x990: 0x080b, 0x991: 0x09cf, + 0x992: 0x0997, 0x993: 0x099b, 0x994: 0x099f, 0x995: 0x0993, 0x996: 0x09a7, 0x997: 0x09a3, + 0x998: 0x09bb, 0x999: 0x1674, 0x99a: 0x09d7, 0x99b: 0x09db, 0x99c: 0x09e3, 0x99d: 0x09ef, + 0x99e: 0x09f7, 0x99f: 0x0a13, 0x9a0: 0x1679, 0x9a1: 0x167e, 0x9a2: 0x0a1f, 0x9a3: 0x0a23, + 0x9a4: 0x0a27, 0x9a5: 0x0a1b, 0x9a6: 0x0a2f, 0x9a7: 0x05c7, 0x9a8: 0x05cb, 0x9a9: 0x0a37, + 0x9aa: 0x0a3f, 0x9ab: 0x0a3f, 0x9ac: 0x1683, 0x9ad: 0x0a5b, 0x9ae: 0x0a5f, 0x9af: 0x0a63, + 0x9b0: 0x0a6b, 0x9b1: 0x1688, 0x9b2: 0x0a73, 0x9b3: 0x0a77, 0x9b4: 0x0b4f, 0x9b5: 0x0a7f, + 0x9b6: 0x05cf, 0x9b7: 0x0a8b, 0x9b8: 0x0a9b, 0x9b9: 0x0aa7, 0x9ba: 0x0aa3, 0x9bb: 0x1692, + 0x9bc: 0x0aaf, 0x9bd: 0x1697, 0x9be: 0x0abb, 0x9bf: 0x0ab7, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0abf, 0x9c1: 0x0acf, 0x9c2: 0x0ad3, 0x9c3: 0x05d3, 0x9c4: 0x0ae3, 0x9c5: 0x0aeb, + 0x9c6: 0x0aef, 0x9c7: 0x0af3, 0x9c8: 0x05d7, 0x9c9: 0x169c, 0x9ca: 0x05db, 0x9cb: 0x0b0f, + 0x9cc: 0x0b13, 0x9cd: 0x0b17, 0x9ce: 0x0b1f, 0x9cf: 0x1863, 0x9d0: 0x0b37, 0x9d1: 0x16a6, + 0x9d2: 0x16a6, 0x9d3: 0x11d7, 0x9d4: 0x0b47, 0x9d5: 0x0b47, 0x9d6: 0x05df, 0x9d7: 0x16c9, + 0x9d8: 0x179b, 0x9d9: 0x0b57, 0x9da: 0x0b5f, 0x9db: 0x05e3, 0x9dc: 0x0b73, 0x9dd: 0x0b83, + 0x9de: 0x0b87, 0x9df: 0x0b8f, 0x9e0: 0x0b9f, 0x9e1: 0x05eb, 0x9e2: 0x05e7, 0x9e3: 0x0ba3, + 0x9e4: 0x16ab, 0x9e5: 0x0ba7, 0x9e6: 0x0bbb, 0x9e7: 0x0bbf, 0x9e8: 0x0bc3, 0x9e9: 0x0bbf, + 0x9ea: 0x0bcf, 0x9eb: 0x0bd3, 0x9ec: 0x0be3, 0x9ed: 0x0bdb, 0x9ee: 0x0bdf, 0x9ef: 0x0be7, + 0x9f0: 0x0beb, 0x9f1: 0x0bef, 0x9f2: 0x0bfb, 0x9f3: 0x0bff, 0x9f4: 0x0c17, 0x9f5: 0x0c1f, + 0x9f6: 0x0c2f, 0x9f7: 0x0c43, 0x9f8: 0x16ba, 0x9f9: 0x0c3f, 0x9fa: 0x0c33, 0x9fb: 0x0c4b, + 0x9fc: 0x0c53, 0x9fd: 0x0c67, 0x9fe: 0x16bf, 0x9ff: 0x0c6f, + // Block 0x28, offset 0xa00 + 0xa00: 0x0c63, 0xa01: 0x0c5b, 0xa02: 0x05ef, 0xa03: 0x0c77, 0xa04: 0x0c7f, 0xa05: 0x0c87, + 0xa06: 0x0c7b, 0xa07: 0x05f3, 0xa08: 0x0c97, 0xa09: 0x0c9f, 0xa0a: 0x16c4, 0xa0b: 0x0ccb, + 0xa0c: 0x0cff, 0xa0d: 0x0cdb, 0xa0e: 0x05ff, 0xa0f: 0x0ce7, 0xa10: 0x05fb, 0xa11: 0x05f7, + 0xa12: 0x07c3, 0xa13: 0x07c7, 0xa14: 0x0d03, 0xa15: 0x0ceb, 0xa16: 0x11ab, 0xa17: 0x0663, + 0xa18: 0x0d0f, 0xa19: 0x0d13, 0xa1a: 0x0d17, 0xa1b: 0x0d2b, 0xa1c: 0x0d23, 0xa1d: 0x16dd, + 0xa1e: 0x0603, 0xa1f: 0x0d3f, 0xa20: 0x0d33, 0xa21: 0x0d4f, 0xa22: 0x0d57, 0xa23: 0x16e7, + 0xa24: 0x0d5b, 0xa25: 0x0d47, 0xa26: 0x0d63, 0xa27: 0x0607, 0xa28: 0x0d67, 0xa29: 0x0d6b, + 0xa2a: 0x0d6f, 0xa2b: 0x0d7b, 0xa2c: 0x16ec, 0xa2d: 0x0d83, 0xa2e: 0x060b, 0xa2f: 0x0d8f, + 0xa30: 0x16f1, 0xa31: 0x0d93, 0xa32: 0x060f, 0xa33: 0x0d9f, 0xa34: 0x0dab, 0xa35: 0x0db7, + 0xa36: 0x0dbb, 0xa37: 0x16f6, 0xa38: 0x168d, 0xa39: 0x16fb, 0xa3a: 0x0ddb, 0xa3b: 0x1700, + 0xa3c: 0x0de7, 0xa3d: 0x0def, 0xa3e: 
0x0ddf, 0xa3f: 0x0dfb, + // Block 0x29, offset 0xa40 + 0xa40: 0x0e0b, 0xa41: 0x0e1b, 0xa42: 0x0e0f, 0xa43: 0x0e13, 0xa44: 0x0e1f, 0xa45: 0x0e23, + 0xa46: 0x1705, 0xa47: 0x0e07, 0xa48: 0x0e3b, 0xa49: 0x0e3f, 0xa4a: 0x0613, 0xa4b: 0x0e53, + 0xa4c: 0x0e4f, 0xa4d: 0x170a, 0xa4e: 0x0e33, 0xa4f: 0x0e6f, 0xa50: 0x170f, 0xa51: 0x1714, + 0xa52: 0x0e73, 0xa53: 0x0e87, 0xa54: 0x0e83, 0xa55: 0x0e7f, 0xa56: 0x0617, 0xa57: 0x0e8b, + 0xa58: 0x0e9b, 0xa59: 0x0e97, 0xa5a: 0x0ea3, 0xa5b: 0x1651, 0xa5c: 0x0eb3, 0xa5d: 0x1719, + 0xa5e: 0x0ebf, 0xa5f: 0x1723, 0xa60: 0x0ed3, 0xa61: 0x0edf, 0xa62: 0x0ef3, 0xa63: 0x1728, + 0xa64: 0x0f07, 0xa65: 0x0f0b, 0xa66: 0x172d, 0xa67: 0x1732, 0xa68: 0x0f27, 0xa69: 0x0f37, + 0xa6a: 0x061b, 0xa6b: 0x0f3b, 0xa6c: 0x061f, 0xa6d: 0x061f, 0xa6e: 0x0f53, 0xa6f: 0x0f57, + 0xa70: 0x0f5f, 0xa71: 0x0f63, 0xa72: 0x0f6f, 0xa73: 0x0623, 0xa74: 0x0f87, 0xa75: 0x1737, + 0xa76: 0x0fa3, 0xa77: 0x173c, 0xa78: 0x0faf, 0xa79: 0x16a1, 0xa7a: 0x0fbf, 0xa7b: 0x1741, + 0xa7c: 0x1746, 0xa7d: 0x174b, 0xa7e: 0x0627, 0xa7f: 0x062b, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0ff7, 0xa81: 0x1755, 0xa82: 0x1750, 0xa83: 0x175a, 0xa84: 0x175f, 0xa85: 0x0fff, + 0xa86: 0x1003, 0xa87: 0x1003, 0xa88: 0x100b, 0xa89: 0x0633, 0xa8a: 0x100f, 0xa8b: 0x0637, + 0xa8c: 0x063b, 0xa8d: 0x1769, 0xa8e: 0x1023, 0xa8f: 0x102b, 0xa90: 0x1037, 0xa91: 0x063f, + 0xa92: 0x176e, 0xa93: 0x105b, 0xa94: 0x1773, 0xa95: 0x1778, 0xa96: 0x107b, 0xa97: 0x1093, + 0xa98: 0x0643, 0xa99: 0x109b, 0xa9a: 0x109f, 0xa9b: 0x10a3, 0xa9c: 0x177d, 0xa9d: 0x1782, + 0xa9e: 0x1782, 0xa9f: 0x10bb, 0xaa0: 0x0647, 0xaa1: 0x1787, 0xaa2: 0x10cf, 0xaa3: 0x10d3, + 0xaa4: 0x064b, 0xaa5: 0x178c, 0xaa6: 0x10ef, 0xaa7: 0x064f, 0xaa8: 0x10ff, 0xaa9: 0x10f7, + 0xaaa: 0x1107, 0xaab: 0x1796, 0xaac: 0x111f, 0xaad: 0x0653, 0xaae: 0x112b, 0xaaf: 0x1133, + 0xab0: 0x1143, 0xab1: 0x0657, 0xab2: 0x17a0, 0xab3: 0x17a5, 0xab4: 0x065b, 0xab5: 0x17aa, + 0xab6: 0x115b, 0xab7: 0x17af, 0xab8: 0x1167, 0xab9: 0x1173, 0xaba: 0x117b, 0xabb: 0x17b4, + 0xabc: 0x17b9, 0xabd: 0x118f, 0xabe: 0x17be, 0xabf: 0x1197, + // Block 0x2b, offset 0xac0 + 0xac0: 0x16ce, 0xac1: 0x065f, 0xac2: 0x11af, 0xac3: 0x11b3, 0xac4: 0x0667, 0xac5: 0x11b7, + 0xac6: 0x0a33, 0xac7: 0x17c3, 0xac8: 0x17c8, 0xac9: 0x16d3, 0xaca: 0x16d8, 0xacb: 0x11d7, + 0xacc: 0x11db, 0xacd: 0x13f3, 0xace: 0x066b, 0xacf: 0x1207, 0xad0: 0x1203, 0xad1: 0x120b, + 0xad2: 0x083f, 0xad3: 0x120f, 0xad4: 0x1213, 0xad5: 0x1217, 0xad6: 0x121f, 0xad7: 0x17cd, + 0xad8: 0x121b, 0xad9: 0x1223, 0xada: 0x1237, 0xadb: 0x123b, 0xadc: 0x1227, 0xadd: 0x123f, + 0xade: 0x1253, 0xadf: 0x1267, 0xae0: 0x1233, 0xae1: 0x1247, 0xae2: 0x124b, 0xae3: 0x124f, + 0xae4: 0x17d2, 0xae5: 0x17dc, 0xae6: 0x17d7, 0xae7: 0x066f, 0xae8: 0x126f, 0xae9: 0x1273, + 0xaea: 0x127b, 0xaeb: 0x17f0, 0xaec: 0x127f, 0xaed: 0x17e1, 0xaee: 0x0673, 0xaef: 0x0677, + 0xaf0: 0x17e6, 0xaf1: 0x17eb, 0xaf2: 0x067b, 0xaf3: 0x129f, 0xaf4: 0x12a3, 0xaf5: 0x12a7, + 0xaf6: 0x12ab, 0xaf7: 0x12b7, 0xaf8: 0x12b3, 0xaf9: 0x12bf, 0xafa: 0x12bb, 0xafb: 0x12cb, + 0xafc: 0x12c3, 0xafd: 0x12c7, 0xafe: 0x12cf, 0xaff: 0x067f, + // Block 0x2c, offset 0xb00 + 0xb00: 0x12d7, 0xb01: 0x12db, 0xb02: 0x0683, 0xb03: 0x12eb, 0xb04: 0x12ef, 0xb05: 0x17f5, + 0xb06: 0x12fb, 0xb07: 0x12ff, 0xb08: 0x0687, 0xb09: 0x130b, 0xb0a: 0x05bb, 0xb0b: 0x17fa, + 0xb0c: 0x17ff, 0xb0d: 0x068b, 0xb0e: 0x068f, 0xb0f: 0x1337, 0xb10: 0x134f, 0xb11: 0x136b, + 0xb12: 0x137b, 0xb13: 0x1804, 0xb14: 0x138f, 0xb15: 0x1393, 0xb16: 0x13ab, 0xb17: 0x13b7, + 0xb18: 0x180e, 0xb19: 0x1660, 0xb1a: 0x13c3, 0xb1b: 0x13bf, 0xb1c: 0x13cb, 0xb1d: 0x1665, + 
0xb1e: 0x13d7, 0xb1f: 0x13e3, 0xb20: 0x1813, 0xb21: 0x1818, 0xb22: 0x1423, 0xb23: 0x142f, + 0xb24: 0x1437, 0xb25: 0x181d, 0xb26: 0x143b, 0xb27: 0x1467, 0xb28: 0x1473, 0xb29: 0x1477, + 0xb2a: 0x146f, 0xb2b: 0x1483, 0xb2c: 0x1487, 0xb2d: 0x1822, 0xb2e: 0x1493, 0xb2f: 0x0693, + 0xb30: 0x149b, 0xb31: 0x1827, 0xb32: 0x0697, 0xb33: 0x14d3, 0xb34: 0x0ac3, 0xb35: 0x14eb, + 0xb36: 0x182c, 0xb37: 0x1836, 0xb38: 0x069b, 0xb39: 0x069f, 0xb3a: 0x1513, 0xb3b: 0x183b, + 0xb3c: 0x06a3, 0xb3d: 0x1840, 0xb3e: 0x152b, 0xb3f: 0x152b, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1533, 0xb41: 0x1845, 0xb42: 0x154b, 0xb43: 0x06a7, 0xb44: 0x155b, 0xb45: 0x1567, + 0xb46: 0x156f, 0xb47: 0x1577, 0xb48: 0x06ab, 0xb49: 0x184a, 0xb4a: 0x158b, 0xb4b: 0x15a7, + 0xb4c: 0x15b3, 0xb4d: 0x06af, 0xb4e: 0x06b3, 0xb4f: 0x15b7, 0xb50: 0x184f, 0xb51: 0x06b7, + 0xb52: 0x1854, 0xb53: 0x1859, 0xb54: 0x185e, 0xb55: 0x15db, 0xb56: 0x06bb, 0xb57: 0x15ef, + 0xb58: 0x15f7, 0xb59: 0x15fb, 0xb5a: 0x1603, 0xb5b: 0x160b, 0xb5c: 0x1613, 0xb5d: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x2e, 0xcb: 0x2f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x30, + 0xd0: 0x09, 0xd1: 0x31, 0xd2: 0x32, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x33, + 0xd8: 0x34, 0xd9: 0x0c, 0xdb: 0x35, 0xdc: 0x36, 0xdd: 0x37, 0xdf: 0x38, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x39, 0x121: 0x3a, 0x123: 0x3b, 0x124: 0x3c, 0x125: 0x3d, 0x126: 0x3e, 0x127: 0x3f, + 0x128: 0x40, 0x129: 0x41, 0x12a: 0x42, 0x12b: 0x43, 0x12c: 0x3e, 0x12d: 0x44, 0x12e: 0x45, 0x12f: 0x46, + 0x131: 0x47, 0x132: 0x48, 0x133: 0x49, 0x134: 0x4a, 0x135: 0x4b, 0x137: 0x4c, + 0x138: 0x4d, 0x139: 0x4e, 0x13a: 0x4f, 0x13b: 0x50, 0x13c: 0x51, 0x13d: 0x52, 0x13e: 0x53, 0x13f: 0x54, + // Block 0x5, offset 0x140 + 0x140: 0x55, 0x142: 0x56, 0x144: 0x57, 0x145: 0x58, 0x146: 0x59, 0x147: 0x5a, + 0x14d: 0x5b, + 0x15c: 0x5c, 0x15f: 0x5d, + 0x162: 0x5e, 0x164: 0x5f, + 0x168: 0x60, 0x169: 0x61, 0x16a: 0x62, 0x16c: 0x0d, 0x16d: 0x63, 0x16e: 0x64, 0x16f: 0x65, + 0x170: 0x66, 0x173: 0x67, 0x177: 0x68, + 0x178: 0x0e, 0x179: 0x0f, 0x17a: 0x10, 0x17b: 0x11, 0x17c: 0x12, 0x17d: 0x13, 0x17e: 0x14, 0x17f: 0x15, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x16, 0x18a: 0x17, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x18, 0x1c2: 0x19, 0x1c3: 0x1a, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 
0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1b, 0x325: 0x1c, 0x326: 0x1d, 0x327: 0x1e, + 0x328: 0x1f, 0x329: 0x20, 0x32a: 0x21, 0x32b: 0x22, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + // Block 0xe, offset 0x380 + 0x381: 0xa1, 0x382: 0xa2, 0x384: 0xa3, 0x385: 0x82, 0x387: 0xa4, + 0x388: 0xa5, 0x38b: 0xa6, 0x38c: 0x3e, 0x38d: 0xa7, + 0x391: 0xa8, 0x392: 0xa9, 0x393: 0xaa, 0x396: 0xab, 0x397: 0xac, + 0x398: 0x73, 0x39a: 0xad, 0x39c: 0xae, + 0x3b0: 0x73, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xaf, 0x3ec: 0xb0, + // Block 0x10, offset 0x400 + 0x432: 0xb1, + // Block 0x11, offset 0x440 + 0x445: 0xb2, 0x446: 0xb3, 0x447: 0xb4, + 0x449: 0xb5, + // Block 0x12, offset 0x480 + 0x480: 0xb6, + 0x4a3: 0xb7, 0x4a5: 0xb8, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xb9, + // Block 0x14, offset 0x500 + 0x520: 0x23, 0x521: 0x24, 0x522: 0x25, 0x523: 0x26, 0x524: 0x27, 0x525: 0x28, 0x526: 0x29, 0x527: 0x2a, + 0x528: 0x2b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 142 entries, 284 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x62, 0x67, 0x69, 0x7a, 0x82, 0x89, 0x8c, 0x93, 0x97, 0x9b, 0x9d, 0x9f, 0xa8, 0xac, 0xb3, 0xb8, 0xbb, 0xc5, 0xc7, 0xce, 0xd6, 0xd9, 0xdb, 0xdd, 0xdf, 0xe4, 0xf5, 0x101, 0x103, 0x109, 0x10b, 0x10d, 
0x10f, 0x111, 0x113, 0x115, 0x118, 0x11b, 0x11d, 0x120, 0x123, 0x127, 0x12c, 0x135, 0x137, 0x13a, 0x13c, 0x147, 0x157, 0x15b, 0x169, 0x16c, 0x172, 0x178, 0x183, 0x187, 0x189, 0x18b, 0x18d, 0x18f, 0x191, 0x197, 0x19b, 0x19d, 0x19f, 0x1a7, 0x1ab, 0x1ae, 0x1b0, 0x1b2, 0x1b4, 0x1b7, 0x1b9, 0x1bb, 0x1bd, 0x1bf, 0x1c5, 0x1c8, 0x1ca, 0x1d1, 0x1d7, 0x1dd, 0x1e5, 0x1eb, 0x1f1, 0x1f7, 0x1fb, 0x209, 0x212, 0x215, 0x218, 0x21a, 0x21d, 0x21f, 0x223, 0x228, 0x22a, 0x22c, 0x231, 0x237, 0x239, 0x23b, 0x23d, 0x243, 0x246, 0x249, 0x251, 0x258, 0x25b, 0x25e, 0x260, 0x268, 0x26b, 0x272, 0x275, 0x27b, 0x27d, 0x280, 0x282, 0x284, 0x286, 0x288, 0x295, 0x29f, 0x2a1, 0x2a3, 0x2a9, 0x2ab, 0x2ae} + +// nfcSparseValues: 688 entries, 2752 bytes +var nfcSparseValues = [688]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, + {value: 0x36c7, lo: 0x8c, hi: 0x8c}, + {value: 0x36df, lo: 0x8d, hi: 0x8d}, + {value: 0x4876, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x36fd, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, 
+ {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + // Block 0xd, offset 0x62 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x67 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x69 + {value: 0x0000, lo: 0x10}, + {value: 0x8132, lo: 0x94, hi: 0xa1}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xa9, hi: 0xa9}, + {value: 0x8132, lo: 0xaa, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xaf}, + {value: 0x8116, lo: 0xb0, hi: 0xb0}, + {value: 0x8117, lo: 0xb1, hi: 0xb1}, + {value: 0x8118, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb5}, + {value: 0x812d, lo: 0xb6, hi: 0xb6}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x812d, lo: 0xb9, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbf}, + // Block 0x10, offset 0x7a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x11, offset 0x82 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x12, offset 0x89 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x8c + {value: 0x0008, lo: 0x06}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + // Block 0x14, offset 0x93 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x97 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, 
lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x16, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x17, offset 0x9d + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x18, offset 0x9f + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x19, offset 0xa8 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1a, offset 0xac + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1b, offset 0xb3 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xb8 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0xbb + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1e, offset 0xc5 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1f, offset 0xc7 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x20, offset 0xce + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x21, offset 0xd6 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x23, offset 0xdb + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x24, offset 0xdd + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x25, offset 0xdf + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0xe4 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 
0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x27, offset 0xf5 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x28, offset 0x101 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x29, offset 0x103 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x2a, offset 0x109 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2b, offset 0x10b + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x10f + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x111 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x115 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x118 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x11b + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x11d + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x120 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x123 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x127 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x37, offset 0x12c + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x135 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x137 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x13a + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // 
Block 0x3b, offset 0x13c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x147 + {value: 0x0000, lo: 0x0f}, + {value: 0x8132, lo: 0x80, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x82}, + {value: 0x8132, lo: 0x83, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8a}, + {value: 0x8132, lo: 0x8b, hi: 0x8c}, + {value: 0x8135, lo: 0x8d, hi: 0x8d}, + {value: 0x812a, lo: 0x8e, hi: 0x8e}, + {value: 0x812d, lo: 0x8f, hi: 0x8f}, + {value: 0x8129, lo: 0x90, hi: 0x90}, + {value: 0x8132, lo: 0x91, hi: 0xb5}, + {value: 0x8132, lo: 0xbb, hi: 0xbb}, + {value: 0x8134, lo: 0xbc, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x157 + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, offset 0x15b + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3f, offset 0x169 + {value: 0x427b, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x40, offset 0x16c + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x172 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x178 + {value: 0x6408, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x183 + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x187 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x189 + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x46, offset 0x18b + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x47, offset 0x18d + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x48, offset 0x18f + {value: 0x0000, lo: 0x01}, + 
{value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x191 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x4a, offset 0x197 + {value: 0x0000, lo: 0x03}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, + // Block 0x4b, offset 0x19b + {value: 0x0000, lo: 0x01}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, + // Block 0x4c, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4d, offset 0x19f + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4e, offset 0x1a7 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4f, offset 0x1ab + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x50, offset 0x1ae + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x51, offset 0x1b0 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x52, offset 0x1b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x53, offset 0x1b4 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x54, offset 0x1b7 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x55, offset 0x1b9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x56, offset 0x1bb + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x57, offset 0x1bd + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x58, offset 0x1bf + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x59, offset 0x1c5 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x5a, offset 0x1c8 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x5b, offset 0x1ca + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5c, offset 0x1d1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5d, offset 0x1d7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5e, offset 0x1dd + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 
0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5f, offset 0x1e5 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x60, offset 0x1eb + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x61, offset 0x1f1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x62, offset 0x1f7 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x63, offset 0x1fb + {value: 0x0006, lo: 0x0d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 0xbb}, + {value: 0x43ae, lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, + // Block 0x64, offset 0x209 + {value: 0x0006, lo: 0x08}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 0x43d2, lo: 0x8e, hi: 0x8e}, + // Block 0x65, offset 0x212 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x66, offset 0x215 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x67, offset 0x218 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x68, offset 0x21a + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x69, offset 0x21d + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x6a, offset 0x21f + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x6b, offset 0x223 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6c, offset 0x228 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6d, offset 0x22a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6e, offset 0x22c + {value: 0x0000, lo: 0x04}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, + // Block 0x6f, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 
0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x70, offset 0x237 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x71, offset 0x239 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x72, offset 0x23b + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x73, offset 0x23d + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x243 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x75, offset 0x246 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x249 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x77, offset 0x251 + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x78, offset 0x258 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x79, offset 0x25b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x7a, offset 0x25e + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7b, offset 0x260 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7c, offset 0x268 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + // Block 0x7d, offset 0x26b + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7e, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7f, offset 0x275 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x80, offset 0x27b + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x81, offset 0x27d + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x82, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x83, offset 0x282 + {value: 0x0000, lo: 0x01}, + 
{value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x84, offset 0x284 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x85, offset 0x286 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x86, offset 0x288 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x87, offset 0x295 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x88, offset 0x29f + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x89, offset 0x2a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8a, offset 0x2a3 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x8b, offset 0x2a9 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x8c, offset 0x2ab + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x8d, offset 0x2ae + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 16994 bytes (16.60 KiB). Checksum: c3ed54ee046f3c46. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 90: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 90 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 92 blocks, 5888 entries, 11776 bytes +// The third block is the zero block. 
+var nfkcValues = [5888]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac, + // Block 0x5, offset 0x140 + 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2dee, 0x185: 0x2df4, + 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a, + 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a5, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425a, 0x285: 0x447b, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 
0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b, + 0x306: 0xa000, 0x307: 0x3709, + 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3733, 0x342: 0x37b7, + 0x350: 0x370f, 0x351: 0x3793, + 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd, + 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf, + 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed, + 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805, + 0x378: 0x3787, 0x379: 0x380b, + // Block 0xe, offset 0x380 + 0x387: 0x1d61, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d84, + 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000, + 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000, + 0x412: 0x2d4e, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d56, + 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x0069, 0x441: 0x006b, 0x442: 0x006f, 0x443: 0x0083, 0x444: 0x00f5, 0x445: 0x00f8, + 0x446: 0x0413, 0x447: 0x0085, 0x448: 0x0089, 
0x449: 0x008b, 0x44a: 0x0104, 0x44b: 0x0107, + 0x44c: 0x010a, 0x44d: 0x008f, 0x44f: 0x0097, 0x450: 0x009b, 0x451: 0x00e0, + 0x452: 0x009f, 0x453: 0x00fe, 0x454: 0x0417, 0x455: 0x041b, 0x456: 0x00a1, 0x457: 0x00a9, + 0x458: 0x00ab, 0x459: 0x0423, 0x45a: 0x012b, 0x45b: 0x00ad, 0x45c: 0x0427, 0x45d: 0x01be, + 0x45e: 0x01c1, 0x45f: 0x01c4, 0x460: 0x01fa, 0x461: 0x01fd, 0x462: 0x0093, 0x463: 0x00a5, + 0x464: 0x00ab, 0x465: 0x00ad, 0x466: 0x01be, 0x467: 0x01c1, 0x468: 0x01eb, 0x469: 0x01fa, + 0x46a: 0x01fd, + 0x478: 0x020c, + // Block 0x12, offset 0x480 + 0x49b: 0x00fb, 0x49c: 0x0087, 0x49d: 0x0101, + 0x49e: 0x00d4, 0x49f: 0x010a, 0x4a0: 0x008d, 0x4a1: 0x010d, 0x4a2: 0x0110, 0x4a3: 0x0116, + 0x4a4: 0x011c, 0x4a5: 0x011f, 0x4a6: 0x0122, 0x4a7: 0x042b, 0x4a8: 0x016a, 0x4a9: 0x0128, + 0x4aa: 0x042f, 0x4ab: 0x016d, 0x4ac: 0x0131, 0x4ad: 0x012e, 0x4ae: 0x0134, 0x4af: 0x0137, + 0x4b0: 0x013a, 0x4b1: 0x013d, 0x4b2: 0x0140, 0x4b3: 0x014c, 0x4b4: 0x014f, 0x4b5: 0x00ec, + 0x4b6: 0x0152, 0x4b7: 0x0155, 0x4b8: 0x041f, 0x4b9: 0x0158, 0x4ba: 0x015b, 0x4bb: 0x00b5, + 0x4bc: 0x015e, 0x4bd: 0x0161, 0x4be: 0x0164, 0x4bf: 0x01d0, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x2f97, 0x4c1: 0x32a3, 0x4c2: 0x2fa1, 0x4c3: 0x32ad, 0x4c4: 0x2fa6, 0x4c5: 0x32b2, + 0x4c6: 0x2fab, 0x4c7: 0x32b7, 0x4c8: 0x38cc, 0x4c9: 0x3a5b, 0x4ca: 0x2fc4, 0x4cb: 0x32d0, + 0x4cc: 0x2fce, 0x4cd: 0x32da, 0x4ce: 0x2fdd, 0x4cf: 0x32e9, 0x4d0: 0x2fd3, 0x4d1: 0x32df, + 0x4d2: 0x2fd8, 0x4d3: 0x32e4, 0x4d4: 0x38ef, 0x4d5: 0x3a7e, 0x4d6: 0x38f6, 0x4d7: 0x3a85, + 0x4d8: 0x3019, 0x4d9: 0x3325, 0x4da: 0x301e, 0x4db: 0x332a, 0x4dc: 0x3904, 0x4dd: 0x3a93, + 0x4de: 0x3023, 0x4df: 0x332f, 0x4e0: 0x3032, 0x4e1: 0x333e, 0x4e2: 0x3050, 0x4e3: 0x335c, + 0x4e4: 0x305f, 0x4e5: 0x336b, 0x4e6: 0x3055, 0x4e7: 0x3361, 0x4e8: 0x3064, 0x4e9: 0x3370, + 0x4ea: 0x3069, 0x4eb: 0x3375, 0x4ec: 0x30af, 0x4ed: 0x33bb, 0x4ee: 0x390b, 0x4ef: 0x3a9a, + 0x4f0: 0x30b9, 0x4f1: 0x33ca, 0x4f2: 0x30c3, 0x4f3: 0x33d4, 0x4f4: 0x30cd, 0x4f5: 0x33de, + 0x4f6: 0x46c4, 0x4f7: 0x4755, 0x4f8: 0x3912, 0x4f9: 0x3aa1, 0x4fa: 0x30e6, 0x4fb: 0x33f7, + 0x4fc: 0x30e1, 0x4fd: 0x33f2, 0x4fe: 0x30eb, 0x4ff: 0x33fc, + // Block 0x14, offset 0x500 + 0x500: 0x30f0, 0x501: 0x3401, 0x502: 0x30f5, 0x503: 0x3406, 0x504: 0x3109, 0x505: 0x341a, + 0x506: 0x3113, 0x507: 0x3424, 0x508: 0x3122, 0x509: 0x3433, 0x50a: 0x311d, 0x50b: 0x342e, + 0x50c: 0x3935, 0x50d: 0x3ac4, 0x50e: 0x3943, 0x50f: 0x3ad2, 0x510: 0x394a, 0x511: 0x3ad9, + 0x512: 0x3951, 0x513: 0x3ae0, 0x514: 0x314f, 0x515: 0x3460, 0x516: 0x3154, 0x517: 0x3465, + 0x518: 0x315e, 0x519: 0x346f, 0x51a: 0x46f1, 0x51b: 0x4782, 0x51c: 0x3997, 0x51d: 0x3b26, + 0x51e: 0x3177, 0x51f: 0x3488, 0x520: 0x3181, 0x521: 0x3492, 0x522: 0x4700, 0x523: 0x4791, + 0x524: 0x399e, 0x525: 0x3b2d, 0x526: 0x39a5, 0x527: 0x3b34, 0x528: 0x39ac, 0x529: 0x3b3b, + 0x52a: 0x3190, 0x52b: 0x34a1, 0x52c: 0x319a, 0x52d: 0x34b0, 0x52e: 0x31ae, 0x52f: 0x34c4, + 0x530: 0x31a9, 0x531: 0x34bf, 0x532: 0x31ea, 0x533: 0x3500, 0x534: 0x31f9, 0x535: 0x350f, + 0x536: 0x31f4, 0x537: 0x350a, 0x538: 0x39b3, 0x539: 0x3b42, 0x53a: 0x39ba, 0x53b: 0x3b49, + 0x53c: 0x31fe, 0x53d: 0x3514, 0x53e: 0x3203, 0x53f: 0x3519, + // Block 0x15, offset 0x540 + 0x540: 0x3208, 0x541: 0x351e, 0x542: 0x320d, 0x543: 0x3523, 0x544: 0x321c, 0x545: 0x3532, + 0x546: 0x3217, 0x547: 0x352d, 0x548: 0x3221, 0x549: 0x353c, 0x54a: 0x3226, 0x54b: 0x3541, + 0x54c: 0x322b, 0x54d: 0x3546, 0x54e: 0x3249, 0x54f: 0x3564, 0x550: 0x3262, 0x551: 0x3582, + 0x552: 0x3271, 0x553: 0x3591, 0x554: 0x3276, 0x555: 0x3596, 0x556: 0x337a, 0x557: 0x34a6, + 0x558: 
0x3537, 0x559: 0x3573, 0x55a: 0x1be0, 0x55b: 0x42d7, + 0x560: 0x46a1, 0x561: 0x4732, 0x562: 0x2f83, 0x563: 0x328f, + 0x564: 0x3878, 0x565: 0x3a07, 0x566: 0x3871, 0x567: 0x3a00, 0x568: 0x3886, 0x569: 0x3a15, + 0x56a: 0x387f, 0x56b: 0x3a0e, 0x56c: 0x38be, 0x56d: 0x3a4d, 0x56e: 0x3894, 0x56f: 0x3a23, + 0x570: 0x388d, 0x571: 0x3a1c, 0x572: 0x38a2, 0x573: 0x3a31, 0x574: 0x389b, 0x575: 0x3a2a, + 0x576: 0x38c5, 0x577: 0x3a54, 0x578: 0x46b5, 0x579: 0x4746, 0x57a: 0x3000, 0x57b: 0x330c, + 0x57c: 0x2fec, 0x57d: 0x32f8, 0x57e: 0x38da, 0x57f: 0x3a69, + // Block 0x16, offset 0x580 + 0x580: 0x38d3, 0x581: 0x3a62, 0x582: 0x38e8, 0x583: 0x3a77, 0x584: 0x38e1, 0x585: 0x3a70, + 0x586: 0x38fd, 0x587: 0x3a8c, 0x588: 0x3091, 0x589: 0x339d, 0x58a: 0x30a5, 0x58b: 0x33b1, + 0x58c: 0x46e7, 0x58d: 0x4778, 0x58e: 0x3136, 0x58f: 0x3447, 0x590: 0x3920, 0x591: 0x3aaf, + 0x592: 0x3919, 0x593: 0x3aa8, 0x594: 0x392e, 0x595: 0x3abd, 0x596: 0x3927, 0x597: 0x3ab6, + 0x598: 0x3989, 0x599: 0x3b18, 0x59a: 0x396d, 0x59b: 0x3afc, 0x59c: 0x3966, 0x59d: 0x3af5, + 0x59e: 0x397b, 0x59f: 0x3b0a, 0x5a0: 0x3974, 0x5a1: 0x3b03, 0x5a2: 0x3982, 0x5a3: 0x3b11, + 0x5a4: 0x31e5, 0x5a5: 0x34fb, 0x5a6: 0x31c7, 0x5a7: 0x34dd, 0x5a8: 0x39e4, 0x5a9: 0x3b73, + 0x5aa: 0x39dd, 0x5ab: 0x3b6c, 0x5ac: 0x39f2, 0x5ad: 0x3b81, 0x5ae: 0x39eb, 0x5af: 0x3b7a, + 0x5b0: 0x39f9, 0x5b1: 0x3b88, 0x5b2: 0x3230, 0x5b3: 0x354b, 0x5b4: 0x3258, 0x5b5: 0x3578, + 0x5b6: 0x3253, 0x5b7: 0x356e, 0x5b8: 0x323f, 0x5b9: 0x355a, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4804, 0x5c1: 0x480a, 0x5c2: 0x491e, 0x5c3: 0x4936, 0x5c4: 0x4926, 0x5c5: 0x493e, + 0x5c6: 0x492e, 0x5c7: 0x4946, 0x5c8: 0x47aa, 0x5c9: 0x47b0, 0x5ca: 0x488e, 0x5cb: 0x48a6, + 0x5cc: 0x4896, 0x5cd: 0x48ae, 0x5ce: 0x489e, 0x5cf: 0x48b6, 0x5d0: 0x4816, 0x5d1: 0x481c, + 0x5d2: 0x3db8, 0x5d3: 0x3dc8, 0x5d4: 0x3dc0, 0x5d5: 0x3dd0, + 0x5d8: 0x47b6, 0x5d9: 0x47bc, 0x5da: 0x3ce8, 0x5db: 0x3cf8, 0x5dc: 0x3cf0, 0x5dd: 0x3d00, + 0x5e0: 0x482e, 0x5e1: 0x4834, 0x5e2: 0x494e, 0x5e3: 0x4966, + 0x5e4: 0x4956, 0x5e5: 0x496e, 0x5e6: 0x495e, 0x5e7: 0x4976, 0x5e8: 0x47c2, 0x5e9: 0x47c8, + 0x5ea: 0x48be, 0x5eb: 0x48d6, 0x5ec: 0x48c6, 0x5ed: 0x48de, 0x5ee: 0x48ce, 0x5ef: 0x48e6, + 0x5f0: 0x4846, 0x5f1: 0x484c, 0x5f2: 0x3e18, 0x5f3: 0x3e30, 0x5f4: 0x3e20, 0x5f5: 0x3e38, + 0x5f6: 0x3e28, 0x5f7: 0x3e40, 0x5f8: 0x47ce, 0x5f9: 0x47d4, 0x5fa: 0x3d18, 0x5fb: 0x3d30, + 0x5fc: 0x3d20, 0x5fd: 0x3d38, 0x5fe: 0x3d28, 0x5ff: 0x3d40, + // Block 0x18, offset 0x600 + 0x600: 0x4852, 0x601: 0x4858, 0x602: 0x3e48, 0x603: 0x3e58, 0x604: 0x3e50, 0x605: 0x3e60, + 0x608: 0x47da, 0x609: 0x47e0, 0x60a: 0x3d48, 0x60b: 0x3d58, + 0x60c: 0x3d50, 0x60d: 0x3d60, 0x610: 0x4864, 0x611: 0x486a, + 0x612: 0x3e80, 0x613: 0x3e98, 0x614: 0x3e88, 0x615: 0x3ea0, 0x616: 0x3e90, 0x617: 0x3ea8, + 0x619: 0x47e6, 0x61b: 0x3d68, 0x61d: 0x3d70, + 0x61f: 0x3d78, 0x620: 0x487c, 0x621: 0x4882, 0x622: 0x497e, 0x623: 0x4996, + 0x624: 0x4986, 0x625: 0x499e, 0x626: 0x498e, 0x627: 0x49a6, 0x628: 0x47ec, 0x629: 0x47f2, + 0x62a: 0x48ee, 0x62b: 0x4906, 0x62c: 0x48f6, 0x62d: 0x490e, 0x62e: 0x48fe, 0x62f: 0x4916, + 0x630: 0x47f8, 0x631: 0x431e, 0x632: 0x3691, 0x633: 0x4324, 0x634: 0x4822, 0x635: 0x432a, + 0x636: 0x36a3, 0x637: 0x4330, 0x638: 0x36c1, 0x639: 0x4336, 0x63a: 0x36d9, 0x63b: 0x433c, + 0x63c: 0x4870, 0x63d: 0x4342, + // Block 0x19, offset 0x640 + 0x640: 0x3da0, 0x641: 0x3da8, 0x642: 0x4184, 0x643: 0x41a2, 0x644: 0x418e, 0x645: 0x41ac, + 0x646: 0x4198, 0x647: 0x41b6, 0x648: 0x3cd8, 0x649: 0x3ce0, 0x64a: 0x40d0, 0x64b: 0x40ee, + 0x64c: 0x40da, 0x64d: 0x40f8, 0x64e: 0x40e4, 0x64f: 0x4102, 
0x650: 0x3de8, 0x651: 0x3df0, + 0x652: 0x41c0, 0x653: 0x41de, 0x654: 0x41ca, 0x655: 0x41e8, 0x656: 0x41d4, 0x657: 0x41f2, + 0x658: 0x3d08, 0x659: 0x3d10, 0x65a: 0x410c, 0x65b: 0x412a, 0x65c: 0x4116, 0x65d: 0x4134, + 0x65e: 0x4120, 0x65f: 0x413e, 0x660: 0x3ec0, 0x661: 0x3ec8, 0x662: 0x41fc, 0x663: 0x421a, + 0x664: 0x4206, 0x665: 0x4224, 0x666: 0x4210, 0x667: 0x422e, 0x668: 0x3d80, 0x669: 0x3d88, + 0x66a: 0x4148, 0x66b: 0x4166, 0x66c: 0x4152, 0x66d: 0x4170, 0x66e: 0x415c, 0x66f: 0x417a, + 0x670: 0x3685, 0x671: 0x367f, 0x672: 0x3d90, 0x673: 0x368b, 0x674: 0x3d98, + 0x676: 0x4810, 0x677: 0x3db0, 0x678: 0x35f5, 0x679: 0x35ef, 0x67a: 0x35e3, 0x67b: 0x42ee, + 0x67c: 0x35fb, 0x67d: 0x4287, 0x67e: 0x01d3, 0x67f: 0x4287, + // Block 0x1a, offset 0x680 + 0x680: 0x42a0, 0x681: 0x4482, 0x682: 0x3dd8, 0x683: 0x369d, 0x684: 0x3de0, + 0x686: 0x483a, 0x687: 0x3df8, 0x688: 0x3601, 0x689: 0x42f4, 0x68a: 0x360d, 0x68b: 0x42fa, + 0x68c: 0x3619, 0x68d: 0x4489, 0x68e: 0x4490, 0x68f: 0x4497, 0x690: 0x36b5, 0x691: 0x36af, + 0x692: 0x3e00, 0x693: 0x44e4, 0x696: 0x36bb, 0x697: 0x3e10, + 0x698: 0x3631, 0x699: 0x362b, 0x69a: 0x361f, 0x69b: 0x4300, 0x69d: 0x449e, + 0x69e: 0x44a5, 0x69f: 0x44ac, 0x6a0: 0x36eb, 0x6a1: 0x36e5, 0x6a2: 0x3e68, 0x6a3: 0x44ec, + 0x6a4: 0x36cd, 0x6a5: 0x36d3, 0x6a6: 0x36f1, 0x6a7: 0x3e78, 0x6a8: 0x3661, 0x6a9: 0x365b, + 0x6aa: 0x364f, 0x6ab: 0x430c, 0x6ac: 0x3649, 0x6ad: 0x4474, 0x6ae: 0x447b, 0x6af: 0x0081, + 0x6b2: 0x3eb0, 0x6b3: 0x36f7, 0x6b4: 0x3eb8, + 0x6b6: 0x4888, 0x6b7: 0x3ed0, 0x6b8: 0x363d, 0x6b9: 0x4306, 0x6ba: 0x366d, 0x6bb: 0x4318, + 0x6bc: 0x3679, 0x6bd: 0x425a, 0x6be: 0x428c, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x1bd8, 0x6c1: 0x1bdc, 0x6c2: 0x0047, 0x6c3: 0x1c54, 0x6c5: 0x1be8, + 0x6c6: 0x1bec, 0x6c7: 0x00e9, 0x6c9: 0x1c58, 0x6ca: 0x008f, 0x6cb: 0x0051, + 0x6cc: 0x0051, 0x6cd: 0x0051, 0x6ce: 0x0091, 0x6cf: 0x00da, 0x6d0: 0x0053, 0x6d1: 0x0053, + 0x6d2: 0x0059, 0x6d3: 0x0099, 0x6d5: 0x005d, 0x6d6: 0x198d, + 0x6d9: 0x0061, 0x6da: 0x0063, 0x6db: 0x0065, 0x6dc: 0x0065, 0x6dd: 0x0065, + 0x6e0: 0x199f, 0x6e1: 0x1bc8, 0x6e2: 0x19a8, + 0x6e4: 0x0075, 0x6e6: 0x01b8, 0x6e8: 0x0075, + 0x6ea: 0x0057, 0x6eb: 0x42d2, 0x6ec: 0x0045, 0x6ed: 0x0047, 0x6ef: 0x008b, + 0x6f0: 0x004b, 0x6f1: 0x004d, 0x6f3: 0x005b, 0x6f4: 0x009f, 0x6f5: 0x0215, + 0x6f6: 0x0218, 0x6f7: 0x021b, 0x6f8: 0x021e, 0x6f9: 0x0093, 0x6fb: 0x1b98, + 0x6fc: 0x01e8, 0x6fd: 0x01c1, 0x6fe: 0x0179, 0x6ff: 0x01a0, + // Block 0x1c, offset 0x700 + 0x700: 0x0463, 0x705: 0x0049, + 0x706: 0x0089, 0x707: 0x008b, 0x708: 0x0093, 0x709: 0x0095, + 0x710: 0x222e, 0x711: 0x223a, + 0x712: 0x22ee, 0x713: 0x2216, 0x714: 0x229a, 0x715: 0x2222, 0x716: 0x22a0, 0x717: 0x22b8, + 0x718: 0x22c4, 0x719: 0x2228, 0x71a: 0x22ca, 0x71b: 0x2234, 0x71c: 0x22be, 0x71d: 0x22d0, + 0x71e: 0x22d6, 0x71f: 0x1cbc, 0x720: 0x0053, 0x721: 0x195a, 0x722: 0x1ba4, 0x723: 0x1963, + 0x724: 0x006d, 0x725: 0x19ab, 0x726: 0x1bd0, 0x727: 0x1d48, 0x728: 0x1966, 0x729: 0x0071, + 0x72a: 0x19b7, 0x72b: 0x1bd4, 0x72c: 0x0059, 0x72d: 0x0047, 0x72e: 0x0049, 0x72f: 0x005b, + 0x730: 0x0093, 0x731: 0x19e4, 0x732: 0x1c18, 0x733: 0x19ed, 0x734: 0x00ad, 0x735: 0x1a62, + 0x736: 0x1c4c, 0x737: 0x1d5c, 0x738: 0x19f0, 0x739: 0x00b1, 0x73a: 0x1a65, 0x73b: 0x1c50, + 0x73c: 0x0099, 0x73d: 0x0087, 0x73e: 0x0089, 0x73f: 0x009b, + // Block 0x1d, offset 0x740 + 0x741: 0x3c06, 0x743: 0xa000, 0x744: 0x3c0d, 0x745: 0xa000, + 0x747: 0x3c14, 0x748: 0xa000, 0x749: 0x3c1b, + 0x74d: 0xa000, + 0x760: 0x2f65, 0x761: 0xa000, 0x762: 0x3c29, + 0x764: 0xa000, 0x765: 0xa000, + 0x76d: 0x3c22, 0x76e: 0x2f60, 0x76f: 
0x2f6a, + 0x770: 0x3c30, 0x771: 0x3c37, 0x772: 0xa000, 0x773: 0xa000, 0x774: 0x3c3e, 0x775: 0x3c45, + 0x776: 0xa000, 0x777: 0xa000, 0x778: 0x3c4c, 0x779: 0x3c53, 0x77a: 0xa000, 0x77b: 0xa000, + 0x77c: 0xa000, 0x77d: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x3c5a, 0x781: 0x3c61, 0x782: 0xa000, 0x783: 0xa000, 0x784: 0x3c76, 0x785: 0x3c7d, + 0x786: 0xa000, 0x787: 0xa000, 0x788: 0x3c84, 0x789: 0x3c8b, + 0x791: 0xa000, + 0x792: 0xa000, + 0x7a2: 0xa000, + 0x7a8: 0xa000, 0x7a9: 0xa000, + 0x7ab: 0xa000, 0x7ac: 0x3ca0, 0x7ad: 0x3ca7, 0x7ae: 0x3cae, 0x7af: 0x3cb5, + 0x7b2: 0xa000, 0x7b3: 0xa000, 0x7b4: 0xa000, 0x7b5: 0xa000, + // Block 0x1f, offset 0x7c0 + 0x7e0: 0x0023, 0x7e1: 0x0025, 0x7e2: 0x0027, 0x7e3: 0x0029, + 0x7e4: 0x002b, 0x7e5: 0x002d, 0x7e6: 0x002f, 0x7e7: 0x0031, 0x7e8: 0x0033, 0x7e9: 0x1882, + 0x7ea: 0x1885, 0x7eb: 0x1888, 0x7ec: 0x188b, 0x7ed: 0x188e, 0x7ee: 0x1891, 0x7ef: 0x1894, + 0x7f0: 0x1897, 0x7f1: 0x189a, 0x7f2: 0x189d, 0x7f3: 0x18a6, 0x7f4: 0x1a68, 0x7f5: 0x1a6c, + 0x7f6: 0x1a70, 0x7f7: 0x1a74, 0x7f8: 0x1a78, 0x7f9: 0x1a7c, 0x7fa: 0x1a80, 0x7fb: 0x1a84, + 0x7fc: 0x1a88, 0x7fd: 0x1c80, 0x7fe: 0x1c85, 0x7ff: 0x1c8a, + // Block 0x20, offset 0x800 + 0x800: 0x1c8f, 0x801: 0x1c94, 0x802: 0x1c99, 0x803: 0x1c9e, 0x804: 0x1ca3, 0x805: 0x1ca8, + 0x806: 0x1cad, 0x807: 0x1cb2, 0x808: 0x187f, 0x809: 0x18a3, 0x80a: 0x18c7, 0x80b: 0x18eb, + 0x80c: 0x190f, 0x80d: 0x1918, 0x80e: 0x191e, 0x80f: 0x1924, 0x810: 0x192a, 0x811: 0x1b60, + 0x812: 0x1b64, 0x813: 0x1b68, 0x814: 0x1b6c, 0x815: 0x1b70, 0x816: 0x1b74, 0x817: 0x1b78, + 0x818: 0x1b7c, 0x819: 0x1b80, 0x81a: 0x1b84, 0x81b: 0x1b88, 0x81c: 0x1af4, 0x81d: 0x1af8, + 0x81e: 0x1afc, 0x81f: 0x1b00, 0x820: 0x1b04, 0x821: 0x1b08, 0x822: 0x1b0c, 0x823: 0x1b10, + 0x824: 0x1b14, 0x825: 0x1b18, 0x826: 0x1b1c, 0x827: 0x1b20, 0x828: 0x1b24, 0x829: 0x1b28, + 0x82a: 0x1b2c, 0x82b: 0x1b30, 0x82c: 0x1b34, 0x82d: 0x1b38, 0x82e: 0x1b3c, 0x82f: 0x1b40, + 0x830: 0x1b44, 0x831: 0x1b48, 0x832: 0x1b4c, 0x833: 0x1b50, 0x834: 0x1b54, 0x835: 0x1b58, + 0x836: 0x0043, 0x837: 0x0045, 0x838: 0x0047, 0x839: 0x0049, 0x83a: 0x004b, 0x83b: 0x004d, + 0x83c: 0x004f, 0x83d: 0x0051, 0x83e: 0x0053, 0x83f: 0x0055, + // Block 0x21, offset 0x840 + 0x840: 0x06bf, 0x841: 0x06e3, 0x842: 0x06ef, 0x843: 0x06ff, 0x844: 0x0707, 0x845: 0x0713, + 0x846: 0x071b, 0x847: 0x0723, 0x848: 0x072f, 0x849: 0x0783, 0x84a: 0x079b, 0x84b: 0x07ab, + 0x84c: 0x07bb, 0x84d: 0x07cb, 0x84e: 0x07db, 0x84f: 0x07fb, 0x850: 0x07ff, 0x851: 0x0803, + 0x852: 0x0837, 0x853: 0x085f, 0x854: 0x086f, 0x855: 0x0877, 0x856: 0x087b, 0x857: 0x0887, + 0x858: 0x08a3, 0x859: 0x08a7, 0x85a: 0x08bf, 0x85b: 0x08c3, 0x85c: 0x08cb, 0x85d: 0x08db, + 0x85e: 0x0977, 0x85f: 0x098b, 0x860: 0x09cb, 0x861: 0x09df, 0x862: 0x09e7, 0x863: 0x09eb, + 0x864: 0x09fb, 0x865: 0x0a17, 0x866: 0x0a43, 0x867: 0x0a4f, 0x868: 0x0a6f, 0x869: 0x0a7b, + 0x86a: 0x0a7f, 0x86b: 0x0a83, 0x86c: 0x0a9b, 0x86d: 0x0a9f, 0x86e: 0x0acb, 0x86f: 0x0ad7, + 0x870: 0x0adf, 0x871: 0x0ae7, 0x872: 0x0af7, 0x873: 0x0aff, 0x874: 0x0b07, 0x875: 0x0b33, + 0x876: 0x0b37, 0x877: 0x0b3f, 0x878: 0x0b43, 0x879: 0x0b4b, 0x87a: 0x0b53, 0x87b: 0x0b63, + 0x87c: 0x0b7f, 0x87d: 0x0bf7, 0x87e: 0x0c0b, 0x87f: 0x0c0f, + // Block 0x22, offset 0x880 + 0x880: 0x0c8f, 0x881: 0x0c93, 0x882: 0x0ca7, 0x883: 0x0cab, 0x884: 0x0cb3, 0x885: 0x0cbb, + 0x886: 0x0cc3, 0x887: 0x0ccf, 0x888: 0x0cf7, 0x889: 0x0d07, 0x88a: 0x0d1b, 0x88b: 0x0d8b, + 0x88c: 0x0d97, 0x88d: 0x0da7, 0x88e: 0x0db3, 0x88f: 0x0dbf, 0x890: 0x0dc7, 0x891: 0x0dcb, + 0x892: 0x0dcf, 0x893: 0x0dd3, 0x894: 0x0dd7, 0x895: 0x0e8f, 0x896: 
0x0ed7, 0x897: 0x0ee3, + 0x898: 0x0ee7, 0x899: 0x0eeb, 0x89a: 0x0eef, 0x89b: 0x0ef7, 0x89c: 0x0efb, 0x89d: 0x0f0f, + 0x89e: 0x0f2b, 0x89f: 0x0f33, 0x8a0: 0x0f73, 0x8a1: 0x0f77, 0x8a2: 0x0f7f, 0x8a3: 0x0f83, + 0x8a4: 0x0f8b, 0x8a5: 0x0f8f, 0x8a6: 0x0fb3, 0x8a7: 0x0fb7, 0x8a8: 0x0fd3, 0x8a9: 0x0fd7, + 0x8aa: 0x0fdb, 0x8ab: 0x0fdf, 0x8ac: 0x0ff3, 0x8ad: 0x1017, 0x8ae: 0x101b, 0x8af: 0x101f, + 0x8b0: 0x1043, 0x8b1: 0x1083, 0x8b2: 0x1087, 0x8b3: 0x10a7, 0x8b4: 0x10b7, 0x8b5: 0x10bf, + 0x8b6: 0x10df, 0x8b7: 0x1103, 0x8b8: 0x1147, 0x8b9: 0x114f, 0x8ba: 0x1163, 0x8bb: 0x116f, + 0x8bc: 0x1177, 0x8bd: 0x117f, 0x8be: 0x1183, 0x8bf: 0x1187, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x119f, 0x8c1: 0x11a3, 0x8c2: 0x11bf, 0x8c3: 0x11c7, 0x8c4: 0x11cf, 0x8c5: 0x11d3, + 0x8c6: 0x11df, 0x8c7: 0x11e7, 0x8c8: 0x11eb, 0x8c9: 0x11ef, 0x8ca: 0x11f7, 0x8cb: 0x11fb, + 0x8cc: 0x129b, 0x8cd: 0x12af, 0x8ce: 0x12e3, 0x8cf: 0x12e7, 0x8d0: 0x12ef, 0x8d1: 0x131b, + 0x8d2: 0x1323, 0x8d3: 0x132b, 0x8d4: 0x1333, 0x8d5: 0x136f, 0x8d6: 0x1373, 0x8d7: 0x137b, + 0x8d8: 0x137f, 0x8d9: 0x1383, 0x8da: 0x13af, 0x8db: 0x13b3, 0x8dc: 0x13bb, 0x8dd: 0x13cf, + 0x8de: 0x13d3, 0x8df: 0x13ef, 0x8e0: 0x13f7, 0x8e1: 0x13fb, 0x8e2: 0x141f, 0x8e3: 0x143f, + 0x8e4: 0x1453, 0x8e5: 0x1457, 0x8e6: 0x145f, 0x8e7: 0x148b, 0x8e8: 0x148f, 0x8e9: 0x149f, + 0x8ea: 0x14c3, 0x8eb: 0x14cf, 0x8ec: 0x14df, 0x8ed: 0x14f7, 0x8ee: 0x14ff, 0x8ef: 0x1503, + 0x8f0: 0x1507, 0x8f1: 0x150b, 0x8f2: 0x1517, 0x8f3: 0x151b, 0x8f4: 0x1523, 0x8f5: 0x153f, + 0x8f6: 0x1543, 0x8f7: 0x1547, 0x8f8: 0x155f, 0x8f9: 0x1563, 0x8fa: 0x156b, 0x8fb: 0x157f, + 0x8fc: 0x1583, 0x8fd: 0x1587, 0x8fe: 0x158f, 0x8ff: 0x1593, + // Block 0x24, offset 0x900 + 0x906: 0xa000, 0x90b: 0xa000, + 0x90c: 0x3f08, 0x90d: 0xa000, 0x90e: 0x3f10, 0x90f: 0xa000, 0x910: 0x3f18, 0x911: 0xa000, + 0x912: 0x3f20, 0x913: 0xa000, 0x914: 0x3f28, 0x915: 0xa000, 0x916: 0x3f30, 0x917: 0xa000, + 0x918: 0x3f38, 0x919: 0xa000, 0x91a: 0x3f40, 0x91b: 0xa000, 0x91c: 0x3f48, 0x91d: 0xa000, + 0x91e: 0x3f50, 0x91f: 0xa000, 0x920: 0x3f58, 0x921: 0xa000, 0x922: 0x3f60, + 0x924: 0xa000, 0x925: 0x3f68, 0x926: 0xa000, 0x927: 0x3f70, 0x928: 0xa000, 0x929: 0x3f78, + 0x92f: 0xa000, + 0x930: 0x3f80, 0x931: 0x3f88, 0x932: 0xa000, 0x933: 0x3f90, 0x934: 0x3f98, 0x935: 0xa000, + 0x936: 0x3fa0, 0x937: 0x3fa8, 0x938: 0xa000, 0x939: 0x3fb0, 0x93a: 0x3fb8, 0x93b: 0xa000, + 0x93c: 0x3fc0, 0x93d: 0x3fc8, + // Block 0x25, offset 0x940 + 0x954: 0x3f00, + 0x959: 0x9903, 0x95a: 0x9903, 0x95b: 0x42dc, 0x95c: 0x42e2, 0x95d: 0xa000, + 0x95e: 0x3fd0, 0x95f: 0x26b4, + 0x966: 0xa000, + 0x96b: 0xa000, 0x96c: 0x3fe0, 0x96d: 0xa000, 0x96e: 0x3fe8, 0x96f: 0xa000, + 0x970: 0x3ff0, 0x971: 0xa000, 0x972: 0x3ff8, 0x973: 0xa000, 0x974: 0x4000, 0x975: 0xa000, + 0x976: 0x4008, 0x977: 0xa000, 0x978: 0x4010, 0x979: 0xa000, 0x97a: 0x4018, 0x97b: 0xa000, + 0x97c: 0x4020, 0x97d: 0xa000, 0x97e: 0x4028, 0x97f: 0xa000, + // Block 0x26, offset 0x980 + 0x980: 0x4030, 0x981: 0xa000, 0x982: 0x4038, 0x984: 0xa000, 0x985: 0x4040, + 0x986: 0xa000, 0x987: 0x4048, 0x988: 0xa000, 0x989: 0x4050, + 0x98f: 0xa000, 0x990: 0x4058, 0x991: 0x4060, + 0x992: 0xa000, 0x993: 0x4068, 0x994: 0x4070, 0x995: 0xa000, 0x996: 0x4078, 0x997: 0x4080, + 0x998: 0xa000, 0x999: 0x4088, 0x99a: 0x4090, 0x99b: 0xa000, 0x99c: 0x4098, 0x99d: 0x40a0, + 0x9af: 0xa000, + 0x9b0: 0xa000, 0x9b1: 0xa000, 0x9b2: 0xa000, 0x9b4: 0x3fd8, + 0x9b7: 0x40a8, 0x9b8: 0x40b0, 0x9b9: 0x40b8, 0x9ba: 0x40c0, + 0x9bd: 0xa000, 0x9be: 0x40c8, 0x9bf: 0x26c9, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0367, 0x9c1: 0x032b, 0x9c2: 0x032f, 
0x9c3: 0x0333, 0x9c4: 0x037b, 0x9c5: 0x0337, + 0x9c6: 0x033b, 0x9c7: 0x033f, 0x9c8: 0x0343, 0x9c9: 0x0347, 0x9ca: 0x034b, 0x9cb: 0x034f, + 0x9cc: 0x0353, 0x9cd: 0x0357, 0x9ce: 0x035b, 0x9cf: 0x49bd, 0x9d0: 0x49c3, 0x9d1: 0x49c9, + 0x9d2: 0x49cf, 0x9d3: 0x49d5, 0x9d4: 0x49db, 0x9d5: 0x49e1, 0x9d6: 0x49e7, 0x9d7: 0x49ed, + 0x9d8: 0x49f3, 0x9d9: 0x49f9, 0x9da: 0x49ff, 0x9db: 0x4a05, 0x9dc: 0x4a0b, 0x9dd: 0x4a11, + 0x9de: 0x4a17, 0x9df: 0x4a1d, 0x9e0: 0x4a23, 0x9e1: 0x4a29, 0x9e2: 0x4a2f, 0x9e3: 0x4a35, + 0x9e4: 0x03c3, 0x9e5: 0x035f, 0x9e6: 0x0363, 0x9e7: 0x03e7, 0x9e8: 0x03eb, 0x9e9: 0x03ef, + 0x9ea: 0x03f3, 0x9eb: 0x03f7, 0x9ec: 0x03fb, 0x9ed: 0x03ff, 0x9ee: 0x036b, 0x9ef: 0x0403, + 0x9f0: 0x0407, 0x9f1: 0x036f, 0x9f2: 0x0373, 0x9f3: 0x0377, 0x9f4: 0x037f, 0x9f5: 0x0383, + 0x9f6: 0x0387, 0x9f7: 0x038b, 0x9f8: 0x038f, 0x9f9: 0x0393, 0x9fa: 0x0397, 0x9fb: 0x039b, + 0x9fc: 0x039f, 0x9fd: 0x03a3, 0x9fe: 0x03a7, 0x9ff: 0x03ab, + // Block 0x28, offset 0xa00 + 0xa00: 0x03af, 0xa01: 0x03b3, 0xa02: 0x040b, 0xa03: 0x040f, 0xa04: 0x03b7, 0xa05: 0x03bb, + 0xa06: 0x03bf, 0xa07: 0x03c7, 0xa08: 0x03cb, 0xa09: 0x03cf, 0xa0a: 0x03d3, 0xa0b: 0x03d7, + 0xa0c: 0x03db, 0xa0d: 0x03df, 0xa0e: 0x03e3, + 0xa12: 0x06bf, 0xa13: 0x071b, 0xa14: 0x06cb, 0xa15: 0x097b, 0xa16: 0x06cf, 0xa17: 0x06e7, + 0xa18: 0x06d3, 0xa19: 0x0f93, 0xa1a: 0x0707, 0xa1b: 0x06db, 0xa1c: 0x06c3, 0xa1d: 0x09ff, + 0xa1e: 0x098f, 0xa1f: 0x072f, + // Block 0x29, offset 0xa40 + 0xa40: 0x2054, 0xa41: 0x205a, 0xa42: 0x2060, 0xa43: 0x2066, 0xa44: 0x206c, 0xa45: 0x2072, + 0xa46: 0x2078, 0xa47: 0x207e, 0xa48: 0x2084, 0xa49: 0x208a, 0xa4a: 0x2090, 0xa4b: 0x2096, + 0xa4c: 0x209c, 0xa4d: 0x20a2, 0xa4e: 0x2726, 0xa4f: 0x272f, 0xa50: 0x2738, 0xa51: 0x2741, + 0xa52: 0x274a, 0xa53: 0x2753, 0xa54: 0x275c, 0xa55: 0x2765, 0xa56: 0x276e, 0xa57: 0x2780, + 0xa58: 0x2789, 0xa59: 0x2792, 0xa5a: 0x279b, 0xa5b: 0x27a4, 0xa5c: 0x2777, 0xa5d: 0x2bac, + 0xa5e: 0x2aed, 0xa60: 0x20a8, 0xa61: 0x20c0, 0xa62: 0x20b4, 0xa63: 0x2108, + 0xa64: 0x20c6, 0xa65: 0x20e4, 0xa66: 0x20ae, 0xa67: 0x20de, 0xa68: 0x20ba, 0xa69: 0x20f0, + 0xa6a: 0x2120, 0xa6b: 0x213e, 0xa6c: 0x2138, 0xa6d: 0x212c, 0xa6e: 0x217a, 0xa6f: 0x210e, + 0xa70: 0x211a, 0xa71: 0x2132, 0xa72: 0x2126, 0xa73: 0x2150, 0xa74: 0x20fc, 0xa75: 0x2144, + 0xa76: 0x216e, 0xa77: 0x2156, 0xa78: 0x20ea, 0xa79: 0x20cc, 0xa7a: 0x2102, 0xa7b: 0x2114, + 0xa7c: 0x214a, 0xa7d: 0x20d2, 0xa7e: 0x2174, 0xa7f: 0x20f6, + // Block 0x2a, offset 0xa80 + 0xa80: 0x215c, 0xa81: 0x20d8, 0xa82: 0x2162, 0xa83: 0x2168, 0xa84: 0x092f, 0xa85: 0x0b03, + 0xa86: 0x0ca7, 0xa87: 0x10c7, + 0xa90: 0x1bc4, 0xa91: 0x18a9, + 0xa92: 0x18ac, 0xa93: 0x18af, 0xa94: 0x18b2, 0xa95: 0x18b5, 0xa96: 0x18b8, 0xa97: 0x18bb, + 0xa98: 0x18be, 0xa99: 0x18c1, 0xa9a: 0x18ca, 0xa9b: 0x18cd, 0xa9c: 0x18d0, 0xa9d: 0x18d3, + 0xa9e: 0x18d6, 0xa9f: 0x18d9, 0xaa0: 0x0313, 0xaa1: 0x031b, 0xaa2: 0x031f, 0xaa3: 0x0327, + 0xaa4: 0x032b, 0xaa5: 0x032f, 0xaa6: 0x0337, 0xaa7: 0x033f, 0xaa8: 0x0343, 0xaa9: 0x034b, + 0xaaa: 0x034f, 0xaab: 0x0353, 0xaac: 0x0357, 0xaad: 0x035b, 0xaae: 0x2e18, 0xaaf: 0x2e20, + 0xab0: 0x2e28, 0xab1: 0x2e30, 0xab2: 0x2e38, 0xab3: 0x2e40, 0xab4: 0x2e48, 0xab5: 0x2e50, + 0xab6: 0x2e60, 0xab7: 0x2e68, 0xab8: 0x2e70, 0xab9: 0x2e78, 0xaba: 0x2e80, 0xabb: 0x2e88, + 0xabc: 0x2ed3, 0xabd: 0x2e9b, 0xabe: 0x2e58, + // Block 0x2b, offset 0xac0 + 0xac0: 0x06bf, 0xac1: 0x071b, 0xac2: 0x06cb, 0xac3: 0x097b, 0xac4: 0x071f, 0xac5: 0x07af, + 0xac6: 0x06c7, 0xac7: 0x07ab, 0xac8: 0x070b, 0xac9: 0x0887, 0xaca: 0x0d07, 0xacb: 0x0e8f, + 0xacc: 0x0dd7, 0xacd: 0x0d1b, 0xace: 0x145f, 0xacf: 
0x098b, 0xad0: 0x0ccf, 0xad1: 0x0d4b, + 0xad2: 0x0d0b, 0xad3: 0x104b, 0xad4: 0x08fb, 0xad5: 0x0f03, 0xad6: 0x1387, 0xad7: 0x105f, + 0xad8: 0x0843, 0xad9: 0x108f, 0xada: 0x0f9b, 0xadb: 0x0a17, 0xadc: 0x140f, 0xadd: 0x077f, + 0xade: 0x08ab, 0xadf: 0x0df7, 0xae0: 0x1527, 0xae1: 0x0743, 0xae2: 0x07d3, 0xae3: 0x0d9b, + 0xae4: 0x06cf, 0xae5: 0x06e7, 0xae6: 0x06d3, 0xae7: 0x0adb, 0xae8: 0x08ef, 0xae9: 0x087f, + 0xaea: 0x0a57, 0xaeb: 0x0a4b, 0xaec: 0x0feb, 0xaed: 0x073f, 0xaee: 0x139b, 0xaef: 0x089b, + 0xaf0: 0x09f3, 0xaf1: 0x18dc, 0xaf2: 0x18df, 0xaf3: 0x18e2, 0xaf4: 0x18e5, 0xaf5: 0x18ee, + 0xaf6: 0x18f1, 0xaf7: 0x18f4, 0xaf8: 0x18f7, 0xaf9: 0x18fa, 0xafa: 0x18fd, 0xafb: 0x1900, + 0xafc: 0x1903, 0xafd: 0x1906, 0xafe: 0x1909, 0xaff: 0x1912, + // Block 0x2c, offset 0xb00 + 0xb00: 0x1cc6, 0xb01: 0x1cd5, 0xb02: 0x1ce4, 0xb03: 0x1cf3, 0xb04: 0x1d02, 0xb05: 0x1d11, + 0xb06: 0x1d20, 0xb07: 0x1d2f, 0xb08: 0x1d3e, 0xb09: 0x218c, 0xb0a: 0x219e, 0xb0b: 0x21b0, + 0xb0c: 0x1954, 0xb0d: 0x1c04, 0xb0e: 0x19d2, 0xb0f: 0x1ba8, 0xb10: 0x04cb, 0xb11: 0x04d3, + 0xb12: 0x04db, 0xb13: 0x04e3, 0xb14: 0x04eb, 0xb15: 0x04ef, 0xb16: 0x04f3, 0xb17: 0x04f7, + 0xb18: 0x04fb, 0xb19: 0x04ff, 0xb1a: 0x0503, 0xb1b: 0x0507, 0xb1c: 0x050b, 0xb1d: 0x050f, + 0xb1e: 0x0513, 0xb1f: 0x0517, 0xb20: 0x051b, 0xb21: 0x0523, 0xb22: 0x0527, 0xb23: 0x052b, + 0xb24: 0x052f, 0xb25: 0x0533, 0xb26: 0x0537, 0xb27: 0x053b, 0xb28: 0x053f, 0xb29: 0x0543, + 0xb2a: 0x0547, 0xb2b: 0x054b, 0xb2c: 0x054f, 0xb2d: 0x0553, 0xb2e: 0x0557, 0xb2f: 0x055b, + 0xb30: 0x055f, 0xb31: 0x0563, 0xb32: 0x0567, 0xb33: 0x056f, 0xb34: 0x0577, 0xb35: 0x057f, + 0xb36: 0x0583, 0xb37: 0x0587, 0xb38: 0x058b, 0xb39: 0x058f, 0xb3a: 0x0593, 0xb3b: 0x0597, + 0xb3c: 0x059b, 0xb3d: 0x059f, 0xb3e: 0x05a3, + // Block 0x2d, offset 0xb40 + 0xb40: 0x2b0c, 0xb41: 0x29a8, 0xb42: 0x2b1c, 0xb43: 0x2880, 0xb44: 0x2ee4, 0xb45: 0x288a, + 0xb46: 0x2894, 0xb47: 0x2f28, 0xb48: 0x29b5, 0xb49: 0x289e, 0xb4a: 0x28a8, 0xb4b: 0x28b2, + 0xb4c: 0x29dc, 0xb4d: 0x29e9, 0xb4e: 0x29c2, 0xb4f: 0x29cf, 0xb50: 0x2ea9, 0xb51: 0x29f6, + 0xb52: 0x2a03, 0xb53: 0x2bbe, 0xb54: 0x26bb, 0xb55: 0x2bd1, 0xb56: 0x2be4, 0xb57: 0x2b2c, + 0xb58: 0x2a10, 0xb59: 0x2bf7, 0xb5a: 0x2c0a, 0xb5b: 0x2a1d, 0xb5c: 0x28bc, 0xb5d: 0x28c6, + 0xb5e: 0x2eb7, 0xb5f: 0x2a2a, 0xb60: 0x2b3c, 0xb61: 0x2ef5, 0xb62: 0x28d0, 0xb63: 0x28da, + 0xb64: 0x2a37, 0xb65: 0x28e4, 0xb66: 0x28ee, 0xb67: 0x26d0, 0xb68: 0x26d7, 0xb69: 0x28f8, + 0xb6a: 0x2902, 0xb6b: 0x2c1d, 0xb6c: 0x2a44, 0xb6d: 0x2b4c, 0xb6e: 0x2c30, 0xb6f: 0x2a51, + 0xb70: 0x2916, 0xb71: 0x290c, 0xb72: 0x2f3c, 0xb73: 0x2a5e, 0xb74: 0x2c43, 0xb75: 0x2920, + 0xb76: 0x2b5c, 0xb77: 0x292a, 0xb78: 0x2a78, 0xb79: 0x2934, 0xb7a: 0x2a85, 0xb7b: 0x2f06, + 0xb7c: 0x2a6b, 0xb7d: 0x2b6c, 0xb7e: 0x2a92, 0xb7f: 0x26de, + // Block 0x2e, offset 0xb80 + 0xb80: 0x2f17, 0xb81: 0x293e, 0xb82: 0x2948, 0xb83: 0x2a9f, 0xb84: 0x2952, 0xb85: 0x295c, + 0xb86: 0x2966, 0xb87: 0x2b7c, 0xb88: 0x2aac, 0xb89: 0x26e5, 0xb8a: 0x2c56, 0xb8b: 0x2e90, + 0xb8c: 0x2b8c, 0xb8d: 0x2ab9, 0xb8e: 0x2ec5, 0xb8f: 0x2970, 0xb90: 0x297a, 0xb91: 0x2ac6, + 0xb92: 0x26ec, 0xb93: 0x2ad3, 0xb94: 0x2b9c, 0xb95: 0x26f3, 0xb96: 0x2c69, 0xb97: 0x2984, + 0xb98: 0x1cb7, 0xb99: 0x1ccb, 0xb9a: 0x1cda, 0xb9b: 0x1ce9, 0xb9c: 0x1cf8, 0xb9d: 0x1d07, + 0xb9e: 0x1d16, 0xb9f: 0x1d25, 0xba0: 0x1d34, 0xba1: 0x1d43, 0xba2: 0x2192, 0xba3: 0x21a4, + 0xba4: 0x21b6, 0xba5: 0x21c2, 0xba6: 0x21ce, 0xba7: 0x21da, 0xba8: 0x21e6, 0xba9: 0x21f2, + 0xbaa: 0x21fe, 0xbab: 0x220a, 0xbac: 0x2246, 0xbad: 0x2252, 0xbae: 0x225e, 0xbaf: 0x226a, + 0xbb0: 0x2276, 0xbb1: 0x1c14, 
0xbb2: 0x19c6, 0xbb3: 0x1936, 0xbb4: 0x1be4, 0xbb5: 0x1a47, + 0xbb6: 0x1a56, 0xbb7: 0x19cc, 0xbb8: 0x1bfc, 0xbb9: 0x1c00, 0xbba: 0x1960, 0xbbb: 0x2701, + 0xbbc: 0x270f, 0xbbd: 0x26fa, 0xbbe: 0x2708, 0xbbf: 0x2ae0, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1a4a, 0xbc1: 0x1a32, 0xbc2: 0x1c60, 0xbc3: 0x1a1a, 0xbc4: 0x19f3, 0xbc5: 0x1969, + 0xbc6: 0x1978, 0xbc7: 0x1948, 0xbc8: 0x1bf0, 0xbc9: 0x1d52, 0xbca: 0x1a4d, 0xbcb: 0x1a35, + 0xbcc: 0x1c64, 0xbcd: 0x1c70, 0xbce: 0x1a26, 0xbcf: 0x19fc, 0xbd0: 0x1957, 0xbd1: 0x1c1c, + 0xbd2: 0x1bb0, 0xbd3: 0x1b9c, 0xbd4: 0x1bcc, 0xbd5: 0x1c74, 0xbd6: 0x1a29, 0xbd7: 0x19c9, + 0xbd8: 0x19ff, 0xbd9: 0x19de, 0xbda: 0x1a41, 0xbdb: 0x1c78, 0xbdc: 0x1a2c, 0xbdd: 0x19c0, + 0xbde: 0x1a02, 0xbdf: 0x1c3c, 0xbe0: 0x1bf4, 0xbe1: 0x1a14, 0xbe2: 0x1c24, 0xbe3: 0x1c40, + 0xbe4: 0x1bf8, 0xbe5: 0x1a17, 0xbe6: 0x1c28, 0xbe7: 0x22e8, 0xbe8: 0x22fc, 0xbe9: 0x1996, + 0xbea: 0x1c20, 0xbeb: 0x1bb4, 0xbec: 0x1ba0, 0xbed: 0x1c48, 0xbee: 0x2716, 0xbef: 0x27ad, + 0xbf0: 0x1a59, 0xbf1: 0x1a44, 0xbf2: 0x1c7c, 0xbf3: 0x1a2f, 0xbf4: 0x1a50, 0xbf5: 0x1a38, + 0xbf6: 0x1c68, 0xbf7: 0x1a1d, 0xbf8: 0x19f6, 0xbf9: 0x1981, 0xbfa: 0x1a53, 0xbfb: 0x1a3b, + 0xbfc: 0x1c6c, 0xbfd: 0x1a20, 0xbfe: 0x19f9, 0xbff: 0x1984, + // Block 0x30, offset 0xc00 + 0xc00: 0x1c2c, 0xc01: 0x1bb8, 0xc02: 0x1d4d, 0xc03: 0x1939, 0xc04: 0x19ba, 0xc05: 0x19bd, + 0xc06: 0x22f5, 0xc07: 0x1b94, 0xc08: 0x19c3, 0xc09: 0x194b, 0xc0a: 0x19e1, 0xc0b: 0x194e, + 0xc0c: 0x19ea, 0xc0d: 0x196c, 0xc0e: 0x196f, 0xc0f: 0x1a05, 0xc10: 0x1a0b, 0xc11: 0x1a0e, + 0xc12: 0x1c30, 0xc13: 0x1a11, 0xc14: 0x1a23, 0xc15: 0x1c38, 0xc16: 0x1c44, 0xc17: 0x1990, + 0xc18: 0x1d57, 0xc19: 0x1bbc, 0xc1a: 0x1993, 0xc1b: 0x1a5c, 0xc1c: 0x19a5, 0xc1d: 0x19b4, + 0xc1e: 0x22e2, 0xc1f: 0x22dc, 0xc20: 0x1cc1, 0xc21: 0x1cd0, 0xc22: 0x1cdf, 0xc23: 0x1cee, + 0xc24: 0x1cfd, 0xc25: 0x1d0c, 0xc26: 0x1d1b, 0xc27: 0x1d2a, 0xc28: 0x1d39, 0xc29: 0x2186, + 0xc2a: 0x2198, 0xc2b: 0x21aa, 0xc2c: 0x21bc, 0xc2d: 0x21c8, 0xc2e: 0x21d4, 0xc2f: 0x21e0, + 0xc30: 0x21ec, 0xc31: 0x21f8, 0xc32: 0x2204, 0xc33: 0x2240, 0xc34: 0x224c, 0xc35: 0x2258, + 0xc36: 0x2264, 0xc37: 0x2270, 0xc38: 0x227c, 0xc39: 0x2282, 0xc3a: 0x2288, 0xc3b: 0x228e, + 0xc3c: 0x2294, 0xc3d: 0x22a6, 0xc3e: 0x22ac, 0xc3f: 0x1c10, + // Block 0x31, offset 0xc40 + 0xc40: 0x1377, 0xc41: 0x0cfb, 0xc42: 0x13d3, 0xc43: 0x139f, 0xc44: 0x0e57, 0xc45: 0x06eb, + 0xc46: 0x08df, 0xc47: 0x162b, 0xc48: 0x162b, 0xc49: 0x0a0b, 0xc4a: 0x145f, 0xc4b: 0x0943, + 0xc4c: 0x0a07, 0xc4d: 0x0bef, 0xc4e: 0x0fcf, 0xc4f: 0x115f, 0xc50: 0x1297, 0xc51: 0x12d3, + 0xc52: 0x1307, 0xc53: 0x141b, 0xc54: 0x0d73, 0xc55: 0x0dff, 0xc56: 0x0eab, 0xc57: 0x0f43, + 0xc58: 0x125f, 0xc59: 0x1447, 0xc5a: 0x1573, 0xc5b: 0x070f, 0xc5c: 0x08b3, 0xc5d: 0x0d87, + 0xc5e: 0x0ecf, 0xc5f: 0x1293, 0xc60: 0x15c3, 0xc61: 0x0ab3, 0xc62: 0x0e77, 0xc63: 0x1283, + 0xc64: 0x1317, 0xc65: 0x0c23, 0xc66: 0x11bb, 0xc67: 0x12df, 0xc68: 0x0b1f, 0xc69: 0x0d0f, + 0xc6a: 0x0e17, 0xc6b: 0x0f1b, 0xc6c: 0x1427, 0xc6d: 0x074f, 0xc6e: 0x07e7, 0xc6f: 0x0853, + 0xc70: 0x0c8b, 0xc71: 0x0d7f, 0xc72: 0x0ecb, 0xc73: 0x0fef, 0xc74: 0x1177, 0xc75: 0x128b, + 0xc76: 0x12a3, 0xc77: 0x13c7, 0xc78: 0x14ef, 0xc79: 0x15a3, 0xc7a: 0x15bf, 0xc7b: 0x102b, + 0xc7c: 0x106b, 0xc7d: 0x1123, 0xc7e: 0x1243, 0xc7f: 0x147b, + // Block 0x32, offset 0xc80 + 0xc80: 0x15cb, 0xc81: 0x134b, 0xc82: 0x09c7, 0xc83: 0x0b3b, 0xc84: 0x10db, 0xc85: 0x119b, + 0xc86: 0x0eff, 0xc87: 0x1033, 0xc88: 0x1397, 0xc89: 0x14e7, 0xc8a: 0x09c3, 0xc8b: 0x0a8f, + 0xc8c: 0x0d77, 0xc8d: 0x0e2b, 0xc8e: 0x0e5f, 0xc8f: 0x1113, 0xc90: 0x113b, 0xc91: 
0x14a7, + 0xc92: 0x084f, 0xc93: 0x11a7, 0xc94: 0x07f3, 0xc95: 0x07ef, 0xc96: 0x1097, 0xc97: 0x1127, + 0xc98: 0x125b, 0xc99: 0x14af, 0xc9a: 0x1367, 0xc9b: 0x0c27, 0xc9c: 0x0d73, 0xc9d: 0x1357, + 0xc9e: 0x06f7, 0xc9f: 0x0a63, 0xca0: 0x0b93, 0xca1: 0x0f2f, 0xca2: 0x0faf, 0xca3: 0x0873, + 0xca4: 0x103b, 0xca5: 0x075f, 0xca6: 0x0b77, 0xca7: 0x06d7, 0xca8: 0x0deb, 0xca9: 0x0ca3, + 0xcaa: 0x110f, 0xcab: 0x08c7, 0xcac: 0x09b3, 0xcad: 0x0ffb, 0xcae: 0x1263, 0xcaf: 0x133b, + 0xcb0: 0x0db7, 0xcb1: 0x13f7, 0xcb2: 0x0de3, 0xcb3: 0x0c37, 0xcb4: 0x121b, 0xcb5: 0x0c57, + 0xcb6: 0x0fab, 0xcb7: 0x072b, 0xcb8: 0x07a7, 0xcb9: 0x07eb, 0xcba: 0x0d53, 0xcbb: 0x10fb, + 0xcbc: 0x11f3, 0xcbd: 0x1347, 0xcbe: 0x145b, 0xcbf: 0x085b, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x090f, 0xcc1: 0x0a17, 0xcc2: 0x0b2f, 0xcc3: 0x0cbf, 0xcc4: 0x0e7b, 0xcc5: 0x103f, + 0xcc6: 0x1497, 0xcc7: 0x157b, 0xcc8: 0x15cf, 0xcc9: 0x15e7, 0xcca: 0x0837, 0xccb: 0x0cf3, + 0xccc: 0x0da3, 0xccd: 0x13eb, 0xcce: 0x0afb, 0xccf: 0x0bd7, 0xcd0: 0x0bf3, 0xcd1: 0x0c83, + 0xcd2: 0x0e6b, 0xcd3: 0x0eb7, 0xcd4: 0x0f67, 0xcd5: 0x108b, 0xcd6: 0x112f, 0xcd7: 0x1193, + 0xcd8: 0x13db, 0xcd9: 0x126b, 0xcda: 0x1403, 0xcdb: 0x147f, 0xcdc: 0x080f, 0xcdd: 0x083b, + 0xcde: 0x0923, 0xcdf: 0x0ea7, 0xce0: 0x12f3, 0xce1: 0x133b, 0xce2: 0x0b1b, 0xce3: 0x0b8b, + 0xce4: 0x0c4f, 0xce5: 0x0daf, 0xce6: 0x10d7, 0xce7: 0x0f23, 0xce8: 0x073b, 0xce9: 0x097f, + 0xcea: 0x0a63, 0xceb: 0x0ac7, 0xcec: 0x0b97, 0xced: 0x0f3f, 0xcee: 0x0f5b, 0xcef: 0x116b, + 0xcf0: 0x118b, 0xcf1: 0x1463, 0xcf2: 0x14e3, 0xcf3: 0x14f3, 0xcf4: 0x152f, 0xcf5: 0x0753, + 0xcf6: 0x107f, 0xcf7: 0x144f, 0xcf8: 0x14cb, 0xcf9: 0x0baf, 0xcfa: 0x0717, 0xcfb: 0x0777, + 0xcfc: 0x0a67, 0xcfd: 0x0a87, 0xcfe: 0x0caf, 0xcff: 0x0d73, + // Block 0x34, offset 0xd00 + 0xd00: 0x0ec3, 0xd01: 0x0fcb, 0xd02: 0x1277, 0xd03: 0x1417, 0xd04: 0x1623, 0xd05: 0x0ce3, + 0xd06: 0x14a3, 0xd07: 0x0833, 0xd08: 0x0d2f, 0xd09: 0x0d3b, 0xd0a: 0x0e0f, 0xd0b: 0x0e47, + 0xd0c: 0x0f4b, 0xd0d: 0x0fa7, 0xd0e: 0x1027, 0xd0f: 0x110b, 0xd10: 0x153b, 0xd11: 0x07af, + 0xd12: 0x0c03, 0xd13: 0x14b3, 0xd14: 0x0767, 0xd15: 0x0aab, 0xd16: 0x0e2f, 0xd17: 0x13df, + 0xd18: 0x0b67, 0xd19: 0x0bb7, 0xd1a: 0x0d43, 0xd1b: 0x0f2f, 0xd1c: 0x14bb, 0xd1d: 0x0817, + 0xd1e: 0x08ff, 0xd1f: 0x0a97, 0xd20: 0x0cd3, 0xd21: 0x0d1f, 0xd22: 0x0d5f, 0xd23: 0x0df3, + 0xd24: 0x0f47, 0xd25: 0x0fbb, 0xd26: 0x1157, 0xd27: 0x12f7, 0xd28: 0x1303, 0xd29: 0x1457, + 0xd2a: 0x14d7, 0xd2b: 0x0883, 0xd2c: 0x0e4b, 0xd2d: 0x0903, 0xd2e: 0x0ec7, 0xd2f: 0x0f6b, + 0xd30: 0x1287, 0xd31: 0x14bf, 0xd32: 0x15ab, 0xd33: 0x15d3, 0xd34: 0x0d37, 0xd35: 0x0e27, + 0xd36: 0x11c3, 0xd37: 0x10b7, 0xd38: 0x10c3, 0xd39: 0x10e7, 0xd3a: 0x0f17, 0xd3b: 0x0e9f, + 0xd3c: 0x1363, 0xd3d: 0x0733, 0xd3e: 0x122b, 0xd3f: 0x081b, + // Block 0x35, offset 0xd40 + 0xd40: 0x080b, 0xd41: 0x0b0b, 0xd42: 0x0c2b, 0xd43: 0x10f3, 0xd44: 0x0a53, 0xd45: 0x0e03, + 0xd46: 0x0cef, 0xd47: 0x13e7, 0xd48: 0x12e7, 0xd49: 0x14ab, 0xd4a: 0x1323, 0xd4b: 0x0b27, + 0xd4c: 0x0787, 0xd4d: 0x095b, 0xd50: 0x09af, + 0xd52: 0x0cdf, 0xd55: 0x07f7, 0xd56: 0x0f1f, 0xd57: 0x0fe3, + 0xd58: 0x1047, 0xd59: 0x1063, 0xd5a: 0x1067, 0xd5b: 0x107b, 0xd5c: 0x14fb, 0xd5d: 0x10eb, + 0xd5e: 0x116f, 0xd60: 0x128f, 0xd62: 0x1353, + 0xd65: 0x1407, 0xd66: 0x1433, + 0xd6a: 0x154f, 0xd6b: 0x1553, 0xd6c: 0x1557, 0xd6d: 0x15bb, 0xd6e: 0x142b, 0xd6f: 0x14c7, + 0xd70: 0x0757, 0xd71: 0x077b, 0xd72: 0x078f, 0xd73: 0x084b, 0xd74: 0x0857, 0xd75: 0x0897, + 0xd76: 0x094b, 0xd77: 0x0967, 0xd78: 0x096f, 0xd79: 0x09ab, 0xd7a: 0x09b7, 0xd7b: 0x0a93, + 0xd7c: 0x0a9b, 0xd7d: 0x0ba3, 0xd7e: 0x0bcb, 
0xd7f: 0x0bd3, + // Block 0x36, offset 0xd80 + 0xd80: 0x0beb, 0xd81: 0x0c97, 0xd82: 0x0cc7, 0xd83: 0x0ce7, 0xd84: 0x0d57, 0xd85: 0x0e1b, + 0xd86: 0x0e37, 0xd87: 0x0e67, 0xd88: 0x0ebb, 0xd89: 0x0edb, 0xd8a: 0x0f4f, 0xd8b: 0x102f, + 0xd8c: 0x104b, 0xd8d: 0x1053, 0xd8e: 0x104f, 0xd8f: 0x1057, 0xd90: 0x105b, 0xd91: 0x105f, + 0xd92: 0x1073, 0xd93: 0x1077, 0xd94: 0x109b, 0xd95: 0x10af, 0xd96: 0x10cb, 0xd97: 0x112f, + 0xd98: 0x1137, 0xd99: 0x113f, 0xd9a: 0x1153, 0xd9b: 0x117b, 0xd9c: 0x11cb, 0xd9d: 0x11ff, + 0xd9e: 0x11ff, 0xd9f: 0x1267, 0xda0: 0x130f, 0xda1: 0x1327, 0xda2: 0x135b, 0xda3: 0x135f, + 0xda4: 0x13a3, 0xda5: 0x13a7, 0xda6: 0x13ff, 0xda7: 0x1407, 0xda8: 0x14db, 0xda9: 0x151f, + 0xdaa: 0x1537, 0xdab: 0x0b9b, 0xdac: 0x171e, 0xdad: 0x11e3, + 0xdb0: 0x06df, 0xdb1: 0x07e3, 0xdb2: 0x07a3, 0xdb3: 0x074b, 0xdb4: 0x078b, 0xdb5: 0x07b7, + 0xdb6: 0x0847, 0xdb7: 0x0863, 0xdb8: 0x094b, 0xdb9: 0x0937, 0xdba: 0x0947, 0xdbb: 0x0963, + 0xdbc: 0x09af, 0xdbd: 0x09bf, 0xdbe: 0x0a03, 0xdbf: 0x0a0f, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0a2b, 0xdc1: 0x0a3b, 0xdc2: 0x0b23, 0xdc3: 0x0b2b, 0xdc4: 0x0b5b, 0xdc5: 0x0b7b, + 0xdc6: 0x0bab, 0xdc7: 0x0bc3, 0xdc8: 0x0bb3, 0xdc9: 0x0bd3, 0xdca: 0x0bc7, 0xdcb: 0x0beb, + 0xdcc: 0x0c07, 0xdcd: 0x0c5f, 0xdce: 0x0c6b, 0xdcf: 0x0c73, 0xdd0: 0x0c9b, 0xdd1: 0x0cdf, + 0xdd2: 0x0d0f, 0xdd3: 0x0d13, 0xdd4: 0x0d27, 0xdd5: 0x0da7, 0xdd6: 0x0db7, 0xdd7: 0x0e0f, + 0xdd8: 0x0e5b, 0xdd9: 0x0e53, 0xdda: 0x0e67, 0xddb: 0x0e83, 0xddc: 0x0ebb, 0xddd: 0x1013, + 0xdde: 0x0edf, 0xddf: 0x0f13, 0xde0: 0x0f1f, 0xde1: 0x0f5f, 0xde2: 0x0f7b, 0xde3: 0x0f9f, + 0xde4: 0x0fc3, 0xde5: 0x0fc7, 0xde6: 0x0fe3, 0xde7: 0x0fe7, 0xde8: 0x0ff7, 0xde9: 0x100b, + 0xdea: 0x1007, 0xdeb: 0x1037, 0xdec: 0x10b3, 0xded: 0x10cb, 0xdee: 0x10e3, 0xdef: 0x111b, + 0xdf0: 0x112f, 0xdf1: 0x114b, 0xdf2: 0x117b, 0xdf3: 0x122f, 0xdf4: 0x1257, 0xdf5: 0x12cb, + 0xdf6: 0x1313, 0xdf7: 0x131f, 0xdf8: 0x1327, 0xdf9: 0x133f, 0xdfa: 0x1353, 0xdfb: 0x1343, + 0xdfc: 0x135b, 0xdfd: 0x1357, 0xdfe: 0x134f, 0xdff: 0x135f, + // Block 0x38, offset 0xe00 + 0xe00: 0x136b, 0xe01: 0x13a7, 0xe02: 0x13e3, 0xe03: 0x1413, 0xe04: 0x144b, 0xe05: 0x146b, + 0xe06: 0x14b7, 0xe07: 0x14db, 0xe08: 0x14fb, 0xe09: 0x150f, 0xe0a: 0x151f, 0xe0b: 0x152b, + 0xe0c: 0x1537, 0xe0d: 0x158b, 0xe0e: 0x162b, 0xe0f: 0x16b5, 0xe10: 0x16b0, 0xe11: 0x16e2, + 0xe12: 0x0607, 0xe13: 0x062f, 0xe14: 0x0633, 0xe15: 0x1764, 0xe16: 0x1791, 0xe17: 0x1809, + 0xe18: 0x1617, 0xe19: 0x1627, + // Block 0x39, offset 0xe40 + 0xe40: 0x19d5, 0xe41: 0x19d8, 0xe42: 0x19db, 0xe43: 0x1c08, 0xe44: 0x1c0c, 0xe45: 0x1a5f, + 0xe46: 0x1a5f, + 0xe53: 0x1d75, 0xe54: 0x1d66, 0xe55: 0x1d6b, 0xe56: 0x1d7a, 0xe57: 0x1d70, + 0xe5d: 0x4390, + 0xe5e: 0x8115, 0xe5f: 0x4402, 0xe60: 0x022d, 0xe61: 0x0215, 0xe62: 0x021e, 0xe63: 0x0221, + 0xe64: 0x0224, 0xe65: 0x0227, 0xe66: 0x022a, 0xe67: 0x0230, 0xe68: 0x0233, 0xe69: 0x0017, + 0xe6a: 0x43f0, 0xe6b: 0x43f6, 0xe6c: 0x44f4, 0xe6d: 0x44fc, 0xe6e: 0x4348, 0xe6f: 0x434e, + 0xe70: 0x4354, 0xe71: 0x435a, 0xe72: 0x4366, 0xe73: 0x436c, 0xe74: 0x4372, 0xe75: 0x437e, + 0xe76: 0x4384, 0xe78: 0x438a, 0xe79: 0x4396, 0xe7a: 0x439c, 0xe7b: 0x43a2, + 0xe7c: 0x43ae, 0xe7e: 0x43b4, + // Block 0x3a, offset 0xe80 + 0xe80: 0x43ba, 0xe81: 0x43c0, 0xe83: 0x43c6, 0xe84: 0x43cc, + 0xe86: 0x43d8, 0xe87: 0x43de, 0xe88: 0x43e4, 0xe89: 0x43ea, 0xe8a: 0x43fc, 0xe8b: 0x4378, + 0xe8c: 0x4360, 0xe8d: 0x43a8, 0xe8e: 0x43d2, 0xe8f: 0x1d7f, 0xe90: 0x0299, 0xe91: 0x0299, + 0xe92: 0x02a2, 0xe93: 0x02a2, 0xe94: 0x02a2, 0xe95: 0x02a2, 0xe96: 0x02a5, 0xe97: 0x02a5, + 0xe98: 0x02a5, 0xe99: 0x02a5, 
0xe9a: 0x02ab, 0xe9b: 0x02ab, 0xe9c: 0x02ab, 0xe9d: 0x02ab, + 0xe9e: 0x029f, 0xe9f: 0x029f, 0xea0: 0x029f, 0xea1: 0x029f, 0xea2: 0x02a8, 0xea3: 0x02a8, + 0xea4: 0x02a8, 0xea5: 0x02a8, 0xea6: 0x029c, 0xea7: 0x029c, 0xea8: 0x029c, 0xea9: 0x029c, + 0xeaa: 0x02cf, 0xeab: 0x02cf, 0xeac: 0x02cf, 0xead: 0x02cf, 0xeae: 0x02d2, 0xeaf: 0x02d2, + 0xeb0: 0x02d2, 0xeb1: 0x02d2, 0xeb2: 0x02b1, 0xeb3: 0x02b1, 0xeb4: 0x02b1, 0xeb5: 0x02b1, + 0xeb6: 0x02ae, 0xeb7: 0x02ae, 0xeb8: 0x02ae, 0xeb9: 0x02ae, 0xeba: 0x02b4, 0xebb: 0x02b4, + 0xebc: 0x02b4, 0xebd: 0x02b4, 0xebe: 0x02b7, 0xebf: 0x02b7, + // Block 0x3b, offset 0xec0 + 0xec0: 0x02b7, 0xec1: 0x02b7, 0xec2: 0x02c0, 0xec3: 0x02c0, 0xec4: 0x02bd, 0xec5: 0x02bd, + 0xec6: 0x02c3, 0xec7: 0x02c3, 0xec8: 0x02ba, 0xec9: 0x02ba, 0xeca: 0x02c9, 0xecb: 0x02c9, + 0xecc: 0x02c6, 0xecd: 0x02c6, 0xece: 0x02d5, 0xecf: 0x02d5, 0xed0: 0x02d5, 0xed1: 0x02d5, + 0xed2: 0x02db, 0xed3: 0x02db, 0xed4: 0x02db, 0xed5: 0x02db, 0xed6: 0x02e1, 0xed7: 0x02e1, + 0xed8: 0x02e1, 0xed9: 0x02e1, 0xeda: 0x02de, 0xedb: 0x02de, 0xedc: 0x02de, 0xedd: 0x02de, + 0xede: 0x02e4, 0xedf: 0x02e4, 0xee0: 0x02e7, 0xee1: 0x02e7, 0xee2: 0x02e7, 0xee3: 0x02e7, + 0xee4: 0x446e, 0xee5: 0x446e, 0xee6: 0x02ed, 0xee7: 0x02ed, 0xee8: 0x02ed, 0xee9: 0x02ed, + 0xeea: 0x02ea, 0xeeb: 0x02ea, 0xeec: 0x02ea, 0xeed: 0x02ea, 0xeee: 0x0308, 0xeef: 0x0308, + 0xef0: 0x4468, 0xef1: 0x4468, + // Block 0x3c, offset 0xf00 + 0xf13: 0x02d8, 0xf14: 0x02d8, 0xf15: 0x02d8, 0xf16: 0x02d8, 0xf17: 0x02f6, + 0xf18: 0x02f6, 0xf19: 0x02f3, 0xf1a: 0x02f3, 0xf1b: 0x02f9, 0xf1c: 0x02f9, 0xf1d: 0x204f, + 0xf1e: 0x02ff, 0xf1f: 0x02ff, 0xf20: 0x02f0, 0xf21: 0x02f0, 0xf22: 0x02fc, 0xf23: 0x02fc, + 0xf24: 0x0305, 0xf25: 0x0305, 0xf26: 0x0305, 0xf27: 0x0305, 0xf28: 0x028d, 0xf29: 0x028d, + 0xf2a: 0x25aa, 0xf2b: 0x25aa, 0xf2c: 0x261a, 0xf2d: 0x261a, 0xf2e: 0x25e9, 0xf2f: 0x25e9, + 0xf30: 0x2605, 0xf31: 0x2605, 0xf32: 0x25fe, 0xf33: 0x25fe, 0xf34: 0x260c, 0xf35: 0x260c, + 0xf36: 0x2613, 0xf37: 0x2613, 0xf38: 0x2613, 0xf39: 0x25f0, 0xf3a: 0x25f0, 0xf3b: 0x25f0, + 0xf3c: 0x0302, 0xf3d: 0x0302, 0xf3e: 0x0302, 0xf3f: 0x0302, + // Block 0x3d, offset 0xf40 + 0xf40: 0x25b1, 0xf41: 0x25b8, 0xf42: 0x25d4, 0xf43: 0x25f0, 0xf44: 0x25f7, 0xf45: 0x1d89, + 0xf46: 0x1d8e, 0xf47: 0x1d93, 0xf48: 0x1da2, 0xf49: 0x1db1, 0xf4a: 0x1db6, 0xf4b: 0x1dbb, + 0xf4c: 0x1dc0, 0xf4d: 0x1dc5, 0xf4e: 0x1dd4, 0xf4f: 0x1de3, 0xf50: 0x1de8, 0xf51: 0x1ded, + 0xf52: 0x1dfc, 0xf53: 0x1e0b, 0xf54: 0x1e10, 0xf55: 0x1e15, 0xf56: 0x1e1a, 0xf57: 0x1e29, + 0xf58: 0x1e2e, 0xf59: 0x1e3d, 0xf5a: 0x1e42, 0xf5b: 0x1e47, 0xf5c: 0x1e56, 0xf5d: 0x1e5b, + 0xf5e: 0x1e60, 0xf5f: 0x1e6a, 0xf60: 0x1ea6, 0xf61: 0x1eb5, 0xf62: 0x1ec4, 0xf63: 0x1ec9, + 0xf64: 0x1ece, 0xf65: 0x1ed8, 0xf66: 0x1ee7, 0xf67: 0x1eec, 0xf68: 0x1efb, 0xf69: 0x1f00, + 0xf6a: 0x1f05, 0xf6b: 0x1f14, 0xf6c: 0x1f19, 0xf6d: 0x1f28, 0xf6e: 0x1f2d, 0xf6f: 0x1f32, + 0xf70: 0x1f37, 0xf71: 0x1f3c, 0xf72: 0x1f41, 0xf73: 0x1f46, 0xf74: 0x1f4b, 0xf75: 0x1f50, + 0xf76: 0x1f55, 0xf77: 0x1f5a, 0xf78: 0x1f5f, 0xf79: 0x1f64, 0xf7a: 0x1f69, 0xf7b: 0x1f6e, + 0xf7c: 0x1f73, 0xf7d: 0x1f78, 0xf7e: 0x1f7d, 0xf7f: 0x1f87, + // Block 0x3e, offset 0xf80 + 0xf80: 0x1f8c, 0xf81: 0x1f91, 0xf82: 0x1f96, 0xf83: 0x1fa0, 0xf84: 0x1fa5, 0xf85: 0x1faf, + 0xf86: 0x1fb4, 0xf87: 0x1fb9, 0xf88: 0x1fbe, 0xf89: 0x1fc3, 0xf8a: 0x1fc8, 0xf8b: 0x1fcd, + 0xf8c: 0x1fd2, 0xf8d: 0x1fd7, 0xf8e: 0x1fe6, 0xf8f: 0x1ff5, 0xf90: 0x1ffa, 0xf91: 0x1fff, + 0xf92: 0x2004, 0xf93: 0x2009, 0xf94: 0x200e, 0xf95: 0x2018, 0xf96: 0x201d, 0xf97: 0x2022, + 0xf98: 0x2031, 0xf99: 0x2040, 0xf9a: 
0x2045, 0xf9b: 0x4420, 0xf9c: 0x4426, 0xf9d: 0x445c, + 0xf9e: 0x44b3, 0xf9f: 0x44ba, 0xfa0: 0x44c1, 0xfa1: 0x44c8, 0xfa2: 0x44cf, 0xfa3: 0x44d6, + 0xfa4: 0x25c6, 0xfa5: 0x25cd, 0xfa6: 0x25d4, 0xfa7: 0x25db, 0xfa8: 0x25f0, 0xfa9: 0x25f7, + 0xfaa: 0x1d98, 0xfab: 0x1d9d, 0xfac: 0x1da2, 0xfad: 0x1da7, 0xfae: 0x1db1, 0xfaf: 0x1db6, + 0xfb0: 0x1dca, 0xfb1: 0x1dcf, 0xfb2: 0x1dd4, 0xfb3: 0x1dd9, 0xfb4: 0x1de3, 0xfb5: 0x1de8, + 0xfb6: 0x1df2, 0xfb7: 0x1df7, 0xfb8: 0x1dfc, 0xfb9: 0x1e01, 0xfba: 0x1e0b, 0xfbb: 0x1e10, + 0xfbc: 0x1f3c, 0xfbd: 0x1f41, 0xfbe: 0x1f50, 0xfbf: 0x1f55, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x1f5a, 0xfc1: 0x1f6e, 0xfc2: 0x1f73, 0xfc3: 0x1f78, 0xfc4: 0x1f7d, 0xfc5: 0x1f96, + 0xfc6: 0x1fa0, 0xfc7: 0x1fa5, 0xfc8: 0x1faa, 0xfc9: 0x1fbe, 0xfca: 0x1fdc, 0xfcb: 0x1fe1, + 0xfcc: 0x1fe6, 0xfcd: 0x1feb, 0xfce: 0x1ff5, 0xfcf: 0x1ffa, 0xfd0: 0x445c, 0xfd1: 0x2027, + 0xfd2: 0x202c, 0xfd3: 0x2031, 0xfd4: 0x2036, 0xfd5: 0x2040, 0xfd6: 0x2045, 0xfd7: 0x25b1, + 0xfd8: 0x25b8, 0xfd9: 0x25bf, 0xfda: 0x25d4, 0xfdb: 0x25e2, 0xfdc: 0x1d89, 0xfdd: 0x1d8e, + 0xfde: 0x1d93, 0xfdf: 0x1da2, 0xfe0: 0x1dac, 0xfe1: 0x1dbb, 0xfe2: 0x1dc0, 0xfe3: 0x1dc5, + 0xfe4: 0x1dd4, 0xfe5: 0x1dde, 0xfe6: 0x1dfc, 0xfe7: 0x1e15, 0xfe8: 0x1e1a, 0xfe9: 0x1e29, + 0xfea: 0x1e2e, 0xfeb: 0x1e3d, 0xfec: 0x1e47, 0xfed: 0x1e56, 0xfee: 0x1e5b, 0xfef: 0x1e60, + 0xff0: 0x1e6a, 0xff1: 0x1ea6, 0xff2: 0x1eab, 0xff3: 0x1eb5, 0xff4: 0x1ec4, 0xff5: 0x1ec9, + 0xff6: 0x1ece, 0xff7: 0x1ed8, 0xff8: 0x1ee7, 0xff9: 0x1efb, 0xffa: 0x1f00, 0xffb: 0x1f05, + 0xffc: 0x1f14, 0xffd: 0x1f19, 0xffe: 0x1f28, 0xfff: 0x1f2d, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f32, 0x1001: 0x1f37, 0x1002: 0x1f46, 0x1003: 0x1f4b, 0x1004: 0x1f5f, 0x1005: 0x1f64, + 0x1006: 0x1f69, 0x1007: 0x1f6e, 0x1008: 0x1f73, 0x1009: 0x1f87, 0x100a: 0x1f8c, 0x100b: 0x1f91, + 0x100c: 0x1f96, 0x100d: 0x1f9b, 0x100e: 0x1faf, 0x100f: 0x1fb4, 0x1010: 0x1fb9, 0x1011: 0x1fbe, + 0x1012: 0x1fcd, 0x1013: 0x1fd2, 0x1014: 0x1fd7, 0x1015: 0x1fe6, 0x1016: 0x1ff0, 0x1017: 0x1fff, + 0x1018: 0x2004, 0x1019: 0x4450, 0x101a: 0x2018, 0x101b: 0x201d, 0x101c: 0x2022, 0x101d: 0x2031, + 0x101e: 0x203b, 0x101f: 0x25d4, 0x1020: 0x25e2, 0x1021: 0x1da2, 0x1022: 0x1dac, 0x1023: 0x1dd4, + 0x1024: 0x1dde, 0x1025: 0x1dfc, 0x1026: 0x1e06, 0x1027: 0x1e6a, 0x1028: 0x1e6f, 0x1029: 0x1e92, + 0x102a: 0x1e97, 0x102b: 0x1f6e, 0x102c: 0x1f73, 0x102d: 0x1f96, 0x102e: 0x1fe6, 0x102f: 0x1ff0, + 0x1030: 0x2031, 0x1031: 0x203b, 0x1032: 0x4504, 0x1033: 0x450c, 0x1034: 0x4514, 0x1035: 0x1ef1, + 0x1036: 0x1ef6, 0x1037: 0x1f0a, 0x1038: 0x1f0f, 0x1039: 0x1f1e, 0x103a: 0x1f23, 0x103b: 0x1e74, + 0x103c: 0x1e79, 0x103d: 0x1e9c, 0x103e: 0x1ea1, 0x103f: 0x1e33, + // Block 0x41, offset 0x1040 + 0x1040: 0x1e38, 0x1041: 0x1e1f, 0x1042: 0x1e24, 0x1043: 0x1e4c, 0x1044: 0x1e51, 0x1045: 0x1eba, + 0x1046: 0x1ebf, 0x1047: 0x1edd, 0x1048: 0x1ee2, 0x1049: 0x1e7e, 0x104a: 0x1e83, 0x104b: 0x1e88, + 0x104c: 0x1e92, 0x104d: 0x1e8d, 0x104e: 0x1e65, 0x104f: 0x1eb0, 0x1050: 0x1ed3, 0x1051: 0x1ef1, + 0x1052: 0x1ef6, 0x1053: 0x1f0a, 0x1054: 0x1f0f, 0x1055: 0x1f1e, 0x1056: 0x1f23, 0x1057: 0x1e74, + 0x1058: 0x1e79, 0x1059: 0x1e9c, 0x105a: 0x1ea1, 0x105b: 0x1e33, 0x105c: 0x1e38, 0x105d: 0x1e1f, + 0x105e: 0x1e24, 0x105f: 0x1e4c, 0x1060: 0x1e51, 0x1061: 0x1eba, 0x1062: 0x1ebf, 0x1063: 0x1edd, + 0x1064: 0x1ee2, 0x1065: 0x1e7e, 0x1066: 0x1e83, 0x1067: 0x1e88, 0x1068: 0x1e92, 0x1069: 0x1e8d, + 0x106a: 0x1e65, 0x106b: 0x1eb0, 0x106c: 0x1ed3, 0x106d: 0x1e7e, 0x106e: 0x1e83, 0x106f: 0x1e88, + 0x1070: 0x1e92, 0x1071: 0x1e6f, 0x1072: 0x1e97, 0x1073: 0x1eec, 0x1074: 
0x1e56, 0x1075: 0x1e5b, + 0x1076: 0x1e60, 0x1077: 0x1e7e, 0x1078: 0x1e83, 0x1079: 0x1e88, 0x107a: 0x1eec, 0x107b: 0x1efb, + 0x107c: 0x4408, 0x107d: 0x4408, + // Block 0x42, offset 0x1080 + 0x1090: 0x2311, 0x1091: 0x2326, + 0x1092: 0x2326, 0x1093: 0x232d, 0x1094: 0x2334, 0x1095: 0x2349, 0x1096: 0x2350, 0x1097: 0x2357, + 0x1098: 0x237a, 0x1099: 0x237a, 0x109a: 0x239d, 0x109b: 0x2396, 0x109c: 0x23b2, 0x109d: 0x23a4, + 0x109e: 0x23ab, 0x109f: 0x23ce, 0x10a0: 0x23ce, 0x10a1: 0x23c7, 0x10a2: 0x23d5, 0x10a3: 0x23d5, + 0x10a4: 0x23ff, 0x10a5: 0x23ff, 0x10a6: 0x241b, 0x10a7: 0x23e3, 0x10a8: 0x23e3, 0x10a9: 0x23dc, + 0x10aa: 0x23f1, 0x10ab: 0x23f1, 0x10ac: 0x23f8, 0x10ad: 0x23f8, 0x10ae: 0x2422, 0x10af: 0x2430, + 0x10b0: 0x2430, 0x10b1: 0x2437, 0x10b2: 0x2437, 0x10b3: 0x243e, 0x10b4: 0x2445, 0x10b5: 0x244c, + 0x10b6: 0x2453, 0x10b7: 0x2453, 0x10b8: 0x245a, 0x10b9: 0x2468, 0x10ba: 0x2476, 0x10bb: 0x246f, + 0x10bc: 0x247d, 0x10bd: 0x247d, 0x10be: 0x2492, 0x10bf: 0x2499, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x24ca, 0x10c1: 0x24d8, 0x10c2: 0x24d1, 0x10c3: 0x24b5, 0x10c4: 0x24b5, 0x10c5: 0x24df, + 0x10c6: 0x24df, 0x10c7: 0x24e6, 0x10c8: 0x24e6, 0x10c9: 0x2510, 0x10ca: 0x2517, 0x10cb: 0x251e, + 0x10cc: 0x24f4, 0x10cd: 0x2502, 0x10ce: 0x2525, 0x10cf: 0x252c, + 0x10d2: 0x24fb, 0x10d3: 0x2580, 0x10d4: 0x2587, 0x10d5: 0x255d, 0x10d6: 0x2564, 0x10d7: 0x2548, + 0x10d8: 0x2548, 0x10d9: 0x254f, 0x10da: 0x2579, 0x10db: 0x2572, 0x10dc: 0x259c, 0x10dd: 0x259c, + 0x10de: 0x230a, 0x10df: 0x231f, 0x10e0: 0x2318, 0x10e1: 0x2342, 0x10e2: 0x233b, 0x10e3: 0x2365, + 0x10e4: 0x235e, 0x10e5: 0x2388, 0x10e6: 0x236c, 0x10e7: 0x2381, 0x10e8: 0x23b9, 0x10e9: 0x2406, + 0x10ea: 0x23ea, 0x10eb: 0x2429, 0x10ec: 0x24c3, 0x10ed: 0x24ed, 0x10ee: 0x2595, 0x10ef: 0x258e, + 0x10f0: 0x25a3, 0x10f1: 0x253a, 0x10f2: 0x24a0, 0x10f3: 0x256b, 0x10f4: 0x2492, 0x10f5: 0x24ca, + 0x10f6: 0x2461, 0x10f7: 0x24ae, 0x10f8: 0x2541, 0x10f9: 0x2533, 0x10fa: 0x24bc, 0x10fb: 0x24a7, + 0x10fc: 0x24bc, 0x10fd: 0x2541, 0x10fe: 0x2373, 0x10ff: 0x238f, + // Block 0x44, offset 0x1100 + 0x1100: 0x2509, 0x1101: 0x2484, 0x1102: 0x2303, 0x1103: 0x24a7, 0x1104: 0x244c, 0x1105: 0x241b, + 0x1106: 0x23c0, 0x1107: 0x2556, + 0x1130: 0x2414, 0x1131: 0x248b, 0x1132: 0x27bf, 0x1133: 0x27b6, 0x1134: 0x27ec, 0x1135: 0x27da, + 0x1136: 0x27c8, 0x1137: 0x27e3, 0x1138: 0x27f5, 0x1139: 0x240d, 0x113a: 0x2c7c, 0x113b: 0x2afc, + 0x113c: 0x27d1, + // Block 0x45, offset 0x1140 + 0x1150: 0x0019, 0x1151: 0x0483, + 0x1152: 0x0487, 0x1153: 0x0035, 0x1154: 0x0037, 0x1155: 0x0003, 0x1156: 0x003f, 0x1157: 0x04bf, + 0x1158: 0x04c3, 0x1159: 0x1b5c, + 0x1160: 0x8132, 0x1161: 0x8132, 0x1162: 0x8132, 0x1163: 0x8132, + 0x1164: 0x8132, 0x1165: 0x8132, 0x1166: 0x8132, 0x1167: 0x812d, 0x1168: 0x812d, 0x1169: 0x812d, + 0x116a: 0x812d, 0x116b: 0x812d, 0x116c: 0x812d, 0x116d: 0x812d, 0x116e: 0x8132, 0x116f: 0x8132, + 0x1170: 0x1873, 0x1171: 0x0443, 0x1172: 0x043f, 0x1173: 0x007f, 0x1174: 0x007f, 0x1175: 0x0011, + 0x1176: 0x0013, 0x1177: 0x00b7, 0x1178: 0x00bb, 0x1179: 0x04b7, 0x117a: 0x04bb, 0x117b: 0x04ab, + 0x117c: 0x04af, 0x117d: 0x0493, 0x117e: 0x0497, 0x117f: 0x048b, + // Block 0x46, offset 0x1180 + 0x1180: 0x048f, 0x1181: 0x049b, 0x1182: 0x049f, 0x1183: 0x04a3, 0x1184: 0x04a7, + 0x1187: 0x0077, 0x1188: 0x007b, 0x1189: 0x4269, 0x118a: 0x4269, 0x118b: 0x4269, + 0x118c: 0x4269, 0x118d: 0x007f, 0x118e: 0x007f, 0x118f: 0x007f, 0x1190: 0x0019, 0x1191: 0x0483, + 0x1192: 0x001d, 0x1194: 0x0037, 0x1195: 0x0035, 0x1196: 0x003f, 0x1197: 0x0003, + 0x1198: 0x0443, 0x1199: 0x0011, 0x119a: 0x0013, 0x119b: 
0x00b7, 0x119c: 0x00bb, 0x119d: 0x04b7, + 0x119e: 0x04bb, 0x119f: 0x0007, 0x11a0: 0x000d, 0x11a1: 0x0015, 0x11a2: 0x0017, 0x11a3: 0x001b, + 0x11a4: 0x0039, 0x11a5: 0x003d, 0x11a6: 0x003b, 0x11a8: 0x0079, 0x11a9: 0x0009, + 0x11aa: 0x000b, 0x11ab: 0x0041, + 0x11b0: 0x42aa, 0x11b1: 0x442c, 0x11b2: 0x42af, 0x11b4: 0x42b4, + 0x11b6: 0x42b9, 0x11b7: 0x4432, 0x11b8: 0x42be, 0x11b9: 0x4438, 0x11ba: 0x42c3, 0x11bb: 0x443e, + 0x11bc: 0x42c8, 0x11bd: 0x4444, 0x11be: 0x42cd, 0x11bf: 0x444a, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x0236, 0x11c1: 0x440e, 0x11c2: 0x440e, 0x11c3: 0x4414, 0x11c4: 0x4414, 0x11c5: 0x4456, + 0x11c6: 0x4456, 0x11c7: 0x441a, 0x11c8: 0x441a, 0x11c9: 0x4462, 0x11ca: 0x4462, 0x11cb: 0x4462, + 0x11cc: 0x4462, 0x11cd: 0x0239, 0x11ce: 0x0239, 0x11cf: 0x023c, 0x11d0: 0x023c, 0x11d1: 0x023c, + 0x11d2: 0x023c, 0x11d3: 0x023f, 0x11d4: 0x023f, 0x11d5: 0x0242, 0x11d6: 0x0242, 0x11d7: 0x0242, + 0x11d8: 0x0242, 0x11d9: 0x0245, 0x11da: 0x0245, 0x11db: 0x0245, 0x11dc: 0x0245, 0x11dd: 0x0248, + 0x11de: 0x0248, 0x11df: 0x0248, 0x11e0: 0x0248, 0x11e1: 0x024b, 0x11e2: 0x024b, 0x11e3: 0x024b, + 0x11e4: 0x024b, 0x11e5: 0x024e, 0x11e6: 0x024e, 0x11e7: 0x024e, 0x11e8: 0x024e, 0x11e9: 0x0251, + 0x11ea: 0x0251, 0x11eb: 0x0254, 0x11ec: 0x0254, 0x11ed: 0x0257, 0x11ee: 0x0257, 0x11ef: 0x025a, + 0x11f0: 0x025a, 0x11f1: 0x025d, 0x11f2: 0x025d, 0x11f3: 0x025d, 0x11f4: 0x025d, 0x11f5: 0x0260, + 0x11f6: 0x0260, 0x11f7: 0x0260, 0x11f8: 0x0260, 0x11f9: 0x0263, 0x11fa: 0x0263, 0x11fb: 0x0263, + 0x11fc: 0x0263, 0x11fd: 0x0266, 0x11fe: 0x0266, 0x11ff: 0x0266, + // Block 0x48, offset 0x1200 + 0x1200: 0x0266, 0x1201: 0x0269, 0x1202: 0x0269, 0x1203: 0x0269, 0x1204: 0x0269, 0x1205: 0x026c, + 0x1206: 0x026c, 0x1207: 0x026c, 0x1208: 0x026c, 0x1209: 0x026f, 0x120a: 0x026f, 0x120b: 0x026f, + 0x120c: 0x026f, 0x120d: 0x0272, 0x120e: 0x0272, 0x120f: 0x0272, 0x1210: 0x0272, 0x1211: 0x0275, + 0x1212: 0x0275, 0x1213: 0x0275, 0x1214: 0x0275, 0x1215: 0x0278, 0x1216: 0x0278, 0x1217: 0x0278, + 0x1218: 0x0278, 0x1219: 0x027b, 0x121a: 0x027b, 0x121b: 0x027b, 0x121c: 0x027b, 0x121d: 0x027e, + 0x121e: 0x027e, 0x121f: 0x027e, 0x1220: 0x027e, 0x1221: 0x0281, 0x1222: 0x0281, 0x1223: 0x0281, + 0x1224: 0x0281, 0x1225: 0x0284, 0x1226: 0x0284, 0x1227: 0x0284, 0x1228: 0x0284, 0x1229: 0x0287, + 0x122a: 0x0287, 0x122b: 0x0287, 0x122c: 0x0287, 0x122d: 0x028a, 0x122e: 0x028a, 0x122f: 0x028d, + 0x1230: 0x028d, 0x1231: 0x0290, 0x1232: 0x0290, 0x1233: 0x0290, 0x1234: 0x0290, 0x1235: 0x2e00, + 0x1236: 0x2e00, 0x1237: 0x2e08, 0x1238: 0x2e08, 0x1239: 0x2e10, 0x123a: 0x2e10, 0x123b: 0x1f82, + 0x123c: 0x1f82, + // Block 0x49, offset 0x1240 + 0x1240: 0x0081, 0x1241: 0x0083, 0x1242: 0x0085, 0x1243: 0x0087, 0x1244: 0x0089, 0x1245: 0x008b, + 0x1246: 0x008d, 0x1247: 0x008f, 0x1248: 0x0091, 0x1249: 0x0093, 0x124a: 0x0095, 0x124b: 0x0097, + 0x124c: 0x0099, 0x124d: 0x009b, 0x124e: 0x009d, 0x124f: 0x009f, 0x1250: 0x00a1, 0x1251: 0x00a3, + 0x1252: 0x00a5, 0x1253: 0x00a7, 0x1254: 0x00a9, 0x1255: 0x00ab, 0x1256: 0x00ad, 0x1257: 0x00af, + 0x1258: 0x00b1, 0x1259: 0x00b3, 0x125a: 0x00b5, 0x125b: 0x00b7, 0x125c: 0x00b9, 0x125d: 0x00bb, + 0x125e: 0x00bd, 0x125f: 0x0477, 0x1260: 0x047b, 0x1261: 0x0487, 0x1262: 0x049b, 0x1263: 0x049f, + 0x1264: 0x0483, 0x1265: 0x05ab, 0x1266: 0x05a3, 0x1267: 0x04c7, 0x1268: 0x04cf, 0x1269: 0x04d7, + 0x126a: 0x04df, 0x126b: 0x04e7, 0x126c: 0x056b, 0x126d: 0x0573, 0x126e: 0x057b, 0x126f: 0x051f, + 0x1270: 0x05af, 0x1271: 0x04cb, 0x1272: 0x04d3, 0x1273: 0x04db, 0x1274: 0x04e3, 0x1275: 0x04eb, + 0x1276: 0x04ef, 0x1277: 0x04f3, 0x1278: 0x04f7, 
0x1279: 0x04fb, 0x127a: 0x04ff, 0x127b: 0x0503, + 0x127c: 0x0507, 0x127d: 0x050b, 0x127e: 0x050f, 0x127f: 0x0513, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0517, 0x1281: 0x051b, 0x1282: 0x0523, 0x1283: 0x0527, 0x1284: 0x052b, 0x1285: 0x052f, + 0x1286: 0x0533, 0x1287: 0x0537, 0x1288: 0x053b, 0x1289: 0x053f, 0x128a: 0x0543, 0x128b: 0x0547, + 0x128c: 0x054b, 0x128d: 0x054f, 0x128e: 0x0553, 0x128f: 0x0557, 0x1290: 0x055b, 0x1291: 0x055f, + 0x1292: 0x0563, 0x1293: 0x0567, 0x1294: 0x056f, 0x1295: 0x0577, 0x1296: 0x057f, 0x1297: 0x0583, + 0x1298: 0x0587, 0x1299: 0x058b, 0x129a: 0x058f, 0x129b: 0x0593, 0x129c: 0x0597, 0x129d: 0x05a7, + 0x129e: 0x4a78, 0x129f: 0x4a7e, 0x12a0: 0x03c3, 0x12a1: 0x0313, 0x12a2: 0x0317, 0x12a3: 0x4a3b, + 0x12a4: 0x031b, 0x12a5: 0x4a41, 0x12a6: 0x4a47, 0x12a7: 0x031f, 0x12a8: 0x0323, 0x12a9: 0x0327, + 0x12aa: 0x4a4d, 0x12ab: 0x4a53, 0x12ac: 0x4a59, 0x12ad: 0x4a5f, 0x12ae: 0x4a65, 0x12af: 0x4a6b, + 0x12b0: 0x0367, 0x12b1: 0x032b, 0x12b2: 0x032f, 0x12b3: 0x0333, 0x12b4: 0x037b, 0x12b5: 0x0337, + 0x12b6: 0x033b, 0x12b7: 0x033f, 0x12b8: 0x0343, 0x12b9: 0x0347, 0x12ba: 0x034b, 0x12bb: 0x034f, + 0x12bc: 0x0353, 0x12bd: 0x0357, 0x12be: 0x035b, + // Block 0x4b, offset 0x12c0 + 0x12c2: 0x49bd, 0x12c3: 0x49c3, 0x12c4: 0x49c9, 0x12c5: 0x49cf, + 0x12c6: 0x49d5, 0x12c7: 0x49db, 0x12ca: 0x49e1, 0x12cb: 0x49e7, + 0x12cc: 0x49ed, 0x12cd: 0x49f3, 0x12ce: 0x49f9, 0x12cf: 0x49ff, + 0x12d2: 0x4a05, 0x12d3: 0x4a0b, 0x12d4: 0x4a11, 0x12d5: 0x4a17, 0x12d6: 0x4a1d, 0x12d7: 0x4a23, + 0x12da: 0x4a29, 0x12db: 0x4a2f, 0x12dc: 0x4a35, + 0x12e0: 0x00bf, 0x12e1: 0x00c2, 0x12e2: 0x00cb, 0x12e3: 0x4264, + 0x12e4: 0x00c8, 0x12e5: 0x00c5, 0x12e6: 0x0447, 0x12e8: 0x046b, 0x12e9: 0x044b, + 0x12ea: 0x044f, 0x12eb: 0x0453, 0x12ec: 0x0457, 0x12ed: 0x046f, 0x12ee: 0x0473, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0063, 0x1301: 0x0065, 0x1302: 0x0067, 0x1303: 0x0069, 0x1304: 0x006b, 0x1305: 0x006d, + 0x1306: 0x006f, 0x1307: 0x0071, 0x1308: 0x0073, 0x1309: 0x0075, 0x130a: 0x0083, 0x130b: 0x0085, + 0x130c: 0x0087, 0x130d: 0x0089, 0x130e: 0x008b, 0x130f: 0x008d, 0x1310: 0x008f, 0x1311: 0x0091, + 0x1312: 0x0093, 0x1313: 0x0095, 0x1314: 0x0097, 0x1315: 0x0099, 0x1316: 0x009b, 0x1317: 0x009d, + 0x1318: 0x009f, 0x1319: 0x00a1, 0x131a: 0x00a3, 0x131b: 0x00a5, 0x131c: 0x00a7, 0x131d: 0x00a9, + 0x131e: 0x00ab, 0x131f: 0x00ad, 0x1320: 0x00af, 0x1321: 0x00b1, 0x1322: 0x00b3, 0x1323: 0x00b5, + 0x1324: 0x00dd, 0x1325: 0x00f2, 0x1328: 0x0173, 0x1329: 0x0176, + 0x132a: 0x0179, 0x132b: 0x017c, 0x132c: 0x017f, 0x132d: 0x0182, 0x132e: 0x0185, 0x132f: 0x0188, + 0x1330: 0x018b, 0x1331: 0x018e, 0x1332: 0x0191, 0x1333: 0x0194, 0x1334: 0x0197, 0x1335: 0x019a, + 0x1336: 0x019d, 0x1337: 0x01a0, 0x1338: 0x01a3, 0x1339: 0x0188, 0x133a: 0x01a6, 0x133b: 0x01a9, + 0x133c: 0x01ac, 0x133d: 0x01af, 0x133e: 0x01b2, 0x133f: 0x01b5, + // Block 0x4d, offset 0x1340 + 0x1340: 0x01fd, 0x1341: 0x0200, 0x1342: 0x0203, 0x1343: 0x045b, 0x1344: 0x01c7, 0x1345: 0x01d0, + 0x1346: 0x01d6, 0x1347: 0x01fa, 0x1348: 0x01eb, 0x1349: 0x01e8, 0x134a: 0x0206, 0x134b: 0x0209, + 0x134e: 0x0021, 0x134f: 0x0023, 0x1350: 0x0025, 0x1351: 0x0027, + 0x1352: 0x0029, 0x1353: 0x002b, 0x1354: 0x002d, 0x1355: 0x002f, 0x1356: 0x0031, 0x1357: 0x0033, + 0x1358: 0x0021, 0x1359: 0x0023, 0x135a: 0x0025, 0x135b: 0x0027, 0x135c: 0x0029, 0x135d: 0x002b, + 0x135e: 0x002d, 0x135f: 0x002f, 0x1360: 0x0031, 0x1361: 0x0033, 0x1362: 0x0021, 0x1363: 0x0023, + 0x1364: 0x0025, 0x1365: 0x0027, 0x1366: 0x0029, 0x1367: 0x002b, 0x1368: 0x002d, 0x1369: 0x002f, + 0x136a: 0x0031, 0x136b: 0x0033, 0x136c: 
0x0021, 0x136d: 0x0023, 0x136e: 0x0025, 0x136f: 0x0027, + 0x1370: 0x0029, 0x1371: 0x002b, 0x1372: 0x002d, 0x1373: 0x002f, 0x1374: 0x0031, 0x1375: 0x0033, + 0x1376: 0x0021, 0x1377: 0x0023, 0x1378: 0x0025, 0x1379: 0x0027, 0x137a: 0x0029, 0x137b: 0x002b, + 0x137c: 0x002d, 0x137d: 0x002f, 0x137e: 0x0031, 0x137f: 0x0033, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0239, 0x1381: 0x023c, 0x1382: 0x0248, 0x1383: 0x0251, 0x1385: 0x028a, + 0x1386: 0x025a, 0x1387: 0x024b, 0x1388: 0x0269, 0x1389: 0x0290, 0x138a: 0x027b, 0x138b: 0x027e, + 0x138c: 0x0281, 0x138d: 0x0284, 0x138e: 0x025d, 0x138f: 0x026f, 0x1390: 0x0275, 0x1391: 0x0263, + 0x1392: 0x0278, 0x1393: 0x0257, 0x1394: 0x0260, 0x1395: 0x0242, 0x1396: 0x0245, 0x1397: 0x024e, + 0x1398: 0x0254, 0x1399: 0x0266, 0x139a: 0x026c, 0x139b: 0x0272, 0x139c: 0x0293, 0x139d: 0x02e4, + 0x139e: 0x02cc, 0x139f: 0x0296, 0x13a1: 0x023c, 0x13a2: 0x0248, + 0x13a4: 0x0287, 0x13a7: 0x024b, 0x13a9: 0x0290, + 0x13aa: 0x027b, 0x13ab: 0x027e, 0x13ac: 0x0281, 0x13ad: 0x0284, 0x13ae: 0x025d, 0x13af: 0x026f, + 0x13b0: 0x0275, 0x13b1: 0x0263, 0x13b2: 0x0278, 0x13b4: 0x0260, 0x13b5: 0x0242, + 0x13b6: 0x0245, 0x13b7: 0x024e, 0x13b9: 0x0266, 0x13bb: 0x0272, + // Block 0x4f, offset 0x13c0 + 0x13c2: 0x0248, + 0x13c7: 0x024b, 0x13c9: 0x0290, 0x13cb: 0x027e, + 0x13cd: 0x0284, 0x13ce: 0x025d, 0x13cf: 0x026f, 0x13d1: 0x0263, + 0x13d2: 0x0278, 0x13d4: 0x0260, 0x13d7: 0x024e, + 0x13d9: 0x0266, 0x13db: 0x0272, 0x13dd: 0x02e4, + 0x13df: 0x0296, 0x13e1: 0x023c, 0x13e2: 0x0248, + 0x13e4: 0x0287, 0x13e7: 0x024b, 0x13e8: 0x0269, 0x13e9: 0x0290, + 0x13ea: 0x027b, 0x13ec: 0x0281, 0x13ed: 0x0284, 0x13ee: 0x025d, 0x13ef: 0x026f, + 0x13f0: 0x0275, 0x13f1: 0x0263, 0x13f2: 0x0278, 0x13f4: 0x0260, 0x13f5: 0x0242, + 0x13f6: 0x0245, 0x13f7: 0x024e, 0x13f9: 0x0266, 0x13fa: 0x026c, 0x13fb: 0x0272, + 0x13fc: 0x0293, 0x13fe: 0x02cc, + // Block 0x50, offset 0x1400 + 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1404: 0x0287, 0x1405: 0x028a, + 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140b: 0x027e, + 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263, + 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e, + 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, + 0x1421: 0x023c, 0x1422: 0x0248, 0x1423: 0x0251, + 0x1425: 0x028a, 0x1426: 0x025a, 0x1427: 0x024b, 0x1428: 0x0269, 0x1429: 0x0290, + 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f, + 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1433: 0x0257, 0x1434: 0x0260, 0x1435: 0x0242, + 0x1436: 0x0245, 0x1437: 0x024e, 0x1438: 0x0254, 0x1439: 0x0266, 0x143a: 0x026c, 0x143b: 0x0272, + // Block 0x51, offset 0x1440 + 0x1440: 0x1879, 0x1441: 0x1876, 0x1442: 0x187c, 0x1443: 0x18a0, 0x1444: 0x18c4, 0x1445: 0x18e8, + 0x1446: 0x190c, 0x1447: 0x1915, 0x1448: 0x191b, 0x1449: 0x1921, 0x144a: 0x1927, + 0x1450: 0x1a8c, 0x1451: 0x1a90, + 0x1452: 0x1a94, 0x1453: 0x1a98, 0x1454: 0x1a9c, 0x1455: 0x1aa0, 0x1456: 0x1aa4, 0x1457: 0x1aa8, + 0x1458: 0x1aac, 0x1459: 0x1ab0, 0x145a: 0x1ab4, 0x145b: 0x1ab8, 0x145c: 0x1abc, 0x145d: 0x1ac0, + 0x145e: 0x1ac4, 0x145f: 0x1ac8, 0x1460: 0x1acc, 0x1461: 0x1ad0, 0x1462: 0x1ad4, 0x1463: 0x1ad8, + 0x1464: 0x1adc, 0x1465: 0x1ae0, 0x1466: 0x1ae4, 0x1467: 0x1ae8, 0x1468: 0x1aec, 0x1469: 0x1af0, + 0x146a: 0x271e, 0x146b: 0x0047, 0x146c: 0x0065, 0x146d: 0x193c, 0x146e: 0x19b1, + 0x1470: 0x0043, 0x1471: 0x0045, 0x1472: 0x0047, 0x1473: 0x0049, 0x1474: 0x004b, 0x1475: 0x004d, + 
0x1476: 0x004f, 0x1477: 0x0051, 0x1478: 0x0053, 0x1479: 0x0055, 0x147a: 0x0057, 0x147b: 0x0059, + 0x147c: 0x005b, 0x147d: 0x005d, 0x147e: 0x005f, 0x147f: 0x0061, + // Block 0x52, offset 0x1480 + 0x1480: 0x26ad, 0x1481: 0x26c2, 0x1482: 0x0503, + 0x1490: 0x0c0f, 0x1491: 0x0a47, + 0x1492: 0x08d3, 0x1493: 0x45c4, 0x1494: 0x071b, 0x1495: 0x09ef, 0x1496: 0x132f, 0x1497: 0x09ff, + 0x1498: 0x0727, 0x1499: 0x0cd7, 0x149a: 0x0eaf, 0x149b: 0x0caf, 0x149c: 0x0827, 0x149d: 0x0b6b, + 0x149e: 0x07bf, 0x149f: 0x0cb7, 0x14a0: 0x0813, 0x14a1: 0x1117, 0x14a2: 0x0f83, 0x14a3: 0x138b, + 0x14a4: 0x09d3, 0x14a5: 0x090b, 0x14a6: 0x0e63, 0x14a7: 0x0c1b, 0x14a8: 0x0c47, 0x14a9: 0x06bf, + 0x14aa: 0x06cb, 0x14ab: 0x140b, 0x14ac: 0x0adb, 0x14ad: 0x06e7, 0x14ae: 0x08ef, 0x14af: 0x0c3b, + 0x14b0: 0x13b3, 0x14b1: 0x0c13, 0x14b2: 0x106f, 0x14b3: 0x10ab, 0x14b4: 0x08f7, 0x14b5: 0x0e43, + 0x14b6: 0x0d0b, 0x14b7: 0x0d07, 0x14b8: 0x0f97, 0x14b9: 0x082b, 0x14ba: 0x0957, 0x14bb: 0x1443, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x06fb, 0x14c1: 0x06f3, 0x14c2: 0x0703, 0x14c3: 0x1647, 0x14c4: 0x0747, 0x14c5: 0x0757, + 0x14c6: 0x075b, 0x14c7: 0x0763, 0x14c8: 0x076b, 0x14c9: 0x076f, 0x14ca: 0x077b, 0x14cb: 0x0773, + 0x14cc: 0x05b3, 0x14cd: 0x165b, 0x14ce: 0x078f, 0x14cf: 0x0793, 0x14d0: 0x0797, 0x14d1: 0x07b3, + 0x14d2: 0x164c, 0x14d3: 0x05b7, 0x14d4: 0x079f, 0x14d5: 0x07bf, 0x14d6: 0x1656, 0x14d7: 0x07cf, + 0x14d8: 0x07d7, 0x14d9: 0x0737, 0x14da: 0x07df, 0x14db: 0x07e3, 0x14dc: 0x1831, 0x14dd: 0x07ff, + 0x14de: 0x0807, 0x14df: 0x05bf, 0x14e0: 0x081f, 0x14e1: 0x0823, 0x14e2: 0x082b, 0x14e3: 0x082f, + 0x14e4: 0x05c3, 0x14e5: 0x0847, 0x14e6: 0x084b, 0x14e7: 0x0857, 0x14e8: 0x0863, 0x14e9: 0x0867, + 0x14ea: 0x086b, 0x14eb: 0x0873, 0x14ec: 0x0893, 0x14ed: 0x0897, 0x14ee: 0x089f, 0x14ef: 0x08af, + 0x14f0: 0x08b7, 0x14f1: 0x08bb, 0x14f2: 0x08bb, 0x14f3: 0x08bb, 0x14f4: 0x166a, 0x14f5: 0x0e93, + 0x14f6: 0x08cf, 0x14f7: 0x08d7, 0x14f8: 0x166f, 0x14f9: 0x08e3, 0x14fa: 0x08eb, 0x14fb: 0x08f3, + 0x14fc: 0x091b, 0x14fd: 0x0907, 0x14fe: 0x0913, 0x14ff: 0x0917, + // Block 0x54, offset 0x1500 + 0x1500: 0x091f, 0x1501: 0x0927, 0x1502: 0x092b, 0x1503: 0x0933, 0x1504: 0x093b, 0x1505: 0x093f, + 0x1506: 0x093f, 0x1507: 0x0947, 0x1508: 0x094f, 0x1509: 0x0953, 0x150a: 0x095f, 0x150b: 0x0983, + 0x150c: 0x0967, 0x150d: 0x0987, 0x150e: 0x096b, 0x150f: 0x0973, 0x1510: 0x080b, 0x1511: 0x09cf, + 0x1512: 0x0997, 0x1513: 0x099b, 0x1514: 0x099f, 0x1515: 0x0993, 0x1516: 0x09a7, 0x1517: 0x09a3, + 0x1518: 0x09bb, 0x1519: 0x1674, 0x151a: 0x09d7, 0x151b: 0x09db, 0x151c: 0x09e3, 0x151d: 0x09ef, + 0x151e: 0x09f7, 0x151f: 0x0a13, 0x1520: 0x1679, 0x1521: 0x167e, 0x1522: 0x0a1f, 0x1523: 0x0a23, + 0x1524: 0x0a27, 0x1525: 0x0a1b, 0x1526: 0x0a2f, 0x1527: 0x05c7, 0x1528: 0x05cb, 0x1529: 0x0a37, + 0x152a: 0x0a3f, 0x152b: 0x0a3f, 0x152c: 0x1683, 0x152d: 0x0a5b, 0x152e: 0x0a5f, 0x152f: 0x0a63, + 0x1530: 0x0a6b, 0x1531: 0x1688, 0x1532: 0x0a73, 0x1533: 0x0a77, 0x1534: 0x0b4f, 0x1535: 0x0a7f, + 0x1536: 0x05cf, 0x1537: 0x0a8b, 0x1538: 0x0a9b, 0x1539: 0x0aa7, 0x153a: 0x0aa3, 0x153b: 0x1692, + 0x153c: 0x0aaf, 0x153d: 0x1697, 0x153e: 0x0abb, 0x153f: 0x0ab7, + // Block 0x55, offset 0x1540 + 0x1540: 0x0abf, 0x1541: 0x0acf, 0x1542: 0x0ad3, 0x1543: 0x05d3, 0x1544: 0x0ae3, 0x1545: 0x0aeb, + 0x1546: 0x0aef, 0x1547: 0x0af3, 0x1548: 0x05d7, 0x1549: 0x169c, 0x154a: 0x05db, 0x154b: 0x0b0f, + 0x154c: 0x0b13, 0x154d: 0x0b17, 0x154e: 0x0b1f, 0x154f: 0x1863, 0x1550: 0x0b37, 0x1551: 0x16a6, + 0x1552: 0x16a6, 0x1553: 0x11d7, 0x1554: 0x0b47, 0x1555: 0x0b47, 0x1556: 0x05df, 0x1557: 0x16c9, + 0x1558: 
0x179b, 0x1559: 0x0b57, 0x155a: 0x0b5f, 0x155b: 0x05e3, 0x155c: 0x0b73, 0x155d: 0x0b83, + 0x155e: 0x0b87, 0x155f: 0x0b8f, 0x1560: 0x0b9f, 0x1561: 0x05eb, 0x1562: 0x05e7, 0x1563: 0x0ba3, + 0x1564: 0x16ab, 0x1565: 0x0ba7, 0x1566: 0x0bbb, 0x1567: 0x0bbf, 0x1568: 0x0bc3, 0x1569: 0x0bbf, + 0x156a: 0x0bcf, 0x156b: 0x0bd3, 0x156c: 0x0be3, 0x156d: 0x0bdb, 0x156e: 0x0bdf, 0x156f: 0x0be7, + 0x1570: 0x0beb, 0x1571: 0x0bef, 0x1572: 0x0bfb, 0x1573: 0x0bff, 0x1574: 0x0c17, 0x1575: 0x0c1f, + 0x1576: 0x0c2f, 0x1577: 0x0c43, 0x1578: 0x16ba, 0x1579: 0x0c3f, 0x157a: 0x0c33, 0x157b: 0x0c4b, + 0x157c: 0x0c53, 0x157d: 0x0c67, 0x157e: 0x16bf, 0x157f: 0x0c6f, + // Block 0x56, offset 0x1580 + 0x1580: 0x0c63, 0x1581: 0x0c5b, 0x1582: 0x05ef, 0x1583: 0x0c77, 0x1584: 0x0c7f, 0x1585: 0x0c87, + 0x1586: 0x0c7b, 0x1587: 0x05f3, 0x1588: 0x0c97, 0x1589: 0x0c9f, 0x158a: 0x16c4, 0x158b: 0x0ccb, + 0x158c: 0x0cff, 0x158d: 0x0cdb, 0x158e: 0x05ff, 0x158f: 0x0ce7, 0x1590: 0x05fb, 0x1591: 0x05f7, + 0x1592: 0x07c3, 0x1593: 0x07c7, 0x1594: 0x0d03, 0x1595: 0x0ceb, 0x1596: 0x11ab, 0x1597: 0x0663, + 0x1598: 0x0d0f, 0x1599: 0x0d13, 0x159a: 0x0d17, 0x159b: 0x0d2b, 0x159c: 0x0d23, 0x159d: 0x16dd, + 0x159e: 0x0603, 0x159f: 0x0d3f, 0x15a0: 0x0d33, 0x15a1: 0x0d4f, 0x15a2: 0x0d57, 0x15a3: 0x16e7, + 0x15a4: 0x0d5b, 0x15a5: 0x0d47, 0x15a6: 0x0d63, 0x15a7: 0x0607, 0x15a8: 0x0d67, 0x15a9: 0x0d6b, + 0x15aa: 0x0d6f, 0x15ab: 0x0d7b, 0x15ac: 0x16ec, 0x15ad: 0x0d83, 0x15ae: 0x060b, 0x15af: 0x0d8f, + 0x15b0: 0x16f1, 0x15b1: 0x0d93, 0x15b2: 0x060f, 0x15b3: 0x0d9f, 0x15b4: 0x0dab, 0x15b5: 0x0db7, + 0x15b6: 0x0dbb, 0x15b7: 0x16f6, 0x15b8: 0x168d, 0x15b9: 0x16fb, 0x15ba: 0x0ddb, 0x15bb: 0x1700, + 0x15bc: 0x0de7, 0x15bd: 0x0def, 0x15be: 0x0ddf, 0x15bf: 0x0dfb, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0e0b, 0x15c1: 0x0e1b, 0x15c2: 0x0e0f, 0x15c3: 0x0e13, 0x15c4: 0x0e1f, 0x15c5: 0x0e23, + 0x15c6: 0x1705, 0x15c7: 0x0e07, 0x15c8: 0x0e3b, 0x15c9: 0x0e3f, 0x15ca: 0x0613, 0x15cb: 0x0e53, + 0x15cc: 0x0e4f, 0x15cd: 0x170a, 0x15ce: 0x0e33, 0x15cf: 0x0e6f, 0x15d0: 0x170f, 0x15d1: 0x1714, + 0x15d2: 0x0e73, 0x15d3: 0x0e87, 0x15d4: 0x0e83, 0x15d5: 0x0e7f, 0x15d6: 0x0617, 0x15d7: 0x0e8b, + 0x15d8: 0x0e9b, 0x15d9: 0x0e97, 0x15da: 0x0ea3, 0x15db: 0x1651, 0x15dc: 0x0eb3, 0x15dd: 0x1719, + 0x15de: 0x0ebf, 0x15df: 0x1723, 0x15e0: 0x0ed3, 0x15e1: 0x0edf, 0x15e2: 0x0ef3, 0x15e3: 0x1728, + 0x15e4: 0x0f07, 0x15e5: 0x0f0b, 0x15e6: 0x172d, 0x15e7: 0x1732, 0x15e8: 0x0f27, 0x15e9: 0x0f37, + 0x15ea: 0x061b, 0x15eb: 0x0f3b, 0x15ec: 0x061f, 0x15ed: 0x061f, 0x15ee: 0x0f53, 0x15ef: 0x0f57, + 0x15f0: 0x0f5f, 0x15f1: 0x0f63, 0x15f2: 0x0f6f, 0x15f3: 0x0623, 0x15f4: 0x0f87, 0x15f5: 0x1737, + 0x15f6: 0x0fa3, 0x15f7: 0x173c, 0x15f8: 0x0faf, 0x15f9: 0x16a1, 0x15fa: 0x0fbf, 0x15fb: 0x1741, + 0x15fc: 0x1746, 0x15fd: 0x174b, 0x15fe: 0x0627, 0x15ff: 0x062b, + // Block 0x58, offset 0x1600 + 0x1600: 0x0ff7, 0x1601: 0x1755, 0x1602: 0x1750, 0x1603: 0x175a, 0x1604: 0x175f, 0x1605: 0x0fff, + 0x1606: 0x1003, 0x1607: 0x1003, 0x1608: 0x100b, 0x1609: 0x0633, 0x160a: 0x100f, 0x160b: 0x0637, + 0x160c: 0x063b, 0x160d: 0x1769, 0x160e: 0x1023, 0x160f: 0x102b, 0x1610: 0x1037, 0x1611: 0x063f, + 0x1612: 0x176e, 0x1613: 0x105b, 0x1614: 0x1773, 0x1615: 0x1778, 0x1616: 0x107b, 0x1617: 0x1093, + 0x1618: 0x0643, 0x1619: 0x109b, 0x161a: 0x109f, 0x161b: 0x10a3, 0x161c: 0x177d, 0x161d: 0x1782, + 0x161e: 0x1782, 0x161f: 0x10bb, 0x1620: 0x0647, 0x1621: 0x1787, 0x1622: 0x10cf, 0x1623: 0x10d3, + 0x1624: 0x064b, 0x1625: 0x178c, 0x1626: 0x10ef, 0x1627: 0x064f, 0x1628: 0x10ff, 0x1629: 0x10f7, + 0x162a: 0x1107, 0x162b: 0x1796, 
0x162c: 0x111f, 0x162d: 0x0653, 0x162e: 0x112b, 0x162f: 0x1133, + 0x1630: 0x1143, 0x1631: 0x0657, 0x1632: 0x17a0, 0x1633: 0x17a5, 0x1634: 0x065b, 0x1635: 0x17aa, + 0x1636: 0x115b, 0x1637: 0x17af, 0x1638: 0x1167, 0x1639: 0x1173, 0x163a: 0x117b, 0x163b: 0x17b4, + 0x163c: 0x17b9, 0x163d: 0x118f, 0x163e: 0x17be, 0x163f: 0x1197, + // Block 0x59, offset 0x1640 + 0x1640: 0x16ce, 0x1641: 0x065f, 0x1642: 0x11af, 0x1643: 0x11b3, 0x1644: 0x0667, 0x1645: 0x11b7, + 0x1646: 0x0a33, 0x1647: 0x17c3, 0x1648: 0x17c8, 0x1649: 0x16d3, 0x164a: 0x16d8, 0x164b: 0x11d7, + 0x164c: 0x11db, 0x164d: 0x13f3, 0x164e: 0x066b, 0x164f: 0x1207, 0x1650: 0x1203, 0x1651: 0x120b, + 0x1652: 0x083f, 0x1653: 0x120f, 0x1654: 0x1213, 0x1655: 0x1217, 0x1656: 0x121f, 0x1657: 0x17cd, + 0x1658: 0x121b, 0x1659: 0x1223, 0x165a: 0x1237, 0x165b: 0x123b, 0x165c: 0x1227, 0x165d: 0x123f, + 0x165e: 0x1253, 0x165f: 0x1267, 0x1660: 0x1233, 0x1661: 0x1247, 0x1662: 0x124b, 0x1663: 0x124f, + 0x1664: 0x17d2, 0x1665: 0x17dc, 0x1666: 0x17d7, 0x1667: 0x066f, 0x1668: 0x126f, 0x1669: 0x1273, + 0x166a: 0x127b, 0x166b: 0x17f0, 0x166c: 0x127f, 0x166d: 0x17e1, 0x166e: 0x0673, 0x166f: 0x0677, + 0x1670: 0x17e6, 0x1671: 0x17eb, 0x1672: 0x067b, 0x1673: 0x129f, 0x1674: 0x12a3, 0x1675: 0x12a7, + 0x1676: 0x12ab, 0x1677: 0x12b7, 0x1678: 0x12b3, 0x1679: 0x12bf, 0x167a: 0x12bb, 0x167b: 0x12cb, + 0x167c: 0x12c3, 0x167d: 0x12c7, 0x167e: 0x12cf, 0x167f: 0x067f, + // Block 0x5a, offset 0x1680 + 0x1680: 0x12d7, 0x1681: 0x12db, 0x1682: 0x0683, 0x1683: 0x12eb, 0x1684: 0x12ef, 0x1685: 0x17f5, + 0x1686: 0x12fb, 0x1687: 0x12ff, 0x1688: 0x0687, 0x1689: 0x130b, 0x168a: 0x05bb, 0x168b: 0x17fa, + 0x168c: 0x17ff, 0x168d: 0x068b, 0x168e: 0x068f, 0x168f: 0x1337, 0x1690: 0x134f, 0x1691: 0x136b, + 0x1692: 0x137b, 0x1693: 0x1804, 0x1694: 0x138f, 0x1695: 0x1393, 0x1696: 0x13ab, 0x1697: 0x13b7, + 0x1698: 0x180e, 0x1699: 0x1660, 0x169a: 0x13c3, 0x169b: 0x13bf, 0x169c: 0x13cb, 0x169d: 0x1665, + 0x169e: 0x13d7, 0x169f: 0x13e3, 0x16a0: 0x1813, 0x16a1: 0x1818, 0x16a2: 0x1423, 0x16a3: 0x142f, + 0x16a4: 0x1437, 0x16a5: 0x181d, 0x16a6: 0x143b, 0x16a7: 0x1467, 0x16a8: 0x1473, 0x16a9: 0x1477, + 0x16aa: 0x146f, 0x16ab: 0x1483, 0x16ac: 0x1487, 0x16ad: 0x1822, 0x16ae: 0x1493, 0x16af: 0x0693, + 0x16b0: 0x149b, 0x16b1: 0x1827, 0x16b2: 0x0697, 0x16b3: 0x14d3, 0x16b4: 0x0ac3, 0x16b5: 0x14eb, + 0x16b6: 0x182c, 0x16b7: 0x1836, 0x16b8: 0x069b, 0x16b9: 0x069f, 0x16ba: 0x1513, 0x16bb: 0x183b, + 0x16bc: 0x06a3, 0x16bd: 0x1840, 0x16be: 0x152b, 0x16bf: 0x152b, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x1533, 0x16c1: 0x1845, 0x16c2: 0x154b, 0x16c3: 0x06a7, 0x16c4: 0x155b, 0x16c5: 0x1567, + 0x16c6: 0x156f, 0x16c7: 0x1577, 0x16c8: 0x06ab, 0x16c9: 0x184a, 0x16ca: 0x158b, 0x16cb: 0x15a7, + 0x16cc: 0x15b3, 0x16cd: 0x06af, 0x16ce: 0x06b3, 0x16cf: 0x15b7, 0x16d0: 0x184f, 0x16d1: 0x06b7, + 0x16d2: 0x1854, 0x16d3: 0x1859, 0x16d4: 0x185e, 0x16d5: 0x15db, 0x16d6: 0x06bb, 0x16d7: 0x15ef, + 0x16d8: 0x15f7, 0x16d9: 0x15fb, 0x16da: 0x1603, 0x16db: 0x160b, 0x16dc: 0x1613, 0x16dd: 0x1868, +} + +// nfkcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfkcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5a, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5b, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5c, 0xcb: 0x5d, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x5e, 0xd2: 0x5f, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x60, + 0xd8: 0x61, 0xd9: 0x0d, 0xdb: 0x62, 0xdc: 0x63, 0xdd: 0x64, 0xdf: 0x65, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x66, 0x121: 0x67, 0x123: 0x68, 0x124: 0x69, 0x125: 0x6a, 0x126: 0x6b, 0x127: 0x6c, + 0x128: 0x6d, 0x129: 0x6e, 0x12a: 0x6f, 0x12b: 0x70, 0x12c: 0x6b, 0x12d: 0x71, 0x12e: 0x72, 0x12f: 0x73, + 0x131: 0x74, 0x132: 0x75, 0x133: 0x76, 0x134: 0x77, 0x135: 0x78, 0x137: 0x79, + 0x138: 0x7a, 0x139: 0x7b, 0x13a: 0x7c, 0x13b: 0x7d, 0x13c: 0x7e, 0x13d: 0x7f, 0x13e: 0x80, 0x13f: 0x81, + // Block 0x5, offset 0x140 + 0x140: 0x82, 0x142: 0x83, 0x143: 0x84, 0x144: 0x85, 0x145: 0x86, 0x146: 0x87, 0x147: 0x88, + 0x14d: 0x89, + 0x15c: 0x8a, 0x15f: 0x8b, + 0x162: 0x8c, 0x164: 0x8d, + 0x168: 0x8e, 0x169: 0x8f, 0x16a: 0x90, 0x16c: 0x0e, 0x16d: 0x91, 0x16e: 0x92, 0x16f: 0x93, + 0x170: 0x94, 0x173: 0x95, 0x174: 0x96, 0x175: 0x0f, 0x176: 0x10, 0x177: 0x97, + 0x178: 0x11, 0x179: 0x12, 0x17a: 0x13, 0x17b: 0x14, 0x17c: 0x15, 0x17d: 0x16, 0x17e: 0x17, 0x17f: 0x18, + // Block 0x6, offset 0x180 + 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x19, 0x185: 0x1a, 0x186: 0x9c, 0x187: 0x9d, + 0x188: 0x9e, 0x189: 0x1b, 0x18a: 0x1c, 0x18b: 0x9f, 0x18c: 0xa0, + 0x191: 0x1d, 0x192: 0x1e, 0x193: 0xa1, + 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4, + 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8, + 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x1f, 0x1bd: 0x20, 0x1be: 0x21, 0x1bf: 0xab, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xac, 0x1c1: 0x22, 0x1c2: 0x23, 0x1c3: 0x24, 0x1c4: 0xad, 0x1c5: 0x25, 0x1c6: 0x26, + 0x1c8: 0x27, 0x1c9: 0x28, 0x1ca: 0x29, 0x1cb: 0x2a, 0x1cc: 0x2b, 0x1cd: 0x2c, 0x1ce: 0x2d, 0x1cf: 0x2e, + // Block 0x8, offset 0x200 + 0x219: 0xae, 0x21a: 0xaf, 0x21b: 0xb0, 0x21d: 0xb1, 0x21f: 0xb2, + 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8, + 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc, + 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd, + 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe, + // Block 0x9, offset 0x240 + 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf, + 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0, + 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1, + 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2, + 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3, + 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd, + 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe, + 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf, + // Block 0xa, offset 0x280 + 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 
0xc0, + 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1, + 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2, + 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3, + 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd, + 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe, + 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf, + 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1, + 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2, + 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3, + 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4, + // Block 0xc, offset 0x300 + 0x324: 0x2f, 0x325: 0x30, 0x326: 0x31, 0x327: 0x32, + 0x328: 0x33, 0x329: 0x34, 0x32a: 0x35, 0x32b: 0x36, 0x32c: 0x37, 0x32d: 0x38, 0x32e: 0x39, 0x32f: 0x3a, + 0x330: 0x3b, 0x331: 0x3c, 0x332: 0x3d, 0x333: 0x3e, 0x334: 0x3f, 0x335: 0x40, 0x336: 0x41, 0x337: 0x42, + 0x338: 0x43, 0x339: 0x44, 0x33a: 0x45, 0x33b: 0x46, 0x33c: 0xc5, 0x33d: 0x47, 0x33e: 0x48, 0x33f: 0x49, + // Block 0xd, offset 0x340 + 0x347: 0xc6, + 0x34b: 0xc7, 0x34d: 0xc8, + 0x368: 0xc9, 0x36b: 0xca, + // Block 0xe, offset 0x380 + 0x381: 0xcb, 0x382: 0xcc, 0x384: 0xcd, 0x385: 0xb7, 0x387: 0xce, + 0x388: 0xcf, 0x38b: 0xd0, 0x38c: 0x6b, 0x38d: 0xd1, + 0x391: 0xd2, 0x392: 0xd3, 0x393: 0xd4, 0x396: 0xd5, 0x397: 0xd6, + 0x398: 0xd7, 0x39a: 0xd8, 0x39c: 0xd9, + 0x3b0: 0xd7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xda, 0x3ec: 0xdb, + // Block 0x10, offset 0x400 + 0x432: 0xdc, + // Block 0x11, offset 0x440 + 0x445: 0xdd, 0x446: 0xde, 0x447: 0xdf, + 0x449: 0xe0, + 0x450: 0xe1, 0x451: 0xe2, 0x452: 0xe3, 0x453: 0xe4, 0x454: 0xe5, 0x455: 0xe6, 0x456: 0xe7, 0x457: 0xe8, + 0x458: 0xe9, 0x459: 0xea, 0x45a: 0x4a, 0x45b: 0xeb, 0x45c: 0xec, 0x45d: 0xed, 0x45e: 0xee, 0x45f: 0x4b, + // Block 0x12, offset 0x480 + 0x480: 0xef, + 0x4a3: 0xf0, 0x4a5: 0xf1, + 0x4b8: 0x4c, 0x4b9: 0x4d, 0x4ba: 0x4e, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x4f, 0x4c5: 0xf2, 0x4c6: 0xf3, + 0x4c8: 0x50, 0x4c9: 0xf4, + // Block 0x14, offset 0x500 + 0x520: 0x51, 0x521: 0x52, 0x522: 0x53, 0x523: 0x54, 0x524: 0x55, 0x525: 0x56, 0x526: 0x57, 0x527: 0x58, + 0x528: 0x59, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 155 entries, 310 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x6f, 0x74, 0x76, 0x87, 0x8f, 0x96, 0x99, 0xa0, 0xa4, 0xa8, 0xaa, 0xac, 0xb5, 0xb9, 0xc0, 0xc5, 0xc8, 0xd2, 0xd4, 0xdb, 0xe3, 0xe7, 0xe9, 0xec, 0xf0, 0xf6, 0x107, 0x113, 0x115, 0x11b, 0x11d, 0x11f, 0x121, 0x123, 0x125, 0x127, 0x129, 0x12c, 0x12f, 0x131, 0x134, 0x137, 0x13b, 0x140, 0x149, 0x14b, 0x14e, 0x150, 0x15b, 0x166, 0x176, 0x184, 0x192, 0x1a2, 0x1b0, 0x1b7, 0x1bd, 0x1cc, 0x1d0, 0x1d2, 0x1d6, 0x1d8, 0x1db, 0x1dd, 0x1e0, 0x1e2, 0x1e5, 0x1e7, 0x1e9, 0x1eb, 0x1f7, 0x201, 0x20b, 0x20e, 0x212, 0x214, 0x216, 0x218, 0x21a, 0x21d, 
0x21f, 0x221, 0x223, 0x225, 0x22b, 0x22e, 0x232, 0x234, 0x23b, 0x241, 0x247, 0x24f, 0x255, 0x25b, 0x261, 0x265, 0x267, 0x269, 0x26b, 0x26d, 0x273, 0x276, 0x279, 0x281, 0x288, 0x28b, 0x28e, 0x290, 0x298, 0x29b, 0x2a2, 0x2a5, 0x2ab, 0x2ad, 0x2af, 0x2b2, 0x2b4, 0x2b6, 0x2b8, 0x2ba, 0x2c7, 0x2d1, 0x2d3, 0x2d5, 0x2d9, 0x2de, 0x2ea, 0x2ef, 0x2f8, 0x2fe, 0x303, 0x307, 0x30c, 0x310, 0x320, 0x32e, 0x33c, 0x34a, 0x350, 0x352, 0x355, 0x35f, 0x361} + +// nfkcSparseValues: 875 entries, 3500 bytes +var nfkcSparseValues = [875]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x4278, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x4264, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x425a, lo: 0xb4, hi: 0xb4}, + {value: 0x01dc, lo: 0xb5, hi: 0xb5}, + {value: 0x4291, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x221c, lo: 0xbc, hi: 0xbc}, + {value: 0x2210, lo: 0xbd, hi: 0xbd}, + {value: 0x22b2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x426e, lo: 0x98, hi: 0x98}, + {value: 0x4273, lo: 0x99, hi: 0x9a}, + {value: 0x4296, lo: 0x9b, hi: 0x9b}, + {value: 0x425f, lo: 0x9c, hi: 0x9c}, + {value: 0x4282, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x0167, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, 
lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + // Block 0xc, offset 0x6f + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x74 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x76 + {value: 0x0000, lo: 0x10}, + {value: 0x8132, lo: 0x94, hi: 0xa1}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xa9, hi: 0xa9}, + {value: 0x8132, lo: 0xaa, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xaf}, + {value: 0x8116, lo: 0xb0, hi: 0xb0}, + {value: 0x8117, lo: 0xb1, hi: 0xb1}, + {value: 0x8118, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb5}, + {value: 0x812d, lo: 0xb6, hi: 0xb6}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x812d, lo: 0xb9, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbf}, + // Block 0xf, offset 0x87 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x8f + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x96 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x99 + {value: 0x0008, lo: 0x06}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 
0x9f}, + // Block 0x13, offset 0xa0 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0xa4 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0xa8 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0xaa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0xac + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0xb5 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xc0 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xc5 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xc8 + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xd2 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xd4 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xdb + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xe3 + {value: 0x0000, lo: 0x03}, + {value: 0x2621, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xe7 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xe9 + {value: 0x0000, lo: 0x02}, + {value: 0x2636, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x23, offset 0xec + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x2628, lo: 0x9c, hi: 0x9c}, + {value: 0x262f, lo: 0x9d, hi: 0x9d}, + // Block 0x24, offset 0xf0 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + 
{value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xf6 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x45f4, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x45ff, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0x107 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0x115 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0x11b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0x11d + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2b, offset 0x11f + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x121 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x123 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x125 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x129 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x12c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x12f + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x131 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x134 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x137 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x13b + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x37, offset 0x140 + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, 
lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x149 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x14b + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x14e + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x150 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x15b + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x166 + {value: 0x0000, lo: 0x0f}, + {value: 0x8132, lo: 0x80, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x82}, + {value: 0x8132, lo: 0x83, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8a}, + {value: 0x8132, lo: 0x8b, hi: 0x8c}, + {value: 0x8135, lo: 0x8d, hi: 0x8d}, + {value: 0x812a, lo: 0x8e, hi: 0x8e}, + {value: 0x812d, lo: 0x8f, hi: 0x8f}, + {value: 0x8129, lo: 0x90, hi: 0x90}, + {value: 0x8132, lo: 0x91, hi: 0xb5}, + {value: 0x8132, lo: 0xbb, hi: 0xbb}, + {value: 0x8134, lo: 0xbc, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x176 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429b, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2691, lo: 0xb3, hi: 0xb3}, + {value: 0x27fe, lo: 0xb4, hi: 0xb4}, + {value: 0x2698, lo: 0xb6, hi: 0xb6}, + {value: 0x2808, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x4269, lo: 0xbe, hi: 0xbe}, + // Block 0x3f, offset 0x184 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x298e, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x40, offset 0x192 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, 
+ {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1999, lo: 0xa8, hi: 0xa8}, + // Block 0x41, offset 0x1a2 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x42, offset 0x1b0 + {value: 0x0007, lo: 0x06}, + {value: 0x2180, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x43, offset 0x1b7 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x44, offset 0x1bd + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0x269f, lo: 0xac, hi: 0xad}, + {value: 0x26a6, lo: 0xaf, hi: 0xaf}, + {value: 0x281c, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x45, offset 0x1cc + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x46, offset 0x1d0 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x47, offset 0x1d2 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x48, offset 0x1d6 + {value: 0x0000, lo: 0x01}, + {value: 0x299b, lo: 0x8c, hi: 0x8c}, + // Block 0x49, offset 0x1d8 + {value: 0x0263, lo: 0x02}, + {value: 0x1b8c, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x4a, offset 0x1db + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x4b, offset 0x1dd + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4c, offset 0x1e0 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4d, offset 0x1e2 + {value: 0x0000, lo: 0x02}, + {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4e, offset 0x1e5 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4f, offset 0x1e7 + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x50, offset 0x1e9 + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x51, offset 0x1eb + {value: 0x0004, lo: 0x0b}, + {value: 
0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x52, offset 0x1f7 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x53, offset 0x201 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3b, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a41, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a4d, lo: 0xba, hi: 0xbf}, + // Block 0x54, offset 0x20b + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x55, offset 0x20e + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x56, offset 0x212 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x57, offset 0x214 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x58, offset 0x216 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x59, offset 0x218 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x5a, offset 0x21a + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x5b, offset 0x21d + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5c, offset 0x21f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5e, offset 0x223 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5f, offset 0x225 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x60, offset 0x22b + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x61, offset 0x22e + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x62, offset 0x232 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x63, offset 0x234 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x64, offset 0x23b + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 
0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x65, offset 0x241 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x66, offset 0x247 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x67, offset 0x24f + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x68, offset 0x255 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x69, offset 0x25b + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x6a, offset 0x261 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6b, offset 0x265 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6c, offset 0x267 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6d, offset 0x269 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6e, offset 0x26b + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6f, offset 0x26d + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x70, offset 0x273 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x71, offset 0x276 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x279 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x73, offset 0x281 + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x74, offset 0x288 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x75, offset 0x28b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x76, offset 0x28e + {value: 0x0002, lo: 0x01}, + {value: 0x8102, 
lo: 0xa9, hi: 0xaa}, + // Block 0x77, offset 0x290 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x78, offset 0x298 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + // Block 0x79, offset 0x29b + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7a, offset 0x2a2 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7b, offset 0x2a5 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7c, offset 0x2ab + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7d, offset 0x2ad + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7e, offset 0x2af + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x7f, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x80, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x81, offset 0x2b6 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x82, offset 0x2b8 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x83, offset 0x2ba + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x84, offset 0x2c7 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x85, offset 0x2d1 + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x86, offset 0x2d3 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x87, offset 0x2d5 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x88, offset 0x2d9 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x89, offset 0x2de + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 
0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x8a, offset 0x2ea + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x8b, offset 0x2ef + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x8c, offset 0x2f8 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x8d, offset 0x2fe + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x8e, offset 0x303 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x8f, offset 0x307 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x90, offset 0x30c + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x91, offset 0x310 + {value: 0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x92, offset 0x320 + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x93, offset 0x32e + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 
0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x94, offset 0x33c + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x95, offset 0x34a + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x96, offset 0x350 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x97, offset 0x352 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x98, offset 0x355 + {value: 0x0002, lo: 0x09}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1981, lo: 0x8b, hi: 0x8b}, + {value: 0x199c, lo: 0x8c, hi: 0x8c}, + {value: 0x19a2, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc0, lo: 0x8e, hi: 0x8e}, + {value: 0x19ae, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + // Block 0x99, offset 0x35f + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0x9a, offset 0x361 + {value: 0x0028, lo: 0x09}, + {value: 0x2862, lo: 0x80, hi: 0x80}, + {value: 0x2826, lo: 0x81, hi: 0x81}, + {value: 0x2830, lo: 0x82, hi: 0x82}, + {value: 0x2844, lo: 0x83, hi: 0x84}, + {value: 0x284e, lo: 0x85, hi: 0x86}, + {value: 0x283a, lo: 0x87, hi: 0x87}, + {value: 0x2858, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap = map[uint32]rune{ + 0x00410300: 0x00C0, + 0x00410301: 0x00C1, + 0x00410302: 0x00C2, + 0x00410303: 0x00C3, + 0x00410308: 0x00C4, + 0x0041030A: 0x00C5, + 0x00430327: 0x00C7, + 0x00450300: 0x00C8, + 0x00450301: 0x00C9, + 0x00450302: 0x00CA, + 0x00450308: 0x00CB, + 0x00490300: 0x00CC, + 0x00490301: 0x00CD, + 0x00490302: 0x00CE, + 0x00490308: 0x00CF, + 0x004E0303: 0x00D1, + 0x004F0300: 0x00D2, + 0x004F0301: 0x00D3, + 0x004F0302: 0x00D4, + 0x004F0303: 0x00D5, + 0x004F0308: 0x00D6, + 0x00550300: 0x00D9, + 0x00550301: 0x00DA, + 0x00550302: 0x00DB, + 0x00550308: 0x00DC, + 0x00590301: 0x00DD, + 0x00610300: 0x00E0, + 0x00610301: 0x00E1, + 0x00610302: 0x00E2, + 0x00610303: 0x00E3, + 0x00610308: 0x00E4, + 0x0061030A: 0x00E5, + 0x00630327: 0x00E7, + 0x00650300: 0x00E8, + 0x00650301: 0x00E9, + 0x00650302: 0x00EA, + 0x00650308: 0x00EB, + 0x00690300: 0x00EC, + 0x00690301: 0x00ED, + 0x00690302: 0x00EE, + 0x00690308: 0x00EF, + 0x006E0303: 0x00F1, + 0x006F0300: 0x00F2, + 0x006F0301: 0x00F3, + 0x006F0302: 0x00F4, + 0x006F0303: 0x00F5, + 0x006F0308: 0x00F6, + 0x00750300: 0x00F9, + 0x00750301: 0x00FA, + 0x00750302: 0x00FB, + 0x00750308: 0x00FC, + 0x00790301: 0x00FD, + 0x00790308: 0x00FF, + 0x00410304: 0x0100, + 0x00610304: 0x0101, + 0x00410306: 0x0102, + 0x00610306: 0x0103, + 0x00410328: 0x0104, + 0x00610328: 0x0105, + 
0x00430301: 0x0106, + 0x00630301: 0x0107, + 0x00430302: 0x0108, + 0x00630302: 0x0109, + 0x00430307: 0x010A, + 0x00630307: 0x010B, + 0x0043030C: 0x010C, + 0x0063030C: 0x010D, + 0x0044030C: 0x010E, + 0x0064030C: 0x010F, + 0x00450304: 0x0112, + 0x00650304: 0x0113, + 0x00450306: 0x0114, + 0x00650306: 0x0115, + 0x00450307: 0x0116, + 0x00650307: 0x0117, + 0x00450328: 0x0118, + 0x00650328: 0x0119, + 0x0045030C: 0x011A, + 0x0065030C: 0x011B, + 0x00470302: 0x011C, + 0x00670302: 0x011D, + 0x00470306: 0x011E, + 0x00670306: 0x011F, + 0x00470307: 0x0120, + 0x00670307: 0x0121, + 0x00470327: 0x0122, + 0x00670327: 0x0123, + 0x00480302: 0x0124, + 0x00680302: 0x0125, + 0x00490303: 0x0128, + 0x00690303: 0x0129, + 0x00490304: 0x012A, + 0x00690304: 0x012B, + 0x00490306: 0x012C, + 0x00690306: 0x012D, + 0x00490328: 0x012E, + 0x00690328: 0x012F, + 0x00490307: 0x0130, + 0x004A0302: 0x0134, + 0x006A0302: 0x0135, + 0x004B0327: 0x0136, + 0x006B0327: 0x0137, + 0x004C0301: 0x0139, + 0x006C0301: 0x013A, + 0x004C0327: 0x013B, + 0x006C0327: 0x013C, + 0x004C030C: 0x013D, + 0x006C030C: 0x013E, + 0x004E0301: 0x0143, + 0x006E0301: 0x0144, + 0x004E0327: 0x0145, + 0x006E0327: 0x0146, + 0x004E030C: 0x0147, + 0x006E030C: 0x0148, + 0x004F0304: 0x014C, + 0x006F0304: 0x014D, + 0x004F0306: 0x014E, + 0x006F0306: 0x014F, + 0x004F030B: 0x0150, + 0x006F030B: 0x0151, + 0x00520301: 0x0154, + 0x00720301: 0x0155, + 0x00520327: 0x0156, + 0x00720327: 0x0157, + 0x0052030C: 0x0158, + 0x0072030C: 0x0159, + 0x00530301: 0x015A, + 0x00730301: 0x015B, + 0x00530302: 0x015C, + 0x00730302: 0x015D, + 0x00530327: 0x015E, + 0x00730327: 0x015F, + 0x0053030C: 0x0160, + 0x0073030C: 0x0161, + 0x00540327: 0x0162, + 0x00740327: 0x0163, + 0x0054030C: 0x0164, + 0x0074030C: 0x0165, + 0x00550303: 0x0168, + 0x00750303: 0x0169, + 0x00550304: 0x016A, + 0x00750304: 0x016B, + 0x00550306: 0x016C, + 0x00750306: 0x016D, + 0x0055030A: 0x016E, + 0x0075030A: 0x016F, + 0x0055030B: 0x0170, + 0x0075030B: 0x0171, + 0x00550328: 0x0172, + 0x00750328: 0x0173, + 0x00570302: 0x0174, + 0x00770302: 0x0175, + 0x00590302: 0x0176, + 0x00790302: 0x0177, + 0x00590308: 0x0178, + 0x005A0301: 0x0179, + 0x007A0301: 0x017A, + 0x005A0307: 0x017B, + 0x007A0307: 0x017C, + 0x005A030C: 0x017D, + 0x007A030C: 0x017E, + 0x004F031B: 0x01A0, + 0x006F031B: 0x01A1, + 0x0055031B: 0x01AF, + 0x0075031B: 0x01B0, + 0x0041030C: 0x01CD, + 0x0061030C: 0x01CE, + 0x0049030C: 0x01CF, + 0x0069030C: 0x01D0, + 0x004F030C: 0x01D1, + 0x006F030C: 0x01D2, + 0x0055030C: 0x01D3, + 0x0075030C: 0x01D4, + 0x00DC0304: 0x01D5, + 0x00FC0304: 0x01D6, + 0x00DC0301: 0x01D7, + 0x00FC0301: 0x01D8, + 0x00DC030C: 0x01D9, + 0x00FC030C: 0x01DA, + 0x00DC0300: 0x01DB, + 0x00FC0300: 0x01DC, + 0x00C40304: 0x01DE, + 0x00E40304: 0x01DF, + 0x02260304: 0x01E0, + 0x02270304: 0x01E1, + 0x00C60304: 0x01E2, + 0x00E60304: 0x01E3, + 0x0047030C: 0x01E6, + 0x0067030C: 0x01E7, + 0x004B030C: 0x01E8, + 0x006B030C: 0x01E9, + 0x004F0328: 0x01EA, + 0x006F0328: 0x01EB, + 0x01EA0304: 0x01EC, + 0x01EB0304: 0x01ED, + 0x01B7030C: 0x01EE, + 0x0292030C: 0x01EF, + 0x006A030C: 0x01F0, + 0x00470301: 0x01F4, + 0x00670301: 0x01F5, + 0x004E0300: 0x01F8, + 0x006E0300: 0x01F9, + 0x00C50301: 0x01FA, + 0x00E50301: 0x01FB, + 0x00C60301: 0x01FC, + 0x00E60301: 0x01FD, + 0x00D80301: 0x01FE, + 0x00F80301: 0x01FF, + 0x0041030F: 0x0200, + 0x0061030F: 0x0201, + 0x00410311: 0x0202, + 0x00610311: 0x0203, + 0x0045030F: 0x0204, + 0x0065030F: 0x0205, + 0x00450311: 0x0206, + 0x00650311: 0x0207, + 0x0049030F: 0x0208, + 0x0069030F: 0x0209, + 0x00490311: 0x020A, + 0x00690311: 0x020B, + 0x004F030F: 
0x020C, + 0x006F030F: 0x020D, + 0x004F0311: 0x020E, + 0x006F0311: 0x020F, + 0x0052030F: 0x0210, + 0x0072030F: 0x0211, + 0x00520311: 0x0212, + 0x00720311: 0x0213, + 0x0055030F: 0x0214, + 0x0075030F: 0x0215, + 0x00550311: 0x0216, + 0x00750311: 0x0217, + 0x00530326: 0x0218, + 0x00730326: 0x0219, + 0x00540326: 0x021A, + 0x00740326: 0x021B, + 0x0048030C: 0x021E, + 0x0068030C: 0x021F, + 0x00410307: 0x0226, + 0x00610307: 0x0227, + 0x00450327: 0x0228, + 0x00650327: 0x0229, + 0x00D60304: 0x022A, + 0x00F60304: 0x022B, + 0x00D50304: 0x022C, + 0x00F50304: 0x022D, + 0x004F0307: 0x022E, + 0x006F0307: 0x022F, + 0x022E0304: 0x0230, + 0x022F0304: 0x0231, + 0x00590304: 0x0232, + 0x00790304: 0x0233, + 0x00A80301: 0x0385, + 0x03910301: 0x0386, + 0x03950301: 0x0388, + 0x03970301: 0x0389, + 0x03990301: 0x038A, + 0x039F0301: 0x038C, + 0x03A50301: 0x038E, + 0x03A90301: 0x038F, + 0x03CA0301: 0x0390, + 0x03990308: 0x03AA, + 0x03A50308: 0x03AB, + 0x03B10301: 0x03AC, + 0x03B50301: 0x03AD, + 0x03B70301: 0x03AE, + 0x03B90301: 0x03AF, + 0x03CB0301: 0x03B0, + 0x03B90308: 0x03CA, + 0x03C50308: 0x03CB, + 0x03BF0301: 0x03CC, + 0x03C50301: 0x03CD, + 0x03C90301: 0x03CE, + 0x03D20301: 0x03D3, + 0x03D20308: 0x03D4, + 0x04150300: 0x0400, + 0x04150308: 0x0401, + 0x04130301: 0x0403, + 0x04060308: 0x0407, + 0x041A0301: 0x040C, + 0x04180300: 0x040D, + 0x04230306: 0x040E, + 0x04180306: 0x0419, + 0x04380306: 0x0439, + 0x04350300: 0x0450, + 0x04350308: 0x0451, + 0x04330301: 0x0453, + 0x04560308: 0x0457, + 0x043A0301: 0x045C, + 0x04380300: 0x045D, + 0x04430306: 0x045E, + 0x0474030F: 0x0476, + 0x0475030F: 0x0477, + 0x04160306: 0x04C1, + 0x04360306: 0x04C2, + 0x04100306: 0x04D0, + 0x04300306: 0x04D1, + 0x04100308: 0x04D2, + 0x04300308: 0x04D3, + 0x04150306: 0x04D6, + 0x04350306: 0x04D7, + 0x04D80308: 0x04DA, + 0x04D90308: 0x04DB, + 0x04160308: 0x04DC, + 0x04360308: 0x04DD, + 0x04170308: 0x04DE, + 0x04370308: 0x04DF, + 0x04180304: 0x04E2, + 0x04380304: 0x04E3, + 0x04180308: 0x04E4, + 0x04380308: 0x04E5, + 0x041E0308: 0x04E6, + 0x043E0308: 0x04E7, + 0x04E80308: 0x04EA, + 0x04E90308: 0x04EB, + 0x042D0308: 0x04EC, + 0x044D0308: 0x04ED, + 0x04230304: 0x04EE, + 0x04430304: 0x04EF, + 0x04230308: 0x04F0, + 0x04430308: 0x04F1, + 0x0423030B: 0x04F2, + 0x0443030B: 0x04F3, + 0x04270308: 0x04F4, + 0x04470308: 0x04F5, + 0x042B0308: 0x04F8, + 0x044B0308: 0x04F9, + 0x06270653: 0x0622, + 0x06270654: 0x0623, + 0x06480654: 0x0624, + 0x06270655: 0x0625, + 0x064A0654: 0x0626, + 0x06D50654: 0x06C0, + 0x06C10654: 0x06C2, + 0x06D20654: 0x06D3, + 0x0928093C: 0x0929, + 0x0930093C: 0x0931, + 0x0933093C: 0x0934, + 0x09C709BE: 0x09CB, + 0x09C709D7: 0x09CC, + 0x0B470B56: 0x0B48, + 0x0B470B3E: 0x0B4B, + 0x0B470B57: 0x0B4C, + 0x0B920BD7: 0x0B94, + 0x0BC60BBE: 0x0BCA, + 0x0BC70BBE: 0x0BCB, + 0x0BC60BD7: 0x0BCC, + 0x0C460C56: 0x0C48, + 0x0CBF0CD5: 0x0CC0, + 0x0CC60CD5: 0x0CC7, + 0x0CC60CD6: 0x0CC8, + 0x0CC60CC2: 0x0CCA, + 0x0CCA0CD5: 0x0CCB, + 0x0D460D3E: 0x0D4A, + 0x0D470D3E: 0x0D4B, + 0x0D460D57: 0x0D4C, + 0x0DD90DCA: 0x0DDA, + 0x0DD90DCF: 0x0DDC, + 0x0DDC0DCA: 0x0DDD, + 0x0DD90DDF: 0x0DDE, + 0x1025102E: 0x1026, + 0x1B051B35: 0x1B06, + 0x1B071B35: 0x1B08, + 0x1B091B35: 0x1B0A, + 0x1B0B1B35: 0x1B0C, + 0x1B0D1B35: 0x1B0E, + 0x1B111B35: 0x1B12, + 0x1B3A1B35: 0x1B3B, + 0x1B3C1B35: 0x1B3D, + 0x1B3E1B35: 0x1B40, + 0x1B3F1B35: 0x1B41, + 0x1B421B35: 0x1B43, + 0x00410325: 0x1E00, + 0x00610325: 0x1E01, + 0x00420307: 0x1E02, + 0x00620307: 0x1E03, + 0x00420323: 0x1E04, + 0x00620323: 0x1E05, + 0x00420331: 0x1E06, + 0x00620331: 0x1E07, + 0x00C70301: 0x1E08, + 0x00E70301: 0x1E09, + 
0x00440307: 0x1E0A, + 0x00640307: 0x1E0B, + 0x00440323: 0x1E0C, + 0x00640323: 0x1E0D, + 0x00440331: 0x1E0E, + 0x00640331: 0x1E0F, + 0x00440327: 0x1E10, + 0x00640327: 0x1E11, + 0x0044032D: 0x1E12, + 0x0064032D: 0x1E13, + 0x01120300: 0x1E14, + 0x01130300: 0x1E15, + 0x01120301: 0x1E16, + 0x01130301: 0x1E17, + 0x0045032D: 0x1E18, + 0x0065032D: 0x1E19, + 0x00450330: 0x1E1A, + 0x00650330: 0x1E1B, + 0x02280306: 0x1E1C, + 0x02290306: 0x1E1D, + 0x00460307: 0x1E1E, + 0x00660307: 0x1E1F, + 0x00470304: 0x1E20, + 0x00670304: 0x1E21, + 0x00480307: 0x1E22, + 0x00680307: 0x1E23, + 0x00480323: 0x1E24, + 0x00680323: 0x1E25, + 0x00480308: 0x1E26, + 0x00680308: 0x1E27, + 0x00480327: 0x1E28, + 0x00680327: 0x1E29, + 0x0048032E: 0x1E2A, + 0x0068032E: 0x1E2B, + 0x00490330: 0x1E2C, + 0x00690330: 0x1E2D, + 0x00CF0301: 0x1E2E, + 0x00EF0301: 0x1E2F, + 0x004B0301: 0x1E30, + 0x006B0301: 0x1E31, + 0x004B0323: 0x1E32, + 0x006B0323: 0x1E33, + 0x004B0331: 0x1E34, + 0x006B0331: 0x1E35, + 0x004C0323: 0x1E36, + 0x006C0323: 0x1E37, + 0x1E360304: 0x1E38, + 0x1E370304: 0x1E39, + 0x004C0331: 0x1E3A, + 0x006C0331: 0x1E3B, + 0x004C032D: 0x1E3C, + 0x006C032D: 0x1E3D, + 0x004D0301: 0x1E3E, + 0x006D0301: 0x1E3F, + 0x004D0307: 0x1E40, + 0x006D0307: 0x1E41, + 0x004D0323: 0x1E42, + 0x006D0323: 0x1E43, + 0x004E0307: 0x1E44, + 0x006E0307: 0x1E45, + 0x004E0323: 0x1E46, + 0x006E0323: 0x1E47, + 0x004E0331: 0x1E48, + 0x006E0331: 0x1E49, + 0x004E032D: 0x1E4A, + 0x006E032D: 0x1E4B, + 0x00D50301: 0x1E4C, + 0x00F50301: 0x1E4D, + 0x00D50308: 0x1E4E, + 0x00F50308: 0x1E4F, + 0x014C0300: 0x1E50, + 0x014D0300: 0x1E51, + 0x014C0301: 0x1E52, + 0x014D0301: 0x1E53, + 0x00500301: 0x1E54, + 0x00700301: 0x1E55, + 0x00500307: 0x1E56, + 0x00700307: 0x1E57, + 0x00520307: 0x1E58, + 0x00720307: 0x1E59, + 0x00520323: 0x1E5A, + 0x00720323: 0x1E5B, + 0x1E5A0304: 0x1E5C, + 0x1E5B0304: 0x1E5D, + 0x00520331: 0x1E5E, + 0x00720331: 0x1E5F, + 0x00530307: 0x1E60, + 0x00730307: 0x1E61, + 0x00530323: 0x1E62, + 0x00730323: 0x1E63, + 0x015A0307: 0x1E64, + 0x015B0307: 0x1E65, + 0x01600307: 0x1E66, + 0x01610307: 0x1E67, + 0x1E620307: 0x1E68, + 0x1E630307: 0x1E69, + 0x00540307: 0x1E6A, + 0x00740307: 0x1E6B, + 0x00540323: 0x1E6C, + 0x00740323: 0x1E6D, + 0x00540331: 0x1E6E, + 0x00740331: 0x1E6F, + 0x0054032D: 0x1E70, + 0x0074032D: 0x1E71, + 0x00550324: 0x1E72, + 0x00750324: 0x1E73, + 0x00550330: 0x1E74, + 0x00750330: 0x1E75, + 0x0055032D: 0x1E76, + 0x0075032D: 0x1E77, + 0x01680301: 0x1E78, + 0x01690301: 0x1E79, + 0x016A0308: 0x1E7A, + 0x016B0308: 0x1E7B, + 0x00560303: 0x1E7C, + 0x00760303: 0x1E7D, + 0x00560323: 0x1E7E, + 0x00760323: 0x1E7F, + 0x00570300: 0x1E80, + 0x00770300: 0x1E81, + 0x00570301: 0x1E82, + 0x00770301: 0x1E83, + 0x00570308: 0x1E84, + 0x00770308: 0x1E85, + 0x00570307: 0x1E86, + 0x00770307: 0x1E87, + 0x00570323: 0x1E88, + 0x00770323: 0x1E89, + 0x00580307: 0x1E8A, + 0x00780307: 0x1E8B, + 0x00580308: 0x1E8C, + 0x00780308: 0x1E8D, + 0x00590307: 0x1E8E, + 0x00790307: 0x1E8F, + 0x005A0302: 0x1E90, + 0x007A0302: 0x1E91, + 0x005A0323: 0x1E92, + 0x007A0323: 0x1E93, + 0x005A0331: 0x1E94, + 0x007A0331: 0x1E95, + 0x00680331: 0x1E96, + 0x00740308: 0x1E97, + 0x0077030A: 0x1E98, + 0x0079030A: 0x1E99, + 0x017F0307: 0x1E9B, + 0x00410323: 0x1EA0, + 0x00610323: 0x1EA1, + 0x00410309: 0x1EA2, + 0x00610309: 0x1EA3, + 0x00C20301: 0x1EA4, + 0x00E20301: 0x1EA5, + 0x00C20300: 0x1EA6, + 0x00E20300: 0x1EA7, + 0x00C20309: 0x1EA8, + 0x00E20309: 0x1EA9, + 0x00C20303: 0x1EAA, + 0x00E20303: 0x1EAB, + 0x1EA00302: 0x1EAC, + 0x1EA10302: 0x1EAD, + 0x01020301: 0x1EAE, + 0x01030301: 0x1EAF, + 0x01020300: 
0x1EB0, + 0x01030300: 0x1EB1, + 0x01020309: 0x1EB2, + 0x01030309: 0x1EB3, + 0x01020303: 0x1EB4, + 0x01030303: 0x1EB5, + 0x1EA00306: 0x1EB6, + 0x1EA10306: 0x1EB7, + 0x00450323: 0x1EB8, + 0x00650323: 0x1EB9, + 0x00450309: 0x1EBA, + 0x00650309: 0x1EBB, + 0x00450303: 0x1EBC, + 0x00650303: 0x1EBD, + 0x00CA0301: 0x1EBE, + 0x00EA0301: 0x1EBF, + 0x00CA0300: 0x1EC0, + 0x00EA0300: 0x1EC1, + 0x00CA0309: 0x1EC2, + 0x00EA0309: 0x1EC3, + 0x00CA0303: 0x1EC4, + 0x00EA0303: 0x1EC5, + 0x1EB80302: 0x1EC6, + 0x1EB90302: 0x1EC7, + 0x00490309: 0x1EC8, + 0x00690309: 0x1EC9, + 0x00490323: 0x1ECA, + 0x00690323: 0x1ECB, + 0x004F0323: 0x1ECC, + 0x006F0323: 0x1ECD, + 0x004F0309: 0x1ECE, + 0x006F0309: 0x1ECF, + 0x00D40301: 0x1ED0, + 0x00F40301: 0x1ED1, + 0x00D40300: 0x1ED2, + 0x00F40300: 0x1ED3, + 0x00D40309: 0x1ED4, + 0x00F40309: 0x1ED5, + 0x00D40303: 0x1ED6, + 0x00F40303: 0x1ED7, + 0x1ECC0302: 0x1ED8, + 0x1ECD0302: 0x1ED9, + 0x01A00301: 0x1EDA, + 0x01A10301: 0x1EDB, + 0x01A00300: 0x1EDC, + 0x01A10300: 0x1EDD, + 0x01A00309: 0x1EDE, + 0x01A10309: 0x1EDF, + 0x01A00303: 0x1EE0, + 0x01A10303: 0x1EE1, + 0x01A00323: 0x1EE2, + 0x01A10323: 0x1EE3, + 0x00550323: 0x1EE4, + 0x00750323: 0x1EE5, + 0x00550309: 0x1EE6, + 0x00750309: 0x1EE7, + 0x01AF0301: 0x1EE8, + 0x01B00301: 0x1EE9, + 0x01AF0300: 0x1EEA, + 0x01B00300: 0x1EEB, + 0x01AF0309: 0x1EEC, + 0x01B00309: 0x1EED, + 0x01AF0303: 0x1EEE, + 0x01B00303: 0x1EEF, + 0x01AF0323: 0x1EF0, + 0x01B00323: 0x1EF1, + 0x00590300: 0x1EF2, + 0x00790300: 0x1EF3, + 0x00590323: 0x1EF4, + 0x00790323: 0x1EF5, + 0x00590309: 0x1EF6, + 0x00790309: 0x1EF7, + 0x00590303: 0x1EF8, + 0x00790303: 0x1EF9, + 0x03B10313: 0x1F00, + 0x03B10314: 0x1F01, + 0x1F000300: 0x1F02, + 0x1F010300: 0x1F03, + 0x1F000301: 0x1F04, + 0x1F010301: 0x1F05, + 0x1F000342: 0x1F06, + 0x1F010342: 0x1F07, + 0x03910313: 0x1F08, + 0x03910314: 0x1F09, + 0x1F080300: 0x1F0A, + 0x1F090300: 0x1F0B, + 0x1F080301: 0x1F0C, + 0x1F090301: 0x1F0D, + 0x1F080342: 0x1F0E, + 0x1F090342: 0x1F0F, + 0x03B50313: 0x1F10, + 0x03B50314: 0x1F11, + 0x1F100300: 0x1F12, + 0x1F110300: 0x1F13, + 0x1F100301: 0x1F14, + 0x1F110301: 0x1F15, + 0x03950313: 0x1F18, + 0x03950314: 0x1F19, + 0x1F180300: 0x1F1A, + 0x1F190300: 0x1F1B, + 0x1F180301: 0x1F1C, + 0x1F190301: 0x1F1D, + 0x03B70313: 0x1F20, + 0x03B70314: 0x1F21, + 0x1F200300: 0x1F22, + 0x1F210300: 0x1F23, + 0x1F200301: 0x1F24, + 0x1F210301: 0x1F25, + 0x1F200342: 0x1F26, + 0x1F210342: 0x1F27, + 0x03970313: 0x1F28, + 0x03970314: 0x1F29, + 0x1F280300: 0x1F2A, + 0x1F290300: 0x1F2B, + 0x1F280301: 0x1F2C, + 0x1F290301: 0x1F2D, + 0x1F280342: 0x1F2E, + 0x1F290342: 0x1F2F, + 0x03B90313: 0x1F30, + 0x03B90314: 0x1F31, + 0x1F300300: 0x1F32, + 0x1F310300: 0x1F33, + 0x1F300301: 0x1F34, + 0x1F310301: 0x1F35, + 0x1F300342: 0x1F36, + 0x1F310342: 0x1F37, + 0x03990313: 0x1F38, + 0x03990314: 0x1F39, + 0x1F380300: 0x1F3A, + 0x1F390300: 0x1F3B, + 0x1F380301: 0x1F3C, + 0x1F390301: 0x1F3D, + 0x1F380342: 0x1F3E, + 0x1F390342: 0x1F3F, + 0x03BF0313: 0x1F40, + 0x03BF0314: 0x1F41, + 0x1F400300: 0x1F42, + 0x1F410300: 0x1F43, + 0x1F400301: 0x1F44, + 0x1F410301: 0x1F45, + 0x039F0313: 0x1F48, + 0x039F0314: 0x1F49, + 0x1F480300: 0x1F4A, + 0x1F490300: 0x1F4B, + 0x1F480301: 0x1F4C, + 0x1F490301: 0x1F4D, + 0x03C50313: 0x1F50, + 0x03C50314: 0x1F51, + 0x1F500300: 0x1F52, + 0x1F510300: 0x1F53, + 0x1F500301: 0x1F54, + 0x1F510301: 0x1F55, + 0x1F500342: 0x1F56, + 0x1F510342: 0x1F57, + 0x03A50314: 0x1F59, + 0x1F590300: 0x1F5B, + 0x1F590301: 0x1F5D, + 0x1F590342: 0x1F5F, + 0x03C90313: 0x1F60, + 0x03C90314: 0x1F61, + 0x1F600300: 0x1F62, + 0x1F610300: 0x1F63, + 
0x1F600301: 0x1F64, + 0x1F610301: 0x1F65, + 0x1F600342: 0x1F66, + 0x1F610342: 0x1F67, + 0x03A90313: 0x1F68, + 0x03A90314: 0x1F69, + 0x1F680300: 0x1F6A, + 0x1F690300: 0x1F6B, + 0x1F680301: 0x1F6C, + 0x1F690301: 0x1F6D, + 0x1F680342: 0x1F6E, + 0x1F690342: 0x1F6F, + 0x03B10300: 0x1F70, + 0x03B50300: 0x1F72, + 0x03B70300: 0x1F74, + 0x03B90300: 0x1F76, + 0x03BF0300: 0x1F78, + 0x03C50300: 0x1F7A, + 0x03C90300: 0x1F7C, + 0x1F000345: 0x1F80, + 0x1F010345: 0x1F81, + 0x1F020345: 0x1F82, + 0x1F030345: 0x1F83, + 0x1F040345: 0x1F84, + 0x1F050345: 0x1F85, + 0x1F060345: 0x1F86, + 0x1F070345: 0x1F87, + 0x1F080345: 0x1F88, + 0x1F090345: 0x1F89, + 0x1F0A0345: 0x1F8A, + 0x1F0B0345: 0x1F8B, + 0x1F0C0345: 0x1F8C, + 0x1F0D0345: 0x1F8D, + 0x1F0E0345: 0x1F8E, + 0x1F0F0345: 0x1F8F, + 0x1F200345: 0x1F90, + 0x1F210345: 0x1F91, + 0x1F220345: 0x1F92, + 0x1F230345: 0x1F93, + 0x1F240345: 0x1F94, + 0x1F250345: 0x1F95, + 0x1F260345: 0x1F96, + 0x1F270345: 0x1F97, + 0x1F280345: 0x1F98, + 0x1F290345: 0x1F99, + 0x1F2A0345: 0x1F9A, + 0x1F2B0345: 0x1F9B, + 0x1F2C0345: 0x1F9C, + 0x1F2D0345: 0x1F9D, + 0x1F2E0345: 0x1F9E, + 0x1F2F0345: 0x1F9F, + 0x1F600345: 0x1FA0, + 0x1F610345: 0x1FA1, + 0x1F620345: 0x1FA2, + 0x1F630345: 0x1FA3, + 0x1F640345: 0x1FA4, + 0x1F650345: 0x1FA5, + 0x1F660345: 0x1FA6, + 0x1F670345: 0x1FA7, + 0x1F680345: 0x1FA8, + 0x1F690345: 0x1FA9, + 0x1F6A0345: 0x1FAA, + 0x1F6B0345: 0x1FAB, + 0x1F6C0345: 0x1FAC, + 0x1F6D0345: 0x1FAD, + 0x1F6E0345: 0x1FAE, + 0x1F6F0345: 0x1FAF, + 0x03B10306: 0x1FB0, + 0x03B10304: 0x1FB1, + 0x1F700345: 0x1FB2, + 0x03B10345: 0x1FB3, + 0x03AC0345: 0x1FB4, + 0x03B10342: 0x1FB6, + 0x1FB60345: 0x1FB7, + 0x03910306: 0x1FB8, + 0x03910304: 0x1FB9, + 0x03910300: 0x1FBA, + 0x03910345: 0x1FBC, + 0x00A80342: 0x1FC1, + 0x1F740345: 0x1FC2, + 0x03B70345: 0x1FC3, + 0x03AE0345: 0x1FC4, + 0x03B70342: 0x1FC6, + 0x1FC60345: 0x1FC7, + 0x03950300: 0x1FC8, + 0x03970300: 0x1FCA, + 0x03970345: 0x1FCC, + 0x1FBF0300: 0x1FCD, + 0x1FBF0301: 0x1FCE, + 0x1FBF0342: 0x1FCF, + 0x03B90306: 0x1FD0, + 0x03B90304: 0x1FD1, + 0x03CA0300: 0x1FD2, + 0x03B90342: 0x1FD6, + 0x03CA0342: 0x1FD7, + 0x03990306: 0x1FD8, + 0x03990304: 0x1FD9, + 0x03990300: 0x1FDA, + 0x1FFE0300: 0x1FDD, + 0x1FFE0301: 0x1FDE, + 0x1FFE0342: 0x1FDF, + 0x03C50306: 0x1FE0, + 0x03C50304: 0x1FE1, + 0x03CB0300: 0x1FE2, + 0x03C10313: 0x1FE4, + 0x03C10314: 0x1FE5, + 0x03C50342: 0x1FE6, + 0x03CB0342: 0x1FE7, + 0x03A50306: 0x1FE8, + 0x03A50304: 0x1FE9, + 0x03A50300: 0x1FEA, + 0x03A10314: 0x1FEC, + 0x00A80300: 0x1FED, + 0x1F7C0345: 0x1FF2, + 0x03C90345: 0x1FF3, + 0x03CE0345: 0x1FF4, + 0x03C90342: 0x1FF6, + 0x1FF60345: 0x1FF7, + 0x039F0300: 0x1FF8, + 0x03A90300: 0x1FFA, + 0x03A90345: 0x1FFC, + 0x21900338: 0x219A, + 0x21920338: 0x219B, + 0x21940338: 0x21AE, + 0x21D00338: 0x21CD, + 0x21D40338: 0x21CE, + 0x21D20338: 0x21CF, + 0x22030338: 0x2204, + 0x22080338: 0x2209, + 0x220B0338: 0x220C, + 0x22230338: 0x2224, + 0x22250338: 0x2226, + 0x223C0338: 0x2241, + 0x22430338: 0x2244, + 0x22450338: 0x2247, + 0x22480338: 0x2249, + 0x003D0338: 0x2260, + 0x22610338: 0x2262, + 0x224D0338: 0x226D, + 0x003C0338: 0x226E, + 0x003E0338: 0x226F, + 0x22640338: 0x2270, + 0x22650338: 0x2271, + 0x22720338: 0x2274, + 0x22730338: 0x2275, + 0x22760338: 0x2278, + 0x22770338: 0x2279, + 0x227A0338: 0x2280, + 0x227B0338: 0x2281, + 0x22820338: 0x2284, + 0x22830338: 0x2285, + 0x22860338: 0x2288, + 0x22870338: 0x2289, + 0x22A20338: 0x22AC, + 0x22A80338: 0x22AD, + 0x22A90338: 0x22AE, + 0x22AB0338: 0x22AF, + 0x227C0338: 0x22E0, + 0x227D0338: 0x22E1, + 0x22910338: 0x22E2, + 0x22920338: 0x22E3, + 0x22B20338: 
0x22EA, + 0x22B30338: 0x22EB, + 0x22B40338: 0x22EC, + 0x22B50338: 0x22ED, + 0x304B3099: 0x304C, + 0x304D3099: 0x304E, + 0x304F3099: 0x3050, + 0x30513099: 0x3052, + 0x30533099: 0x3054, + 0x30553099: 0x3056, + 0x30573099: 0x3058, + 0x30593099: 0x305A, + 0x305B3099: 0x305C, + 0x305D3099: 0x305E, + 0x305F3099: 0x3060, + 0x30613099: 0x3062, + 0x30643099: 0x3065, + 0x30663099: 0x3067, + 0x30683099: 0x3069, + 0x306F3099: 0x3070, + 0x306F309A: 0x3071, + 0x30723099: 0x3073, + 0x3072309A: 0x3074, + 0x30753099: 0x3076, + 0x3075309A: 0x3077, + 0x30783099: 0x3079, + 0x3078309A: 0x307A, + 0x307B3099: 0x307C, + 0x307B309A: 0x307D, + 0x30463099: 0x3094, + 0x309D3099: 0x309E, + 0x30AB3099: 0x30AC, + 0x30AD3099: 0x30AE, + 0x30AF3099: 0x30B0, + 0x30B13099: 0x30B2, + 0x30B33099: 0x30B4, + 0x30B53099: 0x30B6, + 0x30B73099: 0x30B8, + 0x30B93099: 0x30BA, + 0x30BB3099: 0x30BC, + 0x30BD3099: 0x30BE, + 0x30BF3099: 0x30C0, + 0x30C13099: 0x30C2, + 0x30C43099: 0x30C5, + 0x30C63099: 0x30C7, + 0x30C83099: 0x30C9, + 0x30CF3099: 0x30D0, + 0x30CF309A: 0x30D1, + 0x30D23099: 0x30D3, + 0x30D2309A: 0x30D4, + 0x30D53099: 0x30D6, + 0x30D5309A: 0x30D7, + 0x30D83099: 0x30D9, + 0x30D8309A: 0x30DA, + 0x30DB3099: 0x30DC, + 0x30DB309A: 0x30DD, + 0x30A63099: 0x30F4, + 0x30EF3099: 0x30F7, + 0x30F03099: 0x30F8, + 0x30F13099: 0x30F9, + 0x30F23099: 0x30FA, + 0x30FD3099: 0x30FE, + 0x109910BA: 0x1109A, + 0x109B10BA: 0x1109C, + 0x10A510BA: 0x110AB, + 0x11311127: 0x1112E, + 0x11321127: 0x1112F, + 0x1347133E: 0x1134B, + 0x13471357: 0x1134C, + 0x14B914BA: 0x114BB, + 0x14B914B0: 0x114BC, + 0x14B914BD: 0x114BE, + 0x15B815AF: 0x115BA, + 0x15B915AF: 0x115BB, +} + +// Total size of tables: 53KB (54006 bytes) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/transform.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/transform.go new file mode 100644 index 00000000..9f47efba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/transform.go @@ -0,0 +1,88 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Reset implements the Reset method of the transform.Transformer interface. +func (Form) Reset() {} + +// Transform implements the Transform method of the transform.Transformer +// interface. It may need to write segments of up to MaxSegmentSize at once. +// Users should either catch ErrShortDst and allow dst to grow or have dst be at +// least of size MaxTransformChunkSize to be guaranteed of progress. +func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := 0 + // Cap the maximum number of src bytes to check. + b := src + eof := atEOF + if ns := len(dst); ns < len(b) { + err = transform.ErrShortDst + eof = false + b = b[:ns] + } + i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof) + n += copy(dst[n:], b[n:i]) + if !ok { + nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF) + return nDst + n, nSrc + n, err + } + if n < len(src) && !atEOF { + err = transform.ErrShortSrc + } + return n, n, err +} + +func flushTransform(rb *reorderBuffer) bool { + // Write out (must fully fit in dst, or else it is an ErrShortDst). 
+ if len(rb.out) < rb.nrune*utf8.UTFMax { + return false + } + rb.out = rb.out[rb.flushCopy(rb.out):] + return true +} + +var errs = []error{nil, transform.ErrShortDst, transform.ErrShortSrc} + +// transform implements the transform.Transformer interface. It is only called +// when quickSpan does not pass for a given string. +func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // TODO: get rid of reorderBuffer. See CL 23460044. + rb := reorderBuffer{} + rb.init(f, src) + for { + // Load segment into reorder buffer. + rb.setFlusher(dst[nDst:], flushTransform) + end := decomposeSegment(&rb, nSrc, atEOF) + if end < 0 { + return nDst, nSrc, errs[-end] + } + nDst = len(dst) - len(rb.out) + nSrc = end + + // Next quickSpan. + end = rb.nsrc + eof := atEOF + if n := nSrc + len(dst) - nDst; n < end { + err = transform.ErrShortDst + end = n + eof = false + } + end, ok := rb.f.quickSpan(rb.src, nSrc, end, eof) + n := copy(dst[nDst:], rb.src.bytes[nSrc:end]) + nSrc += n + nDst += n + if ok { + if n < rb.nsrc && !atEOF { + err = transform.ErrShortSrc + } + return nDst, nSrc, err + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/trie.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/trie.go new file mode 100644 index 00000000..423386bf --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/unicode/norm/trie.go @@ -0,0 +1,54 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var nfcSparse = sparseBlocks{ + values: nfcSparseValues[:], + offset: nfcSparseOffset[:], +} + +var nfkcSparse = sparseBlocks{ + values: nfkcSparseValues[:], + offset: nfkcSparseOffset[:], +} + +var ( + nfcData = newNfcTrie(0) + nfkcData = newNfkcTrie(0) +) + +// lookupValue determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/tools/go/loader/loader.go index 1cd4d6eb..de756f7f 100644 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/tools/go/loader/loader.go +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/tools/go/loader/loader.go @@ -105,7 +105,7 @@ type Config struct { // conventions, for example. // // It must be safe to call concurrently from multiple goroutines. 
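The Transform/Reset methods added above make each norm.Form usable anywhere a transform.Transformer is accepted. A minimal usage sketch for orientation (illustrative only, not part of the vendored diff; it assumes the canonical golang.org/x/text import paths rather than the vendored ones):

package main

import (
	"fmt"

	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// "é" written as 'e' followed by U+0301 (combining acute accent).
	decomposed := "e\u0301"

	// norm.NFC implements transform.Transformer via the Transform method above,
	// so it can be driven through transform.String (or NewReader/NewWriter).
	composed, _, err := transform.String(norm.NFC, decomposed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q -> %q\n", decomposed, composed)

	// Composition consults recompMap, keyed by the two code points packed into a
	// uint32; e.g. the entry 0x00650301: 0x00E9 in the table above covers this pair.
}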
- FindPackage func(ctxt *build.Context, fromDir, importPath string, mode build.ImportMode) (*build.Package, error) + FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error) // AfterTypeCheck is called immediately after a list of files // has been type-checked and appended to info.Files. diff --git a/vendor/github.com/elastic/beats/vendor/vendor.json b/vendor/github.com/elastic/beats/vendor/vendor.json index c1f4d7d1..bafaa697 100644 --- a/vendor/github.com/elastic/beats/vendor/vendor.json +++ b/vendor/github.com/elastic/beats/vendor/vendor.json @@ -117,10 +117,12 @@ "versionExact": "v1.3.1" }, { - "checksumSHA1": "5rPfda8jFccr3A6heL+JAmi9K9g=", + "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=", "path": "github.com/davecgh/go-spew/spew", - "revision": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d", - "revisionTime": "2015-11-05T21:09:06Z" + "revision": "346938d642f2ec3594ed81d874461961cd0faa76", + "revisionTime": "2016-10-29T20:57:26Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { "checksumSHA1": "Gj+xR1VgFKKmFXYOJMnAczC3Znk=", @@ -367,44 +369,44 @@ "revisionTime": "2016-08-05T00:47:13Z" }, { - "checksumSHA1": "FmPMalgdsaNNmghFB2DWm8fJjVA=", + "checksumSHA1": "hAB/G2FIWYwg3Rujgpt6UPtHMis=", "path": "github.com/elastic/go-libaudit", - "revision": "4a806edf821706e315ef7d4f3b5d0cac6d638b34", - "revisionTime": "2018-03-28T14:46:34Z", - "version": "v0.1.0", - "versionExact": "v0.1.0" + "revision": "55225d06b15c74082f9a7af75aa4284dbe48d20a", + "revisionTime": "2018-05-03T13:36:58Z", + "version": "v0.2.1", + "versionExact": "v0.2.1" }, { - "checksumSHA1": "uu4544BCRlonueK+mB7549opucs=", + "checksumSHA1": "3V0tnqlCgmCXnbocuTqUKluynm8=", "path": "github.com/elastic/go-libaudit/aucoalesce", - "revision": "4a806edf821706e315ef7d4f3b5d0cac6d638b34", - "revisionTime": "2018-03-28T14:46:34Z", - "version": "v0.1.0", - "versionExact": "v0.1.0" + "revision": "55225d06b15c74082f9a7af75aa4284dbe48d20a", + "revisionTime": "2018-05-03T13:36:58Z", + "version": "v0.2.1", + "versionExact": "v0.2.1" }, { - "checksumSHA1": "+L/ZGneCw2zrkK5Vlto9UB3LaEk=", + "checksumSHA1": "6OK3lLgocjmIUyLo8xNhYGpwE1E=", "path": "github.com/elastic/go-libaudit/auparse", - "revision": "4a806edf821706e315ef7d4f3b5d0cac6d638b34", - "revisionTime": "2018-03-28T14:46:34Z", - "version": "v0.1.0", - "versionExact": "v0.1.0" + "revision": "55225d06b15c74082f9a7af75aa4284dbe48d20a", + "revisionTime": "2018-05-03T13:36:58Z", + "version": "v0.2.1", + "versionExact": "v0.2.1" }, { - "checksumSHA1": "H0rnscnKHbkjmXc4whC3gtIPR0c=", + "checksumSHA1": "Rr15sVPpJRQ+ggimmx3/0s1gUJc=", "path": "github.com/elastic/go-libaudit/rule", - "revision": "4a806edf821706e315ef7d4f3b5d0cac6d638b34", - "revisionTime": "2018-03-28T14:46:34Z", - "version": "v0.1.0", - "versionExact": "v0.1.0" + "revision": "55225d06b15c74082f9a7af75aa4284dbe48d20a", + "revisionTime": "2018-05-03T13:36:58Z", + "version": "v0.2.1", + "versionExact": "v0.2.1" }, { - "checksumSHA1": "36UaYid29Kyhrsa5D8N6BoM8dVw=", + "checksumSHA1": "5C083BvwcAVSKquRXbxXa950/wE=", "path": "github.com/elastic/go-libaudit/rule/flags", - "revision": "4a806edf821706e315ef7d4f3b5d0cac6d638b34", - "revisionTime": "2018-03-28T14:46:34Z", - "version": "v0.1.0", - "versionExact": "v0.1.0" + "revision": "55225d06b15c74082f9a7af75aa4284dbe48d20a", + "revisionTime": "2018-05-03T13:36:58Z", + "version": "v0.2.1", + "versionExact": "v0.2.1" }, { "checksumSHA1": "3jizmlZPCyo6FAZY8Trk9jA8NH4=", @@ -442,6 +444,160 @@ "revision": 
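The loader.go hunk above swaps the FindPackage parameter order so that importPath now precedes fromDir. A hedged sketch of a caller-supplied hook written against the new signature (illustrative only; delegating to go/build as shown matches what the loader is understood to do when FindPackage is left nil):

package main

import (
	"go/build"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config

	// New parameter order: importPath before fromDir.
	conf.FindPackage = func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error) {
		// Custom resolution (caches, overlays, vendoring tricks) would hook in here;
		// this version simply delegates to the standard go/build lookup.
		return ctxt.Import(importPath, fromDir, mode)
	}

	conf.Import("fmt")
	if _, err := conf.Load(); err != nil {
		panic(err)
	}
}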
"616041e345fc33c97bc0eb0fa6b388aa07bca3e1", "revisionTime": "2016-06-17T14:03:01Z" }, + { + "checksumSHA1": "AaEPt+KMknLXze11YOnBGKzP3aA=", + "path": "github.com/elastic/go-structform", + "revision": "0a66add879601f69f55663f4c913c72988218982", + "revisionTime": "2018-03-09T00:36:09Z", + "version": "v0.0.3", + "versionExact": "v0.0.3" + }, + { + "checksumSHA1": "SXsT/tWnjLqR8dBiJGZqxCyuNaY=", + "path": "github.com/elastic/go-structform/cborl", + "revision": "0a66add879601f69f55663f4c913c72988218982", + "revisionTime": "2018-03-09T00:36:09Z", + "version": "v0.0.3", + "versionExact": "v0.0.3" + }, + { + "checksumSHA1": "LUbWdzbpzhBh+5TizsScD8gTm4M=", + "path": "github.com/elastic/go-structform/gotype", + "revision": "0a66add879601f69f55663f4c913c72988218982", + "revisionTime": "2018-03-09T00:36:09Z", + "version": "v0.0.3", + "versionExact": "v0.0.3" + }, + { + "checksumSHA1": "s7k0vEuuqkoPXU0FtrD6Y0jxd7U=", + "path": "github.com/elastic/go-structform/internal/unsafe", + "revision": "0a66add879601f69f55663f4c913c72988218982", + "revisionTime": "2018-03-09T00:36:09Z", + "version": "v0.0.3", + "versionExact": "v0.0.3" + }, + { + "checksumSHA1": "s5nSu8O8TOv4DZhdbU7OC5x0C50=", + "path": "github.com/elastic/go-structform/json", + "revision": "0a66add879601f69f55663f4c913c72988218982", + "revisionTime": "2018-03-09T00:36:09Z", + "version": "v0.0.3", + "versionExact": "v0.0.3" + }, + { + "checksumSHA1": "PDqC4Sh2H2EIxVhWZHdsusBMPB8=", + "path": "github.com/elastic/go-structform/ubjson", + "revision": "0a66add879601f69f55663f4c913c72988218982", + "revisionTime": "2018-03-09T00:36:09Z", + "version": "v0.0.3", + "versionExact": "v0.0.3" + }, + { + "checksumSHA1": "493PQcWaAgD4s0nEb5dpeTb2Ky0=", + "path": "github.com/elastic/go-structform/visitors", + "revision": "0a66add879601f69f55663f4c913c72988218982", + "revisionTime": "2018-03-09T00:36:09Z", + "version": "v0.0.3", + "versionExact": "v0.0.3" + }, + { + "checksumSHA1": "FlkQkMcsKpnsm/o4b245uHPmYiM=", + "path": "github.com/elastic/go-sysinfo", + "revision": "fda017eee28b7420d931e08c7361c1c17f516aa2", + "revisionTime": "2018-04-05T17:40:33Z" + }, + { + "checksumSHA1": "n8Lx7ibyOcLxoEOjA5WF05mZAzo=", + "path": "github.com/elastic/go-sysinfo/internal/registry", + "revision": "fda017eee28b7420d931e08c7361c1c17f516aa2", + "revisionTime": "2018-04-05T17:40:33Z" + }, + { + "checksumSHA1": "1/8ow2nOpGS8KCDI97STMJ8q/ZA=", + "path": "github.com/elastic/go-sysinfo/providers/darwin", + "revision": "fda017eee28b7420d931e08c7361c1c17f516aa2", + "revisionTime": "2018-04-05T17:40:33Z" + }, + { + "checksumSHA1": "PhAK4PEi9JeWVk3O/G2F2CCiarA=", + "path": "github.com/elastic/go-sysinfo/providers/linux", + "revision": "fda017eee28b7420d931e08c7361c1c17f516aa2", + "revisionTime": "2018-04-05T17:40:33Z" + }, + { + "checksumSHA1": "NwdmCaM0lXIwHRwuqJ/7XtZrC7w=", + "path": "github.com/elastic/go-sysinfo/providers/shared", + "revision": "fda017eee28b7420d931e08c7361c1c17f516aa2", + "revisionTime": "2018-04-05T17:40:33Z" + }, + { + "checksumSHA1": "sEzts3Iek6edDSZQVOhi9gEdcDs=", + "path": "github.com/elastic/go-sysinfo/providers/windows", + "revision": "fda017eee28b7420d931e08c7361c1c17f516aa2", + "revisionTime": "2018-04-05T17:40:33Z" + }, + { + "checksumSHA1": "wTOvUZJc7EW2cVXkrL+C27tAqYw=", + "path": "github.com/elastic/go-sysinfo/types", + "revision": "fda017eee28b7420d931e08c7361c1c17f516aa2", + "revisionTime": "2018-04-05T17:40:33Z" + }, + { + "checksumSHA1": "IMU9C/FvHiOI52jKElb62Gokv0s=", + "path": "github.com/elastic/go-txfile", + "revision": 
"7e7e33cc236f30fff545f3ee2c35ada5b70b6b13", + "revisionTime": "2018-03-29T16:52:21Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, + { + "checksumSHA1": "cXIYcUhYjLs+sUW42/yIE9ellS0=", + "path": "github.com/elastic/go-txfile/internal/cleanup", + "revision": "7e7e33cc236f30fff545f3ee2c35ada5b70b6b13", + "revisionTime": "2018-03-29T16:52:21Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, + { + "checksumSHA1": "ERbGKs6IgC062SkwB1jI0EHfAM8=", + "path": "github.com/elastic/go-txfile/internal/invariant", + "revision": "7e7e33cc236f30fff545f3ee2c35ada5b70b6b13", + "revisionTime": "2018-03-29T16:52:21Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, + { + "checksumSHA1": "xaTyh6jlrYI94Kok8+QT2bpRkHM=", + "path": "github.com/elastic/go-txfile/internal/iter", + "revision": "7e7e33cc236f30fff545f3ee2c35ada5b70b6b13", + "revisionTime": "2018-03-29T16:52:21Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, + { + "checksumSHA1": "5tZTXzITZkrmyx4tamhtW9ilmhE=", + "path": "github.com/elastic/go-txfile/internal/tracelog", + "revision": "7e7e33cc236f30fff545f3ee2c35ada5b70b6b13", + "revisionTime": "2018-03-29T16:52:21Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, + { + "checksumSHA1": "cdu99vXUoUWPJww+ndmzlfh/Rsk=", + "path": "github.com/elastic/go-txfile/pq", + "revision": "7e7e33cc236f30fff545f3ee2c35ada5b70b6b13", + "revisionTime": "2018-03-29T16:52:21Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, + { + "checksumSHA1": "1ltkQVfITTz9qKLzO2XvgG+K7Zs=", + "path": "github.com/elastic/go-txfile/txfiletest", + "revision": "7e7e33cc236f30fff545f3ee2c35ada5b70b6b13", + "revisionTime": "2018-03-29T16:52:21Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, { "checksumSHA1": "61XUpyQ3zWnJ7Tlj0xLsHtnzwJY=", "path": "github.com/elastic/go-ucfg", @@ -481,222 +637,314 @@ "revisionTime": "2017-02-07T06:38:51Z" }, { - "checksumSHA1": "386qAesaBQFadNHu7di78fZT7xk=", + "checksumSHA1": "yu/X+qHftvfQlAnjPdYLwrDn2nI=", + "path": "github.com/elastic/go-windows", + "revision": "a730c8b4e08aef7e1ebb642928bf862996ad2383", + "revisionTime": "2018-04-05T16:13:57Z" + }, + { + "checksumSHA1": "RPOLNUpw00QUUaA/U4YbPVf6WlA=", "path": "github.com/elastic/gosigar", - "revision": "16df19fe5efee4ea2938bde5f56c02d9929dc054", - "revisionTime": "2018-01-22T22:24:54Z", - "version": "v0.8.0", - "versionExact": "v0.8.0" + "revision": "237dff72b4ba95da2cd985f96a9c0ede4aefc760", + "revisionTime": "2018-03-16T16:52:25Z", + "version": "v0.9.0", + "versionExact": "v0.9.0" }, { "checksumSHA1": "TX9y4oPL5YmT4Gb/OU4GIPTdQB4=", "path": "github.com/elastic/gosigar/cgroup", - "revision": "cdcbd8c3b8bf28ef6d8639ec1e45bf31d2745f2d", - "revisionTime": "2018-01-17T18:54:32Z", - "version": "v0.7.0", - "versionExact": "v0.7.0" + "revision": "237dff72b4ba95da2cd985f96a9c0ede4aefc760", + "revisionTime": "2018-03-16T16:52:25Z", + "version": "v0.9.0", + "versionExact": "v0.9.0" }, { - "checksumSHA1": "2VhOsaR4sv3S79HO6X+6dEphNKU=", + "checksumSHA1": "hPqGM3DENaGfipEODoyZ4mKogTQ=", + "path": "github.com/elastic/gosigar/sys", + "revision": "237dff72b4ba95da2cd985f96a9c0ede4aefc760", + "revisionTime": "2018-03-16T16:52:25Z", + "version": "v0.9.0", + "versionExact": "v0.9.0" + }, + { + "checksumSHA1": "mLq5lOyD0ZU39ysXuf1ETOLJ+f0=", "path": "github.com/elastic/gosigar/sys/linux", - "revision": "cdcbd8c3b8bf28ef6d8639ec1e45bf31d2745f2d", - "revisionTime": "2018-01-17T18:54:32Z", - "version": "v0.7.0", - "versionExact": "v0.7.0" + "revision": "237dff72b4ba95da2cd985f96a9c0ede4aefc760", + "revisionTime": 
"2018-03-16T16:52:25Z", + "version": "v0.9.0", + "versionExact": "v0.9.0" }, { "checksumSHA1": "qDsgp2kAeI9nhj565HUScaUyjU4=", "path": "github.com/elastic/gosigar/sys/windows", - "revision": "cdcbd8c3b8bf28ef6d8639ec1e45bf31d2745f2d", - "revisionTime": "2018-01-17T18:54:32Z", - "version": "v0.7.0", - "versionExact": "v0.7.0" - }, - { - "checksumSHA1": "P0CvGmmAM8uYPSE2ix4th/L9c/8=", - "path": "github.com/elastic/procfs", - "revision": "664e6bc79eb43c956507b6e20a867140516ad15a", - "revisionTime": "2016-09-16T08:04:11Z" + "revision": "237dff72b4ba95da2cd985f96a9c0ede4aefc760", + "revisionTime": "2018-03-16T16:52:25Z", + "version": "v0.9.0", + "versionExact": "v0.9.0" }, { - "checksumSHA1": "yKbtGvockvU0/yhbc5W+qm4O0iI=", + "checksumSHA1": "hTxFrbA619JCHysWjXHa9U6bfto=", "path": "github.com/ericchiang/k8s", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "uQuMoUlS7hAWsB+Mwr/1B7+35BU=", "path": "github.com/ericchiang/k8s/api/resource", "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revisionTime": "2017-06-29T16:56:01Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "XN1tbPrI03O0ishnZyfkWtTnrcQ=", "path": "github.com/ericchiang/k8s/api/unversioned", "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revisionTime": "2017-06-29T16:56:01Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "yfTg3/Qn7KiizNJ39JmPBFi9YDQ=", "path": "github.com/ericchiang/k8s/api/v1", "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revisionTime": "2017-06-29T16:56:01Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" + }, + { + "checksumSHA1": "y8fNiBLSoGojnUsGDsdLlsJYyqQ=", + "path": "github.com/ericchiang/k8s/apis/apiextensions/v1beta1", + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "uw/3eB6WiVCSrQZS9ZZs/1kyu1I=", "path": "github.com/ericchiang/k8s/apis/apps/v1alpha1", "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revisionTime": "2017-06-29T16:56:01Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "GPnvYx9Uxhpwmv01iygWR6+naTI=", + "checksumSHA1": "JxQ/zEWQSrncYNKifCuMctq+Tsw=", "path": "github.com/ericchiang/k8s/apis/apps/v1beta1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "Jjw5tBYv4k+Es+qPp03rnzyzRWA=", + "checksumSHA1": "bjklGt/pc6kWOZewAw87Hchw5oY=", "path": "github.com/ericchiang/k8s/apis/authentication/v1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "uR4S43Wc80fhS0vMDE3Z3hFg7J8=", + "checksumSHA1": "LExhnM9Vn0LQoLQWszQ7aIxDxb4=", "path": "github.com/ericchiang/k8s/apis/authentication/v1beta1", - "revision": 
"5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "aM2KSDZbHn8jJomPPeG6LKpMwhs=", + "checksumSHA1": "GM+PzOiBoq3cxx4h5RKVUb3UH60=", "path": "github.com/ericchiang/k8s/apis/authorization/v1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "4yWZvduAw2JNdHd1cXjTJBUy0lw=", + "checksumSHA1": "zfr5oUVjbWRfvXi2LJiGMfFeDQY=", "path": "github.com/ericchiang/k8s/apis/authorization/v1beta1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "1nMeCVQImIo1CpRRyOYMIqLoPBc=", + "checksumSHA1": "izkXNDp5a5WP45jU0hSfTrwyfvM=", "path": "github.com/ericchiang/k8s/apis/autoscaling/v1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "kUXiQQA99K7zquvG9es3yauVjYw=", "path": "github.com/ericchiang/k8s/apis/autoscaling/v2alpha1", "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revisionTime": "2017-06-29T16:56:01Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "vMWsdmHlmaAQZIT0c26dwxe9pDw=", + "checksumSHA1": "FryZuAxWn4Ig8zc913w9BdfYzvs=", "path": "github.com/ericchiang/k8s/apis/batch/v1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "bqaX0T9jycmp9ao1Ov41dfPn0Ng=", + "checksumSHA1": "ylo7Z8wyJD+tmICB7wsOVIBpO+U=", "path": "github.com/ericchiang/k8s/apis/batch/v2alpha1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "9GRVPI+Tf4RrlX2aveUGEUHKIrM=", "path": "github.com/ericchiang/k8s/apis/certificates/v1alpha1", "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revisionTime": "2017-06-29T16:56:01Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "k1dF56GRoEg6rooFKO7UvEJvBcE=", + "checksumSHA1": "+d8+mSdkdcPWQIpczXDZZW0lrjg=", "path": "github.com/ericchiang/k8s/apis/certificates/v1beta1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" + }, + { + "checksumSHA1": "S7AvxmCe/+WoFP/v9lZr0Mv66qg=", + "path": "github.com/ericchiang/k8s/apis/core/v1", + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", 
+ "versionExact": "v1.0.0" }, { - "checksumSHA1": "4pDHINIk6BdPBYWGF20IwHNCg2Q=", + "checksumSHA1": "cWPoP6XZN7WMnEVMPcgPgg3Aw9Q=", "path": "github.com/ericchiang/k8s/apis/extensions/v1beta1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "NAL7OeKSEzTOoXHBFnC1B1VmBVs=", + "checksumSHA1": "vaNrBPcGWeDd1rXl8+uN08uxWhE=", "path": "github.com/ericchiang/k8s/apis/imagepolicy/v1alpha1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "Vg1/xjzLJHZlvuheWC4abghACwQ=", + "checksumSHA1": "UNTTH+Ppu4vImnF+bPkG3/NR3gg=", "path": "github.com/ericchiang/k8s/apis/meta/v1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "wYSNb+W2L5gJlGO8n6mGOGft8R8=", "path": "github.com/ericchiang/k8s/apis/policy/v1alpha1", "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revisionTime": "2017-06-29T16:56:01Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "ioJ28pdUN6fDkOp8dT+Tg3HSqmk=", + "checksumSHA1": "Mmyg9Wh+FCVR6fV8MGEKRxvqZ2k=", "path": "github.com/ericchiang/k8s/apis/policy/v1beta1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "UErnBsjjtmg3oYjLYU1S80oi3sk=", + "checksumSHA1": "bvwYS/wrBkyAfvCjzMbi/vKamrQ=", "path": "github.com/ericchiang/k8s/apis/rbac/v1alpha1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "Xl+Tm8ZOz0cMOrfLaQvu/lsWObU=", + "checksumSHA1": "m1Tde18NwewnvJoOYL3uykNcBuM=", "path": "github.com/ericchiang/k8s/apis/rbac/v1beta1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" + }, + { + "checksumSHA1": "JirJkoeIkWJRNrbprsQvqwisxds=", + "path": "github.com/ericchiang/k8s/apis/resource", + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "YyZyaF0k2NAQAZvsCOVdhAkfVU0=", + "checksumSHA1": "rQZ69PjEClQQ+PGEHRKzkGVVQyw=", "path": "github.com/ericchiang/k8s/apis/settings/v1alpha1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "vUc3mf0rE7CQ3B52wfrMDyspLgA=", + "checksumSHA1": 
"pp0AetmPoKy7Rz0zNhBwUpExkbc=", "path": "github.com/ericchiang/k8s/apis/storage/v1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "7/oj1z0vG1pvRza+UuKQ6txdleI=", + "checksumSHA1": "WeACcIrS4EkeBm8TTftwuVniaWk=", "path": "github.com/ericchiang/k8s/apis/storage/v1beta1", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "mm5iTFmLQ6h98DKgiUuTCpHP9H4=", + "checksumSHA1": "Su6wSR8V8HL2QZsF8icJ0R9AFq8=", "path": "github.com/ericchiang/k8s/runtime", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "Kk1UDqUx2Pr1LyvIIgcJBApTlCk=", + "checksumSHA1": "8ETrRvIaXPfD21N7fa8kdbumL00=", "path": "github.com/ericchiang/k8s/runtime/schema", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { - "checksumSHA1": "LoxBND74egHIasOX6z98FeeW0zI=", + "checksumSHA1": "cMk3HE8/81ExHuEs0F5sZCclOFs=", "path": "github.com/ericchiang/k8s/util/intstr", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "fobEKiMk5D7IGvCSwh4HdG1o98c=", "path": "github.com/ericchiang/k8s/watch/versioned", - "revision": "5803ed75e31fc1998b5f781ac08e22ff985c3f8f", - "revisionTime": "2017-06-29T16:56:01Z" + "revision": "5912993f00cb7c971aaa54529a06bd3eecd6c3d4", + "revisionTime": "2018-01-20T20:28:12Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" }, { "checksumSHA1": "AANTVr9CVVyzsgviODY6Wi2thuM=", @@ -707,18 +955,19 @@ "versionExact": "v1.5.0" }, { - "checksumSHA1": "dgmdG37hn1qma1kdpCaUJ6JltXk=", + "checksumSHA1": "ZRP2NldqpFv4H1VsE70PEeqVjaw=", "origin": "github.com/elastic/fsevents", "path": "github.com/fsnotify/fsevents", - "revision": "690cb784149d5facd7fe613c52757445c43afcde", - "revisionTime": "2017-12-04T13:54:51Z", + "revision": "70114c7d2e1e4d1ae5179b285d65ea21aae111cc", + "revisionTime": "2018-04-06T06:16:07Z", "tree": true }, { - "checksumSHA1": "x2Km0Qy3WgJJnV19Zv25VwTJcBM=", + "checksumSHA1": "xGjYGUfsd36pm3CqdV/RYT87xxM=", + "origin": "github.com/adriansr/fsnotify", "path": "github.com/fsnotify/fsnotify", - "revision": "4da3e2cfbabc9f751898f250b49f2439785783a1", - "revisionTime": "2017-03-29T04:21:07Z" + "revision": "c9bbe1f46f1da9904baf3916a4ba4aec7f1e9000", + "revisionTime": "2018-04-17T23:40:13Z" }, { "checksumSHA1": "2UmMbNHc8FBr98mJFN1k8ISOIHk=", @@ -895,6 +1144,38 @@ "revision": "ff09b135c25aae272398c51a07235b90a75aa4f0", "revisionTime": "2017-03-16T20:15:38Z" }, + { + "checksumSHA1": "LuFv4/jlrmFNnDb/5SCSEPAM9vU=", + "path": "github.com/pmezard/go-difflib/difflib", + "revision": "792786c7400a136282c1664665ae0a8db921c6c2", + "revisionTime": 
"2016-01-10T10:55:54Z", + "version": "v1.0.0", + "versionExact": "v1.0.0" + }, + { + "checksumSHA1": "Etvt6mgzvD7ARf4Ux03LHfgSlzU=", + "path": "github.com/prometheus/procfs", + "revision": "54d17b57dd7d4a3aa092476596b3f8a933bde349", + "revisionTime": "2018-03-10T14:15:09Z" + }, + { + "checksumSHA1": "lv9rIcjbVEGo8AT1UCUZXhXrfQc=", + "path": "github.com/prometheus/procfs/internal/util", + "revision": "54d17b57dd7d4a3aa092476596b3f8a933bde349", + "revisionTime": "2018-03-10T14:15:09Z" + }, + { + "checksumSHA1": "EekY1iRG9JY74mDD0jsbFCWbAFs=", + "path": "github.com/prometheus/procfs/nfs", + "revision": "54d17b57dd7d4a3aa092476596b3f8a933bde349", + "revisionTime": "2018-03-10T14:15:09Z" + }, + { + "checksumSHA1": "yItvTQLUVqm/ArLEbvEhqG0T5a0=", + "path": "github.com/prometheus/procfs/xfs", + "revision": "54d17b57dd7d4a3aa092476596b3f8a933bde349", + "revisionTime": "2018-03-10T14:15:09Z" + }, { "checksumSHA1": "KAzbLjI9MzW2tjfcAsK75lVRp6I=", "path": "github.com/rcrowley/go-metrics", @@ -926,28 +1207,28 @@ "revisionTime": "2017-03-21T23:07:31Z" }, { - "checksumSHA1": "9ev6lHyQOxl1/VndOHAnMfbLmvs=", + "checksumSHA1": "lZRqG0Rl5w+mkaTUOuEjcKvNU6c=", "path": "github.com/shirou/gopsutil/disk", - "revision": "9af92986dda65a8c367157a82b484553e1ec1c55", - "revisionTime": "2017-04-30T14:39:46Z", - "version": "v2.17.04", - "versionExact": "v2.17.04" + "revision": "c432be29ccce470088d07eea25b3ea7e68a8afbb", + "revisionTime": "2018-01-30T01:13:38Z", + "version": "v2.18.01", + "versionExact": "v2.18.01" }, { - "checksumSHA1": "hKDsT0KAOtA7UqiXYdO0RahnQZ8=", + "checksumSHA1": "jWpwWWcywJPNhKTYxi4RXds+amQ=", "path": "github.com/shirou/gopsutil/internal/common", - "revision": "9af92986dda65a8c367157a82b484553e1ec1c55", - "revisionTime": "2017-04-30T14:39:46Z", - "version": "v2.17.04", - "versionExact": "v2.17.04" + "revision": "c432be29ccce470088d07eea25b3ea7e68a8afbb", + "revisionTime": "2018-01-30T01:13:38Z", + "version": "v2.18.01", + "versionExact": "v2.18.01" }, { - "checksumSHA1": "/MH6TIdlj16nThxkU9Bu+/WBm2w=", + "checksumSHA1": "Z7FjZvR5J5xh6Ne572gD7tRUsc8=", "path": "github.com/shirou/gopsutil/net", - "revision": "9af92986dda65a8c367157a82b484553e1ec1c55", - "revisionTime": "2017-04-30T14:39:46Z", - "version": "v2.17.04", - "versionExact": "v2.17.04" + "revision": "c432be29ccce470088d07eea25b3ea7e68a8afbb", + "revisionTime": "2018-01-30T01:13:38Z", + "version": "v2.18.01", + "versionExact": "v2.18.01" }, { "checksumSHA1": "e7mAb9jMke2ASQGZepFgOmfBFzM=", @@ -962,17 +1243,26 @@ "revisionTime": "2017-05-08T18:43:26Z" }, { - "checksumSHA1": "Z7NuzgpfDQhKRvVq4qsbD/v9cIY=", - "path": "github.com/stretchr/testify", - "revision": "f390dcf405f7b83c997eac1b06768bb9f44dec18", - "revisionTime": "2016-01-09T20:38:47Z", - "tree": true + "checksumSHA1": "Le1psgZO0t6mRg6oY5dmnjH13hk=", + "path": "github.com/stretchr/testify/assert", + "revision": "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c", + "revisionTime": "2017-12-30T17:54:59Z", + "version": "v1.2.0", + "versionExact": "v1.2.0" + }, + { + "checksumSHA1": "CpcG17Q/0k1g2uy8AL26Uu7TouU=", + "path": "github.com/theckman/go-flock", + "revision": "b139a2487364247d91814e4a7c7b8fdc69e342b2", + "revisionTime": "2018-01-24T01:19:07Z", + "version": "v0.4.0", + "versionExact": "v0.4.0" }, { "checksumSHA1": "M0S9278lG9Fztu+ZUsLUi40GDJU=", "path": "github.com/tsg/gopacket", - "revision": "8e703b9968693c15f25cabb6ba8be4370cf431d0", - "revisionTime": "2016-08-17T18:24:57Z" + "revision": "f289b3ea3e41a01b2822be9caf5f40c01fdda05c", + "revisionTime": "2018-03-16T21:03:30Z" }, { 
"checksumSHA1": "STY8i3sZLdZfFcKiyOdpV852nls=", @@ -999,76 +1289,16 @@ "revisionTime": "2016-08-17T18:24:57Z" }, { - "checksumSHA1": "ci7skUhpD1VwkoR0tKVdggTkYrE=", - "path": "github.com/urso/go-structform", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" - }, - { - "checksumSHA1": "da75fv5z2n4AHmTzUWLBs7pNkog=", - "path": "github.com/urso/go-structform/cborl", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" - }, - { - "checksumSHA1": "AkYoBxV+Imq2EBwNcb53hEGh7sQ=", - "path": "github.com/urso/go-structform/gotype", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" - }, - { - "checksumSHA1": "25sJtDsLVmS+aCILjB8zm/JBgPs=", - "path": "github.com/urso/go-structform/internal/gen", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" - }, - { - "checksumSHA1": "m9H8/w6okS+8ixvkq+eYLd/Kk3g=", - "path": "github.com/urso/go-structform/internal/unsafe", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" - }, - { - "checksumSHA1": "UXf4TzYK1UB8xAa6Phy9CvkvWL0=", - "path": "github.com/urso/go-structform/json", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" - }, - { - "checksumSHA1": "SVGAVpMy8NbNCI7GVBLCKuJ7JNQ=", - "path": "github.com/urso/go-structform/sftest", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" + "checksumSHA1": "H7tCgNt2ajKK4FBJIDNlevu9MAc=", + "path": "github.com/urso/go-bin", + "revision": "781c575c9f0eb3cb9dca94521bd7ad7d5aec7fd4", + "revisionTime": "2018-02-20T13:58:11Z" }, { - "checksumSHA1": "ttk3rguPusHM0+Pkp5diTbps/18=", - "path": "github.com/urso/go-structform/ubjson", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" - }, - { - "checksumSHA1": "MytQct/960akBAZm7xmbpi2BKGs=", - "path": "github.com/urso/go-structform/visitors", - "revision": "844d7d44009e9e8c0f08016fc4dab64e136ca040", - "revisionTime": "2017-12-05T17:32:58Z", - "version": "v0.0.2", - "versionExact": "v0.0.2" + "checksumSHA1": "eNXPqquHSqrsae/NydhZJSqOP5c=", + "path": "github.com/urso/qcgen", + "revision": "0b059e7db4f40a062ca3d975b7500c6a0a968d87", + "revisionTime": "2018-01-31T10:30:24Z" }, { "checksumSHA1": "Und+nhgN1YsNWvd0aDYO+0cMcAo=", @@ -1165,224 +1395,284 @@ { "checksumSHA1": "ppPg0bIlBAVJy0Pn13BfBnkp9V4=", "path": "golang.org/x/crypto/blake2b", - "revision": "d585fd2cc9195196078f516b69daff6744ef5e84", - "revisionTime": "2017-12-16T04:08:15Z" + "revision": "5119cf507ed5294cc409c092980c7497ee5d6fd2", + "revisionTime": "2018-01-22T10:39:14Z" }, { "checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=", "path": "golang.org/x/crypto/pbkdf2", - "revision": "e8f229864d71a49e5fdc4a9a134c5f85c4c33d64", - "revisionTime": "2017-11-28T04:39:32Z" + "revision": "5119cf507ed5294cc409c092980c7497ee5d6fd2", + "revisionTime": "2018-01-22T10:39:14Z" }, { "checksumSHA1": "iNE2KX9BQzCptlQC2DdQEVmn4R4=", "path": "golang.org/x/crypto/sha3", - 
"revision": "9419663f5a44be8b34ca85f08abc5fe1be11f8a3", - "revisionTime": "2017-09-30T17:45:11Z" + "revision": "5119cf507ed5294cc409c092980c7497ee5d6fd2", + "revisionTime": "2018-01-22T10:39:14Z" }, { - "checksumSHA1": "X1NTlfcau2XcV6WtAHF6b/DECOA=", + "checksumSHA1": "6U7dCaxxIMjf5V02iWgyAwppczw=", "path": "golang.org/x/crypto/ssh/terminal", - "revision": "e8f229864d71a49e5fdc4a9a134c5f85c4c33d64", - "revisionTime": "2017-11-28T04:39:32Z" + "revision": "5119cf507ed5294cc409c092980c7497ee5d6fd2", + "revisionTime": "2018-01-22T10:39:14Z" }, { - "checksumSHA1": "tK8eFmQ0JeKpR3P0TjiGobzlIh0=", + "checksumSHA1": "uX2McdP4VcQ6zkAF0Q4oyd0rFtU=", "path": "golang.org/x/net/bpf", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2016-07-15T16:59:30Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "Y+HGqEkYM15ir+J93MEaHdyFy0c=", + "checksumSHA1": "dr5+PfIRzXeN+l1VG+s0lea9qz8=", "path": "golang.org/x/net/context", - "revision": "ffcf1bedda3b04ebb15a168a59800a73d6dc0f4d", - "revisionTime": "2017-03-29T01:43:45Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=", "path": "golang.org/x/net/context/ctxhttp", - "revision": "48359f4f600b3a2d5cf657458e3f940021631a56", - "revisionTime": "2017-03-18T09:30:21Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "SPYGC6DQrH9jICccUsOfbvvhB4g=", + "checksumSHA1": "TWcqN2+KUWtdqnu18rruwn14UEQ=", "path": "golang.org/x/net/http2", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2017-04-13T17:15:43Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "EYNaHp7XdLWRydUCE0amEkKAtgk=", + "checksumSHA1": "ezWhc7n/FtqkLDQKeU2JbW+80tE=", "path": "golang.org/x/net/http2/hpack", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2017-04-13T17:15:43Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "vU7M5xs/uYYjyKv+P8ZqQCT3LeI=", + "checksumSHA1": "DqdFGWbLLyVFeDvzvXyf1Y678uA=", "path": "golang.org/x/net/icmp", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2016-07-15T16:59:30Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" + }, + { + "checksumSHA1": "RcrB7tgYS/GMW4QrwVdMOTNqIU8=", + "path": "golang.org/x/net/idna", + "revision": "f5dfe339be1d06f81b22525fe34671ee7d2c8904", + "revisionTime": "2018-02-04T03:50:36Z" }, { - "checksumSHA1": "yRuyntx9a59ugMi5NlN4ST0XRcI=", + "checksumSHA1": "5JWn/wMC+EWNDKI/AYE4JifQF54=", "path": "golang.org/x/net/internal/iana", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2016-07-15T16:59:30Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": 
"release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "BJrlL/vetMJZLv8XPuzvw3ZITaU=", + "checksumSHA1": "WnI4058Oj6W4YSvyXAnK3qCKqvo=", + "path": "golang.org/x/net/internal/socket", + "revision": "f5dfe339be1d06f81b22525fe34671ee7d2c8904", + "revisionTime": "2018-02-04T03:50:36Z" + }, + { + "checksumSHA1": "zPTKyZ1C55w1fk1W+/qGE15jaek=", "path": "golang.org/x/net/ipv4", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2016-07-15T16:59:30Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "oHDMN5yvCAux6ghrbdDYEyaOydo=", + "checksumSHA1": "3L3n7qKMO9X8E1ibA5mExKvwbmI=", "path": "golang.org/x/net/ipv6", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2016-07-15T16:59:30Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "yhndhWXMs/VSEDLks4dNyFMQStA=", + "checksumSHA1": "3xyuaSNmClqG4YWC7g0isQIbUTc=", "path": "golang.org/x/net/lex/httplex", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2017-04-13T17:15:43Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "LvdVRE0FqdR68SvVpRkHs1rxhcA=", + "checksumSHA1": "QEm/dePZ0lOnyOs+m22KjXfJ/IU=", "path": "golang.org/x/net/proxy", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2016-07-15T16:59:30Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "2uRCLUe0L2smIdTGnSaVDqNECOg=", + "checksumSHA1": "+7gSOeJWKSUhXho9cQPtyzJL+ok=", "path": "golang.org/x/net/publicsuffix", - "revision": "e90d6d0afc4c315a0d87a568ae68577cc15149a0", - "revisionTime": "2016-07-15T16:59:30Z" + "revision": "44b7c21cbf19450f38b337eb6b6fe4f6496fb5b3", + "revisionTime": "2017-10-22T17:59:00Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "l/AvVB/e1LjZ28XXItkdOW9tKMQ=", + "checksumSHA1": "CNHEeGnucEUlTHJrLS2kHtfNbws=", "path": "golang.org/x/sys/unix", - "revision": "b76f9891dc1d975623261def70f9b89661f5baab", - "revisionTime": "2017-11-28T14:20:51Z" + "revision": "37707fdb30a5b38865cfb95e5aab41707daec7fd", + "revisionTime": "2018-02-02T13:35:31Z" }, { - "checksumSHA1": "cQoW6a5F9mbu10APPXM39YJtv48=", + "checksumSHA1": "eQq+ZoTWPjyizS9XalhZwfGjQao=", "path": "golang.org/x/sys/windows", - "revision": "b76f9891dc1d975623261def70f9b89661f5baab", - "revisionTime": "2017-11-28T14:20:51Z" + "revision": "37707fdb30a5b38865cfb95e5aab41707daec7fd", + "revisionTime": "2018-02-02T13:35:31Z" }, { - "checksumSHA1": "Qvq4DjtYrcpGgBT0O1s8nTTeFWQ=", + "checksumSHA1": "ZdFZFaXmCgEEaEhVPkyXrnhKhsg=", "path": "golang.org/x/sys/windows/registry", - "revision": "62bee037599929a6e9146f29d10dd5208c43507d", - "revisionTime": "2016-06-14T22:52:37Z" + "revision": "37707fdb30a5b38865cfb95e5aab41707daec7fd", + "revisionTime": "2018-02-02T13:35:31Z" }, { - "checksumSHA1": "IRqLaXM/VQRzkbXPuiqOxTb2W0Y=", + "checksumSHA1": "VNlkHemg81Ba7ElHfKKUU1h+U1U=", "path": "golang.org/x/sys/windows/svc", - "revision": 
"62bee037599929a6e9146f29d10dd5208c43507d", - "revisionTime": "2016-06-14T22:52:37Z" + "revision": "37707fdb30a5b38865cfb95e5aab41707daec7fd", + "revisionTime": "2018-02-02T13:35:31Z" }, { - "checksumSHA1": "1ouiyoHRaaMDNeq/nap9B0UuQw4=", + "checksumSHA1": "lZi+t2ilFyYSpqL1ThwNf8ot3WQ=", "path": "golang.org/x/sys/windows/svc/debug", - "revision": "62bee037599929a6e9146f29d10dd5208c43507d", - "revisionTime": "2016-06-14T22:52:37Z" + "revision": "37707fdb30a5b38865cfb95e5aab41707daec7fd", + "revisionTime": "2018-02-02T13:35:31Z" }, { "checksumSHA1": "uVlUSSKplihZG7N+QJ6fzDZ4Kh8=", "path": "golang.org/x/sys/windows/svc/eventlog", - "revision": "571f7bbbe08da2a8955aed9d4db316e78630e9a3", - "revisionTime": "2017-12-16T14:55:03Z" + "revision": "37707fdb30a5b38865cfb95e5aab41707daec7fd", + "revisionTime": "2018-02-02T13:35:31Z" + }, + { + "checksumSHA1": "rEd9Z4B9rbkOzxpwWSu0SN1PtDg=", + "path": "golang.org/x/text", + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "oaglBTpGgEUgk7m92i6nuZbpicE=", + "checksumSHA1": "Mr4ur60bgQJnQFfJY0dGtwWwMPE=", "path": "golang.org/x/text/encoding", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "ETIWndVBXuAJ7N6GOfQz1rnZnFo=", + "checksumSHA1": "DSdlK4MKI/a3U8Zaee2XKBe01Fo=", "path": "golang.org/x/text/encoding/charmap", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "mI8YM2LehMxYDcauq5loMZr1pP8=", + "checksumSHA1": "tLQQZEU7qS/eYyCvd76Wqfz1oR8=", "path": "golang.org/x/text/encoding/htmlindex", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { "checksumSHA1": "zeHyHebIZl1tGuwGllIhjfci+wI=", "path": "golang.org/x/text/encoding/internal", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "TF4hoIqHVEAvOq67rfnSLSkcZ1Y=", + "checksumSHA1": "7kYqxy64WhMjFIFZgN7tJ3lbKxM=", "path": "golang.org/x/text/encoding/internal/identifier", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "HeZV82ktrmgyAaYLtNFS0qYgspI=", + "checksumSHA1": "2YqVpmvjWGEBATyUphTP1MS34JE=", "path": "golang.org/x/text/encoding/japanese", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "8y87WJz3OkDWtPCIXxJcYpo+OY8=", + "checksumSHA1": "+ErWCAdaMwO4PLtrk9D/Hh+7oQM=", "path": "golang.org/x/text/encoding/korean", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "WYfmebIyX5Zae8NUfu9PsQjQff0=", + "checksumSHA1": "mTuZi5urYwgDIO8+Gfql2pv8Vwg=", "path": 
"golang.org/x/text/encoding/simplifiedchinese", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "KKqYmi6fxt3r3uo4lExss2yTMbs=", + "checksumSHA1": "D+VI4j0Wjzr8SeupWdOB5KBdFOw=", "path": "golang.org/x/text/encoding/traditionalchinese", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { "checksumSHA1": "G9LfJI9gySazd+MyyC6QbTHx4to=", "path": "golang.org/x/text/encoding/unicode", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { "checksumSHA1": "hyNCcTwMQnV6/MK8uUW9E5H0J0M=", "path": "golang.org/x/text/internal/tag", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { "checksumSHA1": "Qk7dljcrEK1BJkAEZguxAbG9dSo=", "path": "golang.org/x/text/internal/utf8internal", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "vbYsvMa+yYFTrmD0bIVhA36DpgQ=", + "checksumSHA1": "/N4Gt0BoQcasJJ28JOlzLO5v/ug=", "path": "golang.org/x/text/language", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "cQ4+8mXpYioml5/hO7ZyeECoFJc=", + "checksumSHA1": "IV4MN7KGBSocu/5NR3le3sxup4Y=", "path": "golang.org/x/text/runes", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" + }, + { + "checksumSHA1": "CbpjEkkOeh0fdM/V8xKDdI0AA88=", + "path": "golang.org/x/text/secure/bidirule", + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { - "checksumSHA1": "TZDHZj3zWDc5LKqpoLamOKt6Nmo=", + "checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=", "path": "golang.org/x/text/transform", - "revision": "2910a502d2bf9e43193af9d68ca516529614eed3", - "revisionTime": "2016-07-21T22:28:28Z" + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" + }, + { + "checksumSHA1": "w8kDfZ1Ug+qAcVU0v8obbu3aDOY=", + "path": "golang.org/x/text/unicode/bidi", + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" + }, + { + "checksumSHA1": "BCNYmf4Ek93G4lk5x3ucNi/lTwA=", + "path": "golang.org/x/text/unicode/norm", + "revision": "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + "revisionTime": "2018-02-04T03:07:25Z" }, { "checksumSHA1": "vGfePfr0+weQUeTM/71mu+LCFuE=", @@ -1393,20 +1683,26 @@ { "checksumSHA1": "p3gWsy4fQOSXGRMUHr3TnmVFias=", "path": "golang.org/x/tools/go/ast/astutil", - "revision": "9be3b7cbc7ccd19baaa3b7704c22f57db5ebbdf2", - "revisionTime": "2017-06-28T06:26:50Z" + "revision": "5d2fd3ccab986d52112bf301d47a819783339d0e", + "revisionTime": "2017-08-07T23:04:23Z", + "version": "release-branch.go1.9", + "versionExact": 
"release-branch.go1.9" }, { "checksumSHA1": "AnXFEvmaJ7w2Q7hWPcLUmCbPgq0=", "path": "golang.org/x/tools/go/buildutil", - "revision": "9be3b7cbc7ccd19baaa3b7704c22f57db5ebbdf2", - "revisionTime": "2017-06-28T06:26:50Z" + "revision": "5d2fd3ccab986d52112bf301d47a819783339d0e", + "revisionTime": "2017-08-07T23:04:23Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { - "checksumSHA1": "aXtllsj+ZEheKoMt22s6hPTHdTM=", + "checksumSHA1": "ZpAR2KupZto/mWf9zu5e6IKDWt0=", "path": "golang.org/x/tools/go/loader", - "revision": "9be3b7cbc7ccd19baaa3b7704c22f57db5ebbdf2", - "revisionTime": "2017-06-28T06:26:50Z" + "revision": "5d2fd3ccab986d52112bf301d47a819783339d0e", + "revisionTime": "2017-08-07T23:04:23Z", + "version": "release-branch.go1.9", + "versionExact": "release-branch.go1.9" }, { "checksumSHA1": "6f8MEU31llHM1sLM/GGH4/Qxu0A=", diff --git a/vendor/github.com/elastic/beats/winlogbeat/_meta/fields.yml b/vendor/github.com/elastic/beats/winlogbeat/_meta/fields.yml index d86496bb..cbae8b3a 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/winlogbeat/_meta/fields.yml @@ -10,7 +10,7 @@ "wineventlog" for the Windows Event Log API or "eventlogging" for the Event Logging API. - The Event Logging API was designed for Windows Server 2003, Windows XP, + The Event Logging API was designed for Windows Server 2003 or Windows 2000 operating systems. In Windows Vista, the event logging infrastructure was redesigned. On Windows Vista or later operating systems, the Windows Event Log API is used. Winlogbeat automatically diff --git a/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go b/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go index 9c087a42..a79d5e0d 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go +++ b/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go @@ -82,7 +82,7 @@ func (e *eventLogger) run( client.Close() }() - err = api.Open(state.RecordNumber) + err = api.Open(state) if err != nil { logp.Warn("EventLog[%s] Open() error. No events will be read from "+ "this source. %v", api.Name(), err) diff --git a/vendor/github.com/elastic/beats/winlogbeat/beater/winlogbeat.go b/vendor/github.com/elastic/beats/winlogbeat/beater/winlogbeat.go index b7413586..2129c7fc 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/beater/winlogbeat.go +++ b/vendor/github.com/elastic/beats/winlogbeat/beater/winlogbeat.go @@ -5,7 +5,6 @@ Winlogbeat. The main event loop is implemented in this package. package beater import ( - "expvar" "fmt" "sync" "time" @@ -20,10 +19,6 @@ import ( "github.com/elastic/beats/winlogbeat/eventlog" ) -func init() { - expvar.Publish("uptime", expvar.Func(uptime)) -} - // Debug logging functions for this package. var ( debugf = logp.MakeDebug("winlogbeat") @@ -163,16 +158,3 @@ func (eb *Winlogbeat) processEventLog( defer wg.Done() logger.run(eb.done, eb.pipeline, state) } - -// uptime returns a map of uptime related metrics. 
-func uptime() interface{} { - now := time.Now().UTC() - uptimeDur := now.Sub(startTime) - - return map[string]interface{}{ - "start_time": startTime, - "uptime": uptimeDur.String(), - "uptime_ms": fmt.Sprintf("%d", uptimeDur.Nanoseconds()/int64(time.Microsecond)), - "server_time": now, - } -} diff --git a/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint.go b/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint.go index fbe9f81b..11d28855 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint.go +++ b/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint.go @@ -45,6 +45,7 @@ type EventLogState struct { Name string `yaml:"name"` RecordNumber uint64 `yaml:"record_number"` Timestamp time.Time `yaml:"timestamp"` + Bookmark string `yaml:"bookmark,omitempty"` } // NewCheckpoint creates and returns a new Checkpoint. This method loads state @@ -156,11 +157,12 @@ func (c *Checkpoint) States() map[string]EventLogState { } // Persist queues the given event log state information to be written to disk. -func (c *Checkpoint) Persist(name string, recordNumber uint64, ts time.Time) { +func (c *Checkpoint) Persist(name string, recordNumber uint64, ts time.Time, bookmark string) { c.PersistState(EventLogState{ Name: name, RecordNumber: recordNumber, Timestamp: ts, + Bookmark: bookmark, }) } diff --git a/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint_test.go b/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint_test.go index 4ac9d2b3..11a24ad9 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint_test.go +++ b/vendor/github.com/elastic/beats/winlogbeat/checkpoint/checkpoint_test.go @@ -38,7 +38,7 @@ func TestWriteMaxUpdates(t *testing.T) { defer cp.Shutdown() // Send update - it's not written to disk but it's in memory. - cp.Persist("App", 1, time.Now()) + cp.Persist("App", 1, time.Now(), "") time.Sleep(500 * time.Millisecond) _, found := cp.States()["App"] assert.True(t, found) @@ -50,7 +50,7 @@ func TestWriteMaxUpdates(t *testing.T) { assert.Len(t, ps.States, 0) // Send update - it is written to disk. - cp.Persist("App", 2, time.Now()) + cp.Persist("App", 2, time.Now(), "") time.Sleep(750 * time.Millisecond) ps, err = cp.read() if err != nil { @@ -89,7 +89,7 @@ func TestWriteTimedFlush(t *testing.T) { // Send update then wait longer than the flush interval and it should be // on disk. - cp.Persist("App", 1, time.Now()) + cp.Persist("App", 1, time.Now(), "") time.Sleep(1500 * time.Millisecond) ps, err := cp.read() if err != nil { diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/fields.asciidoc index 4fe0957a..e6145bb3 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/fields.asciidoc @@ -17,6 +17,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> -- @@ -27,33 +28,41 @@ Contains common beat fields available in all event types. -[float] -=== `beat.name` - +*`beat.name`*:: ++ +-- The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. -[float] -=== `beat.hostname` +-- +*`beat.hostname`*:: ++ +-- The hostname as returned by the operating system on which the Beat is running. 
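The checkpoint changes earlier in this patch extend EventLogState with a Bookmark field and add a bookmark argument to Persist. Below is a minimal sketch of a caller under the new signature, assuming an already-initialised *checkpoint.Checkpoint; the package and helper names are illustrative only.

["source","go"]
----
package example

import (
	"time"

	"github.com/elastic/beats/winlogbeat/checkpoint"
)

// persistProgress records read progress for one event log. The serialized
// bookmark travels through the new fourth argument of Persist and ends up in
// the Bookmark field of the stored EventLogState.
func persistProgress(cp *checkpoint.Checkpoint, channel string, recordID uint64, bookmarkXML string) {
	cp.Persist(channel, recordID, time.Now().UTC(), bookmarkXML)
}
----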
-[float] -=== `beat.timezone` +-- +*`beat.timezone`*:: ++ +-- The timezone as returned by the operating system on which the Beat is running. -[float] -=== `beat.version` +-- +*`beat.version`*:: ++ +-- The version of the beat that generated this event. -[float] -=== `@timestamp` +-- +*`@timestamp`*:: ++ +-- type: date example: August 26th 2016, 12:35:53.332 @@ -65,20 +74,26 @@ required: True The timestamp when the event log record was generated. -[float] -=== `tags` +-- +*`tags`*:: ++ +-- Arbitrary tags that can be set per Beat and per transaction type. -[float] -=== `fields` +-- +*`fields`*:: ++ +-- type: object Contains user configurable fields. +-- + [float] == error fields @@ -86,30 +101,36 @@ Error fields containing additional info in case of errors. -[float] -=== `error.message` - +*`error.message`*:: ++ +-- type: text Error message. -[float] -=== `error.code` +-- +*`error.code`*:: ++ +-- type: long Error code. -[float] -=== `error.type` +-- +*`error.type`*:: ++ +-- type: keyword Error type. +-- + [[exported-fields-cloud]] == Cloud provider metadata fields @@ -117,56 +138,70 @@ Metadata from cloud providers added by the add_cloud_metadata processor. -[float] -=== `meta.cloud.provider` - +*`meta.cloud.provider`*:: ++ +-- example: ec2 Name of the cloud provider. Possible values are ec2, gce, or digitalocean. -[float] -=== `meta.cloud.instance_id` +-- +*`meta.cloud.instance_id`*:: ++ +-- Instance ID of the host machine. -[float] -=== `meta.cloud.instance_name` +-- +*`meta.cloud.instance_name`*:: ++ +-- Instance name of the host machine. -[float] -=== `meta.cloud.machine_type` +-- +*`meta.cloud.machine_type`*:: ++ +-- example: t2.medium Machine type of the host machine. -[float] -=== `meta.cloud.availability_zone` +-- +*`meta.cloud.availability_zone`*:: ++ +-- example: us-east-1c Availability zone in which this host is running. -[float] -=== `meta.cloud.project_id` +-- +*`meta.cloud.project_id`*:: ++ +-- example: project-x Name of the project in Google Cloud. -[float] -=== `meta.cloud.region` +-- +*`meta.cloud.region`*:: ++ +-- Region in which this host is running. +-- + [[exported-fields-common]] == Common Winlogbeat fields @@ -174,14 +209,16 @@ Contains common fields available in all event types. -[float] -=== `type` - +*`type`*:: ++ +-- required: True The event log API type used to read the record. The possible values are "wineventlog" for the Windows Event Log API or "eventlogging" for the Event Logging API. -The Event Logging API was designed for Windows Server 2003, Windows XP, or Windows 2000 operating systems. In Windows Vista, the event logging infrastructure was redesigned. On Windows Vista or later operating systems, the Windows Event Log API is used. Winlogbeat automatically detects which API to use for reading event logs. +The Event Logging API was designed for Windows Server 2003 or Windows 2000 operating systems. In Windows Vista, the event logging infrastructure was redesigned. On Windows Vista or later operating systems, the Windows Event Log API is used. Winlogbeat automatically detects which API to use for reading event logs. + +-- [[exported-fields-docker-processor]] == Docker fields @@ -191,38 +228,46 @@ Docker stats collected from Docker. -[float] -=== `docker.container.id` - +*`docker.container.id`*:: ++ +-- type: keyword Unique container id. -[float] -=== `docker.container.image` +-- +*`docker.container.image`*:: ++ +-- type: keyword Name of the image the container was built on. 
-[float] -=== `docker.container.name` +-- +*`docker.container.name`*:: ++ +-- type: keyword Container name. -[float] -=== `docker.container.labels` +-- +*`docker.container.labels`*:: ++ +-- type: object Image labels. +-- + [[exported-fields-eventlog]] == Event log record fields @@ -230,9 +275,9 @@ Contains data from a Windows event log record. -[float] -=== `activity_id` - +*`activity_id`*:: ++ +-- type: keyword required: False @@ -240,9 +285,11 @@ required: False A globally unique identifier that identifies the current activity. The events that are published with this identifier are part of the same activity. -[float] -=== `computer_name` +-- +*`computer_name`*:: ++ +-- type: keyword required: True @@ -250,9 +297,11 @@ required: True The name of the computer that generated the record. When using Windows event forwarding, this name can differ from the `beat.hostname`. -[float] -=== `event_data` +-- +*`event_data`*:: ++ +-- type: object required: False @@ -260,9 +309,11 @@ required: False The event-specific data. This field is mutually exclusive with `user_data`. If you are capturing event data on versions prior to Windows Vista, the parameters in `event_data` are named `param1`, `param2`, and so on, because event log parameters are unnamed in earlier versions of Windows. -[float] -=== `event_id` +-- +*`event_id`*:: ++ +-- type: long required: True @@ -270,9 +321,11 @@ required: True The event identifier. The value is specific to the source of the event. -[float] -=== `keywords` +-- +*`keywords`*:: ++ +-- type: keyword required: False @@ -280,9 +333,11 @@ required: False The keywords are used to classify an event. -[float] -=== `log_name` +-- +*`log_name`*:: ++ +-- type: keyword required: True @@ -290,9 +345,11 @@ required: True The name of the event log from which this record was read. This value is one of the names from the `event_logs` collection in the configuration. -[float] -=== `level` +-- +*`level`*:: ++ +-- type: keyword required: False @@ -300,9 +357,11 @@ required: False The level of the event. There are five levels of events that can be logged: Success, Information, Warning, Error, Audit Success, and Audit Failure. -[float] -=== `message` +-- +*`message`*:: ++ +-- type: text required: False @@ -310,9 +369,11 @@ required: False The message from the event log record. -[float] -=== `message_error` +-- +*`message_error`*:: ++ +-- type: keyword required: False @@ -320,9 +381,11 @@ required: False The error that occurred while reading and formatting the message from the log. -[float] -=== `record_number` +-- +*`record_number`*:: ++ +-- type: keyword required: True @@ -330,9 +393,11 @@ required: True The record number of the event log record. The first record written to an event log is record number 1, and other records are numbered sequentially. If the record number reaches the maximum value (2^32^ for the Event Logging API and 2^64^ for the Windows Event Log API), the next record number will be 0. -[float] -=== `related_activity_id` +-- +*`related_activity_id`*:: ++ +-- type: keyword required: False @@ -340,9 +405,11 @@ required: False A globally unique identifier that identifies the activity to which control was transferred to. The related events would then have this identifier as their `activity_id` identifier. -[float] -=== `opcode` +-- +*`opcode`*:: ++ +-- type: keyword required: False @@ -350,9 +417,11 @@ required: False The opcode defined in the event. Task and opcode are typically used to identify the location in the application from where the event was logged. 
-[float] -=== `provider_guid` +-- +*`provider_guid`*:: ++ +-- type: keyword required: False @@ -360,9 +429,11 @@ required: False A globally unique identifier that identifies the provider that logged the event. -[float] -=== `process_id` +-- +*`process_id`*:: ++ +-- type: long required: False @@ -370,9 +441,11 @@ required: False The process_id identifies the process that generated the event. -[float] -=== `source_name` +-- +*`source_name`*:: ++ +-- type: keyword required: True @@ -380,9 +453,11 @@ required: True The source of the event log record (the application or service that logged the record). -[float] -=== `task` +-- +*`task`*:: ++ +-- type: keyword required: False @@ -390,9 +465,11 @@ required: False The task defined in the event. Task and opcode are typically used to identify the location in the application from where the event was logged. The category used by the Event Logging API (on pre Windows Vista operating systems) is written to this field. -[float] -=== `thread_id` +-- +*`thread_id`*:: ++ +-- type: long required: False @@ -400,9 +477,11 @@ required: False The thread_id identifies the thread that generated the event. -[float] -=== `user_data` +-- +*`user_data`*:: ++ +-- type: object required: False @@ -410,9 +489,11 @@ required: False The event specific data. This field is mutually exclusive with `event_data`. -[float] -=== `user.identifier` +-- +*`user.identifier`*:: ++ +-- type: keyword example: S-1-5-21-3541430928-2051711210-1391384369-1001 @@ -424,9 +505,11 @@ The Windows security identifier (SID) of the account associated with this event. If Winlogbeat cannot resolve the SID to a name, then the `user.name`, `user.domain`, and `user.type` fields will be omitted from the event. If you discover Winlogbeat not resolving SIDs, review the log for clues as to what the problem may be. -[float] -=== `user.name` +-- +*`user.name`*:: ++ +-- type: keyword required: False @@ -434,9 +517,11 @@ required: False The name of the account associated with this event. -[float] -=== `user.domain` +-- +*`user.domain`*:: ++ +-- type: keyword required: False @@ -444,9 +529,11 @@ required: False The domain that the account associated with this event is a member of. -[float] -=== `user.type` +-- +*`user.type`*:: ++ +-- type: keyword required: False @@ -454,18 +541,22 @@ required: False The type of account associated with this event. -[float] -=== `version` +-- +*`version`*:: ++ +-- type: long required: False The version number of the event's definition. -[float] -=== `xml` +-- +*`xml`*:: ++ +-- type: text required: False @@ -475,6 +566,76 @@ The raw XML representation of the event obtained from Windows. This field is onl The XML representation of the event is useful for troubleshooting purposes. The data in the fields reported by Winlogbeat can be compared to the data in the XML to diagnose problems. +-- + +[[exported-fields-host-processor]] +== Host fields + +Info collected for the host machine. + + + + +*`host.name`*:: ++ +-- +type: keyword + +Hostname. + + +-- + +*`host.id`*:: ++ +-- +type: keyword + +Unique host id. + + +-- + +*`host.architecture`*:: ++ +-- +type: keyword + +Host architecture (e.g. x86_64, arm, ppc, mips). + + +-- + +*`host.os.platform`*:: ++ +-- +type: keyword + +OS platform (e.g. centos, ubuntu, windows). + + +-- + +*`host.os.version`*:: ++ +-- +type: keyword + +OS version. + + +-- + +*`host.os.family`*:: ++ +-- +type: keyword + +OS family (e.g. redhat, debian, freebsd, windows). 
+ + +-- + [[exported-fields-kubernetes-processor]] == Kubernetes fields @@ -483,59 +644,73 @@ Kubernetes metadata added by the kubernetes processor -[float] -=== `kubernetes.pod.name` - +*`kubernetes.pod.name`*:: ++ +-- type: keyword Kubernetes pod name -[float] -=== `kubernetes.namespace` +-- +*`kubernetes.namespace`*:: ++ +-- type: keyword Kubernetes namespace -[float] -=== `kubernetes.node.name` +-- +*`kubernetes.node.name`*:: ++ +-- type: keyword Kubernetes node name -[float] -=== `kubernetes.labels` +-- +*`kubernetes.labels`*:: ++ +-- type: object Kubernetes labels map -[float] -=== `kubernetes.annotations` +-- +*`kubernetes.annotations`*:: ++ +-- type: object Kubernetes annotations map -[float] -=== `kubernetes.container.name` +-- +*`kubernetes.container.name`*:: ++ +-- type: keyword Kubernetes container name -[float] -=== `kubernetes.container.image` +-- +*`kubernetes.container.image`*:: ++ +-- type: keyword Kubernetes container image +-- + diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc index 3fbd7731..1f4a55f0 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc @@ -1,17 +1,7 @@ [[winlogbeat-getting-started]] == Getting Started With Winlogbeat -To get started with your own Winlogbeat setup, install and configure these -related products: - - * Elasticsearch for storage and indexing the data. - * Kibana for the UI. - * Logstash (optional) for inserting data into Elasticsearch. - -See {libbeat}/getting-started.html[Getting Started with Beats and the Elastic -Stack] for more information. - -After installing the Elastic Stack, read the following topics to learn how to install, configure, and run Winlogbeat: +include::../../libbeat/docs/shared-getting-started-intro.asciidoc[] * <> * <> @@ -24,13 +14,15 @@ After installing the Elastic Stack, read the following topics to learn how to in [[winlogbeat-installation]] === Step 1: Install Winlogbeat +*Before you begin*: If you haven't installed the {stack}, do that now. See +{stack-ov}/get-started-elastic-stack.html[Getting started with the {stack}]. + . Download the Winlogbeat zip file from the https://www.elastic.co/downloads/beats/winlogbeat[downloads page]. . Extract the contents into `C:\Program Files`. . Rename the `winlogbeat-` directory to `Winlogbeat`. . Open a PowerShell prompt as an Administrator (right-click on the PowerShell -icon and select Run As Administrator). If you are running Windows XP, you may -need to download and install PowerShell. +icon and select Run As Administrator). . From the PowerShell prompt, run the following commands to install the service. 
["source","sh",subs="attributes,callouts"] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc index 658ef0aa..2ec6be40 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc @@ -2,19 +2,21 @@ include::../../libbeat/docs/version.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes62.asciidoc[] +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :version: {stack-version} :beatname_lc: winlogbeat :beatname_uc: Winlogbeat :beatname_pkg: {beatname_lc} +:github_repo_name: beats +:discuss_forum: beats/{beatname_lc} +:beat_default_index_prefix: {beatname_lc} +:has_ml_jobs: yes include::../../libbeat/docs/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] -include::../../libbeat/docs/contributing-to-beats.asciidoc[] - include::./getting-started.asciidoc[] include::./setting-up-running.asciidoc[] @@ -34,3 +36,6 @@ include::../../libbeat/docs/security/securing-beats.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] + +include::../../libbeat/docs/contributing-to-beats.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc index 1441b1f5..7da16c63 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc @@ -5,7 +5,7 @@ ++++ Winlogbeat ships Windows event logs to Elasticsearch or Logstash. You can -install it as a Windows service on Windows XP or later. +install it as a Windows service. Winlogbeat reads from one or more event logs using Windows APIs, filters the events based on user-configured criteria, then sends the event data to the diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc index 8b1bc8fa..32090bde 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/securing-winlogbeat.asciidoc @@ -8,6 +8,7 @@ The following topics describe how to secure communication between Winlogbeat and * <> * <> +* <> //sets block macro for https.asciidoc included in next section diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc index a3fa01c2..42f7831d 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc @@ -4,7 +4,7 @@ // that is unique to each beat. 
///// -[[seting-up-and-running]] +[[setting-up-and-running]] == Setting up and running {beatname_uc} Before reading this section, see the @@ -25,3 +25,5 @@ include::../../libbeat/docs/shared-directory-layout.asciidoc[] include::../../libbeat/docs/keystore.asciidoc[] include::../../libbeat/docs/command-reference.asciidoc[] + +include::../../libbeat/docs/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/common_test.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/common_test.go index d5f4b35e..175cb36e 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/common_test.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/common_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/winlogbeat/checkpoint" ) type factory func(*common.Config) (EventLog, error) @@ -25,6 +26,8 @@ func newTestEventLog(t *testing.T, factory factory, options map[string]interface func setupEventLog(t *testing.T, factory factory, recordID uint64, options map[string]interface{}) (EventLog, teardown) { eventLog := newTestEventLog(t, factory, options) - fatalErr(t, eventLog.Open(recordID)) + fatalErr(t, eventLog.Open(checkpoint.EventLogState{ + RecordNumber: recordID, + })) return eventLog, func() { fatalErr(t, eventLog.Close()) } } diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go index 5f459972..677ebd3a 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go @@ -38,10 +38,10 @@ var ( // EventLog is an interface to a Windows Event Log. type EventLog interface { - // Open the event log. recordNumber is the last successfully read event log - // record number. Read will resume from recordNumber + 1. To start reading - // from the first event specify a recordNumber of 0. - Open(recordNumber uint64) error + // Open the event log. state points to the last successfully read event + // in this event log. Read will resume from the next record. To start reading + // from the first event specify a zero-valued EventLogState. + Open(state checkpoint.EventLogState) error // Read records from the event log. Read() ([]Record, error) @@ -56,8 +56,9 @@ type EventLog interface { // Record represents a single event from the log. type Record struct { sys.Event - API string // The event log API type used to read the record. - XML string // XML representation of the event. + API string // The event log API type used to read the record. + XML string // XML representation of the event. + Offset checkpoint.EventLogState // Position of the record within its source stream. } // ToMapStr returns a new MapStr containing the data from this Record. 
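The interface change above replaces the bare record number with a full checkpoint.EventLogState, and each Record now carries its own position in the new Offset field. The sketch below shows a hypothetical consumer of the updated EventLog interface; it assumes the saved state comes from the checkpoint package, and the function name is illustrative.

["source","go"]
----
package example

import (
	"github.com/elastic/beats/winlogbeat/checkpoint"
	"github.com/elastic/beats/winlogbeat/eventlog"
)

// resumeAndCheckpoint opens the log at a previously persisted position (a
// zero-valued EventLogState starts from the first event), reads one batch,
// and persists each record's Offset, which now carries the checkpoint data
// directly instead of being rebuilt from the record ID.
func resumeAndCheckpoint(api eventlog.EventLog, cp *checkpoint.Checkpoint, saved checkpoint.EventLogState) error {
	if err := api.Open(saved); err != nil {
		return err
	}
	records, err := api.Read()
	if err != nil {
		return err
	}
	for _, r := range records {
		cp.PersistState(r.Offset)
	}
	return nil
}
----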
@@ -112,11 +113,7 @@ func (e Record) ToEvent() beat.Event { return beat.Event{ Timestamp: e.TimeCreated.SystemTime, Fields: m, - Private: checkpoint.EventLogState{ - Name: e.Channel, - RecordNumber: e.RecordID, - Timestamp: e.TimeCreated.SystemTime, - }, + Private: e.Offset, } } diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go index 4401d966..1ca5268b 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging.go @@ -11,6 +11,7 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/winlogbeat/checkpoint" "github.com/elastic/beats/winlogbeat/sys" win "github.com/elastic/beats/winlogbeat/sys/eventlogging" ) @@ -63,6 +64,7 @@ type eventLogging struct { handle win.Handle // Handle to the event log. readBuf []byte // Buffer for reading in events. formatBuf []byte // Buffer for formatting messages. + insertBuf win.StringInserts // Buffer for parsing insert strings. handles *messageFilesCache // Cached mapping of source name to event message file handles. logPrefix string // Prefix to add to all log entries. @@ -76,9 +78,9 @@ func (l eventLogging) Name() string { return l.name } -func (l *eventLogging) Open(recordNumber uint64) error { +func (l *eventLogging) Open(state checkpoint.EventLogState) error { detailf("%s Open(recordNumber=%d) calling OpenEventLog(uncServerPath=, "+ - "providerName=%s)", l.logPrefix, recordNumber, l.name) + "providerName=%s)", l.logPrefix, state.RecordNumber, l.name) handle, err := win.OpenEventLog("", l.name) if err != nil { return err @@ -91,7 +93,7 @@ func (l *eventLogging) Open(recordNumber uint64) error { var oldestRecord, newestRecord uint32 if numRecords > 0 { - l.recordNumber = uint32(recordNumber) + l.recordNumber = uint32(state.RecordNumber) l.seek = true l.ignoreFirst = true @@ -148,7 +150,7 @@ func (l *eventLogging) Read() ([]Record, error) { l.readBuf = l.readBuf[0:numBytesRead] events, _, err := win.RenderEvents( - l.readBuf[:numBytesRead], 0, l.formatBuf, l.handles.get) + l.readBuf[:numBytesRead], 0, l.formatBuf, &l.insertBuf, l.handles.get) if err != nil { return nil, err } @@ -169,6 +171,11 @@ func (l *eventLogging) Read() ([]Record, error) { records = append(records, Record{ API: eventLoggingAPIName, Event: e, + Offset: checkpoint.EventLogState{ + Name: l.name, + RecordNumber: e.RecordID, + Timestamp: e.TimeCreated.SystemTime, + }, }) } @@ -208,7 +215,10 @@ func (l *eventLogging) readRetryErrorHandler(err error) error { if reopen { l.Close() - return l.Open(uint64(l.recordNumber)) + return l.Open(checkpoint.EventLogState{ + Name: l.name, + RecordNumber: uint64(l.recordNumber), + }) } } return err diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go index ea9a49b2..702a598f 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlogging_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/winlogbeat/checkpoint" "github.com/elastic/beats/winlogbeat/sys/eventlogging" ) @@ -389,7 +390,7 @@ func TestOpenInvalidProvider(t *testing.T) { configureLogp() el := newTestEventLogging(t, map[string]interface{}{"name": 
"nonExistentProvider"}) - assert.NoError(t, el.Open(0), "Calling Open() on an unknown provider "+ + assert.NoError(t, el.Open(checkpoint.EventLogState{}), "Calling Open() on an unknown provider "+ "should automatically open Application.") _, err := el.Read() assert.NoError(t, err) @@ -474,6 +475,49 @@ func TestReadWhileCleared(t *testing.T) { } } +// Test event messages that include less parameters than required for message +// formating (caused a crash in previous versions) +func TestReadMissingParameters(t *testing.T) { + configureLogp() + log, err := initLog(providerName, sourceName, servicesMsgFile) + if err != nil { + t.Fatal(err) + } + defer func() { + err := uninstallLog(providerName, sourceName, log) + if err != nil { + t.Fatal(err) + } + }() + + var eventID uint32 = 1073748860 + // Missing parameters will be substituted by "(null)" + template := "The %s service entered the (null) state." + msgs := []string{"Windows Update"} + err = log.Report(elog.Info, eventID, msgs) + if err != nil { + t.Fatal(err) + } + + // Read messages: + eventlog, teardown := setupEventLogging(t, 0, map[string]interface{}{"name": providerName}) + defer teardown() + + records, err := eventlog.Read() + if err != nil { + t.Fatal(err) + } + + // Verify the message contents: + assert.Len(t, records, 1) + if len(records) != 1 { + t.FailNow() + } + assert.Equal(t, eventID&0xFFFF, records[0].EventIdentifier.ID) + assert.Equal(t, fmt.Sprintf(template, msgs[0]), + strings.TrimRight(records[0].Message, "\r\n")) +} + func newTestEventLogging(t *testing.T, options map[string]interface{}) EventLog { return newTestEventLog(t, newEventLogging, options) } diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go index 1f7576a8..25c974fa 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/wineventlog.go @@ -14,6 +14,7 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/winlogbeat/checkpoint" "github.com/elastic/beats/winlogbeat/sys" win "github.com/elastic/beats/winlogbeat/sys/wineventlog" ) @@ -71,10 +72,10 @@ var _ EventLog = &winEventLog{} type winEventLog struct { config winEventLogConfig query string - channelName string // Name of the channel from which to read. - subscription win.EvtHandle // Handle to the subscription. - maxRead int // Maximum number returned in one Read. - lastRead uint64 // Record number of the last read event. + channelName string // Name of the channel from which to read. + subscription win.EvtHandle // Handle to the subscription. + maxRead int // Maximum number returned in one Read. + lastRead checkpoint.EventLogState // Record number of the last read event. render func(event win.EvtHandle, out io.Writer) error // Function for rendering the event to XML. renderBuf []byte // Buffer used for rendering event. 
@@ -89,8 +90,14 @@ func (l *winEventLog) Name() string { return l.channelName } -func (l *winEventLog) Open(recordNumber uint64) error { - bookmark, err := win.CreateBookmark(l.channelName, recordNumber) +func (l *winEventLog) Open(state checkpoint.EventLogState) error { + var bookmark win.EvtHandle + var err error + if len(state.Bookmark) > 0 { + bookmark, err = win.CreateBookmarkFromXML(state.Bookmark) + } else { + bookmark, err = win.CreateBookmarkFromRecordID(l.channelName, state.RecordNumber) + } if err != nil { return err } @@ -154,8 +161,17 @@ func (l *winEventLog) Read() ([]Record, error) { incrementMetric(dropReasons, err) continue } + + r.Offset = checkpoint.EventLogState{ + Name: l.channelName, + RecordNumber: r.RecordID, + Timestamp: r.TimeCreated.SystemTime, + } + if r.Offset.Bookmark, err = l.createBookmarkFromEvent(h); err != nil { + logp.Warn("%s failed creating bookmark: %v", l.logPrefix, err) + } records = append(records, r) - l.lastRead = r.RecordID + l.lastRead = r.Offset } debugf("%s Read() is returning %d records", l.logPrefix, len(records)) @@ -300,6 +316,17 @@ func newWinEventLog(options *common.Config) (EventLog, error) { return l, nil } +func (l *winEventLog) createBookmarkFromEvent(evtHandle win.EvtHandle) (string, error) { + bmHandle, err := win.CreateBookmarkFromEvent(evtHandle) + if err != nil { + return "", err + } + l.outputBuf.Reset() + err = win.RenderBookmarkXML(bmHandle, l.renderBuf, l.outputBuf) + win.Close(bmHandle) + return string(l.outputBuf.Bytes()), err +} + func init() { // Register wineventlog API if it is available. available, _ := win.IsAvailable() diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/eventlogging/eventlogging_windows.go b/vendor/github.com/elastic/beats/winlogbeat/sys/eventlogging/eventlogging_windows.go index 24d5ba72..18a24c7c 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/sys/eventlogging/eventlogging_windows.go +++ b/vendor/github.com/elastic/beats/winlogbeat/sys/eventlogging/eventlogging_windows.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/binary" "fmt" - "reflect" "strings" "syscall" "time" @@ -100,6 +99,7 @@ func RenderEvents( eventsRaw []byte, lang uint32, buffer []byte, + insertStrings *StringInserts, pubHandleProvider func(string) sys.MessageFiles, ) ([]sys.Event, int, error) { var events []sys.Event @@ -139,21 +139,25 @@ func RenderEvents( event.User = *sid } + if record.numStrings > MaxInsertStrings { + logp.Warn("Record contains %d strings, more than the limit %d. Excess will be ignored.", + record.numStrings, MaxInsertStrings) + record.numStrings = MaxInsertStrings + } // Parse the UTF-16 message insert strings. - stringInserts, stringInsertPtrs, err := parseInsertStrings(record, recordBuf) - if err != nil { + if err = insertStrings.Parse(record, recordBuf); err != nil { event.RenderErr = err.Error() events = append(events, event) continue } - for _, s := range stringInserts { + for _, s := range insertStrings.Strings() { event.EventData.Pairs = append(event.EventData.Pairs, sys.KeyValue{Value: s}) } // Format the parametrized message using the insert strings. 
event.Message, err = formatMessage(record.sourceName, - record.eventID, lang, stringInsertPtrs, buffer, pubHandleProvider) + record.eventID, lang, insertStrings.Pointer(), buffer, pubHandleProvider) if err != nil { event.RenderErr = err.Error() if errno, ok := err.(syscall.Errno); ok { @@ -180,15 +184,10 @@ func formatMessage( sourceName string, eventID uint32, lang uint32, - stringInserts []uintptr, + stringInserts uintptr, buffer []byte, pubHandleProvider func(string) sys.MessageFiles, ) (string, error) { - var addr uintptr - if len(stringInserts) > 0 { - addr = reflect.ValueOf(&stringInserts[0]).Pointer() - } - messageFiles := pubHandleProvider(sourceName) var lastErr error @@ -208,7 +207,7 @@ func formatMessage( lang, &buffer[0], // Max size allowed is 64k bytes. uint32(len(buffer)/2), // Size of buffer in TCHARS - addr) + stringInserts) // bufferUsed = numChars * sizeof(TCHAR) + sizeof(null-terminator) bufferUsed := int(numChars*2 + 2) if err == syscall.ERROR_INSUFFICIENT_BUFFER { @@ -391,38 +390,6 @@ func parseEventLogRecord(buffer []byte) (eventLogRecord, error) { return record, nil } -// parseInsertStrings parses the insert strings from buffer which should contain -// an eventLogRecord. It returns an array of strings (data is copied and -// converted to UTF-8) and an array of pointers to the null-terminated UTF-16 -// strings within buffer. -func parseInsertStrings(record eventLogRecord, buffer []byte) ([]string, []uintptr, error) { - if record.numStrings < 1 { - return nil, nil, nil - } - - inserts := make([]string, record.numStrings) - insertPtrs := make([]uintptr, record.numStrings) - offset := int(record.stringOffset) - bufferPtr := reflect.ValueOf(&buffer[0]).Pointer() - - for i := 0; i < int(record.numStrings); i++ { - if offset > len(buffer) { - return nil, nil, fmt.Errorf("Failed reading string number %d, "+ - "offset=%d, len(buffer)=%d, record=%+v", i+1, offset, - len(buffer), record) - } - insertStr, length, err := sys.UTF16BytesToString(buffer[offset:]) - if err != nil { - return nil, nil, err - } - inserts[i] = insertStr - insertPtrs[i] = bufferPtr + uintptr(offset) - offset += length - } - - return inserts, insertPtrs, nil -} - func parseSID(record eventLogRecord, buffer []byte) (*sys.SID, error) { if record.userSidLength == 0 { return nil, nil diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/eventlogging/stringinserts_windows.go b/vendor/github.com/elastic/beats/winlogbeat/sys/eventlogging/stringinserts_windows.go new file mode 100644 index 00000000..599d6f1c --- /dev/null +++ b/vendor/github.com/elastic/beats/winlogbeat/sys/eventlogging/stringinserts_windows.go @@ -0,0 +1,86 @@ +package eventlogging + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/elastic/beats/winlogbeat/sys" +) + +const ( + // MaxInsertStrings is the maximum number of strings that can be formatted by + // FormatMessage API. + MaxInsertStrings = 99 +) + +var ( + nullPlaceholder = []byte{'(', 0, 'n', 0, 'u', 0, 'l', 0, 'l', 0, ')', 0, 0, 0} + nullPlaceholderPtr = uintptr(unsafe.Pointer(&nullPlaceholder[0])) +) + +// StringInserts stores the string inserts for an event, as arrays of string +// and pointer to UTF-16 zero-terminated string suitable to be passed to +// the Windows API. The array of pointers has enough entries to ensure that +// a call to FormatMessage will never crash. +type StringInserts struct { + pointers [MaxInsertStrings]uintptr + inserts []string + address uintptr +} + +// Parse parses the insert strings from buffer which should contain +// an eventLogRecord. 
+func (b *StringInserts) Parse(record eventLogRecord, buffer []byte) error { + if b.inserts == nil { // initialise struct + b.inserts = make([]string, 0, MaxInsertStrings) + b.address = reflect.ValueOf(&b.pointers[0]).Pointer() + } + b.clear() + + n := int(record.numStrings) + if n > MaxInsertStrings { + return fmt.Errorf("number of insert strings in the record (%d) is larger than the limit (%d)", n, MaxInsertStrings) + } + + b.inserts = b.inserts[:n] + if n == 0 { + return nil + } + offset := int(record.stringOffset) + bufferPtr := reflect.ValueOf(&buffer[0]).Pointer() + + for i := 0; i < n; i++ { + if offset > len(buffer) { + return fmt.Errorf("Failed reading string number %d, "+ + "offset=%d, len(buffer)=%d, record=%+v", i+1, offset, + len(buffer), record) + } + insertStr, length, err := sys.UTF16BytesToString(buffer[offset:]) + if err != nil { + return err + } + b.inserts[i] = insertStr + b.pointers[i] = bufferPtr + uintptr(offset) + offset += length + } + + return nil +} + +// Strings returns the array of strings representing the insert strings. +func (b *StringInserts) Strings() []string { + return b.inserts +} + +// Pointer returns a pointer to an array of UTF-16 strings suitable to be +// passed to the FormatMessage API. +func (b *StringInserts) Pointer() uintptr { + return b.address +} + +func (b *StringInserts) clear() { + for i := 0; i < MaxInsertStrings && b.pointers[i] != nullPlaceholderPtr; i++ { + b.pointers[i] = nullPlaceholderPtr + } +} diff --git a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go index 6b7c36fc..97f4359d 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go +++ b/vendor/github.com/elastic/beats/winlogbeat/sys/wineventlog/wineventlog_windows.go @@ -198,27 +198,18 @@ func RenderEvent( // XML will not include the message, and in this case RenderEvent should be // used. func RenderEventXML(eventHandle EvtHandle, renderBuf []byte, out io.Writer) error { - var bufferUsed, propertyCount uint32 - err := _EvtRender(0, eventHandle, EvtRenderEventXml, uint32(len(renderBuf)), - &renderBuf[0], &bufferUsed, &propertyCount) - if err == ERROR_INSUFFICIENT_BUFFER { - return sys.InsufficientBufferError{err, int(bufferUsed)} - } - if err != nil { - return err - } + return renderXML(eventHandle, EvtRenderEventXml, renderBuf, out) +} - if int(bufferUsed) > len(renderBuf) { - return fmt.Errorf("Windows EvtRender reported that wrote %d bytes "+ - "to the buffer, but the buffer can only hold %d bytes", - bufferUsed, len(renderBuf)) - } - return sys.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) +// RenderBookmarkXML renders a bookmark as XML. +func RenderBookmarkXML(bookmarkHandle EvtHandle, renderBuf []byte, out io.Writer) error { + return renderXML(bookmarkHandle, EvtRenderBookmark, renderBuf, out) } -// CreateBookmark creates a new handle to a bookmark. Close must be called on -// returned EvtHandle when finished with the handle. -func CreateBookmark(channel string, recordID uint64) (EvtHandle, error) { +// CreateBookmarkFromRecordID creates a new bookmark pointing to the given recordID +// within the supplied channel. Close must be called on returned EvtHandle when +// finished with the handle. 
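// Illustrative sketch, not from the Beats source: CreateBookmarkFromEvent,
// RenderBookmarkXML and CreateBookmarkFromXML together let the reader persist
// its position as XML and restore it after a restart. The example below simply
// strings the three helpers added in this diff together; it builds only on
// Windows against the vendored winlogbeat packages, and the buffer size and
// the saveBookmark/restoreBookmark names are assumptions for illustration.
package main

import (
	"bytes"
	"fmt"

	win "github.com/elastic/beats/winlogbeat/sys/wineventlog"
)

// saveBookmark turns the position of an already-read event into the XML string
// that the diff stores in checkpoint.EventLogState.Bookmark.
func saveBookmark(evt win.EvtHandle) (string, error) {
	bm, err := win.CreateBookmarkFromEvent(evt)
	if err != nil {
		return "", err
	}
	defer win.Close(bm)

	renderBuf := make([]byte, 16*1024) // scratch buffer for EvtRender; size is an assumption
	var out bytes.Buffer
	if err := win.RenderBookmarkXML(bm, renderBuf, &out); err != nil {
		return "", err
	}
	return out.String(), nil
}

// restoreBookmark recreates a handle from the persisted XML on startup.
func restoreBookmark(xml string) (win.EvtHandle, error) {
	return win.CreateBookmarkFromXML(xml)
}

func main() {
	fmt.Println("see saveBookmark / restoreBookmark above")
}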
+func CreateBookmarkFromRecordID(channel string, recordID uint64) (EvtHandle, error) { xml := fmt.Sprintf(bookmarkTemplate, channel, recordID) p, err := syscall.UTF16PtrFromString(xml) if err != nil { @@ -233,6 +224,30 @@ func CreateBookmark(channel string, recordID uint64) (EvtHandle, error) { return h, nil } +// CreateBookmarkFromEvent creates a new bookmark pointing to the given event. +// Close must be called on returned EvtHandle when finished with the handle. +func CreateBookmarkFromEvent(handle EvtHandle) (EvtHandle, error) { + h, err := _EvtCreateBookmark(nil) + if err != nil { + return 0, err + } + if err = _EvtUpdateBookmark(h, handle); err != nil { + return 0, err + } + return h, nil +} + +// CreateBookmarkFromXML creates a new bookmark from the serialised representation +// of an existing bookmark. Close must be called on returned EvtHandle when +// finished with the handle. +func CreateBookmarkFromXML(bookmarkXML string) (EvtHandle, error) { + xml, err := syscall.UTF16PtrFromString(bookmarkXML) + if err != nil { + return 0, err + } + return _EvtCreateBookmark(xml) +} + // CreateRenderContext creates a render context. Close must be called on // returned EvtHandle when finished with the handle. func CreateRenderContext(valuePaths []string, flag EvtRenderContextFlag) (EvtHandle, error) { @@ -412,3 +427,22 @@ func evtRenderProviderName(renderBuf []byte, eventHandle EvtHandle) (string, err reader := bytes.NewReader(renderBuf) return readString(renderBuf, reader) } + +func renderXML(eventHandle EvtHandle, flag EvtRenderFlag, renderBuf []byte, out io.Writer) error { + var bufferUsed, propertyCount uint32 + err := _EvtRender(0, eventHandle, flag, uint32(len(renderBuf)), + &renderBuf[0], &bufferUsed, &propertyCount) + if err == ERROR_INSUFFICIENT_BUFFER { + return sys.InsufficientBufferError{err, int(bufferUsed)} + } + if err != nil { + return err + } + + if int(bufferUsed) > len(renderBuf) { + return fmt.Errorf("Windows EvtRender reported that wrote %d bytes "+ + "to the buffer, but the buffer can only hold %d bytes", + bufferUsed, len(renderBuf)) + } + return sys.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) +} diff --git a/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_eventlogging.py b/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_eventlogging.py index 2f0d4eb3..adfbef28 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_eventlogging.py +++ b/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_eventlogging.py @@ -1,3 +1,4 @@ +import os import sys import time import unittest @@ -29,6 +30,27 @@ def test_read_one_event(self): self.assertTrue(len(evts), 1) self.assert_common_fields(evts[0], msg=msg) + def test_resume_reading_events(self): + """ + eventlogging - Resume reading events + """ + msg = "First event" + self.write_event_log(msg) + evts = self.read_events() + self.assertTrue(len(evts), 1) + self.assert_common_fields(evts[0], msg=msg) + + # remove the output file, otherwise there is a race condition + # in read_events() below where it reads the results of the previous + # execution + os.unlink(os.path.join(self.working_dir, "output", self.beat_name)) + + msg = "Second event" + self.write_event_log(msg) + evts = self.read_events() + self.assertTrue(len(evts), 1) + self.assert_common_fields(evts[0], msg=msg) + def test_read_unknown_event_id(self): """ eventlogging - Read unknown event ID @@ -178,7 +200,7 @@ def test_registry_data(self): evts = self.read_events() self.assertTrue(len(evts), 1) - event_logs = 
self.read_registry() + event_logs = self.read_registry(requireBookmark=False) self.assertTrue(len(event_logs.keys()), 1) self.assertIn(self.providerName, event_logs) record_number = event_logs[self.providerName]["record_number"] diff --git a/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_wineventlog.py b/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_wineventlog.py index b456d480..757bdbc7 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_wineventlog.py +++ b/vendor/github.com/elastic/beats/winlogbeat/tests/system/test_wineventlog.py @@ -1,3 +1,4 @@ +import os import sys import time import unittest @@ -33,6 +34,33 @@ def test_read_one_event(self): "opcode": "Info", }) + def test_resume_reading_events(self): + """ + wineventlog - Resume reading events + """ + msg = "First event" + self.write_event_log(msg) + evts = self.read_events() + self.assertTrue(len(evts), 1) + self.assert_common_fields(evts[0], msg=msg, extra={ + "keywords": ["Classic"], + "opcode": "Info", + }) + + # remove the output file, otherwise there is a race condition + # in read_events() below where it reads the results of the previous + # execution + os.unlink(os.path.join(self.working_dir, "output", self.beat_name)) + + msg = "Second event" + self.write_event_log(msg) + evts = self.read_events() + self.assertTrue(len(evts), 1) + self.assert_common_fields(evts[0], msg=msg, extra={ + "keywords": ["Classic"], + "opcode": "Info", + }) + def test_read_unknown_event_id(self): """ wineventlog - Read unknown event ID @@ -316,7 +344,7 @@ def test_registry_data(self): evts = self.read_events() self.assertTrue(len(evts), 1) - event_logs = self.read_registry() + event_logs = self.read_registry(requireBookmark=True) self.assertTrue(len(event_logs.keys()), 1) self.assertIn(self.providerName, event_logs) record_number = event_logs[self.providerName]["record_number"] diff --git a/vendor/github.com/elastic/beats/winlogbeat/tests/system/winlogbeat.py b/vendor/github.com/elastic/beats/winlogbeat/tests/system/winlogbeat.py index 2e5498fd..94656e4f 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/tests/system/winlogbeat.py +++ b/vendor/github.com/elastic/beats/winlogbeat/tests/system/winlogbeat.py @@ -93,10 +93,9 @@ def read_events(self, config=None, expected_events=1): proc = self.start_beat() self.wait_until(lambda: self.output_has(expected_events)) proc.check_kill_and_wait() - return self.read_output() - def read_registry(self): + def read_registry(self, requireBookmark=False): f = open(os.path.join(self.working_dir, "data", ".winlogbeat.yml"), "r") data = yaml.load(f) self.assertIn("update_time", data) @@ -107,6 +106,8 @@ def read_registry(self): self.assertIn("name", event_log) self.assertIn("record_number", event_log) self.assertIn("timestamp", event_log) + if requireBookmark: + self.assertIn("bookmark", event_log) name = event_log["name"] event_logs[name] = event_log @@ -114,7 +115,8 @@ def read_registry(self): def assert_common_fields(self, evt, msg=None, eventID=10, sid=None, level="Information", extra=None): - assert evt["computer_name"].lower() == platform.node().lower() + + assert host_name(evt["computer_name"]).lower() == host_name(platform.node()).lower() assert "record_number" in evt self.assertDictContainsSubset({ "event_id": eventID, @@ -143,3 +145,7 @@ def assert_common_fields(self, evt, msg=None, eventID=10, sid=None, if extra != None: self.assertDictContainsSubset(extra, evt) + + +def host_name(fqdn): + return fqdn.split('.')[0] diff --git 
a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml index 866d9bf3..bf7be70e 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml +++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml @@ -63,7 +63,8 @@ winlogbeat.event_logs: # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. - # A value of 0 (the default) ensures events are immediately available + # The default value is set to 2048. + # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 @@ -71,6 +72,66 @@ winlogbeat.event_logs: # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. 
#max_procs: @@ -105,6 +166,14 @@ winlogbeat.event_logs: # equals: # http.code: 200 # +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. @@ -129,6 +198,7 @@ winlogbeat.event_logs: # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 +# match_short_id: false # cleanup_timeout: 60 # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: @@ -142,6 +212,7 @@ winlogbeat.event_logs: # #processors: #- add_docker_metadata: ~ +#- add_host_metadata: ~ #============================= Elastic Cloud ================================== @@ -214,7 +285,18 @@ output.elasticsearch: # The default is 50. #bulk_max_size: 50 - # Configure http request timeout before failing an request to Elasticsearch. + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -278,7 +360,7 @@ output.elasticsearch: # Optional load balance the events between the Logstash hosts. Default is false. #loadbalance: false - # Number of batches to be sent asynchronously to logstash while processing + # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 @@ -287,6 +369,17 @@ output.elasticsearch: # if no error is encountered. #slow_start: false + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + # Optional index name. The default index name is set to winlogbeat # in all lowercase. #index: 'winlogbeat' @@ -631,6 +724,10 @@ output.elasticsearch: # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + #============================== Dashboards ===================================== # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the @@ -665,6 +762,17 @@ output.elasticsearch: # how to install the dashboards by first querying Elasticsearch. 
#setup.dashboards.always_kibana: false +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + + #============================== Template ===================================== # A template is used to set the mapping in Elasticsearch diff --git a/vendor/github.com/elastic/beats/x-pack/README.md b/vendor/github.com/elastic/beats/x-pack/README.md new file mode 100644 index 00000000..20fbdc31 --- /dev/null +++ b/vendor/github.com/elastic/beats/x-pack/README.md @@ -0,0 +1,5 @@ +# Elastic License Functionality + +This tree contains files subject to the Elastic License. The files subject to +the Elastic License are grouped in this directory to help clearly separate these +from files licensed under the Apache License 2.0.